diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index 9349bded..00000000
--- a/.coveragerc
+++ /dev/null
@@ -1,7 +0,0 @@
-[run]
-branch = True
-source = congress
-omit = congress/tests/*
-
-[report]
-ignore_errors = True
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 32eed501..00000000
--- a/.gitignore
+++ /dev/null
@@ -1,62 +0,0 @@
-# Congress build/runtime artifacts
-Congress.tokens
-subunit.log
-congress/tests/policy_engines/snapshot/test
-congress/tests/policy/snapshot/test
-/doc/html
-
-*.py[cod]
-
-# C extensions
-*.so
-
-# Packages
-*.egg
-*.egg-info
-dist
-build
-eggs
-parts
-bin
-var
-sdist
-develop-eggs
-.installed.cfg
-/lib
-/lib64
-
-# Installer logs
-pip-log.txt
-
-# Unit test / coverage reports
-.coverage
-.tox
-nosetests.xml
-.testrepository
-.venv
-
-# Translations
-*.mo
-
-# Mr Developer
-.mr.developer.cfg
-.project
-.pydevproject
-
-# Complexity
-output/*.html
-output/*/index.html
-
-# Sphinx
-doc/build
-
-# pbr generates these
-AUTHORS
-ChangeLog
-
-# Editors
-*~
-.*.sw?
-
-# IDEs
-.idea
diff --git a/.gitreview b/.gitreview
deleted file mode 100644
index 6c1e18a0..00000000
--- a/.gitreview
+++ /dev/null
@@ -1,4 +0,0 @@
-[gerrit]
-host=review.openstack.org
-port=29418
-project=openstack/congress.git
diff --git a/.mailmap b/.mailmap
deleted file mode 100644
index cc92f17b..00000000
--- a/.mailmap
+++ /dev/null
@@ -1,3 +0,0 @@
-# Format is:
-# <preferred e-mail> <other e-mail 1>
-# <preferred e-mail> <other e-mail 2>
\ No newline at end of file
diff --git a/.testr.conf b/.testr.conf
deleted file mode 100644
index 5ebedb49..00000000
--- a/.testr.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
-             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
-             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
-             ${PYTHON:-python} -m subunit.run discover -t ./ ./congress/tests $LISTOPT $IDOPTION
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
deleted file mode 100644
index 9fe51e0d..00000000
--- a/CONTRIBUTING.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-============
-Contributing
-============
-
-The Congress wiki page is the authoritative starting point.
-
-   https://wiki.openstack.org/wiki/Congress
-
-If you would like to contribute to the development of any OpenStack
-project including Congress,
-you must follow the steps in this page:
-
-   https://docs.openstack.org/infra/manual/developers.html
-
-Once those steps have been completed, changes to OpenStack
-should be submitted for review via the Gerrit tool, following
-the workflow documented at:
-
-   https://docs.openstack.org/infra/manual/developers.html#development-workflow
-
-Pull requests submitted through GitHub will be ignored.
-
-Bugs should be filed on Launchpad, not GitHub:
-
-   https://bugs.launchpad.net/congress
diff --git a/HACKING.rst b/HACKING.rst
deleted file mode 100644
index 431bbbb7..00000000
--- a/HACKING.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-===========================
-Congress style commandments
-===========================
-
-Read the OpenStack Style Commandments https://docs.openstack.org/developer/hacking/
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 67db8588..00000000
--- a/LICENSE
+++ /dev/null
@@ -1,175 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 75531f00..00000000
--- a/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-
-TOPDIR=$(CURDIR)
-SRCDIR=$(TOPDIR)/congress
-
-all: docs
-
-clean:
-	find . -name '*.pyc' -exec rm -f {} \;
-	rm -Rf $(TOPDIR)/doc/html/*
-
-docs: $(TOPDIR)/doc/source/*.rst
-	sphinx-build -b html $(TOPDIR)/doc/source $(TOPDIR)/doc/html
diff --git a/README b/README
new file mode 100644
index 00000000..8fcd2b2f
--- /dev/null
+++ b/README
@@ -0,0 +1,14 @@
+This project is no longer maintained.
+
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
+
+For ongoing work on maintaining OpenStack packages in the Debian
+distribution, please see the Debian OpenStack packaging team at
+https://wiki.debian.org/OpenStack/.
+
+For any further questions, please email
+openstack-dev@lists.openstack.org or join #openstack-dev on
+Freenode.
diff --git a/README.rst b/README.rst
deleted file mode 100644
index 21df8cf1..00000000
--- a/README.rst
+++ /dev/null
@@ -1,465 +0,0 @@
-========================
-Team and repository tags
-========================
-
-.. image:: https://governance.openstack.org/badges/congress.svg
-    :target: https://governance.openstack.org/reference/tags/index.html
-
-.. Change things from this point on
-
-
-.. _readme:
-
-======================================
-Congress Introduction and Installation
-======================================
-
-1. What is Congress
-===================
-
-Congress is an open policy framework for the cloud. With Congress, a
-cloud operator can declare, monitor, enforce, and audit "policy" in a
-heterogeneous cloud environment. Congress gets inputs from a cloud's
-various cloud services; for example in OpenStack, Congress fetches
-information about VMs from Nova, and network state from Neutron, etc.
-Congress then feeds input data from those services into its policy engine
-where Congress verifies that the cloud's actual state abides by the cloud
-operator's policies. Congress is designed to work with **any policy** and
-**any cloud service**.
-
-2. Why is Policy Important
-==========================
-
-The cloud is a collection of autonomous
-services that constantly change the state of the cloud, and it can be
-challenging for the cloud operator to know whether the cloud is even
-configured correctly. For example,
-
-* The services are often independent from each other and do not
-  support transactional consistency across services, so a cloud
-  management system can change one service (create a VM) without also
-  making a necessary change to another service (attach the VM to a
-  network). This can lead to incorrect behavior.
-
-* Other times, we have seen a cloud operator allocate cloud resources
-  and then forget to clean them up when the resources are no longer in
-  use, effectively leaving garbage around the system and wasting
-  resources.
-
-* The desired cloud state can also change over time. For example, if
-  a security vulnerability is discovered in Linux version X, then all
-  machines with version X that were ok in the past are now in an
-  undesirable state. A version number policy would detect all the
-  machines in that undesirable state. This is a trivial example, but
-  the more complex the policy, the more helpful a policy system
-  becomes.
-
-Congress's job is to help people manage that plethora of state across
-all cloud services with a succinct policy language.
-
-3. Using Congress
-=================
-
-Setting up Congress involves writing policies and configuring Congress
-to fetch input data from the cloud services. The cloud operator
-writes policy in the Congress policy language, which receives input
-from the cloud services in the form of tables. The language itself
-resembles datalog. For more detail about the policy language and data
-format see :ref:`Policy <policy>`.
-
-To add a service as an input data source, the cloud operator configures a Congress
-"driver," and the driver queries the service. Congress already
-has drivers for several types of service, but if a cloud operator
-needs to use an unsupported service, she can write a new driver
-without much effort and probably contribute the driver to the
-Congress project so that no one else needs to write the same driver.
-
-Finally, when using Congress, the cloud operator must choose what
-Congress should do with the policy it has been given:
-
-* **monitoring**: detect violations of policy and provide a list of those violations
-* **proactive enforcement**: prevent violations before they happen (functionality that requires
-  other services to consult with Congress before making changes)
-* **reactive enforcement**: correct violations after they happen (a manual process that
-  Congress tries to simplify)
-
-In the future, Congress
-will also help the cloud operator audit policy (analyze the history
-of policy and policy violations).
-
-Congress is free software and is licensed with Apache.
-
-* Free software: Apache license
-
-4. Installing Congress
-======================
-
-There are 2 ways to install Congress.
-
-* As part of DevStack. Get Congress running alongside other OpenStack services like Nova
-  and Neutron, all on a single machine. This is a great way to try out Congress for the
-  first time.
-
-* Separate install. Get Congress running alongside an existing OpenStack
-  deployment
-
-4.1 Devstack-install
---------------------
-For integrating Congress with DevStack:
-
-1. Download DevStack
-
-.. code-block:: console
-
-    $ git clone https://git.openstack.org/openstack-dev/devstack.git
-    $ cd devstack
-
-2. Configure DevStack to use Congress and any other service you want. To do that, modify
-   the ``local.conf`` file (inside the DevStack directory). Here is what
-   our file looks like:
-
-.. code-block:: console
-
-    [[local|localrc]]
-
-    enable_plugin congress https://git.openstack.org/openstack/congress
-    enable_plugin heat https://git.openstack.org/openstack/heat
-    enable_plugin aodh https://git.openstack.org/openstack/aodh
-    enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer
-    enable_service s-proxy s-object s-container s-account
-
-3. Run ``stack.sh``. The default configuration expects the passwords to be 'password'
-   without the quotes
-
-.. code-block:: console
-
-    $ ./stack.sh
-
-
-4.2 Separate install
---------------------
-Install the following software, if you haven't already.
-
-* python 2.7: https://www.python.org/download/releases/2.7/
-
-* pip: https://pip.pypa.io/en/latest/installing.html
-
-* java: https://java.com (any reasonably current version should work)
-  On Ubuntu: console apt-get install default-jre
-  On Fedora: console yum install jre
-
-* Additionally
-
-.. code-block:: console
-
-    $ sudo apt-get install git gcc python-dev python-antlr3 libxml2 libxslt1-dev libzip-dev build-essential libssl-dev libffi-dev
-    $ sudo apt install python-setuptools
-    $ sudo pip install --upgrade pip virtualenv pbr tox
-
-Clone Congress
-
-.. code-block:: console
-
-    $ git clone https://github.com/openstack/congress.git
-    $ cd congress
-
-Install requirements
-
-.. code-block:: console
-
-    $ sudo pip install .
-
-Install Source code
-
-.. code-block:: console
-
-    $ sudo python setup.py install
-
-Configure Congress (Assume you put config files in /etc/congress)
-
-.. code-block:: console
-
-    $ sudo mkdir -p /etc/congress
-    $ sudo mkdir -p /etc/congress/snapshot
-    $ sudo cp etc/api-paste.ini /etc/congress
-    $ sudo cp etc/policy.json /etc/congress
-
-Set-up Policy Library [optional]
-  This step copies the bundled collection of Congress policies into the Congress
-  policy library for easy activation by an administrator. The policies in the
-  library do not become active until explicitly activated by an administrator.
-  The step may be skipped if you do not want to load the bundled policies into
-  the policy library.
-
-.. code-block:: console
-
-    $ sudo cp -r library /etc/congress/.
-
-Generate a configuration file as outlined in the Configuration Options section
-of the :ref:`Deployment <deployment>` document. Note: you may have to run the command with sudo.
-
-There are several sections in the congress/etc/congress.conf.sample file you may want to change:
-
-* [DEFAULT] Section
-  - drivers
-  - auth_strategy
-* "From oslo.log" Section
-  - log_file
-  - log_dir (remember to create the directory)
-* [database] Section
-  - connection
-
-Add drivers:
-
-.. code-block:: text
-
-    drivers = congress.datasources.neutronv2_driver.NeutronV2Driver,congress.datasources.glancev2_driver.GlanceV2Driver,congress.datasources.nova_driver.NovaDriver,congress.datasources.keystone_driver.KeystoneDriver,congress.datasources.ceilometer_driver.CeilometerDriver,congress.datasources.cinder_driver.CinderDriver,congress.datasources.swift_driver.SwiftDriver,congress.datasources.plexxi_driver.PlexxiDriver,congress.datasources.vCenter_driver.VCenterDriver,congress.datasources.murano_driver.MuranoDriver,congress.datasources.ironic_driver.IronicDriver
-
-
-The default auth_strategy is keystone. To set Congress to use no authorization strategy:
-
-.. code-block:: text
-
-    auth_strategy = noauth
-
-If you use noauth, you might want to delete or comment out the [keystone_authtoken] section.
-
-Set the database connection string in the [database] section (adapt MySQL root password):
-
-.. code-block:: text
-
-    connection = mysql+pymysql://root:password@127.0.0.1/congress?charset=utf8
-
-To use RabbitMQ with Congress, set the transport_url in the "From oslo.messaging" section according to your setup:
-
-.. code-block:: text
-
-    transport_url = rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672
-
-A bare-bones congress.conf is as follows:
-
-.. code-block:: text
-
-    [DEFAULT]
-    auth_strategy = noauth
-    drivers = congress.datasources.neutronv2_driver.NeutronV2Driver,congress.datasources.glancev2_driver.GlanceV2Driver,congress.datasources.nova_driver.NovaDriver,congress.datasources.keystone_driver.KeystoneDriver,congress.datasources.ceilometer_driver.CeilometerDriver,congress.datasources.cinder_driver.CinderDriver,congress.datasources.swift_driver.SwiftDriver,congress.datasources.plexxi_driver.PlexxiDriver,congress.datasources.vCenter_driver.VCenterDriver,congress.datasources.murano_driver.MuranoDriver,congress.datasources.ironic_driver.IronicDriver
-    log_file=congress.log
-    log_dir=/var/log/congress
-    [database]
-    connection = mysql+pymysql://root:password@127.0.0.1/congress?charset=utf8
-
-
-When you are finished editing congress.conf.sample, copy it to the /etc/congress directory.
-
-.. code-block:: console
-
-    sudo cp etc/congress.conf.sample /etc/congress/congress.conf
-
-
-Create database
-
-.. code-block:: console
-
-    $ mysql -u root -p
-    $ mysql> CREATE DATABASE congress;
-    $ mysql> GRANT ALL PRIVILEGES ON congress.* TO 'congress'@'localhost' IDENTIFIED BY 'CONGRESS_DBPASS';
-    $ mysql> GRANT ALL PRIVILEGES ON congress.* TO 'congress'@'%' IDENTIFIED BY 'CONGRESS_DBPASS';
-
-
-Push down schema
-
-.. code-block:: console
-
-    $ sudo congress-db-manage --config-file /etc/congress/congress.conf upgrade head
-
-
-Set up Congress accounts
-  Use your OpenStack RC file to set and export required environment variables:
-  OS_USERNAME, OS_PASSWORD, OS_PROJECT_NAME, OS_TENANT_NAME, OS_AUTH_URL.
-
-  (Adapt parameters according to your environment)
-
-
-.. code-block:: console
-
-    $ ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
-    $ SERVICE_TENANT=$(openstack project list | awk "/ service / { print \$2 }")
-    $ CONGRESS_USER=$(openstack user create --password password --project service --email "congress@example.com" congress | awk "/ id / {print \$4 }")
-    $ openstack role add $ADMIN_ROLE --user $CONGRESS_USER --project $SERVICE_TENANT
-    $ CONGRESS_SERVICE=$(openstack service create policy --name congress --description "Congress Service" | awk "/ id / { print \$4 }")
-
-
-Create the Congress Service Endpoint
-  Endpoint creation differs based upon the Identity version. Please see the `endpoint `_ documentation for details.
-
-
-.. code-block:: console
-
-    Identity v2:
-    $ openstack endpoint create $CONGRESS_SERVICE --region RegionOne --publicurl https://127.0.0.1:1789/ --adminurl https://127.0.0.1:1789/ --internalurl https://127.0.0.1:1789/
-
-
-.. code-block:: console
-
-    Identity v3:
-    $ openstack endpoint create --region $OS_REGION_NAME $CONGRESS_SERVICE public https://$SERVICE_HOST:1789
-    $ openstack endpoint create --region $OS_REGION_NAME $CONGRESS_SERVICE admin https://$SERVICE_HOST:1789
-    $ openstack endpoint create --region $OS_REGION_NAME $CONGRESS_SERVICE internal https://$SERVICE_HOST:1789
-
-
-
-Start Congress
-  The default behavior is to start the Congress API, Policy Engine, and
-  Datasource in a single node. For HAHT deployment options, please see the
-  :ref:`HA Overview <ha_overview>` document.
-
-.. code-block:: console
-
-    $ sudo /usr/local/bin/congress-server --debug
-
-
-Install the Congress Client
-  The command line interface (CLI) for Congress resides in a project called python-congressclient.
-  Follow the installation instructions on the `GitHub page `_.
-
-
-Configure datasource drivers
-  For this you must have the Congress CLI installed. Run this command for every
-  service that Congress will poll for data.
-  Please note that the service name $SERVICE should match the ID of the
-  datasource driver, e.g. "neutronv2" for Neutron and "glancev2" for Glance;
-  $OS_USERNAME, $OS_TENANT_NAME, $OS_PASSWORD and $SERVICE_HOST are used to
-  configure the related datasource driver so that congress knows how to
-  talk with the service.
-
-.. code-block:: console
-
-    $ openstack congress datasource create $SERVICE "$SERVICE" \
-      --config username=$OS_USERNAME \
-      --config tenant_name=$OS_TENANT_NAME \
-      --config password=$OS_PASSWORD \
-      --config auth_url=https://$SERVICE_HOST:5000/v2.0
-
-
-Install the Congress Dashboard in Horizon
-  Clone congress-dashboard repo, located here https://github.com/openstack/congress-dashboard
-  Follow the instructions in the README file located in https://github.com/openstack/congress-dashboard/blob/master/README.rst
-  for further installation.
-
-  Note: After you install the Congress Dashboard and restart apache, the OpenStack Dashboard may throw
-  a "You have offline compression enabled..." error, follow the instructions in the error message.
-  You may have to:
-
-.. code-block:: console
-
-    $ cd /opt/stack/horizon
-    $ python manage.py compress
-    $ sudo service apache2 restart
-
-
-Read the HTML documentation
-  Install python-sphinx and the oslosphinx extension if missing and build the docs.
-  After building, open congress/doc/html/index.html in a browser.
-
-.. code-block:: console
-
-    $ sudo pip install sphinx
-    $ sudo pip install oslosphinx
-    $ make docs
-
-
-Test Using the Congress CLI
-  If you are not familiar with using the OpenStack command-line clients, please read the `OpenStack documentation `_ before proceeding.
-
-  Once you have set up or obtained credentials to use the OpenStack command-line clients, you may begin testing Congress. During installation a number of policies are created.
-
-  To view policies: $ openstack congress policy list
-
-  To view installed datasources: $ openstack congress datasource list
-
-  To list available commands: $ openstack congress --help
-
-4.3 Unit Tests
-------------------------
-
-Run unit tests in the Congress directory
-
-.. code-block:: console
-
-    $ tox -epy27
-
-In order to break into the debugger from a unit test we need to insert
-a break point to the code:
-
-.. code-block:: python
-
-    import pdb; pdb.set_trace()
-
-Then run ``tox`` with the debug environment as one of the following::
-
-    tox -e debug
-    tox -e debug test_file_name.TestClass.test_name
-
-For more information see the `oslotest documentation
-`_.
-
-4.4 Upgrade
------------
-
-Here are the instructions for upgrading to a new release of the
-Congress server.
-
-1. Stop the Congress server.
-
-2. Update the Congress git repo
-
-.. code-block:: console
-
-    $ cd /path/to/congress
-    $ git fetch origin
-
-3. Checkout the release you are interested in, say Mitaka. Note that this
-step will not succeed if you have any uncommitted changes in the repo.
-
-.. code-block:: console
-
-    $ git checkout origin/stable/mitaka
-
-
-If you have changes committed locally that are not merged into the public
-repository, you now need to cherry-pick those changes onto the new
-branch.
-
-4. Install dependencies
-
-.. code-block:: console
-
-    $ sudo pip install .
-
-5. Install source code
-
-.. code-block:: console
-
-    $ sudo python setup.py install
-
-6. Migrate the database schema
-
-.. code-block:: console
-
-    $ sudo congress-db-manage --config-file /etc/congress/congress.conf upgrade head
-
-7. (optional) Check if the configuration options you are currently using are
-   still supported and whether there are any new configuration options you
-   would like to use. To see the current list of configuration options,
-   use the following command, which will create a sample configuration file
-   in ``etc/congress.conf.sample`` for you to examine.
-
-.. code-block:: console
-
-    $ tox -egenconfig
-
-8. Restart Congress, e.g.
-
-.. code-block:: console
-
-    $ sudo /usr/local/bin/congress-server --debug
diff --git a/antlr3runtime/Python/antlr3 b/antlr3runtime/Python/antlr3
deleted file mode 120000
index b8b214f6..00000000
--- a/antlr3runtime/Python/antlr3
+++ /dev/null
@@ -1 +0,0 @@
-../../thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/
\ No newline at end of file
diff --git a/antlr3runtime/Python3/antlr3 b/antlr3runtime/Python3/antlr3
deleted file mode 120000
index e1509cfb..00000000
--- a/antlr3runtime/Python3/antlr3
+++ /dev/null
@@ -1 +0,0 @@
-../../thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/
\ No newline at end of file
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index efceab81..00000000
--- a/babel.cfg
+++ /dev/null
@@ -1 +0,0 @@
-[python: **.py]
diff --git a/bin/congress-server b/bin/congress-server
deleted file mode 100755
index fd292ae6..00000000
--- a/bin/congress-server
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2013 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-
-import os
-import sys
-
-# If ../congress/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__),
-                                                os.pardir,
-                                                os.pardir))
-if os.path.exists(os.path.join(possible_topdir,
-                               'congress',
-                               '__init__.py')):
-    sys.path.insert(0, possible_topdir)
-
-
-from congress.server import congress_server
-
-if __name__ == '__main__':
-    congress_server.main()
diff --git a/bindep.txt b/bindep.txt
deleted file mode 100644
index b7e1e2ed..00000000
--- a/bindep.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-python-all-dev
-python3-all-dev
-libvirt-dev
-libxml2-dev
-libxslt1-dev
-# libmysqlclient-dev
-# libpq-dev
-libsqlite3-dev
-libffi-dev
-# mysql-client
-# mysql-server
-# postgresql
-# postgresql-client
-rabbitmq-server
diff --git a/congress/__init__.py b/congress/__init__.py
deleted file mode 100644
index 1a5ed251..00000000
--- a/congress/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-import gettext
-
-import pbr.version
-
-gettext.install('congress')
-__version__ = pbr.version.VersionInfo(
-    'congress').version_string()
diff --git a/congress/api/__init__.py b/congress/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/congress/api/action_model.py b/congress/api/action_model.py
deleted file mode 100644
index 5d225be8..00000000
--- a/congress/api/action_model.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (c) 2015 Intel, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from congress.api import api_utils
-from congress.api import base
-from congress.api import webservice
-from congress import exception
-
-
-class ActionsModel(base.APIModel):
-    """Model for handling API requests about Actions."""
-
-    # Note(dse2): blocking function
-    def get_items(self, params, context=None):
-        """Retrieve items from this model.
-
-        Args:
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            A dict containing at least a 'actions' key whose value is a list
-            of items in this model.
-        """
-        # Note: blocking call
-        caller, source_id = api_utils.get_id_from_context(context)
-
-        try:
-            rpc_args = {'source_id': source_id}
-            # Note(dse2): blocking call
-            return self.invoke_rpc(caller, 'get_actions', rpc_args)
-        except exception.CongressException as e:
-            raise webservice.DataModelException(
-                exception.NotFound.code, str(e),
-                http_status_code=exception.NotFound.code)
diff --git a/congress/api/api_utils.py b/congress/api/api_utils.py
deleted file mode 100644
index a31f6db7..00000000
--- a/congress/api/api_utils.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (c) 2015 NTT All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from oslo_log import log as logging
-
-from congress.api import base
-from congress.api import webservice
-from congress.db import datasources as db_datasources
-
-LOG = logging.getLogger(__name__)
-
-
-def create_table_dict(tablename, schema):
-    cols = [{'name': x['name'], 'description': x['desc']}
-            if isinstance(x, dict)
-            else {'name': x, 'description': 'None'}
-            for x in schema[tablename]]
-    return {'table_id': tablename,
-            'columns': cols}
-
-
-# Note(thread-safety): blocking function
-def get_id_from_context(context):
-    if 'ds_id' in context:
-        # Note(thread-safety): blocking call
-        ds_name = db_datasources.get_datasource_name(context.get('ds_id'))
-        return ds_name, context.get('ds_id')
-    elif 'policy_id' in context:
-        return base.ENGINE_SERVICE_ID, context.get('policy_id')
-    else:
-        msg = ("Internal error: context %s should have included "
-               "either ds_id or policy_id" % str(context))
-        try:  # Py3: ensure LOG.exception is inside except
-            raise webservice.DataModelException('404', msg)
-        except webservice.DataModelException:
-            LOG.exception(msg)
-            raise
diff --git a/congress/api/application.py b/congress/api/application.py
deleted file mode 100644
index fd6628d3..00000000
--- a/congress/api/application.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import traceback
-
-from oslo_log import log as logging
-import webob
-import webob.dec
-
-from congress.api import webservice
-from congress.dse2 import data_service
-
-
-LOG = logging.getLogger(__name__)
-API_SERVICE_NAME = '__api'
-
-
-class ApiApplication(object):
-    """An API web application that binds REST resources to a wsgi server.
-
-    This indirection between the wsgi server and REST resources facilitates
-    binding the same resource tree to multiple endpoints (e.g. HTTP/HTTPS).
-    """
-
-    def __init__(self, resource_mgr):
-        self.resource_mgr = resource_mgr
-
-    @webob.dec.wsgify(RequestClass=webob.Request)
-    def __call__(self, request):
-        try:
-            handler = self.resource_mgr.get_handler(request)
-            if handler:
-                msg = _("Handling request '%(meth)s %(path)s' with %(hndlr)s")
-                LOG.info(msg, {"meth": request.method, "path": request.path,
-                               "hndlr": handler})
-                # TODO(pballand): validation
-                response = handler.handle_request(request)
-            else:
-                response = webservice.NOT_FOUND_RESPONSE
-        except webservice.DataModelException as e:
-            # Error raised based on invalid user input
-            LOG.exception("ApiApplication: found DataModelException")
-            response = e.rest_response()
-        except Exception as e:
-            # Unexpected error raised by API framework or data model
-            msg = _("Exception caught for request: %s")
-            LOG.error(msg, request)
-            LOG.error(traceback.format_exc())
-            response = webservice.INTERNAL_ERROR_RESPONSE
-        return response
-
-
-class ResourceManager(data_service.DataService):
-    """A container for REST API resources.
-
-    This container is meant to be called from one or more wsgi servers/ports.
-
-    Attributes:
-        handlers: An array of API resource handlers for registered resources.
-    """
-
-    def __init__(self):
-        self.handlers = []
-        super(ResourceManager, self).__init__(API_SERVICE_NAME)
-
-    def register_handler(self, handler, search_index=None):
-        """Register a new resource handler.
-
-        Args:
-            handler: The resource handler to register.
-            search_index: Priority of resource handler to resolve cases where
-                          a request matches multiple handlers.
-        """
-        if search_index is not None:
-            self.handlers.insert(search_index, handler)
-        else:
-            self.handlers.append(handler)
-        msg = _("Registered API handler: %s")
-        LOG.info(msg, handler)
-
-    def get_handler(self, request):
-        """Find a handler for a REST request.
-
-        Args:
-            request: A webob request object.
-
-        Returns:
-            A handler instance or None.
-        """
-        for h in self.handlers:
-            if h.handles_request(request):
-                return h
-        return None
diff --git a/congress/api/base.py b/congress/api/base.py
deleted file mode 100644
index e2a05d8b..00000000
--- a/congress/api/base.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2016 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-""" Base class for all API models."""
-
-from __future__ import absolute_import
-
-from oslo_config import cfg
-
-ENGINE_SERVICE_ID = '__engine'
-LIBRARY_SERVICE_ID = '__library'
-DS_MANAGER_SERVICE_ID = '_ds_manager'
-
-
-class APIModel(object):
-    """Base Class for handling API requests."""
-
-    def __init__(self, name, bus=None):
-        self.name = name
-        self.dse_long_timeout = cfg.CONF.dse.long_timeout
-        self.bus = bus
-
-    # Note(thread-safety): blocking function
-    def invoke_rpc(self, caller, name, kwargs, timeout=None):
-        local = (caller is ENGINE_SERVICE_ID and
-                 self.bus.node.service_object(
-                     ENGINE_SERVICE_ID) is not None)
-        return self.bus.rpc(
-            caller, name, kwargs, timeout=timeout, local=local)
diff --git a/congress/api/datasource_model.py b/congress/api/datasource_model.py
deleted file mode 100644
index 5cd79beb..00000000
--- a/congress/api/datasource_model.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import json
-
-from oslo_log import log as logging
-
-from congress.api import api_utils
-from congress.api import base
-from congress.api import error_codes
-from congress.api import webservice
-from congress import exception
-
-LOG = logging.getLogger(__name__)
-
-
-class DatasourceModel(base.APIModel):
-    """Model for handling API requests about Datasources."""
-
-    # Note(thread-safety): blocking function
-    def get_items(self, params, context=None):
-        """Get items in model.
-
-        Args:
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns: A dict containing at least a 'results' key whose value is
-                 a list of items in the model. Additional keys set in the
-                 dict will also be rendered for the user.
-        """
-
-        # Note(thread-safety): blocking call
-        results = self.bus.get_datasources(filter_secret=True)
-
-        # Check that running datasources match the datasources in the
-        # database since this is going to tell the client about those
-        # datasources, and the running datasources should match the
-        # datasources we show the client.
-
-        return {"results": results}
-
-    def get_item(self, id_, params, context=None):
-        """Get datasource corresponding to id_ in model."""
-        try:
-            datasource = self.bus.get_datasource(id_)
-            return datasource
-        except exception.DatasourceNotFound as e:
-            LOG.exception("Datasource '%s' not found", id_)
-            raise webservice.DataModelException(e.code, str(e),
-                                                http_status_code=e.code)
-
-    # Note(thread-safety): blocking function
-    def add_item(self, item, params, id_=None, context=None):
-        """Add item to model.
-
-        Args:
-            item: The item to add to the model
-            id_: The ID of the item, or None if an ID should be generated
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            Tuple of (ID, newly_created_item)
-
-        Raises:
-            KeyError: ID already exists.
-        """
-        obj = None
-        try:
-            # Note(thread-safety): blocking call
-            obj = self.invoke_rpc(base.DS_MANAGER_SERVICE_ID,
-                                  'add_datasource',
-                                  {'items': item},
-                                  timeout=self.dse_long_timeout)
-            # Let PE synchronizer take care of creating the policy.
-        except (exception.BadConfig,
-                exception.DatasourceNameInUse,
-                exception.DriverNotFound,
-                exception.DatasourceCreationError) as e:
-            LOG.exception(_("Datasource creation failed."))
-            raise webservice.DataModelException(e.code, str(e),
-                                                http_status_code=e.code)
-
-        return (obj['id'], obj)
-
-    # Note(thread-safety): blocking function
-    def delete_item(self, id_, params, context=None):
-        ds_id = context.get('ds_id')
-        try:
-            # Note(thread-safety): blocking call
-            datasource = self.bus.get_datasource(ds_id)
-            # FIXME(thread-safety):
-            #   by the time greenthread resumes, the
-            #   returned datasource name could refer to a totally different
-            #   datasource, causing the rest of this code to unintentionally
-            #   delete a different datasource
-            #   Fix: check UUID of datasource before operating.
-            #   Abort if mismatch
-            self.invoke_rpc(base.DS_MANAGER_SERVICE_ID,
-                            'delete_datasource',
-                            {'datasource': datasource},
-                            timeout=self.dse_long_timeout)
-            # Let PE synchronizer takes care of deleting policy
-        except (exception.DatasourceNotFound,
-                exception.DanglingReference) as e:
-            raise webservice.DataModelException(e.code, str(e))
-
-    # Note(thread-safety): blocking function
-    def request_refresh_action(self, params, context=None, request=None):
-        caller, source_id = api_utils.get_id_from_context(context)
-        try:
-            args = {'source_id': source_id}
-            # Note(thread-safety): blocking call
-            self.invoke_rpc(caller, 'request_refresh', args)
-        except exception.CongressException as e:
-            LOG.exception(e)
-            raise webservice.DataModelException.create(e)
-
-    # Note(thread-safety): blocking function
-    def execute_action(self, params, context=None, request=None):
-        "Execute the action."
-        service = context.get('ds_id')
-        body = json.loads(request.body)
-        action = body.get('name')
-        action_args = body.get('args', {})
-        if (not isinstance(action_args, dict)):
-            (num, desc) = error_codes.get('execute_action_args_syntax')
-            raise webservice.DataModelException(num, desc)
-
-        try:
-            args = {'service_name': service, 'action': action,
-                    'action_args': action_args}
-            # TODO(ekcs): perhaps keep execution synchronous when explicitly
-            #   called via API
-            # Note(thread-safety): blocking call
-            self.invoke_rpc(base.ENGINE_SERVICE_ID, 'execute_action', args)
-        except exception.PolicyException as e:
-            (num, desc) = error_codes.get('execute_error')
-            raise webservice.DataModelException(num, desc + "::" + str(e))
-
-        return {}
diff --git a/congress/api/error_codes.py b/congress/api/error_codes.py
deleted file mode 100644
index e7ab6dbb..00000000
--- a/congress/api/error_codes.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-try:
-    # For Python 3
-    import http.client as httplib
-except ImportError:
-    import httplib
-
-# TODO(thinrichs): move this out of api directory. Could go into
-# the exceptions.py file. The HTTP error codes may make these errors
-# look like they are only useful for the API, but actually they are
-# just encoding the classification of the error using http codes.
-# To make this more explicit, we could have 2 dictionaries where
-# one maps an error name (readable for programmers) to an error number
-# and another dictionary that maps an error name/number to the HTTP
-# classification. But then it would be easy for a programmer when
-# adding a new error to forget one or the other.
-
-# name of unknown error
-UNKNOWN = 'unknown'
-
-
-# dict mapping error name to (<error id>, <error description>, <http code>)
-errors = {}
-errors[UNKNOWN] = (
-    1000, "Unknown error", httplib.BAD_REQUEST)
-errors['add_item_id'] = (
-    1001, "Add item does not support user-chosen ID", httplib.BAD_REQUEST)
-errors['rule_syntax'] = (
-    1002, "Syntax error for rule", httplib.BAD_REQUEST)
-errors['multiple_rules'] = (
-    1003, "Received string representing more than 1 rule", httplib.BAD_REQUEST)
-errors['incomplete_simulate_args'] = (
-    1004, "Simulate requires parameters: query, sequence, action_policy",
-    httplib.BAD_REQUEST)
-errors['simulate_without_policy'] = (
-    1005, "Simulate must be told which policy to evaluate the query on",
-    httplib.BAD_REQUEST)
-errors['sequence_syntax'] = (
-    1006, "Syntax error in sequence", httplib.BAD_REQUEST)
-errors['simulate_error'] = (
-    1007, "Error in simulate procedure", httplib.INTERNAL_SERVER_ERROR)
-errors['rule_already_exists'] = (
-    1008, "Rule already exists", httplib.CONFLICT)
-errors['schema_get_item_id'] = (
-    1009, "Get item for schema does not support user-chosen ID",
-    httplib.BAD_REQUEST)
-errors['policy_name_must_be_provided'] = (
-    1010, "A name must be provided when creating a policy",
-    httplib.BAD_REQUEST)
-errors['no_policy_update_owner'] = (
-    1012, "The policy owner_id cannot be updated",
-    httplib.BAD_REQUEST)
-errors['no_policy_update_kind'] = (
-    1013, "The policy kind cannot be updated",
-    httplib.BAD_REQUEST)
-errors['failed_to_create_policy'] = (
-    1014, "A new policy could not be created",
-    httplib.INTERNAL_SERVER_ERROR)
-errors['policy_id_must_not_be_provided'] = (
-    1015, "An ID may not be provided when creating a policy",
-    httplib.BAD_REQUEST)
-errors['execute_error'] = (
-    1016, "Error in execution procedure", httplib.INTERNAL_SERVER_ERROR)
-errors['service_action_syntax'] = (
-    1017, "Incorrect action syntax. Requires: <service>:<action>",
-    httplib.BAD_REQUEST)
-errors['execute_action_args_syntax'] = (
-    1018, "Incorrect argument syntax. "
-    "Requires: {'positional': [<args>], 'named': {<key>:<value>,}}",
-    httplib.BAD_REQUEST)
-errors['rule_not_permitted'] = (
-    1019, "Rules not permitted on non persisted policies.",
-    httplib.BAD_REQUEST)
-errors['policy_not_exist'] = (
-    1020, "The specified policy does not exist.", httplib.NOT_FOUND)
-errors['policy_rule_insertion_failure'] = (
-    1021, "The policy rule could not be inserted.", httplib.BAD_REQUEST)
-errors['policy_abbreviation_error'] = (
-    1022, "The policy abbreviation must be a string and the length of the "
-    "string must be equal to or less than 5 characters.",
-    httplib.BAD_REQUEST)
-
-
-def get(name):
-    if name not in errors:
-        name = UNKNOWN
-    return errors[name][:2]
-
-
-def get_num(name):
-    if name not in errors:
-        name = UNKNOWN
-    return errors[name][0]
-
-
-def get_desc(name):
-    if name not in errors:
-        name = UNKNOWN
-    return errors[name][1]
-
-
-def get_http(name):
-    if name not in errors:
-        name = UNKNOWN
-    return errors[name][2]
diff --git a/congress/api/library_policy_model.py b/congress/api/library_policy_model.py
deleted file mode 100644
index 73d819ee..00000000
--- a/congress/api/library_policy_model.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Copyright (c) 2017 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from oslo_log import log as logging
-
-from congress.api import base
-from congress.api import error_codes
-from congress.api import webservice
-from congress import exception
-
-LOG = logging.getLogger(__name__)
-
-
-class LibraryPolicyModel(base.APIModel):
-    """Model for handling API requests about Library Policies."""
-
-    # Note(thread-safety): blocking function
-    def get_items(self, params, context=None):
-        """Get items in model.
-
-        Args:
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns: A dict containing at least a 'results' key whose value is
-                 a list of items in the model. Additional keys set in the
-                 dict will also be rendered for the user.
-        """
-        try:
-            # Note(thread-safety): blocking call
-            return {"results": self.invoke_rpc(base.LIBRARY_SERVICE_ID,
-                                               'get_policies',
-                                               {})}
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
-
-    # Note(thread-safety): blocking function
-    def get_item(self, id_, params, context=None):
-        """Retrieve item with name name from model.
-
-        Args:
-            name: The unique name of the item to retrieve
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The matching item or None if no item named name exists.
-        """
-        try:
-            # Note(thread-safety): blocking call
-            return self.invoke_rpc(base.LIBRARY_SERVICE_ID,
-                                   'get_policy',
-                                   {'id_': id_, 'include_rules': True})
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
-
-    # Note(thread-safety): blocking function
-    def add_item(self, item, params, id_=None, context=None):
-        """Add item to model.
-
-        Args:
-            item: The item to add to the model
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            id_: The unique name of the item
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            Tuple of (ID, newly_created_item)
-
-        Raises:
-            KeyError: ID already exists.
-            DataModelException: Addition cannot be performed.
-        """
-        if id_ is not None:
-            (num, desc) = error_codes.get('policy_id_must_not_be_provided')
-            raise webservice.DataModelException(num, desc)
-
-        try:
-            # Note(thread-safety): blocking call
-            policy_metadata = self.invoke_rpc(
-                base.LIBRARY_SERVICE_ID, 'create_policy',
-                {'policy_dict': item})
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
-
-        return (policy_metadata['id'], policy_metadata)
-
-    # Note(thread-safety): blocking function
-    def delete_item(self, id_, params, context=None):
-        """Remove item from model.
-
-        Args:
-            id_: The unique name of the item to be removed
-            params:
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The removed item.
-
-        Raises:
-            KeyError: Item with specified id_ not present.
-        """
-        # Note(thread-safety): blocking call
-        return self.invoke_rpc(base.LIBRARY_SERVICE_ID,
-                               'delete_policy',
-                               {'id_': id_})
-
-    def update_item(self, id_, item, params, context=None):
-        """Update item with id_ with new data.
-
-        Args:
-            id_: The ID of the item to be updated
-            item: The new item
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The updated item.
-
-        Raises:
-            KeyError: Item with specified id_ not present.
-        """
-        # Note(thread-safety): blocking call
-        try:
-            return self.invoke_rpc(base.LIBRARY_SERVICE_ID,
-                                   'replace_policy',
-                                   {'id_': id_,
-                                    'policy_dict': item})
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
diff --git a/congress/api/policy_model.py b/congress/api/policy_model.py
deleted file mode 100644
index f1da870b..00000000
--- a/congress/api/policy_model.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import json
-import re
-
-import six
-
-from congress.api import base
-from congress.api import error_codes
-from congress.api import webservice
-from congress import exception
-from congress.library_service import library_service
-
-
-class PolicyModel(base.APIModel):
-    """Model for handling API requests about Policies."""
-
-    # Note(thread-safety): blocking function
-    def get_items(self, params, context=None):
-        """Get items in model.
-
-        Args:
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns: A dict containing at least a 'results' key whose value is
-                 a list of items in the model. Additional keys set in the
-                 dict will also be rendered for the user.
-        """
-        try:
-            # Note(thread-safety): blocking call
-            return {"results": self.invoke_rpc(base.ENGINE_SERVICE_ID,
-                                               'persistent_get_policies',
-                                               {})}
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
-
-    # Note(thread-safety): blocking function
-    def get_item(self, id_, params, context=None):
-        """Retrieve item with id id_ from model.
-
-        Args:
-            id_: The ID of the item to retrieve
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The matching item or None if id_ does not exist.
-        """
-        try:
-            # Note(thread-safety): blocking call
-            return self.invoke_rpc(base.ENGINE_SERVICE_ID,
-                                   'persistent_get_policy',
-                                   {'id_': id_})
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
-
-    # Note(thread-safety): blocking function
-    def add_item(self, item, params, id_=None, context=None):
-        """Add item to model.
-
-        Args:
-            item: The item to add to the model
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            id_: The ID of the item, or None if an ID should be generated
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            Tuple of (ID, newly_created_item)
-
-        Raises:
-            KeyError: ID already exists.
-            DataModelException: Addition cannot be performed.
-            BadRequest: library_policy parameter and request body both present
-        """
-        # case 1: parameter gives library policy UUID
-        if 'library_policy' in params:
-            if item is not None:
-                raise exception.BadRequest(
-                    'Policy creation request with `library_policy` parameter '
-                    'must not have a body.')
-            try:
-                # Note(thread-safety): blocking call
-                library_policy_object = self.invoke_rpc(
-                    base.LIBRARY_SERVICE_ID,
-                    'get_policy', {'id_': params['library_policy']})
-
-                policy_metadata = self.invoke_rpc(
-                    base.ENGINE_SERVICE_ID,
-                    'persistent_create_policy_with_rules',
-                    {'policy_rules_obj': library_policy_object})
-            except exception.CongressException as e:
-                raise webservice.DataModelException.create(e)
-
-            return (policy_metadata['id'], policy_metadata)
-
-        # case 2: item contains rules
-        if 'rules' in item:
-            try:
-                library_service.validate_policy_item(item)
-                # Note(thread-safety): blocking call
-                policy_metadata = self.invoke_rpc(
-                    base.ENGINE_SERVICE_ID,
-                    'persistent_create_policy_with_rules',
-                    {'policy_rules_obj': item})
-            except exception.CongressException as e:
-                raise webservice.DataModelException.create(e)
-
-            return (policy_metadata['id'], policy_metadata)
-
-        # case 3: item does not contain rules
-        self._check_create_policy(id_, item)
-        name = item['name']
-        try:
-            # Note(thread-safety): blocking call
-            policy_metadata = self.invoke_rpc(
-                base.ENGINE_SERVICE_ID, 'persistent_create_policy',
-                {'name': name,
-                 'abbr': item.get('abbreviation'),
-                 'kind': item.get('kind'),
-                 'desc': item.get('description')})
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
-
-        return (policy_metadata['id'], policy_metadata)
-
-    def _check_create_policy(self, id_, item):
-        if id_ is not None:
-            (num, desc) = error_codes.get('policy_id_must_not_be_provided')
-            raise webservice.DataModelException(num, desc)
-        if 'name' not in item:
-            (num, desc) = error_codes.get('policy_name_must_be_provided')
-            raise webservice.DataModelException(num, desc)
-        abbr = item.get('abbreviation')
-        if abbr:
-            # the abbreviation column is 5 chars long in the policy DB table;
-            # check it in the API layer and raise an exception if it's
-            # too long.
-            if not isinstance(abbr, six.string_types) or len(abbr) > 5:
-                (num, desc) = error_codes.get('policy_abbreviation_error')
-                raise webservice.DataModelException(num, desc)
-
-    # Note(thread-safety): blocking function
-    def delete_item(self, id_, params, context=None):
-        """Remove item from model.
-
-        Args:
-            id_: The ID or name of the item to be removed
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The removed item.
-
-        Raises:
-            KeyError: Item with specified id_ not present.
- """ - # Note(thread-safety): blocking call - return self.invoke_rpc(base.ENGINE_SERVICE_ID, - 'persistent_delete_policy', - {'name_or_id': id_}) - - def _get_boolean_param(self, key, params): - if key not in params: - return False - value = params[key] - return value.lower() == "true" or value == "1" - - # Note(thread-safety): blocking function - def simulate_action(self, params, context=None, request=None): - """Simulate the effects of executing a sequence of updates. - - :returns: the result of a query. - """ - # grab string arguments - theory = context.get('policy_id') or params.get('policy') - if theory is None: - (num, desc) = error_codes.get('simulate_without_policy') - raise webservice.DataModelException(num, desc) - - body = json.loads(request.body) - query = body.get('query') - sequence = body.get('sequence') - actions = body.get('action_policy') - delta = self._get_boolean_param('delta', params) - trace = self._get_boolean_param('trace', params) - if query is None or sequence is None or actions is None: - (num, desc) = error_codes.get('incomplete_simulate_args') - raise webservice.DataModelException(num, desc) - - try: - args = {'query': query, 'theory': theory, 'sequence': sequence, - 'action_theory': actions, 'delta': delta, - 'trace': trace, 'as_list': True} - # Note(thread-safety): blocking call - result = self.invoke_rpc(base.ENGINE_SERVICE_ID, 'simulate', - args, timeout=self.dse_long_timeout) - except exception.PolicyException as e: - (num, desc) = error_codes.get('simulate_error') - raise webservice.DataModelException(num, desc + "::" + str(e)) - - # always return dict - if trace: - return {'result': result[0], - 'trace': result[1]} - return {'result': result} - - # Note(thread-safety): blocking function - def execute_action(self, params, context=None, request=None): - """Execute the action.""" - body = json.loads(request.body) - # e.g. name = 'nova:disconnectNetwork' - items = re.split(':', body.get('name')) - if len(items) != 2: - (num, desc) = error_codes.get('service_action_syntax') - raise webservice.DataModelException(num, desc) - service = items[0].strip() - action = items[1].strip() - action_args = body.get('args', {}) - if (not isinstance(action_args, dict)): - (num, desc) = error_codes.get('execute_action_args_syntax') - raise webservice.DataModelException(num, desc) - - try: - args = {'service_name': service, - 'action': action, - 'action_args': action_args} - # Note(thread-safety): blocking call - self.invoke_rpc(base.ENGINE_SERVICE_ID, 'execute_action', args) - except exception.PolicyException as e: - (num, desc) = error_codes.get('execute_error') - raise webservice.DataModelException(num, desc + "::" + str(e)) - - return {} diff --git a/congress/api/router.py b/congress/api/router.py deleted file mode 100644 index c23fff65..00000000 --- a/congress/api/router.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/congress/api/router.py b/congress/api/router.py
deleted file mode 100644
index c23fff65..00000000
--- a/congress/api/router.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright (c) 2015 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from congress.api import versions
-from congress.api import webservice
-
-
-class APIRouterV1(object):
-
-    def __init__(self, resource_mgr, process_dict):
-        """Bootstrap data models and handlers for the API definition."""
-
-        # Setup /v1/
-        version_v1_handler = versions.VersionV1Handler(r'/v1[/]?')
-        resource_mgr.register_handler(version_v1_handler)
-
-        policies = process_dict['api-policy']
-
-        policy_collection_handler = webservice.CollectionHandler(
-            r'/v1/policies',
-            policies)
-        resource_mgr.register_handler(policy_collection_handler)
-        policy_path = r'/v1/policies/(?P<policy_id>[^/]+)'
-        policy_element_handler = webservice.ElementHandler(
-            policy_path,
-            policies,
-            policy_collection_handler,
-            allow_update=False,
-            allow_replace=False)
-        resource_mgr.register_handler(policy_element_handler)
-
-        library_policies = process_dict['api-library-policy']
-
-        library_policy_collection_handler = webservice.CollectionHandler(
-            r'/v1/librarypolicies',
-            library_policies)
-        resource_mgr.register_handler(library_policy_collection_handler)
-        library_policy_path = r'/v1/librarypolicies/(?P<policy_id>[^/]+)'
-        library_policy_element_handler = webservice.ElementHandler(
-            library_policy_path,
-            library_policies,
-            library_policy_collection_handler,
-            allow_update=False,
-            allow_replace=True)
-        resource_mgr.register_handler(library_policy_element_handler)
-
-        policy_rules = process_dict['api-rule']
-        rule_collection_handler = webservice.CollectionHandler(
-            r'/v1/policies/(?P<policy_id>[^/]+)/rules',
-            policy_rules,
-            "{policy_id}")
-        resource_mgr.register_handler(rule_collection_handler)
-        rule_path = (r'/v1/policies/(?P<policy_id>[^/]+)' +
-                     r'/rules/(?P<rule_id>[^/]+)')
-        rule_element_handler = webservice.ElementHandler(
-            rule_path,
-            policy_rules,
-            "{policy_id}")
-        resource_mgr.register_handler(rule_element_handler)
-
-        # Setup /v1/data-sources
-        data_sources = process_dict['api-datasource']
-        ds_collection_handler = webservice.CollectionHandler(
-            r'/v1/data-sources',
-            data_sources)
-        resource_mgr.register_handler(ds_collection_handler)
-
-        # Setup /v1/data-sources/<ds_id>
-        ds_path = r'/v1/data-sources/(?P<ds_id>[^/]+)'
-        ds_element_handler = webservice.ElementHandler(ds_path, data_sources)
-        resource_mgr.register_handler(ds_element_handler)
-
-        # Setup /v1/data-sources/<ds_id>/schema
-        schema = process_dict['api-schema']
-        schema_path = "%s/schema" % ds_path
-        schema_element_handler = webservice.ElementHandler(schema_path, schema)
-        resource_mgr.register_handler(schema_element_handler)
-
-        # Setup /v1/data-sources/<ds_id>/tables/<table_id>/spec
-        table_schema_path = "%s/tables/(?P<table_id>[^/]+)/spec" % ds_path
-        table_schema_element_handler = webservice.ElementHandler(
-            table_schema_path,
-            schema)
-        resource_mgr.register_handler(table_schema_element_handler)
-
-        # Setup action handlers
-        actions = process_dict['api-action']
-        ds_actions_path = "%s/actions" % ds_path
-        ds_actions_collection_handler = webservice.CollectionHandler(
-            ds_actions_path, actions)
-        resource_mgr.register_handler(ds_actions_collection_handler)
-
-        # Setup status handlers
-        statuses = process_dict['api-status']
-        ds_status_path = "%s/status" % ds_path
-        ds_status_element_handler = webservice.ElementHandler(ds_status_path,
-                                                              statuses)
-        resource_mgr.register_handler(ds_status_element_handler)
-        policy_status_path = "%s/status" % policy_path
-        policy_status_element_handler = webservice.ElementHandler(
-            policy_status_path,
-            statuses)
-        resource_mgr.register_handler(policy_status_element_handler)
-        rule_status_path = "%s/status" % rule_path
-        rule_status_element_handler = webservice.ElementHandler(
-            rule_status_path,
-            statuses)
-        resource_mgr.register_handler(rule_status_element_handler)
-
-        tables = process_dict['api-table']
-        tables_path = "(%s|%s)/tables" % (ds_path, policy_path)
-        table_collection_handler = webservice.CollectionHandler(
-            tables_path,
-            tables)
-        resource_mgr.register_handler(table_collection_handler)
-        table_path = "%s/(?P<table_id>[^/]+)" % tables_path
-        table_element_handler = webservice.ElementHandler(table_path, tables)
-        resource_mgr.register_handler(table_element_handler)
-
-        table_rows = process_dict['api-row']
-        rows_path = "%s/rows" % table_path
-        row_collection_handler = webservice.CollectionHandler(
-            rows_path,
-            table_rows, allow_update=True)
-        resource_mgr.register_handler(row_collection_handler)
-        row_path = "%s/(?P<row_id>[^/]+)" % rows_path
-        row_element_handler = webservice.ElementHandler(row_path, table_rows)
-        resource_mgr.register_handler(row_element_handler)
-
-        # Setup /v1/system/datasource-drivers
-        system = process_dict['api-system']
-        # NOTE(arosen): start url out with datasource-drivers since we
-        # don't implement /v1/system/ yet.
-        system_collection_handler = webservice.CollectionHandler(
-            r'/v1/system/drivers',
-            system)
-        resource_mgr.register_handler(system_collection_handler)
-
-        # Setup /v1/system/datasource-drivers/<driver_id>
-        driver_path = r'/v1/system/drivers/(?P<driver_id>[^/]+)'
-        driver_element_handler = webservice.ElementHandler(driver_path, system)
-        resource_mgr.register_handler(driver_element_handler)
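Each handler regex above doubles as the source of the request context: AbstractApiHandler._get_context (later in this diff) runs match() against the path and keeps the named groups. A small self-contained illustration, with an invented policy name and rule id:

    import re

    # Mirrors rule_path above; the group names become context keys.
    rule_path = (r'/v1/policies/(?P<policy_id>[^/]+)'
                 r'/rules/(?P<rule_id>[^/]+)$')
    m = re.compile(rule_path).match('/v1/policies/classification/rules/ab12')
    context = {k: v for k, v in m.groupdict().items() if v is not None}
    # -> {'policy_id': 'classification', 'rule_id': 'ab12'}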
diff --git a/congress/api/row_model.py b/congress/api/row_model.py
deleted file mode 100644
index f77f7247..00000000
--- a/congress/api/row_model.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from oslo_log import log as logging
-
-from congress.api import api_utils
-from congress.api import base
-from congress.api import webservice
-from congress import exception
-
-
-LOG = logging.getLogger(__name__)
-
-
-class RowModel(base.APIModel):
-    """Model for handling API requests about Rows."""
-
-    # TODO(thinrichs): No rows have IDs right now.  Maybe eventually
-    # could make ID the hash of the row, but then might as well
-    # just make the ID a string repr of the row.  No use case
-    # for it as of now since all rows are read-only.
-    # def get_item(self, id_, context=None):
-    #     """Retrieve item with id id_ from model.
-
-    #     Args:
-    #         id_: The ID of the item to retrieve
-    #         context: Key-values providing frame of reference of request
-
-    #     Returns:
-    #         The matching item or None if item with id_ does not exist.
-    #     """
-
-    # Note(thread-safety): blocking function
-    def get_items(self, params, context=None):
-        """Get items in model.
-
-        Args:
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns: A dict containing at least a 'results' key whose value is
-                 a list of items in the model. Additional keys set in the
-                 dict will also be rendered for the user.
-        """
-        LOG.info("get_items(context=%s)", context)
-        gen_trace = False
-        if 'trace' in params and params['trace'].lower() == 'true':
-            gen_trace = True
-
-        # Get the caller, it should be either policy or datasource
-        # Note(thread-safety): blocking call
-        caller, source_id = api_utils.get_id_from_context(context)
-        # FIXME(thread-safety): in DSE2, the returned caller can be a
-        # datasource name, but the datasource name may now refer to a new,
-        # unrelated datasource, causing the rest of this code to operate on
-        # an unintended datasource.
-        # It would have saved us if table_id were a UUID rather than a name,
-        # but it appears that table_id is just another word for tablename.
-        # Fix: check UUID of datasource before operating. Abort if mismatch
-        table_id = context['table_id']
-        try:
-            args = {'table_id': table_id, 'source_id': source_id,
-                    'trace': gen_trace}
-            if caller is base.ENGINE_SERVICE_ID:
-                # allow extra time for row policy engine query
-                # Note(thread-safety): blocking call
-                result = self.invoke_rpc(
-                    caller, 'get_row_data', args,
-                    timeout=self.dse_long_timeout)
-            else:
-                # Note(thread-safety): blocking call
-                result = self.invoke_rpc(caller, 'get_row_data', args)
-        except exception.CongressException as e:
-            m = ("Error occurred while processing source_id '%s' for row "
                 "data of the table '%s'" % (source_id, table_id))
-            LOG.exception(m)
-            raise webservice.DataModelException.create(e)
-
-        if gen_trace and caller is base.ENGINE_SERVICE_ID:
-            # DSE2 returns lists instead of tuples, so correct that.
-            results = [{'data': tuple(x['data'])} for x in result[0]]
-            return {'results': results,
-                    'trace': result[1] or "Not available"}
-        else:
-            result = [{'data': tuple(x['data'])} for x in result]
-            return {'results': result}
-
-    # Note(thread-safety): blocking function
-    def update_items(self, items, params, context=None):
-        """Updates all data in a table.
-
-        Args:
-            id_: A table id for updating all rows
-            items: The data for the new rows
-            params: A dict-like object containing parameters from
-                    request query
-            context: Key-values providing frame of reference of request
-        Returns: None
-        Raises:
-            KeyError: table id doesn't exist
-            DataModelException: any error occurs during replacing rows.
-        """
-        LOG.info("update_items(context=%s)", context)
-        # Note(thread-safety): blocking call
-        caller, source_id = api_utils.get_id_from_context(context)
-        # FIXME(thread-safety): in DSE2, the returned caller can be a
-        # datasource name, but the datasource name may now refer to a new,
-        # unrelated datasource, causing the rest of this code to operate on
-        # an unintended datasource.
-        # It would have saved us if table_id were a UUID rather than a name,
-        # but it appears that table_id is just another word for tablename.
-        # Fix: check UUID of datasource before operating. Abort if mismatch
-        table_id = context['table_id']
-        try:
-            args = {'table_id': table_id, 'source_id': source_id,
-                    'objs': items}
-            # Note(thread-safety): blocking call
-            self.invoke_rpc(caller, 'update_entire_data', args)
-        except exception.CongressException as e:
-            LOG.exception("Error occurred while updating rows "
                          "for source_id '%s' and table_id '%s'",
                          source_id, table_id)
-            raise webservice.DataModelException.create(e)
-        LOG.info("finish update_items(context=%s)", context)
-        LOG.debug("updated table %s with row items: %s",
                  table_id, str(items))
-
-    # TODO(thinrichs): It makes sense to sometimes allow users to create
-    # a new row for internal data sources.  But since we don't have
-    # those yet all tuples are read-only from the API.
-
-    # def add_item(self, item, id_=None, context=None):
-    #     """Add item to model.
-
-    #     Args:
-    #         item: The item to add to the model
-    #         id_: The ID of the item, or None if an ID should be generated
-    #         context: Key-values providing frame of reference of request
-
-    #     Returns:
-    #         Tuple of (ID, newly_created_item)
-
-    #     Raises:
-    #         KeyError: ID already exists.
-    #     """
-
-    # TODO(thinrichs): once we have internal data sources,
-    # add the ability to update a row.  (Or maybe not and implement
-    # via add+delete.)
-    # def update_item(self, id_, item, context=None):
-    #     """Update item with id_ with new data.
-
-    #     Args:
-    #         id_: The ID of the item to be updated
-    #         item: The new item
-    #         context: Key-values providing frame of reference of request
-
-    #     Returns:
-    #         The updated item.
-
-    #     Raises:
-    #         KeyError: Item with specified id_ not present.
-    #     """
-    #     # currently a noop since the owner_id cannot be changed
-    #     if id_ not in self.items:
-    #         raise KeyError("Cannot update item with ID '%s': "
-    #                        "ID does not exist")
-    #     return item
-
-    # TODO(thinrichs): once we can create, we should be able to delete
-    # def delete_item(self, id_, context=None):
-    #     """Remove item from model.
-
-    #     Args:
-    #         id_: The ID of the item to be removed
-    #         context: Key-values providing frame of reference of request
-
-    #     Returns:
-    #         The removed item.
-
-    #     Raises:
-    #         KeyError: Item with specified id_ not present.
-    #     """
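Because router.py registers the row collection with allow_update=True, a PUT on .../rows replaces a table's contents wholesale through update_items above. A hedged sketch; the datasource, table, and row values are invented, and each row is a plain JSON list matching the table's schema:

    import json

    import requests  # assumption: any HTTP client would do

    rows = [["vm-1", "ACTIVE"], ["vm-2", "ERROR"]]  # hypothetical tuples
    requests.put(
        "http://localhost:1789/v1/data-sources/my_ds/tables/servers/rows",
        data=json.dumps(rows))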
diff --git a/congress/api/rule_model.py b/congress/api/rule_model.py
deleted file mode 100644
index 1de782f1..00000000
--- a/congress/api/rule_model.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from congress.api import base
-from congress.api import error_codes
-from congress.api import webservice
-from congress import exception
-
-
-class RuleModel(base.APIModel):
-    """Model for handling API requests about policy Rules."""
-
-    def policy_name(self, context):
-        if 'ds_id' in context:
-            return context['ds_id']
-        elif 'policy_id' in context:
-            # Note: policy_id is actually policy name
-            return context['policy_id']
-
-    def get_item(self, id_, params, context=None):
-        """Retrieve item with id id_ from model.
-
-        Args:
-            id_: The ID of the item to retrieve
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The matching item or None if item with id_ does not exist.
-        """
-        try:
-            args = {'id_': id_, 'policy_name': self.policy_name(context)}
-            # Note(thread-safety): blocking call
-            return self.invoke_rpc(base.ENGINE_SERVICE_ID,
                                   'persistent_get_rule', args)
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
-
-    # Note(thread-safety): blocking function
-    def get_items(self, params, context=None):
-        """Get items in model.
-
-        Args:
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns: A dict containing at least a 'results' key whose value is
-                 a list of items in the model. Additional keys set in the
-                 dict will also be rendered for the user.
-        """
-        try:
-            args = {'policy_name': self.policy_name(context)}
-            # Note(thread-safety): blocking call
-            rules = self.invoke_rpc(base.ENGINE_SERVICE_ID,
                                    'persistent_get_rules', args)
-            return {'results': rules}
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
-
-    # Note(thread-safety): blocking function
-    def add_item(self, item, params, id_=None, context=None):
-        """Add item to model.
-
-        Args:
-            item: The item to add to the model
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            id_: The ID of the item, or None if an ID should be generated
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            Tuple of (ID, newly_created_item)
-
-        Raises:
-            KeyError: ID already exists.
-        """
-        if id_ is not None:
-            raise webservice.DataModelException(
                *error_codes.get('add_item_id'))
-        try:
-            args = {'policy_name': self.policy_name(context),
-                    'str_rule': item.get('rule'),
-                    'rule_name': item.get('name'),
-                    'comment': item.get('comment')}
-            # Note(thread-safety): blocking call
-            return self.invoke_rpc(base.ENGINE_SERVICE_ID,
                                   'persistent_insert_rule', args,
                                   timeout=self.dse_long_timeout)
-        except exception.CongressException as e:
-            raise webservice.DataModelException.create(e)
-
-    # Note(thread-safety): blocking function
-    def delete_item(self, id_, params, context=None):
-        """Remove item from model.
-
-        Args:
-            id_: The ID of the item to be removed
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The removed item.
-
-        Raises:
-            KeyError: Item with specified id_ not present.
- """ - try: - args = {'id_': id_, 'policy_name_or_id': self.policy_name(context)} - # Note(thread-safety): blocking call - return self.invoke_rpc(base.ENGINE_SERVICE_ID, - 'persistent_delete_rule', args, - timeout=self.dse_long_timeout) - except exception.CongressException as e: - raise webservice.DataModelException.create(e) diff --git a/congress/api/schema_model.py b/congress/api/schema_model.py deleted file mode 100644 index 3b53587b..00000000 --- a/congress/api/schema_model.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.api import api_utils -from congress.api import base -from congress.api import webservice -from congress import exception - - -class SchemaModel(base.APIModel): - """Model for handling API requests about Schemas.""" - - # Note(thread-safety): blocking function - def get_item(self, id_, params, context=None): - """Retrieve item with id id_ from model. - - Args: - id_: The ID of the item to retrieve - params: A dict-like object containing parameters - from the request query string and body. - context: Key-values providing frame of reference of request - - Returns: - The matching item or None if item with id_ does not exist. - """ - # Note(thread-safety): blocking call - caller, source_id = api_utils.get_id_from_context(context) - # FIXME(threod-safety): in DSE2, the returned caller can be a - # datasource name. But the datasource name may now refer to a new, - # unrelated datasource. Causing the rest of this code to operate on - # an unintended datasource. - # Fix: check UUID of datasource before operating. Abort if mismatch - table = context.get('table_id') - args = {'source_id': source_id} - try: - # Note(thread-safety): blocking call - schema = self.invoke_rpc(caller, 'get_datasource_schema', args) - except exception.CongressException as e: - raise webservice.DataModelException(e.code, str(e), - http_status_code=e.code) - - # request to see the schema for one table - if table: - if table not in schema: - raise webservice.DataModelException( - 404, ("Table '{}' for datasource '{}' has no " - "schema ".format(id_, source_id)), - http_status_code=404) - return api_utils.create_table_dict(table, schema) - - tables = [api_utils.create_table_dict(table_, schema) - for table_ in schema] - return {'tables': tables} diff --git a/congress/api/status_model.py b/congress/api/status_model.py deleted file mode 100644 index de9c68aa..00000000 --- a/congress/api/status_model.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
diff --git a/congress/api/status_model.py b/congress/api/status_model.py
deleted file mode 100644
index de9c68aa..00000000
--- a/congress/api/status_model.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from congress.api import api_utils
-from congress.api import base
-from congress.api import webservice
-from congress import exception
-
-
-class StatusModel(base.APIModel):
-    """Model for handling API requests about Statuses."""
-
-    # Note(thread-safety): blocking function
-    def get_item(self, id_, params, context=None):
-        """Retrieve item with id id_ from model.
-
-        Args:
-            id_: The ID of the item to retrieve
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The matching item or None if item with id_ does not exist.
-        """
-        # Note(thread-safety): blocking call
-        caller, source_id = api_utils.get_id_from_context(context)
-        # FIXME(thread-safety): in DSE2, the returned caller can be a
-        # datasource name, but the datasource name may now refer to a new,
-        # unrelated datasource, causing the rest of this code to operate on
-        # an unintended datasource.
-        # Fix: check UUID of datasource before operating. Abort if mismatch
-
-        try:
-            rpc_args = {'params': context, 'source_id': source_id}
-            # Note(thread-safety): blocking call
-            status = self.invoke_rpc(caller, 'get_status', rpc_args)
-        except exception.CongressException as e:
-            raise webservice.DataModelException(
                exception.NotFound.code, str(e),
                http_status_code=exception.NotFound.code)
-
-        return status
diff --git a/congress/api/system/__init__.py b/congress/api/system/__init__.py
deleted file mode 100644
index e69de29b..00000000
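Status is read-only: router.py wires only an ElementHandler for the .../status paths, so the model above needs nothing but get_item. A sketch of a read; the payload is left abstract because get_status is answered by the engine or the individual datasource driver, and its keys are not defined in this diff:

    import requests  # assumption: any HTTP client would do

    resp = requests.get("http://localhost:1789/v1/data-sources/my_ds/status")
    status = resp.json()  # driver-defined keys, e.g. last-updated info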
diff --git a/congress/api/system/driver_model.py b/congress/api/system/driver_model.py
deleted file mode 100644
index 1fc4827b..00000000
--- a/congress/api/system/driver_model.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from congress.api import api_utils
-from congress.api import base
-from congress.api import webservice
-from congress import exception
-
-
-class DatasourceDriverModel(base.APIModel):
-    """Model for handling API requests about DatasourceDriver."""
-
-    def get_items(self, params, context=None):
-        """Get items in model.
-
-        Args:
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns: A dict containing at least a 'results' key whose value is
-                 a list of items in the model. Additional keys set in the
-                 dict will also be rendered for the user.
-        """
-        drivers = self.bus.get_drivers_info()
-        fields = ['id', 'description']
-        results = [self.bus.make_datasource_dict(
            drivers[driver], fields=fields)
            for driver in drivers]
-        return {"results": results}
-
-    def get_item(self, id_, params, context=None):
-        """Retrieve item with id id_ from model.
-
-        Args:
-            id_: The ID of the item to retrieve
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The matching item or None if item with id_ does not exist.
-        """
-        datasource = context.get('driver_id')
-        try:
-            driver = self.bus.get_driver_info(datasource)
-            schema = self.bus.get_driver_schema(datasource)
-        except exception.DriverNotFound as e:
-            raise webservice.DataModelException(e.code, str(e),
                                                http_status_code=e.code)
-
-        tables = [api_utils.create_table_dict(table_, schema)
                  for table_ in schema]
-        driver['tables'] = tables
-        return driver
diff --git a/congress/api/table_model.py b/congress/api/table_model.py
deleted file mode 100644
index 68d82143..00000000
--- a/congress/api/table_model.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from oslo_log import log as logging
-
-from congress.api import api_utils
-from congress.api import base
-from congress.api import webservice
-from congress import exception
-
-LOG = logging.getLogger(__name__)
-
-
-class TableModel(base.APIModel):
-    """Model for handling API requests about Tables."""
-
-    # Note(thread-safety): blocking function
-    def get_item(self, id_, params, context=None):
-        """Retrieve item with id id_ from model.
-
-        Args:
-            id_: The ID of the item to retrieve
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            The matching item or None if item with id_ does not exist.
-        """
-        # Note(thread-safety): blocking call
-        caller, source_id = api_utils.get_id_from_context(context)
-        # FIXME(thread-safety): in DSE2, the returned caller can be a
-        # datasource name, but the datasource name may now refer to a new,
-        # unrelated datasource, causing the rest of this code to operate on
-        # an unintended datasource.
-        # Fix: check UUID of datasource before operating. Abort if mismatch
-
-        args = {'source_id': source_id, 'table_id': id_}
-        try:
-            # Note(thread-safety): blocking call
-            tablename = self.invoke_rpc(caller, 'get_tablename', args)
-        except exception.CongressException as e:
-            LOG.exception("Exception occurred while retrieving table %s "
                          "from datasource %s", id_, source_id)
-            raise webservice.DataModelException.create(e)
-
-        if tablename:
-            return {'id': tablename}
-
-        LOG.info('table id %s is not found in datasource %s', id_, source_id)
-
-    # Note(thread-safety): blocking function
-    def get_items(self, params, context=None):
-        """Get items in model.
-
-        Args:
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns: A dict containing at least a 'results' key whose value is
-                 a list of items in the model. Additional keys set in the
-                 dict will also be rendered for the user.
-        """
-        LOG.info('get_items has context %s', context)
-
-        # Note(thread-safety): blocking call
-        caller, source_id = api_utils.get_id_from_context(context)
-        # FIXME(thread-safety): in DSE2, the returned caller can be a
-        # datasource name, but the datasource name may now refer to a new,
-        # unrelated datasource, causing the rest of this code to operate on
-        # an unintended datasource.
-        # Fix: check UUID of datasource before operating. Abort if mismatch
-
-        try:
-            # Note(thread-safety): blocking call
-            tablenames = self.invoke_rpc(caller, 'get_tablenames',
                                         {'source_id': source_id})
-        except exception.CongressException as e:
-            LOG.exception("Exception occurred while retrieving tables "
                          "from datasource %s", source_id)
-            raise webservice.DataModelException.create(e)
-        # when the source_id doesn't have any table, 'tablenames' is set([])
-        if isinstance(tablenames, set) or isinstance(tablenames, list):
-            return {'results': [{'id': x} for x in tablenames]}
-
-    # Tables can only be created/updated/deleted by writing policy
-    # or by adding new data sources.  Once we have internal data sources
-    # we need to implement all of these.
-
-    # def add_item(self, item, id_=None, context=None):
-    #     """Add item to model.
-
-    #     Args:
-    #         item: The item to add to the model
-    #         id_: The ID of the item, or None if an ID should be generated
-    #         context: Key-values providing frame of reference of request
-
-    #     Returns:
-    #         Tuple of (ID, newly_created_item)
-
-    #     Raises:
-    #         KeyError: ID already exists.
-    #     """
-
-    # def update_item(self, id_, item, context=None):
-    #     """Update item with id_ with new data.
-
-    #     Args:
-    #         id_: The ID of the item to be updated
-    #         item: The new item
-    #         context: Key-values providing frame of reference of request
-
-    #     Returns:
-    #         The updated item.
-
-    #     Raises:
-    #         KeyError: Item with specified id_ not present.
-    #     """
-    #     # currently a noop since the owner_id cannot be changed
-    #     if id_ not in self.items:
-    #         raise KeyError("Cannot update item with ID '%s': "
-    #                        "ID does not exist")
-    #     return item
-
-    # def delete_item(self, id_, context=None):
-    #     """Remove item from model.
-
-    #     Args:
-    #         id_: The ID of the item to be removed
-    #         context: Key-values providing frame of reference of request
-
-    #     Returns:
-    #         The removed item.
-
-    #     Raises:
-    #         KeyError: Item with specified id_ not present.
-    #     """
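The two get_* methods above produce the table listing documents; get_items wraps each table name as {'id': name}. With invented names, a GET on /v1/data-sources/my_ds/tables would yield:

    # Response shape taken from get_items above; table names invented.
    {"results": [{"id": "servers"}, {"id": "flavors"}]}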
diff --git a/congress/api/versions.py b/congress/api/versions.py
deleted file mode 100644
index c3e741b3..00000000
--- a/congress/api/versions.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# Copyright 2015 Huawei.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import copy
-import json
-import os
-
-from six.moves import http_client
-import webob
-import webob.dec
-
-from congress.api import webservice
-
-
-VERSIONS = {
-    "v1": {
-        "id": "v1",
-        "status": "CURRENT",
-        "updated": "2013-08-12T17:42:13Z",
-        "links": [
-            {
-                "rel": "describedby",
-                "type": "text/html",
-                "href": "http://congress.readthedocs.org/",
-            },
-        ],
-    },
-}
-
-
-def _get_view_builder(request):
-    base_url = request.application_url
-    return ViewBuilder(base_url)
-
-
-class ViewBuilder(object):
-
-    def __init__(self, base_url):
-        """:param base_url: URL of the root wsgi application."""
-        self.base_url = base_url
-
-    def build_choices(self, versions, request):
-        version_objs = []
-        for version in sorted(versions.keys()):
-            version = versions[version]
-            version_objs.append({
-                "id": version['id'],
-                "status": version['status'],
-                "updated": version['updated'],
-                "links": self._build_links(version, request.path),
-            })
-
-        return dict(choices=version_objs)
-
-    def build_versions(self, versions):
-        version_objs = []
-        for version in sorted(versions.keys()):
-            version = versions[version]
-            version_objs.append({
-                "id": version['id'],
-                "status": version['status'],
-                "updated": version['updated'],
-                "links": self._build_links(version),
-            })
-
-        return dict(versions=version_objs)
-
-    def build_version(self, version):
-        reval = copy.deepcopy(version)
-        reval['links'].insert(0, {
-            "rel": "self",
-            "href": self.base_url.rstrip('/') + '/',
-        })
-        return dict(version=reval)
-
-    def _build_links(self, version_data, path=None):
-        """Generate a container of links that refer to the provided version."""
-        href = self._generate_href(version_data['id'], path)
-
-        links = [
-            {
-                "rel": "self",
-                "href": href,
-            },
-        ]
-
-        return links
-
-    def _generate_href(self, version, path=None):
-        """Create a URL that refers to a specific version."""
-        if path:
-            path = path.strip('/')
-            return os.path.join(self.base_url, version, path)
-        else:
-            return os.path.join(self.base_url, version) + '/'
-
-
-class Versions(object):
-
-    @classmethod
-    def factory(cls, global_config, **local_config):
-        return cls()
-
-    @webob.dec.wsgify(RequestClass=webob.Request)
-    def __call__(self, request):
-        """Respond to a request for all Congress API versions."""
-
-        builder = _get_view_builder(request)
-        if request.path == '/':
-            body = builder.build_versions(VERSIONS)
-            status = http_client.OK
-        else:
-            body = builder.build_choices(VERSIONS, request)
-            status = http_client.MULTIPLE_CHOICES
-
-        return webob.Response(body="%s\n" % json.dumps(body),
                              status=status,
                              content_type='application/json',
                              charset='UTF-8')
-
-
-class VersionV1Handler(webservice.AbstractApiHandler):
-
-    def handle_request(self, request):
-        builder = _get_view_builder(request)
-        body = builder.build_version(VERSIONS['v1'])
-        return webob.Response(body="%s\n" % json.dumps(body),
                              status=http_client.OK,
                              content_type='application/json',
                              charset='UTF-8')
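Putting ViewBuilder together with the VERSIONS table, a GET on the API root produces a document like the following (host and port are deployment-specific; 1789 is only the conventional Congress default):

    {
        "versions": [{
            "id": "v1",
            "status": "CURRENT",
            "updated": "2013-08-12T17:42:13Z",
            "links": [{"rel": "self",
                       "href": "http://localhost:1789/v1/"}]
        }]
    }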
diff --git a/congress/api/webservice.py b/congress/api/webservice.py
deleted file mode 100644
index c2af6c9d..00000000
--- a/congress/api/webservice.py
+++ /dev/null
@@ -1,629 +0,0 @@
-# Copyright (c) 2013 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-try:
-    # For Python 3
-    import http.client as httplib
-except ImportError:
-    import httplib
-import json
-import re
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-import six
-import webob
-import webob.dec
-
-from congress.api import error_codes
-from congress.common import policy
-from congress import exception
-
-
-LOG = logging.getLogger(__name__)
-
-
-def error_response(status, error_code, description, data=None):
-    """Construct and return an error response.
-
-    Args:
-        status: The HTTP status code of the response.
-        error_code: The application-specific error code.
-        description: Friendly G11N-enabled string corresponding to error_code.
-        data: Additional data (not G11N-enabled) for the API consumer.
-    """
-    raw_body = {'error': {
        'message': description,
        'error_code': error_code,
        'error_data': data
        }
    }
-    body = '%s\n' % json.dumps(raw_body)
-    return webob.Response(body=body, status=status,
                          content_type='application/json',
                          charset='UTF-8')
-
-
-NOT_FOUND_RESPONSE = error_response(httplib.NOT_FOUND,
                                    httplib.NOT_FOUND,
                                    "The resource could not be found.")
-NOT_SUPPORTED_RESPONSE = error_response(httplib.NOT_IMPLEMENTED,
                                        httplib.NOT_IMPLEMENTED,
                                        "Method not supported")
-INTERNAL_ERROR_RESPONSE = error_response(httplib.INTERNAL_SERVER_ERROR,
                                         httplib.INTERNAL_SERVER_ERROR,
                                         "Internal server error")
-
-
-def original_msg(e):
-    '''Undo the traceback oslo-messaging appends and return the original
-    exception msg'''
-    msg = e.args[0].split('\nTraceback (most recent call last):')[0]
-    if len(msg) != len(e.args[0]):
-        if len(msg) > 0 and msg[-1] in ("'", '"'):
-            msg = msg[:-1]
-        if len(msg) > 1 and msg[0:2] in ('u"', "u'"):
-            msg = msg[2:]
-        elif len(msg) > 0 and msg[0] in ("'", '"'):
-            msg = msg[1:]
-        return msg
-    else:  # return the untouched message if the format is not as expected
-        return e.args[0]
-
-
-class DataModelException(Exception):
-    """Congress API Data Model Exception
-
-    Custom exception raised by API Data Model methods to communicate errors to
-    the API framework.
-    """
-
-    def __init__(self, error_code, description, data=None,
                 http_status_code=httplib.BAD_REQUEST):
-        super(DataModelException, self).__init__(description)
-        self.error_code = error_code
-        self.description = description
-        self.data = data
-        self.http_status_code = http_status_code
-
-    @classmethod
-    def create(cls, error):
-        """Generate a DataModelException from an existing CongressException.
-
-        :param error: has a 'name' field corresponding to an error_codes
-            error-name. It may also have a 'data' field.
-        Returns a DataModelException properly populated.
-        """
-        name = getattr(error, "name", None)
-        if name:
-            error_code = error_codes.get_num(name)
-            description = error_codes.get_desc(name)
-            http_status_code = error_codes.get_http(name)
-        else:
-            # Check if it's a default http error, or else return 'Unknown error'
-            error_code = error.code or httplib.BAD_REQUEST
-            if error_code not in httplib.responses:
-                error_code = httplib.BAD_REQUEST
-            description = httplib.responses.get(error_code, "Unknown error")
-            http_status_code = error_code
-
-        if str(error):
-            description += "::" + original_msg(error)
-        return cls(error_code=error_code,
                   description=description,
                   data=getattr(error, 'data', None),
                   http_status_code=http_status_code)
-
-    def rest_response(self):
-        return error_response(self.http_status_code, self.error_code,
                              self.description, self.data)
-
-
-class AbstractApiHandler(object):
-    """Abstract handler for API requests.
-
-    Attributes:
-        path_regex: The regular expression matching paths supported by this
-            handler.
-    """
-
-    def __init__(self, path_regex):
-        if path_regex[-1] != '$':
-            path_regex += "$"
-        # we only use 'match' so no need to mark the beginning of string
-        self.path_regex = path_regex
-        self.path_re = re.compile(path_regex)
-
-    def __str__(self):
-        return "%s(%s)" % (self.__class__.__name__, self.path_re.pattern)
-
-    def _get_context(self, request):
-        """Return dict of variables in request path."""
-        m = self.path_re.match(request.path)
-        # remove all the None values before returning
-        return dict([(k, v) for k, v in m.groupdict().items()
                     if v is not None])
-
-    def _parse_json_body(self, request):
-        content_type = (request.content_type or "application/json").lower()
-        if content_type != 'application/json':
-            raise DataModelException(
                400, "Unsupported Content-Type; must be 'application/json'")
-        if request.charset != 'UTF-8':
-            raise DataModelException(
                400, "Unsupported charset: must be 'UTF-8'")
-        try:
-            request.parsed_body = json.loads(request.body.decode('utf-8'))
-        except ValueError as e:
-            msg = "Failed to parse body as %s: %s" % (content_type, e)
-            raise DataModelException(400, msg)
-        return request.parsed_body
-
-    def handles_request(self, request):
-        """Return true iff handler supports the request."""
-        m = self.path_re.match(request.path)
-        return m is not None
-
-    def handle_request(self, request):
-        """Handle a REST request.
-
-        Args:
-            request: A webob request object.
-
-        Returns:
-            A webob response object.
-        """
-        return NOT_SUPPORTED_RESPONSE
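Every failure path above funnels through error_response(), so API errors share one wire format; the structure is fixed by raw_body in error_response, and the values below are only illustrative:

    # Serialized as JSON on the wire; None becomes null.
    {
        "error": {
            "message": "The resource could not be found.",
            "error_code": 404,
            "error_data": None
        }
    }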
-
-
-class ElementHandler(AbstractApiHandler):
-    """API handler for REST element resources.
-
-    REST elements represent individual entities in the data model, and often
-    support the following operations:
-        - Read a representation of the element
-        - Update (replace) the entire element with a new version
-        - Update (patch) parts of the element with new values
-        - Delete the element
-
-    Elements may also exhibit 'controller' semantics for RPC-style method
-    invocation, however this is not currently supported.
-    """
-
-    def __init__(self, path_regex, model,
                 collection_handler=None, allow_read=True, allow_actions=True,
                 allow_replace=True, allow_update=True, allow_delete=True):
-        """Initialize an element handler.
-
-        Args:
-            path_regex: A regular expression that matches the full path
-                to the element. If multiple handlers match a request path,
-                the handler with the highest registration search_index wins.
-            model: A resource data model instance
-            collection_handler: The collection handler this element
-                is a member of or None if the element is not a member of a
-                collection. (Used for named creation of elements)
-            allow_read: True if element supports read
-            allow_actions: True if element supports action invocation
-            allow_replace: True if element supports replace
-            allow_update: True if element supports update
-            allow_delete: True if element supports delete
-
-        """
-        super(ElementHandler, self).__init__(path_regex)
-        self.model = model
-        self.collection_handler = collection_handler
-        self.allow_read = allow_read
-        self.allow_actions = allow_actions
-        self.allow_replace = allow_replace
-        self.allow_update = allow_update
-        self.allow_delete = allow_delete
-
-    def _get_element_id(self, request):
-        m = self.path_re.match(request.path)
-        if m.groups():
-            return m.groups()[-1]  # TODO(pballand): make robust
-        return None
-
-    def handle_request(self, request):
-        """Handle a REST request.
-
-        Args:
-            request: A webob request object.
-
-        Returns:
-            A webob response object.
-        """
-        try:
-            if request.method == 'GET' and self.allow_read:
-                return self.read(request)
-            elif request.method == 'POST' and self.allow_actions:
-                return self.action(request)
-            elif request.method == 'PUT' and self.allow_replace:
-                return self.replace(request)
-            elif request.method == 'PATCH' and self.allow_update:
-                return self.update(request)
-            elif request.method == 'DELETE' and self.allow_delete:
-                return self.delete(request)
-            return NOT_SUPPORTED_RESPONSE
-        except db_exc.DBError:
-            LOG.exception('Database backend experienced an unknown error.')
-            raise exception.DatabaseError
-
-    def read(self, request):
-        if not hasattr(self.model, 'get_item'):
-            return NOT_SUPPORTED_RESPONSE
-
-        id_ = self._get_element_id(request)
-        item = self.model.get_item(id_, request.params,
                                   context=self._get_context(request))
-        if item is None:
-            return error_response(httplib.NOT_FOUND, 404, 'Not found')
-        return webob.Response(body="%s\n" % json.dumps(item),
                              status=httplib.OK,
                              content_type='application/json',
                              charset='UTF-8')
-
-    def action(self, request):
-        # Non-CRUD operations must specify an 'action' parameter
-        action = request.params.getall('action')
-        if len(action) != 1:
-            if len(action) > 1:
-                errstr = "Action parameter may not be provided multiple times."
-            else:
-                errstr = "Missing required action parameter."
-            return error_response(httplib.BAD_REQUEST, 400, errstr)
-        model_method = "%s_action" % action[0].replace('-', '_')
-        f = getattr(self.model, model_method, None)
-        if f is None:
-            return NOT_SUPPORTED_RESPONSE
-        try:
-            response = f(request.params, context=self._get_context(request),
                         request=request)
-            if isinstance(response, webob.Response):
-                return response
-            return webob.Response(body="%s\n" % json.dumps(response),
                                  status=httplib.OK,
                                  content_type='application/json',
                                  charset='UTF-8')
-        except TypeError:
-            LOG.exception("Error occurred")
-            return NOT_SUPPORTED_RESPONSE
-
-    def replace(self, request):
-        if not hasattr(self.model, 'update_item'):
-            return NOT_SUPPORTED_RESPONSE
-
-        id_ = self._get_element_id(request)
-        try:
-            item = self._parse_json_body(request)
-            self.model.update_item(id_, item, request.params,
                                   context=self._get_context(request))
-        except KeyError as e:
-            if (self.collection_handler and
                    getattr(self.collection_handler, 'allow_named_create',
                            False)):
-                return self.collection_handler.create_member(request, id_=id_)
-            return error_response(httplib.NOT_FOUND, 404,
                                  original_msg(e) or 'Not found')
-        return webob.Response(body="%s\n" % json.dumps(item),
                              status=httplib.OK,
                              content_type='application/json',
                              charset='UTF-8')
-
-    def update(self, request):
-        if not (hasattr(self.model, 'update_item') or
                hasattr(self.model, 'get_item')):
-            return NOT_SUPPORTED_RESPONSE
-
-        context = self._get_context(request)
-        id_ = self._get_element_id(request)
-        item = self.model.get_item(id_, request.params, context=context)
-        if item is None:
-            return error_response(httplib.NOT_FOUND, 404, 'Not found')
-
-        updates = self._parse_json_body(request)
-        item.update(updates)
-        self.model.update_item(id_, item, request.params, context=context)
-        return webob.Response(body="%s\n" % json.dumps(item),
                              status=httplib.OK,
                              content_type='application/json',
                              charset='UTF-8')
-
-    def delete(self, request):
-        if not hasattr(self.model, 'delete_item'):
-            return NOT_SUPPORTED_RESPONSE
-
-        id_ = self._get_element_id(request)
-        try:
-            item = self.model.delete_item(
                id_, request.params, context=self._get_context(request))
-            return webob.Response(body="%s\n" % json.dumps(item),
                                  status=httplib.OK,
                                  content_type='application/json',
                                  charset='UTF-8')
-        except KeyError as e:
-            LOG.exception("Error occurred")
-            return error_response(httplib.NOT_FOUND, 404,
                                  original_msg(e) or 'Not found')
-
-
-class CollectionHandler(AbstractApiHandler):
-    """API handler for REST collection resources.
-
-    REST collections represent collections of entities in the data model, and
-    often support the following operations:
-        - List elements in the collection
-        - Create new element in the collection
-
-    The following less-common collection operations are NOT SUPPORTED:
-        - Replace all elements in the collection
-        - Delete all elements in the collection
-    """
-
-    def __init__(self, path_regex, model,
                 allow_named_create=True, allow_list=True, allow_create=True,
                 allow_update=False):
-        """Initialize a collection handler.
-
-        Args:
-            path_regex: A regular expression matching the collection base path.
-            model: A resource data model instance
-            allow_named_create: True if caller can specify ID of new items.
-            allow_list: True if collection supports listing elements.
-            allow_create: True if collection supports creating elements.
-            allow_update: True if collection supports replacing all elements.
- """ - super(CollectionHandler, self).__init__(path_regex) - self.model = model - self.allow_named_create = allow_named_create - self.allow_list = allow_list - self.allow_create = allow_create - self.allow_update = allow_update - - def handle_request(self, request): - """Handle a REST request. - - Args: - request: A webob request object. - - Returns: - A webob response object. - """ - # NOTE(arosen): only do policy.json if keystone is used for now. - if cfg.CONF.auth_strategy == "keystone": - context = request.environ['congress.context'] - target = { - 'project_id': context.project_id, - 'user_id': context.user_id - } - # NOTE(arosen): today congress only enforces API policy on which - # API calls we allow tenants to make with their given roles. - action_type = self._get_action_type(request.method) - # FIXME(arosen): There should be a cleaner way to do this. - model_name = self.path_regex.split('/')[1] - action = "%s_%s" % (action_type, model_name) - # TODO(arosen): we should handle serializing the - # response in one place - try: - policy.enforce(context, action, target) - except exception.PolicyNotAuthorized as e: - LOG.info(e) - return webob.Response(body=six.text_type(e), status=e.code, - content_type='application/json', - charset='UTF-8') - if request.method == 'GET' and self.allow_list: - return self.list_members(request) - elif request.method == 'POST' and self.allow_create: - return self.create_member(request) - elif request.method == 'PUT' and self.allow_update: - return self.update_members(request) - return NOT_SUPPORTED_RESPONSE - - def _get_action_type(self, method): - if method == 'GET': - return 'get' - elif method == 'POST': - return 'create' - elif method == 'DELETE': - return 'delete' - elif method == 'PUT' or method == 'PATCH': - return 'update' - else: - # should never get here but just in case ;) - # FIXME(arosen) raise NotImplemented instead and - # make sure we return that as an http code. 
-            raise TypeError("Invalid HTTP Method")
-
-    def list_members(self, request):
-        if not hasattr(self.model, 'get_items'):
-            return NOT_SUPPORTED_RESPONSE
-        items = self.model.get_items(request.params,
                                     context=self._get_context(request))
-        if items is None:
-            return error_response(httplib.NOT_FOUND, 404, 'Not found')
-        elif 'results' not in items:
-            return error_response(httplib.NOT_FOUND, 404, 'Not found')
-
-        body = "%s\n" % json.dumps(items, indent=2)
-        return webob.Response(body=body, status=httplib.OK,
                              content_type='application/json',
                              charset='UTF-8')
-
-    def create_member(self, request, id_=None):
-        if not hasattr(self.model, 'add_item'):
-            return NOT_SUPPORTED_RESPONSE
-        item = self._parse_json_body(request)
-        context = self._get_context(request)
-        try:
-            id_, item = self.model.add_item(
                item, request.params, id_, context=context)
-        except KeyError as e:
-            LOG.exception("Error occurred")
-            return error_response(httplib.CONFLICT, httplib.CONFLICT,
                                  original_msg(e) or 'Element already exists')
-        item['id'] = id_
-
-        return webob.Response(body="%s\n" % json.dumps(item),
                              status=httplib.CREATED,
                              content_type='application/json',
                              location="%s/%s" % (request.path, id_),
                              charset='UTF-8')
-
-    def update_members(self, request):
-        if not hasattr(self.model, 'update_items'):
-            return NOT_SUPPORTED_RESPONSE
-        items = self._parse_json_body(request)
-        context = self._get_context(request)
-        try:
-            self.model.update_items(items, request.params, context)
-        except KeyError as e:
-            LOG.exception("Error occurred")
-            return error_response(httplib.BAD_REQUEST, httplib.BAD_REQUEST,
                                  original_msg(e) or
                                  'Update %s Failed' % context['table_id'])
-        return webob.Response(body="", status=httplib.OK,
                              content_type='application/json',
                              charset='UTF-8')
-
-
-class SimpleDataModel(object):
-    """A container providing access to a single type of data."""
-
-    def __init__(self, model_name):
-        self.model_name = model_name
-        self.items = {}
-
-    @staticmethod
-    def _context_str(context):
-        context = context or {}
-        return ".".join(
            ["%s:%s" % (k, context[k]) for k in sorted(context.keys())])
-
-    def get_items(self, params, context=None):
-        """Get items in model.
-
-        Args:
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            context: Key-values providing frame of reference of request
-
-        Returns: A dict containing at least a 'results' key whose value is
-                 a list of items in the model. Additional keys set in the
-                 dict will also be rendered for the user.
-        """
-        cstr = self._context_str(context)
-        results = list(self.items.setdefault(cstr, {}).values())
-        return {'results': results}
-
-    def add_item(self, item, params, id_=None, context=None):
-        """Add item to model.
-
-        Args:
-            item: The item to add to the model
-            params: A dict-like object containing parameters
-                    from the request query string and body.
-            id_: The ID of the item, or None if an ID should be generated
-            context: Key-values providing frame of reference of request
-
-        Returns:
-            Tuple of (ID, newly_created_item)
-
-        Raises:
-            KeyError: ID already exists.
-            DataModelException: Addition cannot be performed.
-        """
-        cstr = self._context_str(context)
-        if id_ is None:
-            id_ = uuidutils.generate_uuid()
-        if id_ in self.items.setdefault(cstr, {}):
-            raise KeyError("Cannot create item with ID '%s': "
                           "ID already exists" % id_)
-        self.items[cstr][id_] = item
-        return (id_, item)
-
-    def get_item(self, id_, params, context=None):
-        """Retrieve item with id id_ from model.
- - Args: - id_: The ID of the item to retrieve - params: A dict-like object containing parameters - from the request query string and body. - context: Key-values providing frame of reference of request - - Returns: - The matching item or None if item with id_ does not exist. - """ - cstr = self._context_str(context) - return self.items.setdefault(cstr, {}).get(id_) - - def update_item(self, id_, item, params, context=None): - """Update item with id_ with new data. - - Args: - id_: The ID of the item to be updated - item: The new item - params: A dict-like object containing parameters - from the request query string and body. - context: Key-values providing frame of reference of request - - Returns: - The updated item. - - Raises: - KeyError: Item with specified id_ not present. - DataModelException: Update cannot be performed. - """ - cstr = self._context_str(context) - if id_ not in self.items.setdefault(cstr, {}): - raise KeyError("Cannot update item with ID '%s': " - "ID does not exist" % id_) - self.items.setdefault(cstr, {})[id_] = item - return item - - def delete_item(self, id_, params, context=None): - """Remove item from model. - - Args: - id_: The ID of the item to be removed - params: A dict-like object containing parameters - from the request query string and body. - context: Key-values providing frame of reference of request - - Returns: - The removed item. - - Raises: - KeyError: Item with specified id_ not present. - """ - cstr = self._context_str(context) - ret = self.items.setdefault(cstr, {})[id_] - del self.items[cstr][id_] - return ret - - def update_items(self, items, params, context=None): - """Update items in the model. - - Args: - items: A dict-like object containing new data - params: A dict-like object containing parameters - context: Key-values providing frame of reference of request - Returns: - None. - """ - self.items = items diff --git a/congress/auth.py b/congress/auth.py deleted file mode 100644 index ce54270d..00000000 --- a/congress/auth.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
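The SimpleDataModel above keeps one item dict per context string, so callers in different projects see disjoint data. A minimal usage sketch, assuming the class definition above is in scope (the context dicts and item payloads are illustrative):

# Usage sketch for SimpleDataModel; assumes the class defined above is
# importable/in scope. Context dicts and payloads are illustrative only.
model = SimpleDataModel('policies')

ctx_a = {'project_id': 'tenant-a'}
ctx_b = {'project_id': 'tenant-b'}

# add_item returns (id, item); with id_=None a UUID is generated.
id_, item = model.add_item({'name': 'p1'}, {}, context=ctx_a)

# Items are keyed by _context_str(context), so tenant-b sees nothing.
assert model.get_item(id_, {}, context=ctx_a) == {'name': 'p1'}
assert model.get_item(id_, {}, context=ctx_b) is None

# get_items wraps results the way list_members expects to serialize them.
assert model.get_items({}, context=ctx_a) == {'results': [{'name': 'p1'}]}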
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_middleware import request_id -import webob.dec -import webob.exc - -from congress.common import config -from congress.common import wsgi -from congress import context - - -LOG = logging.getLogger(__name__) - - -class CongressKeystoneContext(wsgi.Middleware): - """Make a request context from keystone headers.""" - - @webob.dec.wsgify - def __call__(self, req): - # Determine the user ID - user_id = req.headers.get('X_USER_ID') - if not user_id: - LOG.debug("X_USER_ID is not found in request") - return webob.exc.HTTPUnauthorized() - - # Determine the tenant - tenant_id = req.headers.get('X_PROJECT_ID') - - # Suck out the roles - roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')] - - # Human-friendly names - tenant_name = req.headers.get('X_PROJECT_NAME') - user_name = req.headers.get('X_USER_NAME') - - # Use request_id if already set - req_id = req.environ.get(request_id.ENV_REQUEST_ID) - - # Create a context with the authentication data - ctx = context.RequestContext(user_id, tenant_id, roles=roles, - user_name=user_name, - tenant_name=tenant_name, - request_id=req_id) - - # Inject the context... - req.environ['congress.context'] = ctx - - return self.application - - -def pipeline_factory(loader, global_conf, **local_conf): - """Create a paste pipeline based on the 'auth_strategy' config option.""" - config.set_config_defaults() - pipeline = local_conf[cfg.CONF.auth_strategy] - pipeline = pipeline.split() - filters = [loader.get_filter(n) for n in pipeline[:-1]] - app = loader.get_app(pipeline[-1]) - filters.reverse() - for filter in filters: - app = filter(app) - return app diff --git a/congress/common/__init__.py b/congress/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/common/config.py b/congress/common/config.py deleted file mode 100644 index 7b0f79c8..00000000 --- a/congress/common/config.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2014 VMware -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
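pipeline_factory above reverses the filter list before wrapping, so the first filter named in the paste pipeline ends up outermost and sees the request first. A standalone sketch of that wrapping order, with plain closures standing in for paste filter factories (the names are illustrative):

# Plain-Python sketch of the wrapping order used by pipeline_factory above.
# The filter names are illustrative stand-ins for paste filters.
def make_filter(name):
    def filter_factory(app):
        def wrapped(request):
            return '%s(%s)' % (name, app(request))
        return wrapped
    return filter_factory

app = lambda request: 'app'
filters = [make_filter('request_id'), make_filter('keystone')]

# Reverse, then wrap: the first-listed filter becomes the outermost layer.
for factory in reversed(filters):
    app = factory(app)

print(app('GET /v1/policies'))  # request_id(keystone(app))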
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import os
-import socket
-
-from oslo_config import cfg
-from oslo_db import options as db_options
-from oslo_log import log as logging
-from oslo_middleware import cors
-from oslo_policy import opts as policy_opts
-
-from congress import version
-
-LOG = logging.getLogger(__name__)
-
-core_opts = [
-    cfg.HostAddressOpt('bind_host', default='0.0.0.0',
-                       help="The host IP to bind to"),
-    cfg.PortOpt('bind_port', default=1789,
-                help="The port to bind to"),
-    cfg.IntOpt('max_simultaneous_requests', default=1024,
-               help="Thread pool size for eventlet."),
-    cfg.BoolOpt('tcp_keepalive', default=False,
-                help='Set this to true to enable the TCP_KEEPALIVE socket '
-                     'option on connections received by the API server.'),
-    cfg.IntOpt('tcp_keepidle',
-               default=600,
-               help='Sets the value of TCP_KEEPIDLE in seconds for each '
-                    'server socket. Only applies if tcp_keepalive is '
-                    'true. Not supported on OS X.'),
-    cfg.StrOpt('policy_path',
-               help="The path to the latest policy dump",
-               deprecated_for_removal=True,
-               deprecated_reason='No longer used'),
-    cfg.StrOpt('datasource_file',
-               deprecated_for_removal=True,
-               help="The file containing datasource configuration"),
-    cfg.StrOpt('root_path',
-               deprecated_for_removal=True,
-               deprecated_reason='The path is now calculated automatically '
-                                 'during initialization.',
-               help="The absolute path to the congress repo"),
-    cfg.IntOpt('api_workers', default=1,
-               help='The number of worker processes to serve the congress '
-                    'API application.'),
-    cfg.StrOpt('api_paste_config', default='api-paste.ini',
-               help=_('The API paste config file to use')),
-    cfg.StrOpt('auth_strategy', default='keystone',
-               help=_('The type of authentication to use')),
-    cfg.ListOpt('drivers',
-                default=[],
-                help=_('List of driver class paths to import.')),
-    cfg.IntOpt('datasource_sync_period', default=60,
-               help='The number of seconds to wait between synchronizing '
-                    'datasource config from the database'),
-    cfg.BoolOpt('enable_execute_action', default=True,
-                help='Set the flag to False if you don\'t want Congress '
-                     'to execute actions.'),
-    cfg.BoolOpt('replicated_policy_engine', default=False,
-                help='Set the flag to use congress with replicated policy '
-                     'engines.'),
-    cfg.StrOpt('policy_library_path', default='/etc/congress/library',
-               help=_('The directory containing library policy files.')),
-    cfg.BoolOpt('distributed_architecture',
-                deprecated_for_removal=True,
-                deprecated_reason='distributed architecture is now the only '
-                                  'supported configuration.',
-                help="Set the flag to use congress distributed architecture."),
-]
-
-# Register the configuration options
-cfg.CONF.register_opts(core_opts)
-
-dse_opts = [
-    cfg.StrOpt('bus_id', default='bus',
-               help='Unique ID of this DSE bus'),
-    cfg.IntOpt('ping_timeout', default=5,
-               help='RPC short timeout in seconds; used to ping destination'),
-    cfg.IntOpt('long_timeout', default=120,
-               help='RPC long timeout in seconds; used on potentially long '
-                    'running requests such as datasource action and PE row '
-                    'query'),
-    cfg.IntOpt('time_to_resub', default=10,
-               help='Time in seconds which a subscriber will wait for missing '
-                    'update before attempting to resubscribe from publisher'),
-    cfg.BoolOpt('execute_action_retry', default=False,
-                help='Set the flag to True to make Congress retry execute '
-                     'actions; may cause duplicate executions.'),
-    cfg.IntOpt('execute_action_retry_timeout', default=600,
-               help='The number of 
seconds to retry execute action before ' - 'giving up. Zero or negative value means never give up.'), -] - -# Register dse opts -cfg.CONF.register_opts(dse_opts, group='dse') - -policy_opts.set_defaults(cfg.CONF, 'policy.json') -logging.register_options(cfg.CONF) - -_SQL_CONNECTION_DEFAULT = 'sqlite://' -# Update the default QueuePool parameters. These can be tweaked by the -# configuration variables - max_pool_size, max_overflow and pool_timeout -db_options.set_defaults(cfg.CONF, - connection=_SQL_CONNECTION_DEFAULT, - max_pool_size=10, max_overflow=20, pool_timeout=10) - -# Command line options -cli_opts = [ - cfg.BoolOpt('datasources', default=False, - help='Use this option to deploy the datasources.'), - cfg.BoolOpt('api', default=False, - help='Use this option to deploy API service'), - cfg.BoolOpt('policy-engine', default=False, - help='Use this option to deploy policy engine service.'), - cfg.StrOpt('node-id', default=socket.gethostname(), - help='A unique ID for this node. Must be unique across all ' - 'nodes with the same bus_id.'), - cfg.BoolOpt('delete-missing-driver-datasources', default=False, - help='Use this option to delete datasources with missing ' - 'drivers from DB') -] -cfg.CONF.register_cli_opts(cli_opts) - - -def init(args, **kwargs): - cfg.CONF(args=args, project='congress', - version='%%(prog)s %s' % version.version_info.release_string(), - **kwargs) - - -def setup_logging(): - """Sets up logging for the congress package.""" - logging.setup(cfg.CONF, 'congress') - - -def find_paste_config(): - config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config) - - if not config_path: - raise cfg.ConfigFilesNotFoundError( - config_files=[cfg.CONF.api_paste_config]) - config_path = os.path.abspath(config_path) - LOG.info(("Config paste file: %s"), config_path) - return config_path - - -def set_config_defaults(): - """This method updates all configuration default values.""" - # CORS Defaults - # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ - cfg.set_defaults(cors.CORS_OPTS, - allow_headers=['X-Auth-Token', - 'X-OpenStack-Request-ID', - 'X-Identity-Status', - 'X-Roles', - 'X-Service-Catalog', - 'X-User-Id', - 'X-Tenant-Id'], - expose_headers=['X-Auth-Token', - 'X-OpenStack-Request-ID', - 'X-Subject-Token', - 'X-Service-Token'], - allow_methods=['GET', - 'PUT', - 'POST', - 'DELETE', - 'PATCH'] - ) diff --git a/congress/common/eventlet_server.py b/congress/common/eventlet_server.py deleted file mode 100644 index d0a34157..00000000 --- a/congress/common/eventlet_server.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
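The option blocks in config.py above follow the usual oslo.config lifecycle: declare Opt objects, register them, then parse arguments (which is what config.init() does via cfg.CONF(...)). A minimal standalone sketch of that lifecycle, reusing the bind_host/bind_port declarations from above on a private ConfigOpts instance:

# Minimal oslo.config lifecycle, mirroring the pattern in config.py above.
# A private ConfigOpts instance keeps the sketch self-contained.
from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts([
    cfg.HostAddressOpt('bind_host', default='0.0.0.0',
                       help='The host IP to bind to'),
    cfg.PortOpt('bind_port', default=1789,
                help='The port to bind to'),
])

# Equivalent of config.init(sys.argv[1:]): parse CLI args / config files.
CONF(args=[], project='example')
print(CONF.bind_host, CONF.bind_port)  # 0.0.0.0 1789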
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import errno
-import re
-import socket
-import ssl
-import sys
-
-import eventlet
-import eventlet.wsgi
-import greenlet
-import json
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_service import service
-from paste import deploy
-
-from congress.dse2 import dse_node
-from congress import exception
-
-LOG = logging.getLogger(__name__)
-
-
-class EventletFilteringLogger(object):
-    # NOTE(morganfainberg): This logger is designed to filter out specific
-    # tracebacks to limit the amount of data that eventlet can log. In the
-    # case of broken sockets (EPIPE and ECONNRESET), we are seeing a huge
-    # volume of data being written to the logs due to ~14+ lines per
-    # traceback. The tracebacks in these cases are, at best, useful only
-    # for limited debugging.
-    def __init__(self, logger):
-        self.logger = logger
-        self.level = logger.logger.level
-        self.regex = re.compile(r'errno (%d|%d)' %
-                                (errno.EPIPE, errno.ECONNRESET), re.IGNORECASE)
-
-    def write(self, msg):
-        m = self.regex.search(msg)
-        if m:
-            self.logger.log(logging.logging.DEBUG,
-                            'Error(%s) writing to socket.',
-                            m.group(1))
-        else:
-            self.logger.log(self.level, msg.rstrip())
-
-
-class Server(service.Service):
-    """Server class for a Data Service Node without API services."""
-    def __init__(self, name, bus_id=None):
-        super(Server, self).__init__()
-        self.name = name
-        self.node = dse_node.DseNode(cfg.CONF, self.name, [],
-                                     partition_id=bus_id)
-
-    def start(self):
-        self.node.start()
-
-    def stop(self):
-        self.node.stop()
-
-
-class APIServer(service.ServiceBase):
-    """Server class for a Data Service Node with API services.
-
-    This server hosts all of the API services itself.
-    """
-
-    def __init__(self, app_conf, name, host=None, port=None, threads=1000,
-                 keepalive=False, keepidle=None, bus_id=None, **kwargs):
-        self.app_conf = app_conf
-        self.name = name
-        self.application = None
-        self.host = host or '0.0.0.0'
-        self.port = port or 0
-        self.pool = eventlet.GreenPool(threads)
-        self.socket_info = {}
-        self.greenthread = None
-        self.do_ssl = False
-        self.cert_required = False
-        self.keepalive = keepalive
-        self.keepidle = keepidle
-        self.socket = None
-        self.bus_id = bus_id
-        # store API, policy-engine, datasource flags; for use in start()
-        self.flags = kwargs
-
-        # TODO(masa): To support Active-Active HA with DseNode on any
-        # driver of oslo.messaging, make sure to use the same partition_id
-        # among multiple DseNodes sharing the same message topic namespace.
-
-    def start(self, key=None, backlog=128):
-        """Run a WSGI server with the given application."""
-        if self.socket is None:
-            self.listen(key=key, backlog=backlog)
-
-        try:
-            kwargs = {'global_conf':
-                      {'node_id': self.name,
-                       'bus_id': self.bus_id,
-                       'flags': json.dumps(self.flags)}}
-            self.application = deploy.loadapp('config:%s' % self.app_conf,
-                                              name='congress', **kwargs)
-        except Exception:
-            LOG.exception('Failed to start %s server', self.name)
-            raise exception.CongressException(
-                'Failed to initialize %s server' % self.name)
-
-        self.greenthread = self.pool.spawn(self._run,
-                                           self.application,
-                                           self.socket)
-
-    def listen(self, key=None, backlog=128):
-        """Create and start listening on socket.
-
-        Call before forking worker processes.
-
-        Raises Exception if this has already been called.
- """ - - if self.socket is not None: - raise Exception(_('Server can only listen once.')) - - LOG.info(('Starting %(arg0)s on %(host)s:%(port)s'), - {'arg0': sys.argv[0], - 'host': self.host, - 'port': self.port}) - - # TODO(dims): eventlet's green dns/socket module does not actually - # support IPv6 in getaddrinfo(). We need to get around this in the - # future or monitor upstream for a fix - info = socket.getaddrinfo(self.host, - self.port, - socket.AF_UNSPEC, - socket.SOCK_STREAM)[0] - _socket = eventlet.listen(info[-1], - family=info[0], - backlog=backlog) - if key: - self.socket_info[key] = _socket.getsockname() - # SSL is enabled - if self.do_ssl: - if self.cert_required: - cert_reqs = ssl.CERT_REQUIRED - else: - cert_reqs = ssl.CERT_NONE - sslsocket = eventlet.wrap_ssl(_socket, certfile=self.certfile, - keyfile=self.keyfile, - server_side=True, - cert_reqs=cert_reqs, - ca_certs=self.ca_certs) - _socket = sslsocket - - # Optionally enable keepalive on the wsgi socket. - if self.keepalive: - _socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - - # This option isn't available in the OS X version of eventlet - if hasattr(socket, 'TCP_KEEPIDLE') and self.keepidle is not None: - _socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, - self.keepidle) - - self.socket = _socket - - def set_ssl(self, certfile, keyfile=None, ca_certs=None, - cert_required=True): - self.certfile = certfile - self.keyfile = keyfile - self.ca_certs = ca_certs - self.cert_required = cert_required - self.do_ssl = True - - def kill(self): - if self.greenthread is not None: - self.greenthread.kill() - - def stop(self): - self.kill() - # We're not able to stop the DseNode in this case. Is there a need to - # stop the ApiServer without also exiting the process? - - def reset(self): - LOG.info("reset() not implemented yet") - - def wait(self): - """Wait until all servers have completed running.""" - try: - self.pool.waitall() - except KeyboardInterrupt: - pass - except greenlet.GreenletExit: - pass - - def _run(self, application, socket): - """Start a WSGI server in a new green thread.""" - logger = logging.getLogger('eventlet.wsgi.server') - try: - eventlet.wsgi.server(socket, application, max_size=1000, - log=EventletFilteringLogger(logger), - debug=False) - except greenlet.GreenletExit: - # Wait until all servers have completed running - pass - except Exception: - LOG.exception(_('Server error')) - raise diff --git a/congress/common/policy.py b/congress/common/policy.py deleted file mode 100644 index e569cf69..00000000 --- a/congress/common/policy.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Policy Engine For Auth on API calls.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_config import cfg -from oslo_policy import policy - -from congress import exception - - -_ENFORCER = None - - -def reset(): - global _ENFORCER - if _ENFORCER: - _ENFORCER.clear() - _ENFORCER = None - - -def init(policy_file=None, rules=None, default_rule=None, use_conf=True): - """Init an Enforcer class. - - :param policy_file: Custom policy file to use, if none is specified, - `CONF.policy_file` will be used. - :param rules: Default dictionary / Rules to use. It will be - considered just in the first instantiation. - :param default_rule: Default rule to use, CONF.default_rule will - be used if none is specified. - :param use_conf: Whether to load rules from config file. - """ - global _ENFORCER - if not _ENFORCER: - _ENFORCER = policy.Enforcer(cfg.CONF, policy_file=policy_file, - rules=rules, - default_rule=default_rule, - use_conf=use_conf) - - -def set_rules(rules, overwrite=True, use_conf=False): - """Set rules based on the provided dict of rules. - - :param rules: New rules to use. It should be an instance of dict. - :param overwrite: Whether to overwrite current rules or update them - with the new rules. - :param use_conf: Whether to reload rules from config file. - """ - init(use_conf=False) - _ENFORCER.set_rules(rules, overwrite, use_conf) - - -def enforce(context, action, target, do_raise=True, exc=None): - """Verifies that the action is valid on the target in this context. - - :param context: congress context - :param action: string representing the action to be checked - this should be colon separated for clarity. - i.e. ``compute:create_instance``, - ``compute:attach_volume``, - ``volume:attach_volume`` - :param target: dictionary representing the object of the action - for object creation this should be a dictionary representing the - location of the object e.g. ``{'project_id': context.project_id}`` - :param do_raise: if True (the default), raises PolicyNotAuthorized; - if False, returns False - - :raises congress.exception.PolicyNotAuthorized: if verification fails - and do_raise is True. - - :return: returns a non-False value (not necessarily "True") if - authorized, and the exact value False if not authorized and - do_raise is False. - """ - init() - credentials = context.to_dict() - if not exc: - exc = exception.PolicyNotAuthorized - return _ENFORCER.enforce(action, target, credentials, do_raise=do_raise, - exc=exc, action=action) - - -def check_is_admin(context): - """Whether or not roles contains 'admin' role according to policy setting. 
- - """ - init() - # the target is user-self - credentials = context.to_dict() - target = credentials - return _ENFORCER.enforce('context_is_admin', target, credentials) - - -@policy.register('is_admin') -class IsAdminCheck(policy.Check): - """An explicit check for is_admin.""" - - def __init__(self, kind, match): - """Initialize the check.""" - - self.expected = (match.lower() == 'true') - - super(IsAdminCheck, self).__init__(kind, str(self.expected)) - - def __call__(self, target, creds, enforcer): - """Determine whether is_admin matches the requested value.""" - - return creds['is_admin'] == self.expected - - -def get_rules(): - if _ENFORCER: - return _ENFORCER.rules diff --git a/congress/common/wsgi.py b/congress/common/wsgi.py deleted file mode 100644 index 8185f2a2..00000000 --- a/congress/common/wsgi.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utility methods for working with WSGI servers.""" - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import sys - -import routes.middleware -import webob.dec -import webob.exc - - -class Request(webob.Request): - pass - - -class Application(object): - """Base WSGI application wrapper. Subclasses need to implement __call__.""" - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. - - Any local configuration (that is, values under the [app:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [app:wadl] - latest_version = 1.3 - paste.app_factory = nova.api.fancy_api:Wadl.factory - - which would result in a call to the `Wadl` class as - - import nova.api.fancy_api - fancy_api.Wadl(latest_version='1.3') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. 
- - """ - return cls(**local_config) - - def __call__(self, environ, start_response): - r"""Subclasses will probably want to implement __call__ like this: - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - # Any of the following objects work as responses: - - # Option 1: simple string - res = 'message\n' - - # Option 2: a nicely formatted HTTP exception page - res = exc.HTTPForbidden(explanation='Nice try') - - # Option 3: a webob Response object (in case you need to play with - # headers, or you want to be treated like an iterable, or or or) - res = Response(); - res.app_iter = open('somefile') - - # Option 4: any wsgi app to be run next - res = self.application - - # Option 5: you can get a Response object for a wsgi app, too, to - # play with headers etc - res = req.get_response(self.application) - - # You can then just return your response... - return res - # ... or set req.response and return None. - req.response = res - - See the end of http://pythonpaste.org/webob/modules/dec.html - for more info. - - """ - raise NotImplementedError(_('You must implement __call__')) - - -class Middleware(Application): - """Base WSGI middleware. - - These classes require an application to be - initialized that will be called next. By default the middleware will - simply call its wrapped app, or you can override __call__ to customize its - behavior. - - """ - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. - - Any local configuration (that is, values under the [filter:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [filter:analytics] - redis_host = 127.0.0.1 - paste.filter_factory = nova.api.analytics:Analytics.factory - - which would result in a call to the `Analytics` class as - - import nova.api.analytics - analytics.Analytics(app_from_paste, redis_host='127.0.0.1') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. - - """ - def _factory(app): - return cls(app, **local_config) - return _factory - - def __init__(self, application): - self.application = application - - def process_request(self, req): - """Called on each request. - - If this returns None, the next application down the stack will be - executed. If it returns a response then that response will be returned - and execution will stop here. - - """ - return None - - def process_response(self, response): - """Do whatever you'd like to the response.""" - return response - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - response = self.process_request(req) - if response: - return response - response = req.get_response(self.application) - return self.process_response(response) - - -class Debug(Middleware): - """Helper class for debugging a WSGI application. - - Can be inserted into any WSGI application chain to get information - about the request and response. 
- - """ - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - print(('*' * 40) + ' REQUEST ENVIRON') - for key, value in req.environ.items(): - print(key, '=', value) - print() - resp = req.get_response(self.application) - - print(('*' * 40) + ' RESPONSE HEADERS') - for (key, value) in resp.headers.items(): - print(key, '=', value) - print() - - resp.app_iter = self.print_generator(resp.app_iter) - - return resp - - @staticmethod - def print_generator(app_iter): - """Iterator that prints the contents of a wrapper string.""" - print(('*' * 40) + ' BODY') - for part in app_iter: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print() - - -class Router(object): - """WSGI middleware that maps incoming requests to WSGI apps.""" - - def __init__(self, mapper): - """Create a router for the given routes.Mapper. - - Each route in `mapper` must specify a 'controller', which is a - WSGI app to call. You'll probably want to specify an 'action' as - well and have your controller be an object that can route - the request to the action-specific method. - - Examples: - mapper = routes.Mapper() - sc = ServerController() - - # Explicit mapping of one route to a controller+action - mapper.connect(None, '/svrlist', controller=sc, action='list') - - # Actions are all implicitly defined - mapper.resource('server', 'servers', controller=sc) - - # Pointing to an arbitrary WSGI app. You can specify the - # {path_info:.*} parameter so the target app can be handed just that - # section of the URL. - mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) - - """ - self.map = mapper - self._router = routes.middleware.RoutesMiddleware(self._dispatch, - self.map) - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - """Route the incoming request to a controller based on self.map. - - If no match, return a 404. - - """ - return self._router - - @staticmethod - @webob.dec.wsgify(RequestClass=Request) - def _dispatch(req): - """Dispatch the request to the appropriate controller. - - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. Either returns 404 - or the routed WSGI app's response. - - """ - match = req.environ['wsgiorg.routing_args'][1] - if not match: - return webob.exc.HTTPNotFound() - app = match['controller'] - return app diff --git a/congress/context.py b/congress/context.py deleted file mode 100644 index 66de59f4..00000000 --- a/congress/context.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""RequestContext: context for requests that persist through congress.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import copy -import datetime - -from oslo_context import context as common_context -from oslo_log import log as logging - -from congress.common import policy - - -LOG = logging.getLogger(__name__) - - -class RequestContext(common_context.RequestContext): - """Security context and request information. - - Represents the user taking a given action within the system. - - """ - - def __init__(self, user_id, tenant_id, is_admin=None, read_deleted="no", - roles=None, timestamp=None, load_admin_roles=True, - request_id=None, tenant_name=None, user_name=None, - overwrite=True, **kwargs): - """Object initialization. - - :param read_deleted: 'no' indicates deleted records are hidden, 'yes' - indicates deleted records are visible, 'only' indicates that - *only* deleted records are visible. - - :param overwrite: Set to False to ensure that the greenthread local - copy of the index is not overwritten. - - :param kwargs: Extra arguments that might be present, but we ignore - because they possibly came in from older rpc messages. - """ - super(RequestContext, self).__init__(user=user_id, tenant=tenant_id, - is_admin=is_admin, - request_id=request_id, - overwrite=overwrite, - roles=roles) - self.user_name = user_name - self.tenant_name = tenant_name - - self.read_deleted = read_deleted - if not timestamp: - timestamp = datetime.datetime.utcnow() - self.timestamp = timestamp - self._session = None - if self.is_admin is None: - self.is_admin = policy.check_is_admin(self) - - # Log only once the context has been configured to prevent - # format errors. - if kwargs: - LOG.debug(('Arguments dropped when creating ' - 'context: %s'), kwargs) - - @property - def project_id(self): - return self.tenant - - @property - def tenant_id(self): - return self.tenant - - @tenant_id.setter - def tenant_id(self, tenant_id): - self.tenant = tenant_id - - @property - def user_id(self): - return self.user - - @user_id.setter - def user_id(self, user_id): - self.user = user_id - - def _get_read_deleted(self): - return self._read_deleted - - def _set_read_deleted(self, read_deleted): - if read_deleted not in ('no', 'yes', 'only'): - raise ValueError(_("read_deleted can only be one of 'no', " - "'yes' or 'only', not %r") % read_deleted) - self._read_deleted = read_deleted - - def _del_read_deleted(self): - del self._read_deleted - - read_deleted = property(_get_read_deleted, _set_read_deleted, - _del_read_deleted) - - def to_dict(self): - ret = super(RequestContext, self).to_dict() - ret.update({'user_id': self.user_id, - 'tenant_id': self.tenant_id, - 'project_id': self.project_id, - 'read_deleted': self.read_deleted, - 'timestamp': str(self.timestamp), - 'tenant_name': self.tenant_name, - 'project_name': self.tenant_name, - 'user_name': self.user_name}) - return ret - - @classmethod - def from_dict(cls, values): - return cls(**values) - - def elevated(self, read_deleted=None): - """Return a version of this context with admin flag set.""" - context = copy.copy(self) - context.is_admin = True - - if 'admin' not in [x.lower() for x in context.roles]: - context.roles.append('admin') - - if read_deleted is not None: - context.read_deleted = read_deleted - - return context - - -def get_admin_context(read_deleted="no", load_admin_roles=True): - return RequestContext(user_id=None, - tenant_id=None, - is_admin=True, - read_deleted=read_deleted, - 
load_admin_roles=load_admin_roles, - overwrite=False) diff --git a/congress/datalog/Congress.g b/congress/datalog/Congress.g deleted file mode 100644 index 94b04ba0..00000000 --- a/congress/datalog/Congress.g +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright (c) 2013 VMware, Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. -// - -grammar Congress; - -options { - language=Python; - output=AST; - ASTLabelType=CommonTree; -} - -tokens { - PROG; - COMMA=','; - COLONMINUS=':-'; - LPAREN='('; - RPAREN=')'; - RBRACKET=']'; - LBRACKET='['; - - // Structure - THEORY; - STRUCTURED_NAME; - - // Kinds of Formulas - EVENT; - RULE; - LITERAL; - MODAL; - ATOM; - NOT; - AND; - - // Terms - NAMED_PARAM; - COLUMN_NAME; - COLUMN_NUMBER; - VARIABLE; - STRING_OBJ; - INTEGER_OBJ; - FLOAT_OBJ; - SYMBOL_OBJ; -} - -// a program can be one or more statements or empty -prog - : statement+ EOF -> ^(THEORY statement+) - | EOF - ; - -// a statement is either a formula or a comment -// let the lexer handle comments directly for efficiency -statement - : formula formula_terminator? -> formula - | COMMENT - ; - -formula - : rule - | fact - | event - ; - -// An Event represents the insertion/deletion of policy statements. -// Events always include :-. This is to avoid ambiguity in the grammar -// for the case of insert[p(1)]. Without the requirement that an event -// includes a :-, insert[p(1)] could either represent the event where p(1) -// is inserted or simply a policy statement with an empty body and the modal -// 'insert' in the head. -// This means that to represent the event where p(1) is inserted, you must write -// insert[p(1) :- true]. To represent the query that asks if insert[p(1)] is true -// you write insert[p(1)]. - -event - : event_op LBRACKET rule (formula_terminator STRING)? RBRACKET -> ^(EVENT event_op rule STRING?) - ; - -event_op - : 'insert' - | 'delete' - ; - -formula_terminator - : ';' - | '.' - ; - -rule - : literal_list COLONMINUS literal_list -> ^(RULE literal_list literal_list) - ; - -literal_list - : literal (COMMA literal)* -> ^(AND literal+) - ; - -literal - : fact -> fact - | NEGATION fact -> ^(NOT fact) - ; - -// Note: if we replace modal_op with ID, it tries to force statements -// like insert[p(x)] :- q(x) to be events instead of rules. Bug? -fact - : atom - | modal_op LBRACKET atom RBRACKET -> ^(MODAL modal_op atom) - ; - -modal_op - : 'execute' - | 'insert' - | 'delete' - ; - -atom - : relation_constant (LPAREN parameter_list? RPAREN)? -> ^(ATOM relation_constant parameter_list?) 
-    ;
-
-parameter_list
-    : parameter (COMMA parameter)* -> parameter+
-    ;
-
-parameter
-    : term -> term
-    | column_ref EQUAL term -> ^(NAMED_PARAM column_ref term)
-    ;
-
-column_ref
-    : ID  -> ^(COLUMN_NAME ID)
-    | INT -> ^(COLUMN_NUMBER INT)
-    ;
-
-term
-    : object_constant
-    | variable
-    ;
-
-object_constant
-    : INT    -> ^(INTEGER_OBJ INT)
-    | FLOAT  -> ^(FLOAT_OBJ FLOAT)
-    | STRING -> ^(STRING_OBJ STRING)
-    ;
-
-variable
-    : ID -> ^(VARIABLE ID)
-    ;
-
-relation_constant
-    : ID (':' ID)* SIGN? -> ^(STRUCTURED_NAME ID+ SIGN?)
-    ;
-
-// start of the lexer
-// first, define keywords to ensure they have lexical priority
-
-NEGATION
-    : 'not'
-    | 'NOT'
-    | '!'
-    ;
-
-EQUAL
-    : '='
-    ;
-
-SIGN
-    : '+' | '-'
-    ;
-
-// Python integers, conformant to 3.4.2 spec
-// Note that leading zeros in a non-zero decimal number are not allowed
-// This is taken care of by the first and second alternatives
-INT
-    : '1'..'9' ('0'..'9')*
-    | '0'+
-    | '0' ('o' | 'O') ('0'..'7')+
-    | '0' ('x' | 'X') (HEX_DIGIT)+
-    | '0' ('b' | 'B') ('0' | '1')+
-    ;
-
-// Python floating point literals, conformant to 3.4.2 spec
-// The integer and exponent parts are always interpreted using radix 10
-FLOAT
-    : FLOAT_NO_EXP
-    | FLOAT_EXP
-    ;
-
-// String literals according to Python 3.4.2 grammar
-// THIS VERSION IMPLEMENTS STRING AND BYTE LITERALS
-// AS WELL AS TRIPLE QUOTED STRINGS
-// Python strings:
-// - can be enclosed in matching single quotes (') or double quotes (")
-// - can be enclosed in matching groups of three single or double quotes
-// - a backslash (\) character is used to escape characters that otherwise
-//   have a special meaning (e.g., newline, backslash, or a quote)
-// - can be prefixed with a u to simplify maintenance of 2.x and 3.x code
-// - 'ur' is NOT allowed
-// - unescaped newlines and quotes are allowed in triple-quoted literals
-//   EXCEPT that three unescaped contiguous quotes terminate the literal
-//
-// Byte String Literals according to Python 3.4.2 grammar
-// Bytes are always prefixed with 'b' or 'B', and can only contain ASCII
-// Any byte with a numeric value of >= 128 must be escaped
-//
-// Also implemented code refactoring to reduce runtime size of parser
-
-STRING
-    : (STRPREFIX)? (SLSTRING)+
-    | (BYTESTRPREFIX) (SLBYTESTRING)+
-    ;
-
-// moved this rule so we could differentiate between .123 and .1aa
-// (i.e., relying on lexical priority)
-ID
-    : ('a'..'z'|'A'..'Z'|'_'|'.') ('a'..'z'|'A'..'Z'|'0'..'9'|'_'|'.')*
-    ;
-
-// added Pythonesque comments
-COMMENT
-    : '//' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;}
-    | '/*' ( options {greedy=false;} : . )* '*/' {$channel=HIDDEN;}
-    | '#' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;}
-    ;
-
-WS
-    : ( ' '
-      | '\t'
-      | '\r'
-      | '\n'
-      ) {$channel=HIDDEN;}
-    ;
-
-
-// fragment rules
-// these are helper rules that are used by other lexical rules
-// they do NOT generate tokens
-fragment
-EXPONENT
-    : ('e'|'E') ('+'|'-')? ('0'..'9')+
-    ;
-
-fragment
-HEX_DIGIT
-    : ('0'..'9'|'a'..'f'|'A'..'F')
-    ;
-
-fragment
-DIGIT
-    : ('0'..'9')
-    ;
-
-fragment
-FLOAT_NO_EXP
-    : INT_PART? FRAC_PART
-    | INT_PART '.'
-    ;
-
-fragment
-FLOAT_EXP
-    : ( INT_PART | FLOAT_NO_EXP ) EXPONENT
-    ;
-
-fragment
-INT_PART
-    : DIGIT+
-    ;
-
-fragment
-FRAC_PART
-    : '.' DIGIT+
-    ;
-
-// The following fragments are for string handling
-
-// any form of 'ur' is illegal
-fragment
-STRPREFIX
-    : 'r' | 'R' | 'u' | 'U'
-    ;
-
-fragment
-STRING_ESC
-    : '\\' . 
- ; - -// The first two are single-line string with single- and double-quotes -// The second two are multi-line strings with single- and double quotes -fragment -SLSTRING - : '\'' (STRING_ESC | ~('\\' | '\r' | '\n' | '\'') )* '\'' - | '"' (STRING_ESC | ~('\\' | '\r' | '\n' | '"') )* '"' - | '\'\'\'' (STRING_ESC | ~('\\') )* '\'\'\'' - | '"""' (STRING_ESC | ~('\\') )* '"""' - ; - - -// Python Byte Literals -// Each byte within a byte literal can be an ASCII character or an -// encoded hex number from \x00 to \xff (i.e., 0-255) -// EXCEPT the backslash, newline, or quote - -fragment -BYTESTRPREFIX - : 'b' | 'B' | 'br' | 'Br' | 'bR' | 'BR' | 'rb' | 'rB' | 'Rb' | 'RB' - ; - -fragment -SLBYTESTRING - : '\'' (BYTES_CHAR_SQ | BYTES_ESC)* '\'' - | '"' (BYTES_CHAR_DQ | BYTES_ESC)* '"' - | '\'\'\'' (BYTES_CHAR_SQ | BYTES_TESC)* '\'\'\'' - | '"""' (BYTES_CHAR_DQ | BYTES_TESC)* '"""' - ; - -fragment -BYTES_CHAR_SQ - : '\u0000'..'\u0009' - | '\u000B'..'\u000C' - | '\u000E'..'\u0026' - | '\u0028'..'\u005B' - | '\u005D'..'\u007F' - ; - -fragment -BYTES_CHAR_DQ - : '\u0000'..'\u0009' - | '\u000B'..'\u000C' - | '\u000E'..'\u0021' - | '\u0023'..'\u005B' - | '\u005D'..'\u007F' - ; - -fragment -BYTES_ESC - : '\\' '\u0000'..'\u007F' - ; - - -fragment -BYTES_TESC - : '\u0000'..'\u005B' - | '\u005D'..'\u007F' - ; diff --git a/congress/datalog/Python2/CongressLexer.py b/congress/datalog/Python2/CongressLexer.py deleted file mode 100644 index bf8ebab4..00000000 --- a/congress/datalog/Python2/CongressLexer.py +++ /dev/null @@ -1,2734 +0,0 @@ -# $ANTLR 3.5 /Users/tim/opencode/congress/congress/datalog/Congress.g 2015-08-03 09:06:22 - -import sys -from antlr3 import * -from antlr3.compat import set, frozenset - - - -# for convenience in actions -HIDDEN = BaseRecognizer.HIDDEN - -# token types -EOF=-1 -T__53=53 -T__54=54 -T__55=55 -T__56=56 -T__57=57 -T__58=58 -AND=4 -ATOM=5 -BYTESTRPREFIX=6 -BYTES_CHAR_DQ=7 -BYTES_CHAR_SQ=8 -BYTES_ESC=9 -BYTES_TESC=10 -COLONMINUS=11 -COLUMN_NAME=12 -COLUMN_NUMBER=13 -COMMA=14 -COMMENT=15 -DIGIT=16 -EQUAL=17 -EVENT=18 -EXPONENT=19 -FLOAT=20 -FLOAT_EXP=21 -FLOAT_NO_EXP=22 -FLOAT_OBJ=23 -FRAC_PART=24 -HEX_DIGIT=25 -ID=26 -INT=27 -INTEGER_OBJ=28 -INT_PART=29 -LBRACKET=30 -LITERAL=31 -LPAREN=32 -MODAL=33 -NAMED_PARAM=34 -NEGATION=35 -NOT=36 -PROG=37 -RBRACKET=38 -RPAREN=39 -RULE=40 -SIGN=41 -SLBYTESTRING=42 -SLSTRING=43 -STRING=44 -STRING_ESC=45 -STRING_OBJ=46 -STRPREFIX=47 -STRUCTURED_NAME=48 -SYMBOL_OBJ=49 -THEORY=50 -VARIABLE=51 -WS=52 - - -class CongressLexer(Lexer): - - grammarFileName = "/Users/tim/opencode/congress/congress/datalog/Congress.g" - api_version = 1 - - def __init__(self, input=None, state=None): - if state is None: - state = RecognizerSharedState() - super(CongressLexer, self).__init__(input, state) - - self.delegates = [] - - self.dfa8 = self.DFA8( - self, 8, - eot = self.DFA8_eot, - eof = self.DFA8_eof, - min = self.DFA8_min, - max = self.DFA8_max, - accept = self.DFA8_accept, - special = self.DFA8_special, - transition = self.DFA8_transition - ) - - self.dfa23 = self.DFA23( - self, 23, - eot = self.DFA23_eot, - eof = self.DFA23_eof, - min = self.DFA23_min, - max = self.DFA23_max, - accept = self.DFA23_accept, - special = self.DFA23_special, - transition = self.DFA23_transition - ) - - self.dfa24 = self.DFA24( - self, 24, - eot = self.DFA24_eot, - eof = self.DFA24_eof, - min = self.DFA24_min, - max = self.DFA24_max, - accept = self.DFA24_accept, - special = self.DFA24_special, - transition = self.DFA24_transition - ) - - self.dfa38 = self.DFA38( - self, 38, - eot = 
self.DFA38_eot, - eof = self.DFA38_eof, - min = self.DFA38_min, - max = self.DFA38_max, - accept = self.DFA38_accept, - special = self.DFA38_special, - transition = self.DFA38_transition - ) - - - - - - - # $ANTLR start "COLONMINUS" - def mCOLONMINUS(self, ): - try: - _type = COLONMINUS - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:7:12: ( ':-' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:7:14: ':-' - pass - self.match(":-") - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "COLONMINUS" - - - - # $ANTLR start "COMMA" - def mCOMMA(self, ): - try: - _type = COMMA - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:8:7: ( ',' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:8:9: ',' - pass - self.match(44) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "COMMA" - - - - # $ANTLR start "LBRACKET" - def mLBRACKET(self, ): - try: - _type = LBRACKET - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:9:10: ( '[' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:9:12: '[' - pass - self.match(91) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "LBRACKET" - - - - # $ANTLR start "LPAREN" - def mLPAREN(self, ): - try: - _type = LPAREN - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:10:8: ( '(' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:10:10: '(' - pass - self.match(40) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "LPAREN" - - - - # $ANTLR start "RBRACKET" - def mRBRACKET(self, ): - try: - _type = RBRACKET - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:11:10: ( ']' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:11:12: ']' - pass - self.match(93) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "RBRACKET" - - - - # $ANTLR start "RPAREN" - def mRPAREN(self, ): - try: - _type = RPAREN - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:12:8: ( ')' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:12:10: ')' - pass - self.match(41) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "RPAREN" - - - - # $ANTLR start "T__53" - def mT__53(self, ): - try: - _type = T__53 - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:13:7: ( '.' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:13:9: '.' 
- pass - self.match(46) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__53" - - - - # $ANTLR start "T__54" - def mT__54(self, ): - try: - _type = T__54 - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:14:7: ( ':' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:14:9: ':' - pass - self.match(58) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__54" - - - - # $ANTLR start "T__55" - def mT__55(self, ): - try: - _type = T__55 - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:15:7: ( ';' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:15:9: ';' - pass - self.match(59) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__55" - - - - # $ANTLR start "T__56" - def mT__56(self, ): - try: - _type = T__56 - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:16:7: ( 'delete' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:16:9: 'delete' - pass - self.match("delete") - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__56" - - - - # $ANTLR start "T__57" - def mT__57(self, ): - try: - _type = T__57 - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:17:7: ( 'execute' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:17:9: 'execute' - pass - self.match("execute") - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__57" - - - - # $ANTLR start "T__58" - def mT__58(self, ): - try: - _type = T__58 - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:18:7: ( 'insert' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:18:9: 'insert' - pass - self.match("insert") - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__58" - - - - # $ANTLR start "NEGATION" - def mNEGATION(self, ): - try: - _type = NEGATION - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:167:5: ( 'not' | 'NOT' | '!' ) - alt1 = 3 - LA1 = self.input.LA(1) - if LA1 == 110: - alt1 = 1 - elif LA1 == 78: - alt1 = 2 - elif LA1 == 33: - alt1 = 3 - else: - nvae = NoViableAltException("", 1, 0, self.input) - - raise nvae - - - if alt1 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:167:7: 'not' - pass - self.match("not") - - - - elif alt1 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:168:7: 'NOT' - pass - self.match("NOT") - - - - elif alt1 == 3: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:169:7: '!' 
- pass - self.match(33) - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "NEGATION" - - - - # $ANTLR start "EQUAL" - def mEQUAL(self, ): - try: - _type = EQUAL - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:173:5: ( '=' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:173:8: '=' - pass - self.match(61) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "EQUAL" - - - - # $ANTLR start "SIGN" - def mSIGN(self, ): - try: - _type = SIGN - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:177:5: ( '+' | '-' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if self.input.LA(1) == 43 or self.input.LA(1) == 45: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "SIGN" - - - - # $ANTLR start "INT" - def mINT(self, ): - try: - _type = INT - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:184:5: ( '1' .. '9' ( '0' .. '9' )* | ( '0' )+ | '0' ( 'o' | 'O' ) ( '0' .. '7' )+ | '0' ( 'x' | 'X' ) ( HEX_DIGIT )+ | '0' ( 'b' | 'B' ) ( '0' | '1' )+ ) - alt7 = 5 - LA7_0 = self.input.LA(1) - - if ((49 <= LA7_0 <= 57)) : - alt7 = 1 - elif (LA7_0 == 48) : - LA7 = self.input.LA(2) - if LA7 == 79 or LA7 == 111: - alt7 = 3 - elif LA7 == 88 or LA7 == 120: - alt7 = 4 - elif LA7 == 66 or LA7 == 98: - alt7 = 5 - else: - alt7 = 2 - - else: - nvae = NoViableAltException("", 7, 0, self.input) - - raise nvae - - - if alt7 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:184:7: '1' .. '9' ( '0' .. '9' )* - pass - self.matchRange(49, 57) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:184:16: ( '0' .. '9' )* - while True: #loop2 - alt2 = 2 - LA2_0 = self.input.LA(1) - - if ((48 <= LA2_0 <= 57)) : - alt2 = 1 - - - if alt2 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (48 <= self.input.LA(1) <= 57): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop2 - - - - elif alt7 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:185:7: ( '0' )+ - pass - # /Users/tim/opencode/congress/congress/datalog/Congress.g:185:7: ( '0' )+ - cnt3 = 0 - while True: #loop3 - alt3 = 2 - LA3_0 = self.input.LA(1) - - if (LA3_0 == 48) : - alt3 = 1 - - - if alt3 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:185:7: '0' - pass - self.match(48) - - - else: - if cnt3 >= 1: - break #loop3 - - eee = EarlyExitException(3, self.input) - raise eee - - cnt3 += 1 - - - - elif alt7 == 3: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:186:7: '0' ( 'o' | 'O' ) ( '0' .. '7' )+ - pass - self.match(48) - - if self.input.LA(1) == 79 or self.input.LA(1) == 111: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:186:23: ( '0' .. 
'7' )+ - cnt4 = 0 - while True: #loop4 - alt4 = 2 - LA4_0 = self.input.LA(1) - - if ((48 <= LA4_0 <= 55)) : - alt4 = 1 - - - if alt4 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (48 <= self.input.LA(1) <= 55): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt4 >= 1: - break #loop4 - - eee = EarlyExitException(4, self.input) - raise eee - - cnt4 += 1 - - - - elif alt7 == 4: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:187:7: '0' ( 'x' | 'X' ) ( HEX_DIGIT )+ - pass - self.match(48) - - if self.input.LA(1) == 88 or self.input.LA(1) == 120: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:187:23: ( HEX_DIGIT )+ - cnt5 = 0 - while True: #loop5 - alt5 = 2 - LA5_0 = self.input.LA(1) - - if ((48 <= LA5_0 <= 57) or (65 <= LA5_0 <= 70) or (97 <= LA5_0 <= 102)) : - alt5 = 1 - - - if alt5 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 70) or (97 <= self.input.LA(1) <= 102): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt5 >= 1: - break #loop5 - - eee = EarlyExitException(5, self.input) - raise eee - - cnt5 += 1 - - - - elif alt7 == 5: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:188:7: '0' ( 'b' | 'B' ) ( '0' | '1' )+ - pass - self.match(48) - - if self.input.LA(1) == 66 or self.input.LA(1) == 98: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:188:23: ( '0' | '1' )+ - cnt6 = 0 - while True: #loop6 - alt6 = 2 - LA6_0 = self.input.LA(1) - - if ((48 <= LA6_0 <= 49)) : - alt6 = 1 - - - if alt6 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (48 <= self.input.LA(1) <= 49): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt6 >= 1: - break #loop6 - - eee = EarlyExitException(6, self.input) - raise eee - - cnt6 += 1 - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "INT" - - - - # $ANTLR start "FLOAT" - def mFLOAT(self, ): - try: - _type = FLOAT - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:194:5: ( FLOAT_NO_EXP | FLOAT_EXP ) - alt8 = 2 - alt8 = self.dfa8.predict(self.input) - if alt8 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:194:7: FLOAT_NO_EXP - pass - self.mFLOAT_NO_EXP() - - - - elif alt8 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:195:7: FLOAT_EXP - pass - self.mFLOAT_EXP() - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "FLOAT" - - - - # $ANTLR start "STRING" - def mSTRING(self, ): - try: - _type = STRING - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:218:5: ( ( STRPREFIX )? 
( SLSTRING )+ | ( BYTESTRPREFIX ) ( SLBYTESTRING )+ ) - alt12 = 2 - LA12 = self.input.LA(1) - if LA12 == 114: - LA12_1 = self.input.LA(2) - - if (LA12_1 == 66 or LA12_1 == 98) : - alt12 = 2 - elif (LA12_1 == 34 or LA12_1 == 39) : - alt12 = 1 - else: - nvae = NoViableAltException("", 12, 1, self.input) - - raise nvae - - - elif LA12 == 34 or LA12 == 39 or LA12 == 85 or LA12 == 117: - alt12 = 1 - elif LA12 == 66 or LA12 == 98: - alt12 = 2 - elif LA12 == 82: - LA12_4 = self.input.LA(2) - - if (LA12_4 == 66 or LA12_4 == 98) : - alt12 = 2 - elif (LA12_4 == 34 or LA12_4 == 39) : - alt12 = 1 - else: - nvae = NoViableAltException("", 12, 4, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 12, 0, self.input) - - raise nvae - - - if alt12 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:218:7: ( STRPREFIX )? ( SLSTRING )+ - pass - # /Users/tim/opencode/congress/congress/datalog/Congress.g:218:7: ( STRPREFIX )? - alt9 = 2 - LA9_0 = self.input.LA(1) - - if (LA9_0 == 82 or LA9_0 == 85 or LA9_0 == 114 or LA9_0 == 117) : - alt9 = 1 - if alt9 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if self.input.LA(1) == 82 or self.input.LA(1) == 85 or self.input.LA(1) == 114 or self.input.LA(1) == 117: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:218:20: ( SLSTRING )+ - cnt10 = 0 - while True: #loop10 - alt10 = 2 - LA10_0 = self.input.LA(1) - - if (LA10_0 == 34 or LA10_0 == 39) : - alt10 = 1 - - - if alt10 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:218:21: SLSTRING - pass - self.mSLSTRING() - - - - else: - if cnt10 >= 1: - break #loop10 - - eee = EarlyExitException(10, self.input) - raise eee - - cnt10 += 1 - - - - elif alt12 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:219:7: ( BYTESTRPREFIX ) ( SLBYTESTRING )+ - pass - # /Users/tim/opencode/congress/congress/datalog/Congress.g:219:7: ( BYTESTRPREFIX ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:219:8: BYTESTRPREFIX - pass - self.mBYTESTRPREFIX() - - - - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:219:23: ( SLBYTESTRING )+ - cnt11 = 0 - while True: #loop11 - alt11 = 2 - LA11_0 = self.input.LA(1) - - if (LA11_0 == 34 or LA11_0 == 39) : - alt11 = 1 - - - if alt11 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:219:24: SLBYTESTRING - pass - self.mSLBYTESTRING() - - - - else: - if cnt11 >= 1: - break #loop11 - - eee = EarlyExitException(11, self.input) - raise eee - - cnt11 += 1 - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "STRING" - - - - # $ANTLR start "ID" - def mID(self, ): - try: - _type = ID - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:225:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '.' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' | '.' )* ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:225:7: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '.' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' | '.' )* - pass - if self.input.LA(1) == 46 or (65 <= self.input.LA(1) <= 90) or self.input.LA(1) == 95 or (97 <= self.input.LA(1) <= 122): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:225:35: ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. 
'9' | '_' | '.' )* - while True: #loop13 - alt13 = 2 - LA13_0 = self.input.LA(1) - - if (LA13_0 == 46 or (48 <= LA13_0 <= 57) or (65 <= LA13_0 <= 90) or LA13_0 == 95 or (97 <= LA13_0 <= 122)) : - alt13 = 1 - - - if alt13 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if self.input.LA(1) == 46 or (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 90) or self.input.LA(1) == 95 or (97 <= self.input.LA(1) <= 122): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop13 - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "ID" - - - - # $ANTLR start "COMMENT" - def mCOMMENT(self, ): - try: - _type = COMMENT - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:230:5: ( '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' | '/*' ( options {greedy=false; } : . )* '*/' | '#' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' ) - alt19 = 3 - LA19_0 = self.input.LA(1) - - if (LA19_0 == 47) : - LA19_1 = self.input.LA(2) - - if (LA19_1 == 47) : - alt19 = 1 - elif (LA19_1 == 42) : - alt19 = 2 - else: - nvae = NoViableAltException("", 19, 1, self.input) - - raise nvae - - - elif (LA19_0 == 35) : - alt19 = 3 - else: - nvae = NoViableAltException("", 19, 0, self.input) - - raise nvae - - - if alt19 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:230:7: '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' - pass - self.match("//") - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:230:12: (~ ( '\\n' | '\\r' ) )* - while True: #loop14 - alt14 = 2 - LA14_0 = self.input.LA(1) - - if ((0 <= LA14_0 <= 9) or (11 <= LA14_0 <= 12) or (14 <= LA14_0 <= 65535)) : - alt14 = 1 - - - if alt14 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (0 <= self.input.LA(1) <= 9) or (11 <= self.input.LA(1) <= 12) or (14 <= self.input.LA(1) <= 65535): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop14 - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:230:26: ( '\\r' )? - alt15 = 2 - LA15_0 = self.input.LA(1) - - if (LA15_0 == 13) : - alt15 = 1 - if alt15 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:230:26: '\\r' - pass - self.match(13) - - - - - self.match(10) - - #action start - _channel=HIDDEN; - #action end - - - - elif alt19 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:231:7: '/*' ( options {greedy=false; } : . )* '*/' - pass - self.match("/*") - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:231:12: ( options {greedy=false; } : . )* - while True: #loop16 - alt16 = 2 - LA16_0 = self.input.LA(1) - - if (LA16_0 == 42) : - LA16_1 = self.input.LA(2) - - if (LA16_1 == 47) : - alt16 = 2 - elif ((0 <= LA16_1 <= 46) or (48 <= LA16_1 <= 65535)) : - alt16 = 1 - - - elif ((0 <= LA16_0 <= 41) or (43 <= LA16_0 <= 65535)) : - alt16 = 1 - - - if alt16 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:231:40: . - pass - self.matchAny() - - - else: - break #loop16 - - - self.match("*/") - - - #action start - _channel=HIDDEN; - #action end - - - - elif alt19 == 3: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:232:7: '#' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? 
'\\n' - pass - self.match(35) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:232:11: (~ ( '\\n' | '\\r' ) )* - while True: #loop17 - alt17 = 2 - LA17_0 = self.input.LA(1) - - if ((0 <= LA17_0 <= 9) or (11 <= LA17_0 <= 12) or (14 <= LA17_0 <= 65535)) : - alt17 = 1 - - - if alt17 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (0 <= self.input.LA(1) <= 9) or (11 <= self.input.LA(1) <= 12) or (14 <= self.input.LA(1) <= 65535): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop17 - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:232:25: ( '\\r' )? - alt18 = 2 - LA18_0 = self.input.LA(1) - - if (LA18_0 == 13) : - alt18 = 1 - if alt18 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:232:25: '\\r' - pass - self.match(13) - - - - - self.match(10) - - #action start - _channel=HIDDEN; - #action end - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "COMMENT" - - - - # $ANTLR start "WS" - def mWS(self, ): - try: - _type = WS - _channel = DEFAULT_CHANNEL - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:236:5: ( ( ' ' | '\\t' | '\\r' | '\\n' ) ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:236:7: ( ' ' | '\\t' | '\\r' | '\\n' ) - pass - if (9 <= self.input.LA(1) <= 10) or self.input.LA(1) == 13 or self.input.LA(1) == 32: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - #action start - _channel=HIDDEN; - #action end - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "WS" - - - - # $ANTLR start "EXPONENT" - def mEXPONENT(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:250:5: ( ( 'e' | 'E' ) ( '+' | '-' )? ( '0' .. '9' )+ ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:250:7: ( 'e' | 'E' ) ( '+' | '-' )? ( '0' .. '9' )+ - pass - if self.input.LA(1) == 69 or self.input.LA(1) == 101: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:250:17: ( '+' | '-' )? - alt20 = 2 - LA20_0 = self.input.LA(1) - - if (LA20_0 == 43 or LA20_0 == 45) : - alt20 = 1 - if alt20 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if self.input.LA(1) == 43 or self.input.LA(1) == 45: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:250:28: ( '0' .. '9' )+ - cnt21 = 0 - while True: #loop21 - alt21 = 2 - LA21_0 = self.input.LA(1) - - if ((48 <= LA21_0 <= 57)) : - alt21 = 1 - - - if alt21 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (48 <= self.input.LA(1) <= 57): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt21 >= 1: - break #loop21 - - eee = EarlyExitException(21, self.input) - raise eee - - cnt21 += 1 - - - - - - finally: - pass - - # $ANTLR end "EXPONENT" - - - - # $ANTLR start "HEX_DIGIT" - def mHEX_DIGIT(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:255:5: ( ( '0' .. '9' | 'a' .. 'f' | 'A' .. 
'F' ) ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 70) or (97 <= self.input.LA(1) <= 102): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "HEX_DIGIT" - - - - # $ANTLR start "DIGIT" - def mDIGIT(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:260:5: ( ( '0' .. '9' ) ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (48 <= self.input.LA(1) <= 57): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "DIGIT" - - - - # $ANTLR start "FLOAT_NO_EXP" - def mFLOAT_NO_EXP(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:265:5: ( ( INT_PART )? FRAC_PART | INT_PART '.' ) - alt23 = 2 - alt23 = self.dfa23.predict(self.input) - if alt23 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:265:7: ( INT_PART )? FRAC_PART - pass - # /Users/tim/opencode/congress/congress/datalog/Congress.g:265:7: ( INT_PART )? - alt22 = 2 - LA22_0 = self.input.LA(1) - - if ((48 <= LA22_0 <= 57)) : - alt22 = 1 - if alt22 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:265:7: INT_PART - pass - self.mINT_PART() - - - - - - self.mFRAC_PART() - - - - elif alt23 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:266:7: INT_PART '.' - pass - self.mINT_PART() - - - self.match(46) - - - - finally: - pass - - # $ANTLR end "FLOAT_NO_EXP" - - - - # $ANTLR start "FLOAT_EXP" - def mFLOAT_EXP(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:271:5: ( ( INT_PART | FLOAT_NO_EXP ) EXPONENT ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:271:7: ( INT_PART | FLOAT_NO_EXP ) EXPONENT - pass - # /Users/tim/opencode/congress/congress/datalog/Congress.g:271:7: ( INT_PART | FLOAT_NO_EXP ) - alt24 = 2 - alt24 = self.dfa24.predict(self.input) - if alt24 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:271:9: INT_PART - pass - self.mINT_PART() - - - - elif alt24 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:271:20: FLOAT_NO_EXP - pass - self.mFLOAT_NO_EXP() - - - - - - self.mEXPONENT() - - - - - - finally: - pass - - # $ANTLR end "FLOAT_EXP" - - - - # $ANTLR start "INT_PART" - def mINT_PART(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:276:5: ( ( DIGIT )+ ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:276:7: ( DIGIT )+ - pass - # /Users/tim/opencode/congress/congress/datalog/Congress.g:276:7: ( DIGIT )+ - cnt25 = 0 - while True: #loop25 - alt25 = 2 - LA25_0 = self.input.LA(1) - - if ((48 <= LA25_0 <= 57)) : - alt25 = 1 - - - if alt25 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (48 <= self.input.LA(1) <= 57): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt25 >= 1: - break #loop25 - - eee = EarlyExitException(25, self.input) - raise eee - - cnt25 += 1 - - - - - - finally: - pass - - # $ANTLR end "INT_PART" - - - - # $ANTLR start "FRAC_PART" - def mFRAC_PART(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:281:5: ( '.' ( DIGIT )+ ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:281:7: '.' 
( DIGIT )+ - pass - self.match(46) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:281:11: ( DIGIT )+ - cnt26 = 0 - while True: #loop26 - alt26 = 2 - LA26_0 = self.input.LA(1) - - if ((48 <= LA26_0 <= 57)) : - alt26 = 1 - - - if alt26 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (48 <= self.input.LA(1) <= 57): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt26 >= 1: - break #loop26 - - eee = EarlyExitException(26, self.input) - raise eee - - cnt26 += 1 - - - - - - finally: - pass - - # $ANTLR end "FRAC_PART" - - - - # $ANTLR start "STRPREFIX" - def mSTRPREFIX(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:289:5: ( 'r' | 'R' | 'u' | 'U' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if self.input.LA(1) == 82 or self.input.LA(1) == 85 or self.input.LA(1) == 114 or self.input.LA(1) == 117: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "STRPREFIX" - - - - # $ANTLR start "STRING_ESC" - def mSTRING_ESC(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:294:5: ( '\\\\' . ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:294:7: '\\\\' . - pass - self.match(92) - - self.matchAny() - - - - - finally: - pass - - # $ANTLR end "STRING_ESC" - - - - # $ANTLR start "SLSTRING" - def mSLSTRING(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:301:5: ( '\\'' ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\\'' ) )* '\\'' | '\"' ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\"' ) )* '\"' | '\\'\\'\\'' ( STRING_ESC |~ ( '\\\\' ) )* '\\'\\'\\'' | '\"\"\"' ( STRING_ESC |~ ( '\\\\' ) )* '\"\"\"' ) - alt31 = 4 - LA31_0 = self.input.LA(1) - - if (LA31_0 == 39) : - LA31_1 = self.input.LA(2) - - if (LA31_1 == 39) : - LA31_3 = self.input.LA(3) - - if (LA31_3 == 39) : - alt31 = 3 - else: - alt31 = 1 - - elif ((0 <= LA31_1 <= 9) or (11 <= LA31_1 <= 12) or (14 <= LA31_1 <= 38) or (40 <= LA31_1 <= 65535)) : - alt31 = 1 - else: - nvae = NoViableAltException("", 31, 1, self.input) - - raise nvae - - - elif (LA31_0 == 34) : - LA31_2 = self.input.LA(2) - - if (LA31_2 == 34) : - LA31_5 = self.input.LA(3) - - if (LA31_5 == 34) : - alt31 = 4 - else: - alt31 = 2 - - elif ((0 <= LA31_2 <= 9) or (11 <= LA31_2 <= 12) or (14 <= LA31_2 <= 33) or (35 <= LA31_2 <= 65535)) : - alt31 = 2 - else: - nvae = NoViableAltException("", 31, 2, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 31, 0, self.input) - - raise nvae - - - if alt31 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:301:7: '\\'' ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\\'' ) )* '\\'' - pass - self.match(39) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:301:12: ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\\'' ) )* - while True: #loop27 - alt27 = 3 - LA27_0 = self.input.LA(1) - - if (LA27_0 == 92) : - alt27 = 1 - elif ((0 <= LA27_0 <= 9) or (11 <= LA27_0 <= 12) or (14 <= LA27_0 <= 38) or (40 <= LA27_0 <= 91) or (93 <= LA27_0 <= 65535)) : - alt27 = 2 - - - if alt27 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:301:13: STRING_ESC - pass - self.mSTRING_ESC() - - - - elif alt27 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:301:26: ~ ( '\\\\' | '\\r' | '\\n' | '\\'' ) - pass - if (0 <= self.input.LA(1) <= 9) or (11 <= 
self.input.LA(1) <= 12) or (14 <= self.input.LA(1) <= 38) or (40 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop27 - - - self.match(39) - - - elif alt31 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:302:7: '\"' ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\"' ) )* '\"' - pass - self.match(34) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:302:11: ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\"' ) )* - while True: #loop28 - alt28 = 3 - LA28_0 = self.input.LA(1) - - if (LA28_0 == 92) : - alt28 = 1 - elif ((0 <= LA28_0 <= 9) or (11 <= LA28_0 <= 12) or (14 <= LA28_0 <= 33) or (35 <= LA28_0 <= 91) or (93 <= LA28_0 <= 65535)) : - alt28 = 2 - - - if alt28 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:302:12: STRING_ESC - pass - self.mSTRING_ESC() - - - - elif alt28 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:302:25: ~ ( '\\\\' | '\\r' | '\\n' | '\"' ) - pass - if (0 <= self.input.LA(1) <= 9) or (11 <= self.input.LA(1) <= 12) or (14 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop28 - - - self.match(34) - - - elif alt31 == 3: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:303:7: '\\'\\'\\'' ( STRING_ESC |~ ( '\\\\' ) )* '\\'\\'\\'' - pass - self.match("'''") - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:303:16: ( STRING_ESC |~ ( '\\\\' ) )* - while True: #loop29 - alt29 = 3 - LA29_0 = self.input.LA(1) - - if (LA29_0 == 39) : - LA29_1 = self.input.LA(2) - - if (LA29_1 == 39) : - LA29_4 = self.input.LA(3) - - if (LA29_4 == 39) : - LA29_5 = self.input.LA(4) - - if ((0 <= LA29_5 <= 65535)) : - alt29 = 2 - - - elif ((0 <= LA29_4 <= 38) or (40 <= LA29_4 <= 65535)) : - alt29 = 2 - - - elif ((0 <= LA29_1 <= 38) or (40 <= LA29_1 <= 65535)) : - alt29 = 2 - - - elif (LA29_0 == 92) : - alt29 = 1 - elif ((0 <= LA29_0 <= 38) or (40 <= LA29_0 <= 91) or (93 <= LA29_0 <= 65535)) : - alt29 = 2 - - - if alt29 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:303:17: STRING_ESC - pass - self.mSTRING_ESC() - - - - elif alt29 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:303:30: ~ ( '\\\\' ) - pass - if (0 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop29 - - - self.match("'''") - - - - elif alt31 == 4: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:304:7: '\"\"\"' ( STRING_ESC |~ ( '\\\\' ) )* '\"\"\"' - pass - self.match("\"\"\"") - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:304:13: ( STRING_ESC |~ ( '\\\\' ) )* - while True: #loop30 - alt30 = 3 - LA30_0 = self.input.LA(1) - - if (LA30_0 == 34) : - LA30_1 = self.input.LA(2) - - if (LA30_1 == 34) : - LA30_4 = self.input.LA(3) - - if (LA30_4 == 34) : - LA30_5 = self.input.LA(4) - - if ((0 <= LA30_5 <= 65535)) : - alt30 = 2 - - - elif ((0 <= LA30_4 <= 33) or (35 <= LA30_4 <= 65535)) : - alt30 = 2 - - - elif ((0 <= LA30_1 <= 33) or (35 <= LA30_1 <= 65535)) : - alt30 = 2 - - - elif (LA30_0 == 92) : - alt30 = 1 - elif ((0 <= LA30_0 <= 33) or (35 <= LA30_0 <= 91) or (93 <= LA30_0 <= 65535)) : 
- alt30 = 2 - - - if alt30 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:304:14: STRING_ESC - pass - self.mSTRING_ESC() - - - - elif alt30 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:304:27: ~ ( '\\\\' ) - pass - if (0 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop30 - - - self.match("\"\"\"") - - - - - finally: - pass - - # $ANTLR end "SLSTRING" - - - - # $ANTLR start "BYTESTRPREFIX" - def mBYTESTRPREFIX(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:5: ( 'b' | 'B' | 'br' | 'Br' | 'bR' | 'BR' | 'rb' | 'rB' | 'Rb' | 'RB' ) - alt32 = 10 - LA32 = self.input.LA(1) - if LA32 == 98: - LA32 = self.input.LA(2) - if LA32 == 114: - alt32 = 3 - elif LA32 == 82: - alt32 = 5 - else: - alt32 = 1 - - elif LA32 == 66: - LA32 = self.input.LA(2) - if LA32 == 114: - alt32 = 4 - elif LA32 == 82: - alt32 = 6 - else: - alt32 = 2 - - elif LA32 == 114: - LA32_3 = self.input.LA(2) - - if (LA32_3 == 98) : - alt32 = 7 - elif (LA32_3 == 66) : - alt32 = 8 - else: - nvae = NoViableAltException("", 32, 3, self.input) - - raise nvae - - - elif LA32 == 82: - LA32_4 = self.input.LA(2) - - if (LA32_4 == 98) : - alt32 = 9 - elif (LA32_4 == 66) : - alt32 = 10 - else: - nvae = NoViableAltException("", 32, 4, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 32, 0, self.input) - - raise nvae - - - if alt32 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:7: 'b' - pass - self.match(98) - - - elif alt32 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:13: 'B' - pass - self.match(66) - - - elif alt32 == 3: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:19: 'br' - pass - self.match("br") - - - - elif alt32 == 4: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:26: 'Br' - pass - self.match("Br") - - - - elif alt32 == 5: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:33: 'bR' - pass - self.match("bR") - - - - elif alt32 == 6: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:40: 'BR' - pass - self.match("BR") - - - - elif alt32 == 7: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:47: 'rb' - pass - self.match("rb") - - - - elif alt32 == 8: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:54: 'rB' - pass - self.match("rB") - - - - elif alt32 == 9: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:61: 'Rb' - pass - self.match("Rb") - - - - elif alt32 == 10: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:315:68: 'RB' - pass - self.match("RB") - - - - - finally: - pass - - # $ANTLR end "BYTESTRPREFIX" - - - - # $ANTLR start "SLBYTESTRING" - def mSLBYTESTRING(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:320:5: ( '\\'' ( BYTES_CHAR_SQ | BYTES_ESC )* '\\'' | '\"' ( BYTES_CHAR_DQ | BYTES_ESC )* '\"' | '\\'\\'\\'' ( BYTES_CHAR_SQ | BYTES_TESC )* '\\'\\'\\'' | '\"\"\"' ( BYTES_CHAR_DQ | BYTES_TESC )* '\"\"\"' ) - alt37 = 4 - LA37_0 = self.input.LA(1) - - if (LA37_0 == 39) : - LA37_1 = self.input.LA(2) - - if (LA37_1 == 39) : - LA37_3 = self.input.LA(3) - - if (LA37_3 == 39) : - alt37 = 3 - else: - alt37 = 1 - - elif ((0 <= LA37_1 <= 9) or (11 <= LA37_1 <= 12) or (14 <= LA37_1 <= 38) or (40 <= LA37_1 <= 127)) : - alt37 = 1 - else: - nvae = NoViableAltException("", 37, 1, self.input) - - 
raise nvae - - - elif (LA37_0 == 34) : - LA37_2 = self.input.LA(2) - - if (LA37_2 == 34) : - LA37_5 = self.input.LA(3) - - if (LA37_5 == 34) : - alt37 = 4 - else: - alt37 = 2 - - elif ((0 <= LA37_2 <= 9) or (11 <= LA37_2 <= 12) or (14 <= LA37_2 <= 33) or (35 <= LA37_2 <= 127)) : - alt37 = 2 - else: - nvae = NoViableAltException("", 37, 2, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 37, 0, self.input) - - raise nvae - - - if alt37 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:320:7: '\\'' ( BYTES_CHAR_SQ | BYTES_ESC )* '\\'' - pass - self.match(39) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:320:12: ( BYTES_CHAR_SQ | BYTES_ESC )* - while True: #loop33 - alt33 = 3 - LA33_0 = self.input.LA(1) - - if ((0 <= LA33_0 <= 9) or (11 <= LA33_0 <= 12) or (14 <= LA33_0 <= 38) or (40 <= LA33_0 <= 91) or (93 <= LA33_0 <= 127)) : - alt33 = 1 - elif (LA33_0 == 92) : - alt33 = 2 - - - if alt33 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:320:13: BYTES_CHAR_SQ - pass - self.mBYTES_CHAR_SQ() - - - - elif alt33 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:320:29: BYTES_ESC - pass - self.mBYTES_ESC() - - - - else: - break #loop33 - - - self.match(39) - - - elif alt37 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:321:7: '\"' ( BYTES_CHAR_DQ | BYTES_ESC )* '\"' - pass - self.match(34) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:321:11: ( BYTES_CHAR_DQ | BYTES_ESC )* - while True: #loop34 - alt34 = 3 - LA34_0 = self.input.LA(1) - - if ((0 <= LA34_0 <= 9) or (11 <= LA34_0 <= 12) or (14 <= LA34_0 <= 33) or (35 <= LA34_0 <= 91) or (93 <= LA34_0 <= 127)) : - alt34 = 1 - elif (LA34_0 == 92) : - alt34 = 2 - - - if alt34 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:321:12: BYTES_CHAR_DQ - pass - self.mBYTES_CHAR_DQ() - - - - elif alt34 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:321:28: BYTES_ESC - pass - self.mBYTES_ESC() - - - - else: - break #loop34 - - - self.match(34) - - - elif alt37 == 3: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:322:7: '\\'\\'\\'' ( BYTES_CHAR_SQ | BYTES_TESC )* '\\'\\'\\'' - pass - self.match("'''") - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:322:16: ( BYTES_CHAR_SQ | BYTES_TESC )* - while True: #loop35 - alt35 = 2 - LA35_0 = self.input.LA(1) - - if (LA35_0 == 39) : - LA35_1 = self.input.LA(2) - - if (LA35_1 == 39) : - LA35_3 = self.input.LA(3) - - if (LA35_3 == 39) : - LA35_4 = self.input.LA(4) - - if ((0 <= LA35_4 <= 91) or (93 <= LA35_4 <= 127)) : - alt35 = 1 - - - elif ((0 <= LA35_3 <= 38) or (40 <= LA35_3 <= 91) or (93 <= LA35_3 <= 127)) : - alt35 = 1 - - - elif ((0 <= LA35_1 <= 38) or (40 <= LA35_1 <= 91) or (93 <= LA35_1 <= 127)) : - alt35 = 1 - - - elif ((0 <= LA35_0 <= 38) or (40 <= LA35_0 <= 91) or (93 <= LA35_0 <= 127)) : - alt35 = 1 - - - if alt35 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (0 <= self.input.LA(1) <= 91) or (14 <= self.input.LA(1) <= 38) or (40 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop35 - - - self.match("'''") - - - - elif alt37 == 4: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:323:7: '\"\"\"' ( BYTES_CHAR_DQ | BYTES_TESC )* '\"\"\"' - pass - self.match("\"\"\"") - - - # 
/Users/tim/opencode/congress/congress/datalog/Congress.g:323:13: ( BYTES_CHAR_DQ | BYTES_TESC )* - while True: #loop36 - alt36 = 2 - LA36_0 = self.input.LA(1) - - if (LA36_0 == 34) : - LA36_1 = self.input.LA(2) - - if (LA36_1 == 34) : - LA36_3 = self.input.LA(3) - - if (LA36_3 == 34) : - LA36_4 = self.input.LA(4) - - if ((0 <= LA36_4 <= 91) or (93 <= LA36_4 <= 127)) : - alt36 = 1 - - - elif ((0 <= LA36_3 <= 33) or (35 <= LA36_3 <= 91) or (93 <= LA36_3 <= 127)) : - alt36 = 1 - - - elif ((0 <= LA36_1 <= 33) or (35 <= LA36_1 <= 91) or (93 <= LA36_1 <= 127)) : - alt36 = 1 - - - elif ((0 <= LA36_0 <= 33) or (35 <= LA36_0 <= 91) or (93 <= LA36_0 <= 127)) : - alt36 = 1 - - - if alt36 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (0 <= self.input.LA(1) <= 91) or (14 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop36 - - - self.match("\"\"\"") - - - - - finally: - pass - - # $ANTLR end "SLBYTESTRING" - - - - # $ANTLR start "BYTES_CHAR_SQ" - def mBYTES_CHAR_SQ(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:328:5: ( '\\u0000' .. '\\u0009' | '\\u000B' .. '\\u000C' | '\\u000E' .. '\\u0026' | '\\u0028' .. '\\u005B' | '\\u005D' .. '\\u007F' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (0 <= self.input.LA(1) <= 9) or (11 <= self.input.LA(1) <= 12) or (14 <= self.input.LA(1) <= 38) or (40 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "BYTES_CHAR_SQ" - - - - # $ANTLR start "BYTES_CHAR_DQ" - def mBYTES_CHAR_DQ(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:337:5: ( '\\u0000' .. '\\u0009' | '\\u000B' .. '\\u000C' | '\\u000E' .. '\\u0021' | '\\u0023' .. '\\u005B' | '\\u005D' .. '\\u007F' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (0 <= self.input.LA(1) <= 9) or (11 <= self.input.LA(1) <= 12) or (14 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "BYTES_CHAR_DQ" - - - - # $ANTLR start "BYTES_ESC" - def mBYTES_ESC(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:346:5: ( '\\\\' '\\u0000' .. '\\u007F' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:346:7: '\\\\' '\\u0000' .. '\\u007F' - pass - self.match(92) - - self.matchRange(0, 127) - - - - - finally: - pass - - # $ANTLR end "BYTES_ESC" - - - - # $ANTLR start "BYTES_TESC" - def mBYTES_TESC(self, ): - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:352:5: ( '\\u0000' .. '\\u005B' | '\\u005D' .. 
'\\u007F' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - if (0 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127): - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "BYTES_TESC" - - - - def mTokens(self): - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:8: ( COLONMINUS | COMMA | LBRACKET | LPAREN | RBRACKET | RPAREN | T__53 | T__54 | T__55 | T__56 | T__57 | T__58 | NEGATION | EQUAL | SIGN | INT | FLOAT | STRING | ID | COMMENT | WS ) - alt38 = 21 - alt38 = self.dfa38.predict(self.input) - if alt38 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:10: COLONMINUS - pass - self.mCOLONMINUS() - - - - elif alt38 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:21: COMMA - pass - self.mCOMMA() - - - - elif alt38 == 3: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:27: LBRACKET - pass - self.mLBRACKET() - - - - elif alt38 == 4: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:36: LPAREN - pass - self.mLPAREN() - - - - elif alt38 == 5: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:43: RBRACKET - pass - self.mRBRACKET() - - - - elif alt38 == 6: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:52: RPAREN - pass - self.mRPAREN() - - - - elif alt38 == 7: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:59: T__53 - pass - self.mT__53() - - - - elif alt38 == 8: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:65: T__54 - pass - self.mT__54() - - - - elif alt38 == 9: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:71: T__55 - pass - self.mT__55() - - - - elif alt38 == 10: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:77: T__56 - pass - self.mT__56() - - - - elif alt38 == 11: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:83: T__57 - pass - self.mT__57() - - - - elif alt38 == 12: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:89: T__58 - pass - self.mT__58() - - - - elif alt38 == 13: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:95: NEGATION - pass - self.mNEGATION() - - - - elif alt38 == 14: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:104: EQUAL - pass - self.mEQUAL() - - - - elif alt38 == 15: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:110: SIGN - pass - self.mSIGN() - - - - elif alt38 == 16: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:115: INT - pass - self.mINT() - - - - elif alt38 == 17: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:119: FLOAT - pass - self.mFLOAT() - - - - elif alt38 == 18: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:125: STRING - pass - self.mSTRING() - - - - elif alt38 == 19: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:132: ID - pass - self.mID() - - - - elif alt38 == 20: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:135: COMMENT - pass - self.mCOMMENT() - - - - elif alt38 == 21: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:1:143: WS - pass - self.mWS() - - - - - - - - - # lookup tables for DFA #8 - - DFA8_eot = DFA.unpack( - u"\3\uffff\1\6\1\uffff\1\6\1\uffff" - ) - - DFA8_eof = DFA.unpack( - u"\7\uffff" - ) - - DFA8_min = DFA.unpack( - u"\2\56\2\60\1\uffff\1\60\1\uffff" - ) - - DFA8_max = DFA.unpack( - u"\1\71\1\145\1\71\1\145\1\uffff\1\145\1\uffff" - ) - - DFA8_accept = 
DFA.unpack( - u"\4\uffff\1\2\1\uffff\1\1" - ) - - DFA8_special = DFA.unpack( - u"\7\uffff" - ) - - - DFA8_transition = [ - DFA.unpack(u"\1\2\1\uffff\12\1"), - DFA.unpack(u"\1\3\1\uffff\12\1\13\uffff\1\4\37\uffff\1\4"), - DFA.unpack(u"\12\5"), - DFA.unpack(u"\12\5\13\uffff\1\4\37\uffff\1\4"), - DFA.unpack(u""), - DFA.unpack(u"\12\5\13\uffff\1\4\37\uffff\1\4"), - DFA.unpack(u"") - ] - - # class definition for DFA #8 - - class DFA8(DFA): - pass - - - # lookup tables for DFA #23 - - DFA23_eot = DFA.unpack( - u"\3\uffff\1\4\1\uffff" - ) - - DFA23_eof = DFA.unpack( - u"\5\uffff" - ) - - DFA23_min = DFA.unpack( - u"\2\56\1\uffff\1\60\1\uffff" - ) - - DFA23_max = DFA.unpack( - u"\2\71\1\uffff\1\71\1\uffff" - ) - - DFA23_accept = DFA.unpack( - u"\2\uffff\1\1\1\uffff\1\2" - ) - - DFA23_special = DFA.unpack( - u"\5\uffff" - ) - - - DFA23_transition = [ - DFA.unpack(u"\1\2\1\uffff\12\1"), - DFA.unpack(u"\1\3\1\uffff\12\1"), - DFA.unpack(u""), - DFA.unpack(u"\12\2"), - DFA.unpack(u"") - ] - - # class definition for DFA #23 - - class DFA23(DFA): - pass - - - # lookup tables for DFA #24 - - DFA24_eot = DFA.unpack( - u"\4\uffff" - ) - - DFA24_eof = DFA.unpack( - u"\4\uffff" - ) - - DFA24_min = DFA.unpack( - u"\2\56\2\uffff" - ) - - DFA24_max = DFA.unpack( - u"\1\71\1\145\2\uffff" - ) - - DFA24_accept = DFA.unpack( - u"\2\uffff\1\2\1\1" - ) - - DFA24_special = DFA.unpack( - u"\4\uffff" - ) - - - DFA24_transition = [ - DFA.unpack(u"\1\2\1\uffff\12\1"), - DFA.unpack(u"\1\2\1\uffff\12\1\13\uffff\1\3\37\uffff\1\3"), - DFA.unpack(u""), - DFA.unpack(u"") - ] - - # class definition for DFA #24 - - class DFA24(DFA): - pass - - - # lookup tables for DFA #38 - - DFA38_eot = DFA.unpack( - u"\1\uffff\1\35\5\uffff\1\36\1\uffff\5\31\3\uffff\2\46\1\31\1\uffff" - u"\4\31\6\uffff\1\47\5\31\1\46\2\uffff\1\46\14\31\2\16\1\47\6\31" - u"\1\101\1\31\1\103\1\uffff\1\104\2\uffff" - ) - - DFA38_eof = DFA.unpack( - u"\105\uffff" - ) - - DFA38_min = DFA.unpack( - u"\1\11\1\55\5\uffff\1\56\1\uffff\1\145\1\170\1\156\1\157\1\117\3" - u"\uffff\2\56\1\42\1\uffff\4\42\6\uffff\1\56\1\154\1\145\1\163\1" - u"\164\1\124\1\56\2\uffff\1\56\10\42\1\53\1\145\1\143\1\145\3\56" - u"\1\164\1\165\1\162\1\145\2\164\1\56\1\145\1\56\1\uffff\1\56\2\uffff" - ) - - DFA38_max = DFA.unpack( - u"\1\172\1\55\5\uffff\1\172\1\uffff\1\145\1\170\1\156\1\157\1\117" - u"\3\uffff\2\145\1\142\1\uffff\2\162\1\142\1\47\6\uffff\1\172\1\154" - u"\1\145\1\163\1\164\1\124\1\145\2\uffff\1\145\10\47\1\71\1\145\1" - u"\143\1\145\3\172\1\164\1\165\1\162\1\145\2\164\1\172\1\145\1\172" - u"\1\uffff\1\172\2\uffff" - ) - - DFA38_accept = DFA.unpack( - u"\2\uffff\1\2\1\3\1\4\1\5\1\6\1\uffff\1\11\5\uffff\1\15\1\16\1\17" - u"\3\uffff\1\22\4\uffff\1\23\1\24\1\25\1\1\1\10\1\7\7\uffff\1\20" - u"\1\21\31\uffff\1\12\1\uffff\1\14\1\13" - ) - - DFA38_special = DFA.unpack( - u"\105\uffff" - ) - - - DFA38_transition = [ - DFA.unpack(u"\2\33\2\uffff\1\33\22\uffff\1\33\1\16\1\24\1\32\3\uffff" - u"\1\24\1\4\1\6\1\uffff\1\20\1\2\1\20\1\7\1\32\1\22\11\21\1\1\1\10" - u"\1\uffff\1\17\3\uffff\1\31\1\26\13\31\1\15\3\31\1\27\2\31\1\30" - u"\5\31\1\3\1\uffff\1\5\1\uffff\1\31\1\uffff\1\31\1\25\1\31\1\11" - u"\1\12\3\31\1\13\4\31\1\14\3\31\1\23\2\31\1\30\5\31"), - DFA.unpack(u"\1\34"), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u"\1\31\1\uffff\12\37\7\uffff\32\31\4\uffff\1\31\1\uffff" - u"\32\31"), - DFA.unpack(u""), - DFA.unpack(u"\1\40"), - DFA.unpack(u"\1\41"), - DFA.unpack(u"\1\42"), - DFA.unpack(u"\1\43"), - DFA.unpack(u"\1\44"), - 
DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u"\1\47\1\uffff\12\45\13\uffff\1\47\37\uffff\1\47"), - DFA.unpack(u"\1\47\1\uffff\1\50\11\47\13\uffff\1\47\37\uffff\1\47"), - DFA.unpack(u"\1\24\4\uffff\1\24\32\uffff\1\52\37\uffff\1\51"), - DFA.unpack(u""), - DFA.unpack(u"\1\24\4\uffff\1\24\52\uffff\1\54\37\uffff\1\53"), - DFA.unpack(u"\1\24\4\uffff\1\24\52\uffff\1\56\37\uffff\1\55"), - DFA.unpack(u"\1\24\4\uffff\1\24\32\uffff\1\60\37\uffff\1\57"), - DFA.unpack(u"\1\24\4\uffff\1\24"), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u"\1\31\1\uffff\12\37\7\uffff\4\31\1\61\25\31\4\uffff" - u"\1\31\1\uffff\4\31\1\61\25\31"), - DFA.unpack(u"\1\62"), - DFA.unpack(u"\1\63"), - DFA.unpack(u"\1\64"), - DFA.unpack(u"\1\65"), - DFA.unpack(u"\1\66"), - DFA.unpack(u"\1\47\1\uffff\12\45\13\uffff\1\47\37\uffff\1\47"), - DFA.unpack(u""), - DFA.unpack(u""), - DFA.unpack(u"\1\47\1\uffff\1\50\11\47\13\uffff\1\47\37\uffff\1\47"), - DFA.unpack(u"\1\24\4\uffff\1\24"), - DFA.unpack(u"\1\24\4\uffff\1\24"), - DFA.unpack(u"\1\24\4\uffff\1\24"), - DFA.unpack(u"\1\24\4\uffff\1\24"), - DFA.unpack(u"\1\24\4\uffff\1\24"), - DFA.unpack(u"\1\24\4\uffff\1\24"), - DFA.unpack(u"\1\24\4\uffff\1\24"), - DFA.unpack(u"\1\24\4\uffff\1\24"), - DFA.unpack(u"\1\47\1\uffff\1\47\2\uffff\12\67"), - DFA.unpack(u"\1\70"), - DFA.unpack(u"\1\71"), - DFA.unpack(u"\1\72"), - DFA.unpack(u"\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - u"\32\31"), - DFA.unpack(u"\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - u"\32\31"), - DFA.unpack(u"\1\31\1\uffff\12\67\7\uffff\32\31\4\uffff\1\31\1\uffff" - u"\32\31"), - DFA.unpack(u"\1\73"), - DFA.unpack(u"\1\74"), - DFA.unpack(u"\1\75"), - DFA.unpack(u"\1\76"), - DFA.unpack(u"\1\77"), - DFA.unpack(u"\1\100"), - DFA.unpack(u"\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - u"\32\31"), - DFA.unpack(u"\1\102"), - DFA.unpack(u"\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - u"\32\31"), - DFA.unpack(u""), - DFA.unpack(u"\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - u"\32\31"), - DFA.unpack(u""), - DFA.unpack(u"") - ] - - # class definition for DFA #38 - - class DFA38(DFA): - pass - - - - - - -def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr): - from antlr3.main import LexerMain - main = LexerMain(CongressLexer) - - main.stdin = stdin - main.stdout = stdout - main.stderr = stderr - main.execute(argv) - - - -if __name__ == '__main__': - main(sys.argv) diff --git a/congress/datalog/Python2/CongressParser.py b/congress/datalog/Python2/CongressParser.py deleted file mode 100644 index 57240982..00000000 --- a/congress/datalog/Python2/CongressParser.py +++ /dev/null @@ -1,2710 +0,0 @@ -# $ANTLR 3.5 /Users/tim/opencode/congress/congress/datalog/Congress.g 2015-08-03 09:06:21 - -import sys -from antlr3 import * -from antlr3.compat import set, frozenset - -from antlr3.tree import * - - - - -# for convenience in actions -HIDDEN = BaseRecognizer.HIDDEN - -# token types -EOF=-1 -T__53=53 -T__54=54 -T__55=55 -T__56=56 -T__57=57 -T__58=58 -AND=4 -ATOM=5 -BYTESTRPREFIX=6 -BYTES_CHAR_DQ=7 -BYTES_CHAR_SQ=8 -BYTES_ESC=9 -BYTES_TESC=10 -COLONMINUS=11 -COLUMN_NAME=12 -COLUMN_NUMBER=13 -COMMA=14 -COMMENT=15 -DIGIT=16 -EQUAL=17 -EVENT=18 -EXPONENT=19 -FLOAT=20 -FLOAT_EXP=21 -FLOAT_NO_EXP=22 -FLOAT_OBJ=23 -FRAC_PART=24 -HEX_DIGIT=25 -ID=26 -INT=27 -INTEGER_OBJ=28 -INT_PART=29 -LBRACKET=30 -LITERAL=31 -LPAREN=32 -MODAL=33 -NAMED_PARAM=34 -NEGATION=35 -NOT=36 -PROG=37 
-RBRACKET=38 -RPAREN=39 -RULE=40 -SIGN=41 -SLBYTESTRING=42 -SLSTRING=43 -STRING=44 -STRING_ESC=45 -STRING_OBJ=46 -STRPREFIX=47 -STRUCTURED_NAME=48 -SYMBOL_OBJ=49 -THEORY=50 -VARIABLE=51 -WS=52 - -# token names -tokenNames = [ - "", "", "", "", - "AND", "ATOM", "BYTESTRPREFIX", "BYTES_CHAR_DQ", "BYTES_CHAR_SQ", "BYTES_ESC", - "BYTES_TESC", "COLONMINUS", "COLUMN_NAME", "COLUMN_NUMBER", "COMMA", - "COMMENT", "DIGIT", "EQUAL", "EVENT", "EXPONENT", "FLOAT", "FLOAT_EXP", - "FLOAT_NO_EXP", "FLOAT_OBJ", "FRAC_PART", "HEX_DIGIT", "ID", "INT", - "INTEGER_OBJ", "INT_PART", "LBRACKET", "LITERAL", "LPAREN", "MODAL", - "NAMED_PARAM", "NEGATION", "NOT", "PROG", "RBRACKET", "RPAREN", "RULE", - "SIGN", "SLBYTESTRING", "SLSTRING", "STRING", "STRING_ESC", "STRING_OBJ", - "STRPREFIX", "STRUCTURED_NAME", "SYMBOL_OBJ", "THEORY", "VARIABLE", - "WS", "'.'", "':'", "';'", "'delete'", "'execute'", "'insert'" -] - - - - -class CongressParser(Parser): - grammarFileName = "/Users/tim/opencode/congress/congress/datalog/Congress.g" - api_version = 1 - tokenNames = tokenNames - - def __init__(self, input, state=None, *args, **kwargs): - if state is None: - state = RecognizerSharedState() - - super(CongressParser, self).__init__(input, state, *args, **kwargs) - - self.dfa5 = self.DFA5( - self, 5, - eot = self.DFA5_eot, - eof = self.DFA5_eof, - min = self.DFA5_min, - max = self.DFA5_max, - accept = self.DFA5_accept, - special = self.DFA5_special, - transition = self.DFA5_transition - ) - - - - - self.delegates = [] - - self._adaptor = None - self.adaptor = CommonTreeAdaptor() - - - - def getTreeAdaptor(self): - return self._adaptor - - def setTreeAdaptor(self, adaptor): - self._adaptor = adaptor - - adaptor = property(getTreeAdaptor, setTreeAdaptor) - - - class prog_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.prog_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "prog" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:58:1: prog : ( ( statement )+ EOF -> ^( THEORY ( statement )+ ) | EOF ); - def prog(self, ): - retval = self.prog_return() - retval.start = self.input.LT(1) - - - root_0 = None - - EOF2 = None - EOF3 = None - statement1 = None - - EOF2_tree = None - EOF3_tree = None - stream_EOF = RewriteRuleTokenStream(self._adaptor, "token EOF") - stream_statement = RewriteRuleSubtreeStream(self._adaptor, "rule statement") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:59:5: ( ( statement )+ EOF -> ^( THEORY ( statement )+ ) | EOF ) - alt2 = 2 - LA2_0 = self.input.LA(1) - - if (LA2_0 == COMMENT or LA2_0 == ID or LA2_0 == NEGATION or (56 <= LA2_0 <= 58)) : - alt2 = 1 - elif (LA2_0 == EOF) : - alt2 = 2 - else: - nvae = NoViableAltException("", 2, 0, self.input) - - raise nvae - - - if alt2 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:59:7: ( statement )+ EOF - pass - # /Users/tim/opencode/congress/congress/datalog/Congress.g:59:7: ( statement )+ - cnt1 = 0 - while True: #loop1 - alt1 = 2 - LA1_0 = self.input.LA(1) - - if (LA1_0 == COMMENT or LA1_0 == ID or LA1_0 == NEGATION or (56 <= LA1_0 <= 58)) : - alt1 = 1 - - - if alt1 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:59:7: statement - pass - self._state.following.append(self.FOLLOW_statement_in_prog265) - statement1 = self.statement() - - self._state.following.pop() - stream_statement.add(statement1.tree) - - - - else: - if cnt1 >= 1: - break #loop1 - - eee = EarlyExitException(1, self.input) - raise eee - - cnt1 += 1 - - - EOF2 = 
self.match(self.input, EOF, self.FOLLOW_EOF_in_prog268) - stream_EOF.add(EOF2) - - - # AST Rewrite - # elements: statement - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 59:22: -> ^( THEORY ( statement )+ ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:59:25: ^( THEORY ( statement )+ ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(THEORY, "THEORY") - , root_1) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:59:34: ( statement )+ - if not (stream_statement.hasNext()): - raise RewriteEarlyExitException() - - while stream_statement.hasNext(): - self._adaptor.addChild(root_1, stream_statement.nextTree()) - - - stream_statement.reset() - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - elif alt2 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:60:7: EOF - pass - root_0 = self._adaptor.nil() - - - EOF3 = self.match(self.input, EOF, self.FOLLOW_EOF_in_prog285) - EOF3_tree = self._adaptor.createWithPayload(EOF3) - self._adaptor.addChild(root_0, EOF3_tree) - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "prog" - - - class statement_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.statement_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "statement" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:65:1: statement : ( formula ( formula_terminator )? -> formula | COMMENT ); - def statement(self, ): - retval = self.statement_return() - retval.start = self.input.LT(1) - - - root_0 = None - - COMMENT6 = None - formula4 = None - formula_terminator5 = None - - COMMENT6_tree = None - stream_formula_terminator = RewriteRuleSubtreeStream(self._adaptor, "rule formula_terminator") - stream_formula = RewriteRuleSubtreeStream(self._adaptor, "rule formula") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:66:5: ( formula ( formula_terminator )? -> formula | COMMENT ) - alt4 = 2 - LA4_0 = self.input.LA(1) - - if (LA4_0 == ID or LA4_0 == NEGATION or (56 <= LA4_0 <= 58)) : - alt4 = 1 - elif (LA4_0 == COMMENT) : - alt4 = 2 - else: - nvae = NoViableAltException("", 4, 0, self.input) - - raise nvae - - - if alt4 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:66:7: formula ( formula_terminator )? - pass - self._state.following.append(self.FOLLOW_formula_in_statement304) - formula4 = self.formula() - - self._state.following.pop() - stream_formula.add(formula4.tree) - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:66:15: ( formula_terminator )? 
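# The optional formula_terminator handled below is matched (so input
# advances past ';' or '.') but, per the '-> formula' rewrite at the end
# of this rule, never added to the AST: matched sub-results are buffered
# in RewriteRule*Stream objects and the rewrite replays only the named
# subtrees. A minimal standalone sketch of that buffering pattern — not
# the antlr3 runtime; all names here are illustrative:

class SubtreeStream(object):
    """Buffers subtrees in match order; a rewrite replays them."""
    def __init__(self):
        self._items = []
        self._cursor = 0

    def add(self, tree):
        self._items.append(tree)

    def hasNext(self):
        return self._cursor < len(self._items)

    def nextTree(self):
        item = self._items[self._cursor]
        self._cursor += 1
        return item


def rewrite_statement(formula_tree, terminator_token=None):
    # 'formula ( formula_terminator )? -> formula': the terminator is
    # consumed during matching but deliberately ignored here, so only
    # the formula subtree reaches the rewritten root.
    stream = SubtreeStream()
    stream.add(formula_tree)
    root = []                       # stands in for self._adaptor.nil()
    while stream.hasNext():
        root.append(stream.nextTree())
    return root[0]

assert rewrite_statement(('ATOM', 'p(x)'), ';') == ('ATOM', 'p(x)')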
- alt3 = 2 - LA3_0 = self.input.LA(1) - - if (LA3_0 == 53 or LA3_0 == 55) : - alt3 = 1 - if alt3 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:66:15: formula_terminator - pass - self._state.following.append(self.FOLLOW_formula_terminator_in_statement306) - formula_terminator5 = self.formula_terminator() - - self._state.following.pop() - stream_formula_terminator.add(formula_terminator5.tree) - - - - - - # AST Rewrite - # elements: formula - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 66:35: -> formula - self._adaptor.addChild(root_0, stream_formula.nextTree()) - - - - - retval.tree = root_0 - - - - - elif alt4 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:67:7: COMMENT - pass - root_0 = self._adaptor.nil() - - - COMMENT6 = self.match(self.input, COMMENT, self.FOLLOW_COMMENT_in_statement319) - COMMENT6_tree = self._adaptor.createWithPayload(COMMENT6) - self._adaptor.addChild(root_0, COMMENT6_tree) - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "statement" - - - class formula_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.formula_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "formula" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:70:1: formula : ( rule | fact | event ); - def formula(self, ): - retval = self.formula_return() - retval.start = self.input.LT(1) - - - root_0 = None - - rule7 = None - fact8 = None - event9 = None - - - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:71:5: ( rule | fact | event ) - alt5 = 3 - alt5 = self.dfa5.predict(self.input) - if alt5 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:71:7: rule - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_rule_in_formula336) - rule7 = self.rule() - - self._state.following.pop() - self._adaptor.addChild(root_0, rule7.tree) - - - - elif alt5 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:72:7: fact - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_fact_in_formula344) - fact8 = self.fact() - - self._state.following.pop() - self._adaptor.addChild(root_0, fact8.tree) - - - - elif alt5 == 3: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:73:7: event - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_event_in_formula352) - event9 = self.event() - - self._state.following.pop() - self._adaptor.addChild(root_0, event9.tree) - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, 
self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "formula" - - - class event_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.event_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "event" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:86:1: event : event_op LBRACKET rule ( formula_terminator STRING )? RBRACKET -> ^( EVENT event_op rule ( STRING )? ) ; - def event(self, ): - retval = self.event_return() - retval.start = self.input.LT(1) - - - root_0 = None - - LBRACKET11 = None - STRING14 = None - RBRACKET15 = None - event_op10 = None - rule12 = None - formula_terminator13 = None - - LBRACKET11_tree = None - STRING14_tree = None - RBRACKET15_tree = None - stream_LBRACKET = RewriteRuleTokenStream(self._adaptor, "token LBRACKET") - stream_STRING = RewriteRuleTokenStream(self._adaptor, "token STRING") - stream_RBRACKET = RewriteRuleTokenStream(self._adaptor, "token RBRACKET") - stream_event_op = RewriteRuleSubtreeStream(self._adaptor, "rule event_op") - stream_formula_terminator = RewriteRuleSubtreeStream(self._adaptor, "rule formula_terminator") - stream_rule = RewriteRuleSubtreeStream(self._adaptor, "rule rule") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:87:5: ( event_op LBRACKET rule ( formula_terminator STRING )? RBRACKET -> ^( EVENT event_op rule ( STRING )? ) ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:87:7: event_op LBRACKET rule ( formula_terminator STRING )? RBRACKET - pass - self._state.following.append(self.FOLLOW_event_op_in_event379) - event_op10 = self.event_op() - - self._state.following.pop() - stream_event_op.add(event_op10.tree) - - - LBRACKET11 = self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_event381) - stream_LBRACKET.add(LBRACKET11) - - - self._state.following.append(self.FOLLOW_rule_in_event383) - rule12 = self.rule() - - self._state.following.pop() - stream_rule.add(rule12.tree) - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:87:30: ( formula_terminator STRING )? - alt6 = 2 - LA6_0 = self.input.LA(1) - - if (LA6_0 == 53 or LA6_0 == 55) : - alt6 = 1 - if alt6 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:87:31: formula_terminator STRING - pass - self._state.following.append(self.FOLLOW_formula_terminator_in_event386) - formula_terminator13 = self.formula_terminator() - - self._state.following.pop() - stream_formula_terminator.add(formula_terminator13.tree) - - - STRING14 = self.match(self.input, STRING, self.FOLLOW_STRING_in_event388) - stream_STRING.add(STRING14) - - - - - - RBRACKET15 = self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_event392) - stream_RBRACKET.add(RBRACKET15) - - - # AST Rewrite - # elements: rule, STRING, event_op - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 87:68: -> ^( EVENT event_op rule ( STRING )? ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:87:71: ^( EVENT event_op rule ( STRING )? 
) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(EVENT, "EVENT") - , root_1) - - self._adaptor.addChild(root_1, stream_event_op.nextTree()) - - self._adaptor.addChild(root_1, stream_rule.nextTree()) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:87:93: ( STRING )? - if stream_STRING.hasNext(): - self._adaptor.addChild(root_1, - stream_STRING.nextNode() - ) - - - stream_STRING.reset(); - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "event" - - - class event_op_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.event_op_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "event_op" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:90:1: event_op : ( 'insert' | 'delete' ); - def event_op(self, ): - retval = self.event_op_return() - retval.start = self.input.LT(1) - - - root_0 = None - - set16 = None - - set16_tree = None - - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:91:5: ( 'insert' | 'delete' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - root_0 = self._adaptor.nil() - - - set16 = self.input.LT(1) - - if self.input.LA(1) == 56 or self.input.LA(1) == 58: - self.input.consume() - self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set16)) - - self._state.errorRecovery = False - - - else: - mse = MismatchedSetException(None, self.input) - raise mse - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "event_op" - - - class formula_terminator_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.formula_terminator_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "formula_terminator" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:95:1: formula_terminator : ( ';' | '.' ); - def formula_terminator(self, ): - retval = self.formula_terminator_return() - retval.start = self.input.LT(1) - - - root_0 = None - - set17 = None - - set17_tree = None - - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:96:5: ( ';' | '.' 
) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - root_0 = self._adaptor.nil() - - - set17 = self.input.LT(1) - - if self.input.LA(1) == 53 or self.input.LA(1) == 55: - self.input.consume() - self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set17)) - - self._state.errorRecovery = False - - - else: - mse = MismatchedSetException(None, self.input) - raise mse - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "formula_terminator" - - - class rule_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.rule_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "rule" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:100:1: rule : literal_list COLONMINUS literal_list -> ^( RULE literal_list literal_list ) ; - def rule(self, ): - retval = self.rule_return() - retval.start = self.input.LT(1) - - - root_0 = None - - COLONMINUS19 = None - literal_list18 = None - literal_list20 = None - - COLONMINUS19_tree = None - stream_COLONMINUS = RewriteRuleTokenStream(self._adaptor, "token COLONMINUS") - stream_literal_list = RewriteRuleSubtreeStream(self._adaptor, "rule literal_list") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:101:5: ( literal_list COLONMINUS literal_list -> ^( RULE literal_list literal_list ) ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:101:7: literal_list COLONMINUS literal_list - pass - self._state.following.append(self.FOLLOW_literal_list_in_rule472) - literal_list18 = self.literal_list() - - self._state.following.pop() - stream_literal_list.add(literal_list18.tree) - - - COLONMINUS19 = self.match(self.input, COLONMINUS, self.FOLLOW_COLONMINUS_in_rule474) - stream_COLONMINUS.add(COLONMINUS19) - - - self._state.following.append(self.FOLLOW_literal_list_in_rule476) - literal_list20 = self.literal_list() - - self._state.following.pop() - stream_literal_list.add(literal_list20.tree) - - - # AST Rewrite - # elements: literal_list, literal_list - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 101:44: -> ^( RULE literal_list literal_list ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:101:47: ^( RULE literal_list literal_list ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(RULE, "RULE") - , root_1) - - self._adaptor.addChild(root_1, stream_literal_list.nextTree()) - - self._adaptor.addChild(root_1, stream_literal_list.nextTree()) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = 
self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "rule" - - - class literal_list_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.literal_list_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "literal_list" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:104:1: literal_list : literal ( COMMA literal )* -> ^( AND ( literal )+ ) ; - def literal_list(self, ): - retval = self.literal_list_return() - retval.start = self.input.LT(1) - - - root_0 = None - - COMMA22 = None - literal21 = None - literal23 = None - - COMMA22_tree = None - stream_COMMA = RewriteRuleTokenStream(self._adaptor, "token COMMA") - stream_literal = RewriteRuleSubtreeStream(self._adaptor, "rule literal") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:105:5: ( literal ( COMMA literal )* -> ^( AND ( literal )+ ) ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:105:7: literal ( COMMA literal )* - pass - self._state.following.append(self.FOLLOW_literal_in_literal_list503) - literal21 = self.literal() - - self._state.following.pop() - stream_literal.add(literal21.tree) - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:105:15: ( COMMA literal )* - while True: #loop7 - alt7 = 2 - LA7_0 = self.input.LA(1) - - if (LA7_0 == COMMA) : - alt7 = 1 - - - if alt7 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:105:16: COMMA literal - pass - COMMA22 = self.match(self.input, COMMA, self.FOLLOW_COMMA_in_literal_list506) - stream_COMMA.add(COMMA22) - - - self._state.following.append(self.FOLLOW_literal_in_literal_list508) - literal23 = self.literal() - - self._state.following.pop() - stream_literal.add(literal23.tree) - - - - else: - break #loop7 - - - # AST Rewrite - # elements: literal - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 105:32: -> ^( AND ( literal )+ ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:105:35: ^( AND ( literal )+ ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(AND, "AND") - , root_1) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:105:41: ( literal )+ - if not (stream_literal.hasNext()): - raise RewriteEarlyExitException() - - while stream_literal.hasNext(): - self._adaptor.addChild(root_1, stream_literal.nextTree()) - - - stream_literal.reset() - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "literal_list" - - - class literal_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.literal_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "literal" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:108:1: literal : ( fact 
-> fact | NEGATION fact -> ^( NOT fact ) ); - def literal(self, ): - retval = self.literal_return() - retval.start = self.input.LT(1) - - - root_0 = None - - NEGATION25 = None - fact24 = None - fact26 = None - - NEGATION25_tree = None - stream_NEGATION = RewriteRuleTokenStream(self._adaptor, "token NEGATION") - stream_fact = RewriteRuleSubtreeStream(self._adaptor, "rule fact") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:109:5: ( fact -> fact | NEGATION fact -> ^( NOT fact ) ) - alt8 = 2 - LA8_0 = self.input.LA(1) - - if (LA8_0 == ID or (56 <= LA8_0 <= 58)) : - alt8 = 1 - elif (LA8_0 == NEGATION) : - alt8 = 2 - else: - nvae = NoViableAltException("", 8, 0, self.input) - - raise nvae - - - if alt8 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:109:7: fact - pass - self._state.following.append(self.FOLLOW_fact_in_literal536) - fact24 = self.fact() - - self._state.following.pop() - stream_fact.add(fact24.tree) - - - # AST Rewrite - # elements: fact - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 109:23: -> fact - self._adaptor.addChild(root_0, stream_fact.nextTree()) - - - - - retval.tree = root_0 - - - - - elif alt8 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:110:7: NEGATION fact - pass - NEGATION25 = self.match(self.input, NEGATION, self.FOLLOW_NEGATION_in_literal559) - stream_NEGATION.add(NEGATION25) - - - self._state.following.append(self.FOLLOW_fact_in_literal561) - fact26 = self.fact() - - self._state.following.pop() - stream_fact.add(fact26.tree) - - - # AST Rewrite - # elements: fact - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 110:23: -> ^( NOT fact ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:110:26: ^( NOT fact ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(NOT, "NOT") - , root_1) - - self._adaptor.addChild(root_1, stream_fact.nextTree()) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "literal" - - - class fact_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.fact_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "fact" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:115:1: fact : ( atom | modal_op LBRACKET atom RBRACKET -> ^( MODAL modal_op atom ) ); - def fact(self, ): - retval = self.fact_return() - retval.start = self.input.LT(1) - - - root_0 = None - - LBRACKET29 = None - RBRACKET31 = None - atom27 = None - 
modal_op28 = None - atom30 = None - - LBRACKET29_tree = None - RBRACKET31_tree = None - stream_LBRACKET = RewriteRuleTokenStream(self._adaptor, "token LBRACKET") - stream_RBRACKET = RewriteRuleTokenStream(self._adaptor, "token RBRACKET") - stream_modal_op = RewriteRuleSubtreeStream(self._adaptor, "rule modal_op") - stream_atom = RewriteRuleSubtreeStream(self._adaptor, "rule atom") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:116:5: ( atom | modal_op LBRACKET atom RBRACKET -> ^( MODAL modal_op atom ) ) - alt9 = 2 - LA9_0 = self.input.LA(1) - - if (LA9_0 == ID) : - alt9 = 1 - elif ((56 <= LA9_0 <= 58)) : - alt9 = 2 - else: - nvae = NoViableAltException("", 9, 0, self.input) - - raise nvae - - - if alt9 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:116:7: atom - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_atom_in_fact590) - atom27 = self.atom() - - self._state.following.pop() - self._adaptor.addChild(root_0, atom27.tree) - - - - elif alt9 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:117:7: modal_op LBRACKET atom RBRACKET - pass - self._state.following.append(self.FOLLOW_modal_op_in_fact598) - modal_op28 = self.modal_op() - - self._state.following.pop() - stream_modal_op.add(modal_op28.tree) - - - LBRACKET29 = self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_fact600) - stream_LBRACKET.add(LBRACKET29) - - - self._state.following.append(self.FOLLOW_atom_in_fact602) - atom30 = self.atom() - - self._state.following.pop() - stream_atom.add(atom30.tree) - - - RBRACKET31 = self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_fact604) - stream_RBRACKET.add(RBRACKET31) - - - # AST Rewrite - # elements: modal_op, atom - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 117:39: -> ^( MODAL modal_op atom ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:117:42: ^( MODAL modal_op atom ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(MODAL, "MODAL") - , root_1) - - self._adaptor.addChild(root_1, stream_modal_op.nextTree()) - - self._adaptor.addChild(root_1, stream_atom.nextTree()) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "fact" - - - class modal_op_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.modal_op_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "modal_op" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:120:1: modal_op : ( 'execute' | 'insert' | 'delete' ); - def modal_op(self, ): - retval = self.modal_op_return() - retval.start = self.input.LT(1) - - - root_0 = None - - set32 = None - - set32_tree = None - - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:121:5: ( 'execute' | 
'insert' | 'delete' ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g: - pass - root_0 = self._adaptor.nil() - - - set32 = self.input.LT(1) - - if (56 <= self.input.LA(1) <= 58): - self.input.consume() - self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set32)) - - self._state.errorRecovery = False - - - else: - mse = MismatchedSetException(None, self.input) - raise mse - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "modal_op" - - - class atom_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.atom_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "atom" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:126:1: atom : relation_constant ( LPAREN ( parameter_list )? RPAREN )? -> ^( ATOM relation_constant ( parameter_list )? ) ; - def atom(self, ): - retval = self.atom_return() - retval.start = self.input.LT(1) - - - root_0 = None - - LPAREN34 = None - RPAREN36 = None - relation_constant33 = None - parameter_list35 = None - - LPAREN34_tree = None - RPAREN36_tree = None - stream_LPAREN = RewriteRuleTokenStream(self._adaptor, "token LPAREN") - stream_RPAREN = RewriteRuleTokenStream(self._adaptor, "token RPAREN") - stream_relation_constant = RewriteRuleSubtreeStream(self._adaptor, "rule relation_constant") - stream_parameter_list = RewriteRuleSubtreeStream(self._adaptor, "rule parameter_list") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:127:5: ( relation_constant ( LPAREN ( parameter_list )? RPAREN )? -> ^( ATOM relation_constant ( parameter_list )? ) ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:127:7: relation_constant ( LPAREN ( parameter_list )? RPAREN )? - pass - self._state.following.append(self.FOLLOW_relation_constant_in_atom664) - relation_constant33 = self.relation_constant() - - self._state.following.pop() - stream_relation_constant.add(relation_constant33.tree) - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:127:25: ( LPAREN ( parameter_list )? RPAREN )? - alt11 = 2 - LA11_0 = self.input.LA(1) - - if (LA11_0 == LPAREN) : - alt11 = 1 - if alt11 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:127:26: LPAREN ( parameter_list )? RPAREN - pass - LPAREN34 = self.match(self.input, LPAREN, self.FOLLOW_LPAREN_in_atom667) - stream_LPAREN.add(LPAREN34) - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:127:33: ( parameter_list )? 
- alt10 = 2 - LA10_0 = self.input.LA(1) - - if (LA10_0 == FLOAT or (ID <= LA10_0 <= INT) or LA10_0 == STRING) : - alt10 = 1 - if alt10 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:127:33: parameter_list - pass - self._state.following.append(self.FOLLOW_parameter_list_in_atom669) - parameter_list35 = self.parameter_list() - - self._state.following.pop() - stream_parameter_list.add(parameter_list35.tree) - - - - - - RPAREN36 = self.match(self.input, RPAREN, self.FOLLOW_RPAREN_in_atom672) - stream_RPAREN.add(RPAREN36) - - - - - - # AST Rewrite - # elements: parameter_list, relation_constant - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 127:58: -> ^( ATOM relation_constant ( parameter_list )? ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:127:61: ^( ATOM relation_constant ( parameter_list )? ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(ATOM, "ATOM") - , root_1) - - self._adaptor.addChild(root_1, stream_relation_constant.nextTree()) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:127:86: ( parameter_list )? - if stream_parameter_list.hasNext(): - self._adaptor.addChild(root_1, stream_parameter_list.nextTree()) - - - stream_parameter_list.reset(); - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "atom" - - - class parameter_list_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.parameter_list_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "parameter_list" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:130:1: parameter_list : parameter ( COMMA parameter )* -> ( parameter )+ ; - def parameter_list(self, ): - retval = self.parameter_list_return() - retval.start = self.input.LT(1) - - - root_0 = None - - COMMA38 = None - parameter37 = None - parameter39 = None - - COMMA38_tree = None - stream_COMMA = RewriteRuleTokenStream(self._adaptor, "token COMMA") - stream_parameter = RewriteRuleSubtreeStream(self._adaptor, "rule parameter") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:131:5: ( parameter ( COMMA parameter )* -> ( parameter )+ ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:131:7: parameter ( COMMA parameter )* - pass - self._state.following.append(self.FOLLOW_parameter_in_parameter_list702) - parameter37 = self.parameter() - - self._state.following.pop() - stream_parameter.add(parameter37.tree) - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:131:17: ( COMMA parameter )* - while True: #loop12 - alt12 = 2 - LA12_0 = self.input.LA(1) - - if (LA12_0 == COMMA) : - alt12 = 1 - - - if alt12 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:131:18: COMMA parameter - pass - COMMA38 = 
self.match(self.input, COMMA, self.FOLLOW_COMMA_in_parameter_list705) - stream_COMMA.add(COMMA38) - - - self._state.following.append(self.FOLLOW_parameter_in_parameter_list707) - parameter39 = self.parameter() - - self._state.following.pop() - stream_parameter.add(parameter39.tree) - - - - else: - break #loop12 - - - # AST Rewrite - # elements: parameter - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 131:36: -> ( parameter )+ - # /Users/tim/opencode/congress/congress/datalog/Congress.g:131:39: ( parameter )+ - if not (stream_parameter.hasNext()): - raise RewriteEarlyExitException() - - while stream_parameter.hasNext(): - self._adaptor.addChild(root_0, stream_parameter.nextTree()) - - - stream_parameter.reset() - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "parameter_list" - - - class parameter_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.parameter_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "parameter" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:134:1: parameter : ( term -> term | column_ref EQUAL term -> ^( NAMED_PARAM column_ref term ) ); - def parameter(self, ): - retval = self.parameter_return() - retval.start = self.input.LT(1) - - - root_0 = None - - EQUAL42 = None - term40 = None - column_ref41 = None - term43 = None - - EQUAL42_tree = None - stream_EQUAL = RewriteRuleTokenStream(self._adaptor, "token EQUAL") - stream_term = RewriteRuleSubtreeStream(self._adaptor, "rule term") - stream_column_ref = RewriteRuleSubtreeStream(self._adaptor, "rule column_ref") - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:135:5: ( term -> term | column_ref EQUAL term -> ^( NAMED_PARAM column_ref term ) ) - alt13 = 2 - LA13 = self.input.LA(1) - if LA13 == INT: - LA13_1 = self.input.LA(2) - - if (LA13_1 == COMMA or LA13_1 == RPAREN) : - alt13 = 1 - elif (LA13_1 == EQUAL) : - alt13 = 2 - else: - nvae = NoViableAltException("", 13, 1, self.input) - - raise nvae - - - elif LA13 == FLOAT or LA13 == STRING: - alt13 = 1 - elif LA13 == ID: - LA13_3 = self.input.LA(2) - - if (LA13_3 == COMMA or LA13_3 == RPAREN) : - alt13 = 1 - elif (LA13_3 == EQUAL) : - alt13 = 2 - else: - nvae = NoViableAltException("", 13, 3, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 13, 0, self.input) - - raise nvae - - - if alt13 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:135:7: term - pass - self._state.following.append(self.FOLLOW_term_in_parameter731) - term40 = self.term() - - self._state.following.pop() - stream_term.add(term40.tree) - - - # AST Rewrite - # elements: term - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = 
RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 135:12: -> term - self._adaptor.addChild(root_0, stream_term.nextTree()) - - - - - retval.tree = root_0 - - - - - elif alt13 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:136:7: column_ref EQUAL term - pass - self._state.following.append(self.FOLLOW_column_ref_in_parameter743) - column_ref41 = self.column_ref() - - self._state.following.pop() - stream_column_ref.add(column_ref41.tree) - - - EQUAL42 = self.match(self.input, EQUAL, self.FOLLOW_EQUAL_in_parameter745) - stream_EQUAL.add(EQUAL42) - - - self._state.following.append(self.FOLLOW_term_in_parameter747) - term43 = self.term() - - self._state.following.pop() - stream_term.add(term43.tree) - - - # AST Rewrite - # elements: column_ref, term - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 136:29: -> ^( NAMED_PARAM column_ref term ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:136:32: ^( NAMED_PARAM column_ref term ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(NAMED_PARAM, "NAMED_PARAM") - , root_1) - - self._adaptor.addChild(root_1, stream_column_ref.nextTree()) - - self._adaptor.addChild(root_1, stream_term.nextTree()) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "parameter" - - - class column_ref_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.column_ref_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "column_ref" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:139:1: column_ref : ( ID -> ^( COLUMN_NAME ID ) | INT -> ^( COLUMN_NUMBER INT ) ); - def column_ref(self, ): - retval = self.column_ref_return() - retval.start = self.input.LT(1) - - - root_0 = None - - ID44 = None - INT45 = None - - ID44_tree = None - INT45_tree = None - stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID") - stream_INT = RewriteRuleTokenStream(self._adaptor, "token INT") - - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:140:5: ( ID -> ^( COLUMN_NAME ID ) | INT -> ^( COLUMN_NUMBER INT ) ) - alt14 = 2 - LA14_0 = self.input.LA(1) - - if (LA14_0 == ID) : - alt14 = 1 - elif (LA14_0 == INT) : - alt14 = 2 - else: - nvae = NoViableAltException("", 14, 0, self.input) - - raise nvae - - - if alt14 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:140:7: ID - pass - ID44 = self.match(self.input, ID, self.FOLLOW_ID_in_column_ref774) - stream_ID.add(ID44) - - - # AST Rewrite - # elements: ID - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if 
retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 140:12: -> ^( COLUMN_NAME ID ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:140:16: ^( COLUMN_NAME ID ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(COLUMN_NAME, "COLUMN_NAME") - , root_1) - - self._adaptor.addChild(root_1, - stream_ID.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - elif alt14 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:141:7: INT - pass - INT45 = self.match(self.input, INT, self.FOLLOW_INT_in_column_ref793) - stream_INT.add(INT45) - - - # AST Rewrite - # elements: INT - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 141:12: -> ^( COLUMN_NUMBER INT ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:141:16: ^( COLUMN_NUMBER INT ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(COLUMN_NUMBER, "COLUMN_NUMBER") - , root_1) - - self._adaptor.addChild(root_1, - stream_INT.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "column_ref" - - - class term_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.term_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "term" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:144:1: term : ( object_constant | variable ); - def term(self, ): - retval = self.term_return() - retval.start = self.input.LT(1) - - - root_0 = None - - object_constant46 = None - variable47 = None - - - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:145:5: ( object_constant | variable ) - alt15 = 2 - LA15_0 = self.input.LA(1) - - if (LA15_0 == FLOAT or LA15_0 == INT or LA15_0 == STRING) : - alt15 = 1 - elif (LA15_0 == ID) : - alt15 = 2 - else: - nvae = NoViableAltException("", 15, 0, self.input) - - raise nvae - - - if alt15 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:145:7: object_constant - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_object_constant_in_term820) - object_constant46 = self.object_constant() - - self._state.following.pop() - self._adaptor.addChild(root_0, object_constant46.tree) - - - - elif alt15 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:146:7: variable - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_variable_in_term828) - variable47 = self.variable() - - self._state.following.pop() - self._adaptor.addChild(root_0, variable47.tree) - - - - retval.stop = 
self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "term" - - - class object_constant_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.object_constant_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "object_constant" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:149:1: object_constant : ( INT -> ^( INTEGER_OBJ INT ) | FLOAT -> ^( FLOAT_OBJ FLOAT ) | STRING -> ^( STRING_OBJ STRING ) ); - def object_constant(self, ): - retval = self.object_constant_return() - retval.start = self.input.LT(1) - - - root_0 = None - - INT48 = None - FLOAT49 = None - STRING50 = None - - INT48_tree = None - FLOAT49_tree = None - STRING50_tree = None - stream_FLOAT = RewriteRuleTokenStream(self._adaptor, "token FLOAT") - stream_STRING = RewriteRuleTokenStream(self._adaptor, "token STRING") - stream_INT = RewriteRuleTokenStream(self._adaptor, "token INT") - - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:150:5: ( INT -> ^( INTEGER_OBJ INT ) | FLOAT -> ^( FLOAT_OBJ FLOAT ) | STRING -> ^( STRING_OBJ STRING ) ) - alt16 = 3 - LA16 = self.input.LA(1) - if LA16 == INT: - alt16 = 1 - elif LA16 == FLOAT: - alt16 = 2 - elif LA16 == STRING: - alt16 = 3 - else: - nvae = NoViableAltException("", 16, 0, self.input) - - raise nvae - - - if alt16 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:150:7: INT - pass - INT48 = self.match(self.input, INT, self.FOLLOW_INT_in_object_constant845) - stream_INT.add(INT48) - - - # AST Rewrite - # elements: INT - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 150:16: -> ^( INTEGER_OBJ INT ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:150:19: ^( INTEGER_OBJ INT ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(INTEGER_OBJ, "INTEGER_OBJ") - , root_1) - - self._adaptor.addChild(root_1, - stream_INT.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - elif alt16 == 2: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:151:7: FLOAT - pass - FLOAT49 = self.match(self.input, FLOAT, self.FOLLOW_FLOAT_in_object_constant866) - stream_FLOAT.add(FLOAT49) - - - # AST Rewrite - # elements: FLOAT - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 151:16: -> ^( FLOAT_OBJ FLOAT ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:151:19: ^( FLOAT_OBJ FLOAT ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(FLOAT_OBJ, "FLOAT_OBJ") - , root_1) - - 
self._adaptor.addChild(root_1, - stream_FLOAT.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - elif alt16 == 3: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:152:7: STRING - pass - STRING50 = self.match(self.input, STRING, self.FOLLOW_STRING_in_object_constant885) - stream_STRING.add(STRING50) - - - # AST Rewrite - # elements: STRING - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 152:16: -> ^( STRING_OBJ STRING ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:152:19: ^( STRING_OBJ STRING ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(STRING_OBJ, "STRING_OBJ") - , root_1) - - self._adaptor.addChild(root_1, - stream_STRING.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "object_constant" - - - class variable_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.variable_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "variable" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:155:1: variable : ID -> ^( VARIABLE ID ) ; - def variable(self, ): - retval = self.variable_return() - retval.start = self.input.LT(1) - - - root_0 = None - - ID51 = None - - ID51_tree = None - stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID") - - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:156:5: ( ID -> ^( VARIABLE ID ) ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:156:7: ID - pass - ID51 = self.match(self.input, ID, self.FOLLOW_ID_in_variable912) - stream_ID.add(ID51) - - - # AST Rewrite - # elements: ID - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 156:10: -> ^( VARIABLE ID ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:156:13: ^( VARIABLE ID ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(VARIABLE, "VARIABLE") - , root_1) - - self._adaptor.addChild(root_1, - stream_ID.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - 
finally: - pass - return retval - - # $ANTLR end "variable" - - - class relation_constant_return(ParserRuleReturnScope): - def __init__(self): - super(CongressParser.relation_constant_return, self).__init__() - - self.tree = None - - - - - - # $ANTLR start "relation_constant" - # /Users/tim/opencode/congress/congress/datalog/Congress.g:159:1: relation_constant : ID ( ':' ID )* ( SIGN )? -> ^( STRUCTURED_NAME ( ID )+ ( SIGN )? ) ; - def relation_constant(self, ): - retval = self.relation_constant_return() - retval.start = self.input.LT(1) - - - root_0 = None - - ID52 = None - char_literal53 = None - ID54 = None - SIGN55 = None - - ID52_tree = None - char_literal53_tree = None - ID54_tree = None - SIGN55_tree = None - stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID") - stream_SIGN = RewriteRuleTokenStream(self._adaptor, "token SIGN") - stream_54 = RewriteRuleTokenStream(self._adaptor, "token 54") - - try: - try: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:160:5: ( ID ( ':' ID )* ( SIGN )? -> ^( STRUCTURED_NAME ( ID )+ ( SIGN )? ) ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:160:7: ID ( ':' ID )* ( SIGN )? - pass - ID52 = self.match(self.input, ID, self.FOLLOW_ID_in_relation_constant937) - stream_ID.add(ID52) - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:160:10: ( ':' ID )* - while True: #loop17 - alt17 = 2 - LA17_0 = self.input.LA(1) - - if (LA17_0 == 54) : - alt17 = 1 - - - if alt17 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:160:11: ':' ID - pass - char_literal53 = self.match(self.input, 54, self.FOLLOW_54_in_relation_constant940) - stream_54.add(char_literal53) - - - ID54 = self.match(self.input, ID, self.FOLLOW_ID_in_relation_constant942) - stream_ID.add(ID54) - - - - else: - break #loop17 - - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:160:20: ( SIGN )? - alt18 = 2 - LA18_0 = self.input.LA(1) - - if (LA18_0 == SIGN) : - alt18 = 1 - if alt18 == 1: - # /Users/tim/opencode/congress/congress/datalog/Congress.g:160:20: SIGN - pass - SIGN55 = self.match(self.input, SIGN, self.FOLLOW_SIGN_in_relation_constant946) - stream_SIGN.add(SIGN55) - - - - - - # AST Rewrite - # elements: ID, SIGN - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 160:26: -> ^( STRUCTURED_NAME ( ID )+ ( SIGN )? ) - # /Users/tim/opencode/congress/congress/datalog/Congress.g:160:29: ^( STRUCTURED_NAME ( ID )+ ( SIGN )? ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(STRUCTURED_NAME, "STRUCTURED_NAME") - , root_1) - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:160:47: ( ID )+ - if not (stream_ID.hasNext()): - raise RewriteEarlyExitException() - - while stream_ID.hasNext(): - self._adaptor.addChild(root_1, - stream_ID.nextNode() - ) - - - stream_ID.reset() - - # /Users/tim/opencode/congress/congress/datalog/Congress.g:160:51: ( SIGN )? 
- if stream_SIGN.hasNext(): - self._adaptor.addChild(root_1, - stream_SIGN.nextNode() - ) - - - stream_SIGN.reset(); - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException, re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "relation_constant" - - - - # lookup tables for DFA #5 - - DFA5_eot = DFA.unpack( - u"\124\uffff" - ) - - DFA5_eof = DFA.unpack( - u"\1\uffff\1\10\4\uffff\1\10\4\uffff\1\10\4\uffff\1\10\10\uffff\1" - u"\10\72\uffff" - ) - - DFA5_min = DFA.unpack( - u"\1\32\1\13\1\36\1\uffff\1\36\1\32\1\13\1\24\1\uffff\2\32\1\13\4" - u"\16\2\13\1\uffff\1\40\2\24\1\32\1\13\1\24\1\13\1\32\1\40\1\24\10" - u"\16\1\13\4\16\1\13\1\40\4\16\1\46\5\24\24\16\2\24\10\16" - ) - - DFA5_max = DFA.unpack( - u"\2\72\1\36\1\uffff\1\36\1\32\1\72\1\54\1\uffff\1\72\1\32\1\72\4" - u"\47\1\72\1\66\1\uffff\1\66\2\54\1\32\1\46\1\54\1\72\1\32\1\46\1" - u"\54\10\47\1\66\4\47\1\46\1\66\4\47\1\46\5\54\24\47\2\54\10\47" - ) - - DFA5_accept = DFA.unpack( - u"\3\uffff\1\1\4\uffff\1\2\11\uffff\1\3\101\uffff" - ) - - DFA5_special = DFA.unpack( - u"\124\uffff" - ) - - - DFA5_transition = [ - DFA.unpack(u"\1\1\10\uffff\1\3\24\uffff\1\2\1\4\1\2"), - DFA.unpack(u"\1\3\2\uffff\1\3\1\10\12\uffff\1\10\5\uffff\1\7\2\uffff" - u"\1\10\5\uffff\1\6\13\uffff\1\10\1\5\4\10"), - DFA.unpack(u"\1\11"), - DFA.unpack(u""), - DFA.unpack(u"\1\12"), - DFA.unpack(u"\1\13"), - DFA.unpack(u"\1\3\2\uffff\1\3\1\10\12\uffff\1\10\5\uffff\1\7\2\uffff" - u"\1\10\21\uffff\1\10\1\uffff\4\10"), - DFA.unpack(u"\1\15\5\uffff\1\17\1\14\13\uffff\1\20\4\uffff\1\16"), - DFA.unpack(u""), - DFA.unpack(u"\1\21\10\uffff\1\22\24\uffff\3\22"), - DFA.unpack(u"\1\23"), - DFA.unpack(u"\1\3\2\uffff\1\3\1\10\12\uffff\1\10\5\uffff\1\7\2\uffff" - u"\1\10\5\uffff\1\6\13\uffff\1\10\1\5\4\10"), - DFA.unpack(u"\1\24\2\uffff\1\25\25\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\2\uffff\1\25\25\uffff\1\20"), - DFA.unpack(u"\1\3\2\uffff\1\3\1\10\12\uffff\1\10\10\uffff\1\10\21" - u"\uffff\1\10\1\uffff\4\10"), - DFA.unpack(u"\1\22\2\uffff\1\22\21\uffff\1\30\5\uffff\1\31\2\uffff" - u"\1\27\14\uffff\1\26"), - DFA.unpack(u""), - DFA.unpack(u"\1\34\5\uffff\1\31\2\uffff\1\33\14\uffff\1\32"), - DFA.unpack(u"\1\36\5\uffff\1\40\1\35\20\uffff\1\37"), - DFA.unpack(u"\1\42\5\uffff\1\44\1\41\20\uffff\1\43"), - DFA.unpack(u"\1\45"), - DFA.unpack(u"\1\22\2\uffff\1\22\21\uffff\1\30\5\uffff\1\31"), - DFA.unpack(u"\1\47\5\uffff\1\51\1\46\13\uffff\1\52\4\uffff\1\50"), - DFA.unpack(u"\1\3\2\uffff\1\3\1\10\12\uffff\1\10\10\uffff\1\10\21" - u"\uffff\1\10\1\uffff\4\10"), - DFA.unpack(u"\1\53"), - DFA.unpack(u"\1\34\5\uffff\1\31"), - DFA.unpack(u"\1\55\5\uffff\1\57\1\54\13\uffff\1\60\4\uffff\1\56"), - DFA.unpack(u"\1\24\2\uffff\1\61\25\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\2\uffff\1\61\25\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\22\2\uffff\1\22\21\uffff\1\30\5\uffff\1\31\2\uffff" - u"\1\27\14\uffff\1\26"), - 
DFA.unpack(u"\1\62\2\uffff\1\63\25\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\2\uffff\1\63\25\uffff\1\52"), - DFA.unpack(u"\1\22\2\uffff\1\22\27\uffff\1\31"), - DFA.unpack(u"\1\34\5\uffff\1\31\2\uffff\1\33\14\uffff\1\32"), - DFA.unpack(u"\1\64\2\uffff\1\65\25\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\2\uffff\1\65\25\uffff\1\60"), - DFA.unpack(u"\1\31"), - DFA.unpack(u"\1\67\5\uffff\1\71\1\66\20\uffff\1\70"), - DFA.unpack(u"\1\73\5\uffff\1\75\1\72\20\uffff\1\74"), - DFA.unpack(u"\1\77\5\uffff\1\101\1\76\20\uffff\1\100"), - DFA.unpack(u"\1\103\5\uffff\1\105\1\102\20\uffff\1\104"), - DFA.unpack(u"\1\107\5\uffff\1\111\1\106\20\uffff\1\110"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\24\30\uffff\1\20"), - DFA.unpack(u"\1\62\2\uffff\1\112\25\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\2\uffff\1\112\25\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\64\2\uffff\1\113\25\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\2\uffff\1\113\25\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\115\5\uffff\1\117\1\114\20\uffff\1\116"), - DFA.unpack(u"\1\121\5\uffff\1\123\1\120\20\uffff\1\122"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\62\30\uffff\1\52"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60"), - DFA.unpack(u"\1\64\30\uffff\1\60") - ] - - # class definition for DFA #5 - - class DFA5(DFA): - pass - - - - - FOLLOW_statement_in_prog265 = frozenset([15, 26, 35, 56, 57, 58]) - FOLLOW_EOF_in_prog268 = frozenset([1]) - FOLLOW_EOF_in_prog285 = frozenset([1]) - FOLLOW_formula_in_statement304 = frozenset([1, 53, 55]) - FOLLOW_formula_terminator_in_statement306 = frozenset([1]) - FOLLOW_COMMENT_in_statement319 = frozenset([1]) - FOLLOW_rule_in_formula336 = frozenset([1]) - FOLLOW_fact_in_formula344 = frozenset([1]) - FOLLOW_event_in_formula352 = frozenset([1]) - FOLLOW_event_op_in_event379 = frozenset([30]) - FOLLOW_LBRACKET_in_event381 = frozenset([26, 35, 56, 57, 58]) - FOLLOW_rule_in_event383 = frozenset([38, 53, 55]) - FOLLOW_formula_terminator_in_event386 = frozenset([44]) - FOLLOW_STRING_in_event388 = frozenset([38]) - FOLLOW_RBRACKET_in_event392 = frozenset([1]) - FOLLOW_literal_list_in_rule472 = frozenset([11]) - FOLLOW_COLONMINUS_in_rule474 = frozenset([26, 35, 56, 57, 58]) - FOLLOW_literal_list_in_rule476 = frozenset([1]) - FOLLOW_literal_in_literal_list503 = frozenset([1, 14]) - FOLLOW_COMMA_in_literal_list506 = frozenset([26, 35, 56, 57, 58]) - FOLLOW_literal_in_literal_list508 = frozenset([1, 14]) - FOLLOW_fact_in_literal536 = frozenset([1]) - FOLLOW_NEGATION_in_literal559 = frozenset([26, 56, 57, 58]) - FOLLOW_fact_in_literal561 = frozenset([1]) - FOLLOW_atom_in_fact590 = frozenset([1]) - FOLLOW_modal_op_in_fact598 = frozenset([30]) - FOLLOW_LBRACKET_in_fact600 = frozenset([26]) - FOLLOW_atom_in_fact602 = frozenset([38]) - 
FOLLOW_RBRACKET_in_fact604 = frozenset([1]) - FOLLOW_relation_constant_in_atom664 = frozenset([1, 32]) - FOLLOW_LPAREN_in_atom667 = frozenset([20, 26, 27, 39, 44]) - FOLLOW_parameter_list_in_atom669 = frozenset([39]) - FOLLOW_RPAREN_in_atom672 = frozenset([1]) - FOLLOW_parameter_in_parameter_list702 = frozenset([1, 14]) - FOLLOW_COMMA_in_parameter_list705 = frozenset([20, 26, 27, 44]) - FOLLOW_parameter_in_parameter_list707 = frozenset([1, 14]) - FOLLOW_term_in_parameter731 = frozenset([1]) - FOLLOW_column_ref_in_parameter743 = frozenset([17]) - FOLLOW_EQUAL_in_parameter745 = frozenset([20, 26, 27, 44]) - FOLLOW_term_in_parameter747 = frozenset([1]) - FOLLOW_ID_in_column_ref774 = frozenset([1]) - FOLLOW_INT_in_column_ref793 = frozenset([1]) - FOLLOW_object_constant_in_term820 = frozenset([1]) - FOLLOW_variable_in_term828 = frozenset([1]) - FOLLOW_INT_in_object_constant845 = frozenset([1]) - FOLLOW_FLOAT_in_object_constant866 = frozenset([1]) - FOLLOW_STRING_in_object_constant885 = frozenset([1]) - FOLLOW_ID_in_variable912 = frozenset([1]) - FOLLOW_ID_in_relation_constant937 = frozenset([1, 41, 54]) - FOLLOW_54_in_relation_constant940 = frozenset([26]) - FOLLOW_ID_in_relation_constant942 = frozenset([1, 41, 54]) - FOLLOW_SIGN_in_relation_constant946 = frozenset([1]) - - - -def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr): - from antlr3.main import ParserMain - main = ParserMain("CongressLexer", CongressParser) - - main.stdin = stdin - main.stdout = stdout - main.stderr = stderr - main.execute(argv) - - - -if __name__ == '__main__': - main(sys.argv) diff --git a/congress/datalog/Python2/__init__.py b/congress/datalog/Python2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/datalog/Python3/CongressLexer.py b/congress/datalog/Python3/CongressLexer.py deleted file mode 100644 index b3468b0a..00000000 --- a/congress/datalog/Python3/CongressLexer.py +++ /dev/null @@ -1,2750 +0,0 @@ -# $ANTLR 3.5.2 Congress.g 2015-11-02 17:04:43 - -import sys -from antlr3 import * - - - -# for convenience in actions -HIDDEN = BaseRecognizer.HIDDEN - -# token types -EOF=-1 -T__53=53 -T__54=54 -T__55=55 -T__56=56 -T__57=57 -T__58=58 -AND=4 -ATOM=5 -BYTESTRPREFIX=6 -BYTES_CHAR_DQ=7 -BYTES_CHAR_SQ=8 -BYTES_ESC=9 -BYTES_TESC=10 -COLONMINUS=11 -COLUMN_NAME=12 -COLUMN_NUMBER=13 -COMMA=14 -COMMENT=15 -DIGIT=16 -EQUAL=17 -EVENT=18 -EXPONENT=19 -FLOAT=20 -FLOAT_EXP=21 -FLOAT_NO_EXP=22 -FLOAT_OBJ=23 -FRAC_PART=24 -HEX_DIGIT=25 -ID=26 -INT=27 -INTEGER_OBJ=28 -INT_PART=29 -LBRACKET=30 -LITERAL=31 -LPAREN=32 -MODAL=33 -NAMED_PARAM=34 -NEGATION=35 -NOT=36 -PROG=37 -RBRACKET=38 -RPAREN=39 -RULE=40 -SIGN=41 -SLBYTESTRING=42 -SLSTRING=43 -STRING=44 -STRING_ESC=45 -STRING_OBJ=46 -STRPREFIX=47 -STRUCTURED_NAME=48 -SYMBOL_OBJ=49 -THEORY=50 -VARIABLE=51 -WS=52 - -# token names -tokenNamesMap = { - 0: "", 1: "", 2: "", 3: "", - -1: "EOF", 53: "T__53", 54: "T__54", 55: "T__55", 56: "T__56", 57: "T__57", - 58: "T__58", 4: "AND", 5: "ATOM", 6: "BYTESTRPREFIX", 7: "BYTES_CHAR_DQ", - 8: "BYTES_CHAR_SQ", 9: "BYTES_ESC", 10: "BYTES_TESC", 11: "COLONMINUS", - 12: "COLUMN_NAME", 13: "COLUMN_NUMBER", 14: "COMMA", 15: "COMMENT", - 16: "DIGIT", 17: "EQUAL", 18: "EVENT", 19: "EXPONENT", 20: "FLOAT", - 21: "FLOAT_EXP", 22: "FLOAT_NO_EXP", 23: "FLOAT_OBJ", 24: "FRAC_PART", - 25: "HEX_DIGIT", 26: "ID", 27: "INT", 28: "INTEGER_OBJ", 29: "INT_PART", - 30: "LBRACKET", 31: "LITERAL", 32: "LPAREN", 33: "MODAL", 34: "NAMED_PARAM", - 35: "NEGATION", 36: "NOT", 37: "PROG", 38: "RBRACKET", 39: "RPAREN", - 
40: "RULE", 41: "SIGN", 42: "SLBYTESTRING", 43: "SLSTRING", 44: "STRING", - 45: "STRING_ESC", 46: "STRING_OBJ", 47: "STRPREFIX", 48: "STRUCTURED_NAME", - 49: "SYMBOL_OBJ", 50: "THEORY", 51: "VARIABLE", 52: "WS" -} -Token.registerTokenNamesMap(tokenNamesMap) - -class CongressLexer(Lexer): - - grammarFileName = "Congress.g" - api_version = 1 - - def __init__(self, input=None, state=None): - if state is None: - state = RecognizerSharedState() - super().__init__(input, state) - - self.delegates = [] - - self.dfa8 = self.DFA8( - self, 8, - eot = self.DFA8_eot, - eof = self.DFA8_eof, - min = self.DFA8_min, - max = self.DFA8_max, - accept = self.DFA8_accept, - special = self.DFA8_special, - transition = self.DFA8_transition - ) - - self.dfa23 = self.DFA23( - self, 23, - eot = self.DFA23_eot, - eof = self.DFA23_eof, - min = self.DFA23_min, - max = self.DFA23_max, - accept = self.DFA23_accept, - special = self.DFA23_special, - transition = self.DFA23_transition - ) - - self.dfa24 = self.DFA24( - self, 24, - eot = self.DFA24_eot, - eof = self.DFA24_eof, - min = self.DFA24_min, - max = self.DFA24_max, - accept = self.DFA24_accept, - special = self.DFA24_special, - transition = self.DFA24_transition - ) - - self.dfa38 = self.DFA38( - self, 38, - eot = self.DFA38_eot, - eof = self.DFA38_eof, - min = self.DFA38_min, - max = self.DFA38_max, - accept = self.DFA38_accept, - special = self.DFA38_special, - transition = self.DFA38_transition - ) - - - - - - - # $ANTLR start "COLONMINUS" - def mCOLONMINUS(self, ): - try: - _type = COLONMINUS - _channel = DEFAULT_CHANNEL - - # Congress.g:7:12: ( ':-' ) - # Congress.g:7:14: ':-' - pass - self.match(":-") - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "COLONMINUS" - - - - # $ANTLR start "COMMA" - def mCOMMA(self, ): - try: - _type = COMMA - _channel = DEFAULT_CHANNEL - - # Congress.g:8:7: ( ',' ) - # Congress.g:8:9: ',' - pass - self.match(44) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "COMMA" - - - - # $ANTLR start "LBRACKET" - def mLBRACKET(self, ): - try: - _type = LBRACKET - _channel = DEFAULT_CHANNEL - - # Congress.g:9:10: ( '[' ) - # Congress.g:9:12: '[' - pass - self.match(91) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "LBRACKET" - - - - # $ANTLR start "LPAREN" - def mLPAREN(self, ): - try: - _type = LPAREN - _channel = DEFAULT_CHANNEL - - # Congress.g:10:8: ( '(' ) - # Congress.g:10:10: '(' - pass - self.match(40) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "LPAREN" - - - - # $ANTLR start "RBRACKET" - def mRBRACKET(self, ): - try: - _type = RBRACKET - _channel = DEFAULT_CHANNEL - - # Congress.g:11:10: ( ']' ) - # Congress.g:11:12: ']' - pass - self.match(93) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "RBRACKET" - - - - # $ANTLR start "RPAREN" - def mRPAREN(self, ): - try: - _type = RPAREN - _channel = DEFAULT_CHANNEL - - # Congress.g:12:8: ( ')' ) - # Congress.g:12:10: ')' - pass - self.match(41) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "RPAREN" - - - - # $ANTLR start "T__53" - def mT__53(self, ): - try: - _type = T__53 - _channel = DEFAULT_CHANNEL - - # Congress.g:13:7: ( '.' ) - # Congress.g:13:9: '.' 
- pass - self.match(46) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__53" - - - - # $ANTLR start "T__54" - def mT__54(self, ): - try: - _type = T__54 - _channel = DEFAULT_CHANNEL - - # Congress.g:14:7: ( ':' ) - # Congress.g:14:9: ':' - pass - self.match(58) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__54" - - - - # $ANTLR start "T__55" - def mT__55(self, ): - try: - _type = T__55 - _channel = DEFAULT_CHANNEL - - # Congress.g:15:7: ( ';' ) - # Congress.g:15:9: ';' - pass - self.match(59) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__55" - - - - # $ANTLR start "T__56" - def mT__56(self, ): - try: - _type = T__56 - _channel = DEFAULT_CHANNEL - - # Congress.g:16:7: ( 'delete' ) - # Congress.g:16:9: 'delete' - pass - self.match("delete") - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__56" - - - - # $ANTLR start "T__57" - def mT__57(self, ): - try: - _type = T__57 - _channel = DEFAULT_CHANNEL - - # Congress.g:17:7: ( 'execute' ) - # Congress.g:17:9: 'execute' - pass - self.match("execute") - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__57" - - - - # $ANTLR start "T__58" - def mT__58(self, ): - try: - _type = T__58 - _channel = DEFAULT_CHANNEL - - # Congress.g:18:7: ( 'insert' ) - # Congress.g:18:9: 'insert' - pass - self.match("insert") - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "T__58" - - - - # $ANTLR start "NEGATION" - def mNEGATION(self, ): - try: - _type = NEGATION - _channel = DEFAULT_CHANNEL - - # Congress.g:167:5: ( 'not' | 'NOT' | '!' ) - alt1 = 3 - LA1 = self.input.LA(1) - if LA1 in {110}: - alt1 = 1 - elif LA1 in {78}: - alt1 = 2 - elif LA1 in {33}: - alt1 = 3 - else: - nvae = NoViableAltException("", 1, 0, self.input) - - raise nvae - - - if alt1 == 1: - # Congress.g:167:7: 'not' - pass - self.match("not") - - - - elif alt1 == 2: - # Congress.g:168:7: 'NOT' - pass - self.match("NOT") - - - - elif alt1 == 3: - # Congress.g:169:7: '!' - pass - self.match(33) - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "NEGATION" - - - - # $ANTLR start "EQUAL" - def mEQUAL(self, ): - try: - _type = EQUAL - _channel = DEFAULT_CHANNEL - - # Congress.g:173:5: ( '=' ) - # Congress.g:173:8: '=' - pass - self.match(61) - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "EQUAL" - - - - # $ANTLR start "SIGN" - def mSIGN(self, ): - try: - _type = SIGN - _channel = DEFAULT_CHANNEL - - # Congress.g:177:5: ( '+' | '-' ) - # Congress.g: - pass - if self.input.LA(1) in {43, 45}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "SIGN" - - - - # $ANTLR start "INT" - def mINT(self, ): - try: - _type = INT - _channel = DEFAULT_CHANNEL - - # Congress.g:184:5: ( '1' .. '9' ( '0' .. '9' )* | ( '0' )+ | '0' ( 'o' | 'O' ) ( '0' .. 
'7' )+ | '0' ( 'x' | 'X' ) ( HEX_DIGIT )+ | '0' ( 'b' | 'B' ) ( '0' | '1' )+ ) - alt7 = 5 - LA7_0 = self.input.LA(1) - - if ((49 <= LA7_0 <= 57) or LA7_0 in {}) : - alt7 = 1 - elif (LA7_0 == 48) : - LA7 = self.input.LA(2) - if LA7 in {79, 111}: - alt7 = 3 - elif LA7 in {88, 120}: - alt7 = 4 - elif LA7 in {66, 98}: - alt7 = 5 - else: - alt7 = 2 - - else: - nvae = NoViableAltException("", 7, 0, self.input) - - raise nvae - - - if alt7 == 1: - # Congress.g:184:7: '1' .. '9' ( '0' .. '9' )* - pass - self.matchRange(49, 57) - - # Congress.g:184:16: ( '0' .. '9' )* - while True: #loop2 - alt2 = 2 - LA2_0 = self.input.LA(1) - - if ((48 <= LA2_0 <= 57) or LA2_0 in {}) : - alt2 = 1 - - - if alt2 == 1: - # Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop2 - - - - elif alt7 == 2: - # Congress.g:185:7: ( '0' )+ - pass - # Congress.g:185:7: ( '0' )+ - cnt3 = 0 - while True: #loop3 - alt3 = 2 - LA3_0 = self.input.LA(1) - - if (LA3_0 == 48) : - alt3 = 1 - - - if alt3 == 1: - # Congress.g:185:7: '0' - pass - self.match(48) - - - else: - if cnt3 >= 1: - break #loop3 - - eee = EarlyExitException(3, self.input) - raise eee - - cnt3 += 1 - - - - elif alt7 == 3: - # Congress.g:186:7: '0' ( 'o' | 'O' ) ( '0' .. '7' )+ - pass - self.match(48) - - if self.input.LA(1) in {79, 111}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # Congress.g:186:23: ( '0' .. '7' )+ - cnt4 = 0 - while True: #loop4 - alt4 = 2 - LA4_0 = self.input.LA(1) - - if ((48 <= LA4_0 <= 55) or LA4_0 in {}) : - alt4 = 1 - - - if alt4 == 1: - # Congress.g: - pass - if (48 <= self.input.LA(1) <= 55) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt4 >= 1: - break #loop4 - - eee = EarlyExitException(4, self.input) - raise eee - - cnt4 += 1 - - - - elif alt7 == 4: - # Congress.g:187:7: '0' ( 'x' | 'X' ) ( HEX_DIGIT )+ - pass - self.match(48) - - if self.input.LA(1) in {88, 120}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # Congress.g:187:23: ( HEX_DIGIT )+ - cnt5 = 0 - while True: #loop5 - alt5 = 2 - LA5_0 = self.input.LA(1) - - if ((48 <= LA5_0 <= 57) or (65 <= LA5_0 <= 70) or (97 <= LA5_0 <= 102) or LA5_0 in {}) : - alt5 = 1 - - - if alt5 == 1: - # Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 70) or (97 <= self.input.LA(1) <= 102) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt5 >= 1: - break #loop5 - - eee = EarlyExitException(5, self.input) - raise eee - - cnt5 += 1 - - - - elif alt7 == 5: - # Congress.g:188:7: '0' ( 'b' | 'B' ) ( '0' | '1' )+ - pass - self.match(48) - - if self.input.LA(1) in {66, 98}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # Congress.g:188:23: ( '0' | '1' )+ - cnt6 = 0 - while True: #loop6 - alt6 = 2 - LA6_0 = self.input.LA(1) - - if (LA6_0 in {48, 49}) : - alt6 = 1 - - - if alt6 == 1: - # Congress.g: - pass - if self.input.LA(1) in {48, 49}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - 
else: - if cnt6 >= 1: - break #loop6 - - eee = EarlyExitException(6, self.input) - raise eee - - cnt6 += 1 - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "INT" - - - - # $ANTLR start "FLOAT" - def mFLOAT(self, ): - try: - _type = FLOAT - _channel = DEFAULT_CHANNEL - - # Congress.g:194:5: ( FLOAT_NO_EXP | FLOAT_EXP ) - alt8 = 2 - alt8 = self.dfa8.predict(self.input) - if alt8 == 1: - # Congress.g:194:7: FLOAT_NO_EXP - pass - self.mFLOAT_NO_EXP() - - - - elif alt8 == 2: - # Congress.g:195:7: FLOAT_EXP - pass - self.mFLOAT_EXP() - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "FLOAT" - - - - # $ANTLR start "STRING" - def mSTRING(self, ): - try: - _type = STRING - _channel = DEFAULT_CHANNEL - - # Congress.g:218:5: ( ( STRPREFIX )? ( SLSTRING )+ | ( BYTESTRPREFIX ) ( SLBYTESTRING )+ ) - alt12 = 2 - LA12 = self.input.LA(1) - if LA12 in {114}: - LA12_1 = self.input.LA(2) - - if (LA12_1 in {66, 98}) : - alt12 = 2 - elif (LA12_1 in {34, 39}) : - alt12 = 1 - else: - nvae = NoViableAltException("", 12, 1, self.input) - - raise nvae - - - elif LA12 in {34, 39, 85, 117}: - alt12 = 1 - elif LA12 in {66, 98}: - alt12 = 2 - elif LA12 in {82}: - LA12_4 = self.input.LA(2) - - if (LA12_4 in {66, 98}) : - alt12 = 2 - elif (LA12_4 in {34, 39}) : - alt12 = 1 - else: - nvae = NoViableAltException("", 12, 4, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 12, 0, self.input) - - raise nvae - - - if alt12 == 1: - # Congress.g:218:7: ( STRPREFIX )? ( SLSTRING )+ - pass - # Congress.g:218:7: ( STRPREFIX )? - alt9 = 2 - LA9_0 = self.input.LA(1) - - if (LA9_0 in {82, 85, 114, 117}) : - alt9 = 1 - if alt9 == 1: - # Congress.g: - pass - if self.input.LA(1) in {82, 85, 114, 117}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - # Congress.g:218:20: ( SLSTRING )+ - cnt10 = 0 - while True: #loop10 - alt10 = 2 - LA10_0 = self.input.LA(1) - - if (LA10_0 in {34, 39}) : - alt10 = 1 - - - if alt10 == 1: - # Congress.g:218:21: SLSTRING - pass - self.mSLSTRING() - - - - else: - if cnt10 >= 1: - break #loop10 - - eee = EarlyExitException(10, self.input) - raise eee - - cnt10 += 1 - - - - elif alt12 == 2: - # Congress.g:219:7: ( BYTESTRPREFIX ) ( SLBYTESTRING )+ - pass - # Congress.g:219:7: ( BYTESTRPREFIX ) - # Congress.g:219:8: BYTESTRPREFIX - pass - self.mBYTESTRPREFIX() - - - - - - # Congress.g:219:23: ( SLBYTESTRING )+ - cnt11 = 0 - while True: #loop11 - alt11 = 2 - LA11_0 = self.input.LA(1) - - if (LA11_0 in {34, 39}) : - alt11 = 1 - - - if alt11 == 1: - # Congress.g:219:24: SLBYTESTRING - pass - self.mSLBYTESTRING() - - - - else: - if cnt11 >= 1: - break #loop11 - - eee = EarlyExitException(11, self.input) - raise eee - - cnt11 += 1 - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "STRING" - - - - # $ANTLR start "ID" - def mID(self, ): - try: - _type = ID - _channel = DEFAULT_CHANNEL - - # Congress.g:225:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '.' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' | '.' )* ) - # Congress.g:225:7: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '.' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' | '.' 
)* - pass - if (65 <= self.input.LA(1) <= 90) or (97 <= self.input.LA(1) <= 122) or self.input.LA(1) in {46, 95}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # Congress.g:225:35: ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' | '.' )* - while True: #loop13 - alt13 = 2 - LA13_0 = self.input.LA(1) - - if ((48 <= LA13_0 <= 57) or (65 <= LA13_0 <= 90) or (97 <= LA13_0 <= 122) or LA13_0 in {46, 95}) : - alt13 = 1 - - - if alt13 == 1: - # Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 90) or (97 <= self.input.LA(1) <= 122) or self.input.LA(1) in {46, 95}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop13 - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "ID" - - - - # $ANTLR start "COMMENT" - def mCOMMENT(self, ): - try: - _type = COMMENT - _channel = DEFAULT_CHANNEL - - # Congress.g:230:5: ( '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' | '/*' ( options {greedy=false; } : . )* '*/' | '#' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' ) - alt19 = 3 - LA19_0 = self.input.LA(1) - - if (LA19_0 == 47) : - LA19_1 = self.input.LA(2) - - if (LA19_1 == 47) : - alt19 = 1 - elif (LA19_1 == 42) : - alt19 = 2 - else: - nvae = NoViableAltException("", 19, 1, self.input) - - raise nvae - - - elif (LA19_0 == 35) : - alt19 = 3 - else: - nvae = NoViableAltException("", 19, 0, self.input) - - raise nvae - - - if alt19 == 1: - # Congress.g:230:7: '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' - pass - self.match("//") - - - # Congress.g:230:12: (~ ( '\\n' | '\\r' ) )* - while True: #loop14 - alt14 = 2 - LA14_0 = self.input.LA(1) - - if ((0 <= LA14_0 <= 9) or (14 <= LA14_0 <= 65535) or LA14_0 in {11, 12}) : - alt14 = 1 - - - if alt14 == 1: - # Congress.g: - pass - if (0 <= self.input.LA(1) <= 9) or (14 <= self.input.LA(1) <= 65535) or self.input.LA(1) in {11, 12}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop14 - - - # Congress.g:230:26: ( '\\r' )? - alt15 = 2 - LA15_0 = self.input.LA(1) - - if (LA15_0 == 13) : - alt15 = 1 - if alt15 == 1: - # Congress.g:230:26: '\\r' - pass - self.match(13) - - - - - self.match(10) - - #action start - _channel=HIDDEN; - #action end - - - - elif alt19 == 2: - # Congress.g:231:7: '/*' ( options {greedy=false; } : . )* '*/' - pass - self.match("/*") - - - # Congress.g:231:12: ( options {greedy=false; } : . )* - while True: #loop16 - alt16 = 2 - LA16_0 = self.input.LA(1) - - if (LA16_0 == 42) : - LA16_1 = self.input.LA(2) - - if (LA16_1 == 47) : - alt16 = 2 - elif ((0 <= LA16_1 <= 46) or (48 <= LA16_1 <= 65535) or LA16_1 in {}) : - alt16 = 1 - - - elif ((0 <= LA16_0 <= 41) or (43 <= LA16_0 <= 65535) or LA16_0 in {}) : - alt16 = 1 - - - if alt16 == 1: - # Congress.g:231:40: . - pass - self.matchAny() - - - else: - break #loop16 - - - self.match("*/") - - - #action start - _channel=HIDDEN; - #action end - - - - elif alt19 == 3: - # Congress.g:232:7: '#' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? 
'\\n' - pass - self.match(35) - - # Congress.g:232:11: (~ ( '\\n' | '\\r' ) )* - while True: #loop17 - alt17 = 2 - LA17_0 = self.input.LA(1) - - if ((0 <= LA17_0 <= 9) or (14 <= LA17_0 <= 65535) or LA17_0 in {11, 12}) : - alt17 = 1 - - - if alt17 == 1: - # Congress.g: - pass - if (0 <= self.input.LA(1) <= 9) or (14 <= self.input.LA(1) <= 65535) or self.input.LA(1) in {11, 12}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop17 - - - # Congress.g:232:25: ( '\\r' )? - alt18 = 2 - LA18_0 = self.input.LA(1) - - if (LA18_0 == 13) : - alt18 = 1 - if alt18 == 1: - # Congress.g:232:25: '\\r' - pass - self.match(13) - - - - - self.match(10) - - #action start - _channel=HIDDEN; - #action end - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "COMMENT" - - - - # $ANTLR start "WS" - def mWS(self, ): - try: - _type = WS - _channel = DEFAULT_CHANNEL - - # Congress.g:236:5: ( ( ' ' | '\\t' | '\\r' | '\\n' ) ) - # Congress.g:236:7: ( ' ' | '\\t' | '\\r' | '\\n' ) - pass - if self.input.LA(1) in {9, 10, 13, 32}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - #action start - _channel=HIDDEN; - #action end - - - - - self._state.type = _type - self._state.channel = _channel - finally: - pass - - # $ANTLR end "WS" - - - - # $ANTLR start "EXPONENT" - def mEXPONENT(self, ): - try: - # Congress.g:250:5: ( ( 'e' | 'E' ) ( '+' | '-' )? ( '0' .. '9' )+ ) - # Congress.g:250:7: ( 'e' | 'E' ) ( '+' | '-' )? ( '0' .. '9' )+ - pass - if self.input.LA(1) in {69, 101}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - # Congress.g:250:17: ( '+' | '-' )? - alt20 = 2 - LA20_0 = self.input.LA(1) - - if (LA20_0 in {43, 45}) : - alt20 = 1 - if alt20 == 1: - # Congress.g: - pass - if self.input.LA(1) in {43, 45}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - # Congress.g:250:28: ( '0' .. '9' )+ - cnt21 = 0 - while True: #loop21 - alt21 = 2 - LA21_0 = self.input.LA(1) - - if ((48 <= LA21_0 <= 57) or LA21_0 in {}) : - alt21 = 1 - - - if alt21 == 1: - # Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt21 >= 1: - break #loop21 - - eee = EarlyExitException(21, self.input) - raise eee - - cnt21 += 1 - - - - - - finally: - pass - - # $ANTLR end "EXPONENT" - - - - # $ANTLR start "HEX_DIGIT" - def mHEX_DIGIT(self, ): - try: - # Congress.g:255:5: ( ( '0' .. '9' | 'a' .. 'f' | 'A' .. 'F' ) ) - # Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 70) or (97 <= self.input.LA(1) <= 102) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "HEX_DIGIT" - - - - # $ANTLR start "DIGIT" - def mDIGIT(self, ): - try: - # Congress.g:260:5: ( ( '0' .. 
'9' ) ) - # Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "DIGIT" - - - - # $ANTLR start "FLOAT_NO_EXP" - def mFLOAT_NO_EXP(self, ): - try: - # Congress.g:265:5: ( ( INT_PART )? FRAC_PART | INT_PART '.' ) - alt23 = 2 - alt23 = self.dfa23.predict(self.input) - if alt23 == 1: - # Congress.g:265:7: ( INT_PART )? FRAC_PART - pass - # Congress.g:265:7: ( INT_PART )? - alt22 = 2 - LA22_0 = self.input.LA(1) - - if ((48 <= LA22_0 <= 57) or LA22_0 in {}) : - alt22 = 1 - if alt22 == 1: - # Congress.g:265:7: INT_PART - pass - self.mINT_PART() - - - - - - self.mFRAC_PART() - - - - elif alt23 == 2: - # Congress.g:266:7: INT_PART '.' - pass - self.mINT_PART() - - - self.match(46) - - - - finally: - pass - - # $ANTLR end "FLOAT_NO_EXP" - - - - # $ANTLR start "FLOAT_EXP" - def mFLOAT_EXP(self, ): - try: - # Congress.g:271:5: ( ( INT_PART | FLOAT_NO_EXP ) EXPONENT ) - # Congress.g:271:7: ( INT_PART | FLOAT_NO_EXP ) EXPONENT - pass - # Congress.g:271:7: ( INT_PART | FLOAT_NO_EXP ) - alt24 = 2 - alt24 = self.dfa24.predict(self.input) - if alt24 == 1: - # Congress.g:271:9: INT_PART - pass - self.mINT_PART() - - - - elif alt24 == 2: - # Congress.g:271:20: FLOAT_NO_EXP - pass - self.mFLOAT_NO_EXP() - - - - - - self.mEXPONENT() - - - - - - finally: - pass - - # $ANTLR end "FLOAT_EXP" - - - - # $ANTLR start "INT_PART" - def mINT_PART(self, ): - try: - # Congress.g:276:5: ( ( DIGIT )+ ) - # Congress.g:276:7: ( DIGIT )+ - pass - # Congress.g:276:7: ( DIGIT )+ - cnt25 = 0 - while True: #loop25 - alt25 = 2 - LA25_0 = self.input.LA(1) - - if ((48 <= LA25_0 <= 57) or LA25_0 in {}) : - alt25 = 1 - - - if alt25 == 1: - # Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt25 >= 1: - break #loop25 - - eee = EarlyExitException(25, self.input) - raise eee - - cnt25 += 1 - - - - - - finally: - pass - - # $ANTLR end "INT_PART" - - - - # $ANTLR start "FRAC_PART" - def mFRAC_PART(self, ): - try: - # Congress.g:281:5: ( '.' ( DIGIT )+ ) - # Congress.g:281:7: '.' ( DIGIT )+ - pass - self.match(46) - - # Congress.g:281:11: ( DIGIT )+ - cnt26 = 0 - while True: #loop26 - alt26 = 2 - LA26_0 = self.input.LA(1) - - if ((48 <= LA26_0 <= 57) or LA26_0 in {}) : - alt26 = 1 - - - if alt26 == 1: - # Congress.g: - pass - if (48 <= self.input.LA(1) <= 57) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - if cnt26 >= 1: - break #loop26 - - eee = EarlyExitException(26, self.input) - raise eee - - cnt26 += 1 - - - - - - finally: - pass - - # $ANTLR end "FRAC_PART" - - - - # $ANTLR start "STRPREFIX" - def mSTRPREFIX(self, ): - try: - # Congress.g:289:5: ( 'r' | 'R' | 'u' | 'U' ) - # Congress.g: - pass - if self.input.LA(1) in {82, 85, 114, 117}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "STRPREFIX" - - - - # $ANTLR start "STRING_ESC" - def mSTRING_ESC(self, ): - try: - # Congress.g:294:5: ( '\\\\' . ) - # Congress.g:294:7: '\\\\' . 
- pass - self.match(92) - - self.matchAny() - - - - - finally: - pass - - # $ANTLR end "STRING_ESC" - - - - # $ANTLR start "SLSTRING" - def mSLSTRING(self, ): - try: - # Congress.g:301:5: ( '\\'' ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\\'' ) )* '\\'' | '\"' ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\"' ) )* '\"' | '\\'\\'\\'' ( STRING_ESC |~ ( '\\\\' ) )* '\\'\\'\\'' | '\"\"\"' ( STRING_ESC |~ ( '\\\\' ) )* '\"\"\"' ) - alt31 = 4 - LA31_0 = self.input.LA(1) - - if (LA31_0 == 39) : - LA31_1 = self.input.LA(2) - - if (LA31_1 == 39) : - LA31_3 = self.input.LA(3) - - if (LA31_3 == 39) : - alt31 = 3 - else: - alt31 = 1 - - elif ((0 <= LA31_1 <= 9) or (14 <= LA31_1 <= 38) or (40 <= LA31_1 <= 65535) or LA31_1 in {11, 12}) : - alt31 = 1 - else: - nvae = NoViableAltException("", 31, 1, self.input) - - raise nvae - - - elif (LA31_0 == 34) : - LA31_2 = self.input.LA(2) - - if (LA31_2 == 34) : - LA31_5 = self.input.LA(3) - - if (LA31_5 == 34) : - alt31 = 4 - else: - alt31 = 2 - - elif ((0 <= LA31_2 <= 9) or (14 <= LA31_2 <= 33) or (35 <= LA31_2 <= 65535) or LA31_2 in {11, 12}) : - alt31 = 2 - else: - nvae = NoViableAltException("", 31, 2, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 31, 0, self.input) - - raise nvae - - - if alt31 == 1: - # Congress.g:301:7: '\\'' ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\\'' ) )* '\\'' - pass - self.match(39) - - # Congress.g:301:12: ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\\'' ) )* - while True: #loop27 - alt27 = 3 - LA27_0 = self.input.LA(1) - - if (LA27_0 == 92) : - alt27 = 1 - elif ((0 <= LA27_0 <= 9) or (14 <= LA27_0 <= 38) or (40 <= LA27_0 <= 91) or (93 <= LA27_0 <= 65535) or LA27_0 in {11, 12}) : - alt27 = 2 - - - if alt27 == 1: - # Congress.g:301:13: STRING_ESC - pass - self.mSTRING_ESC() - - - - elif alt27 == 2: - # Congress.g:301:26: ~ ( '\\\\' | '\\r' | '\\n' | '\\'' ) - pass - if (0 <= self.input.LA(1) <= 9) or (14 <= self.input.LA(1) <= 38) or (40 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535) or self.input.LA(1) in {11, 12}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop27 - - - self.match(39) - - - elif alt31 == 2: - # Congress.g:302:7: '\"' ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\"' ) )* '\"' - pass - self.match(34) - - # Congress.g:302:11: ( STRING_ESC |~ ( '\\\\' | '\\r' | '\\n' | '\"' ) )* - while True: #loop28 - alt28 = 3 - LA28_0 = self.input.LA(1) - - if (LA28_0 == 92) : - alt28 = 1 - elif ((0 <= LA28_0 <= 9) or (14 <= LA28_0 <= 33) or (35 <= LA28_0 <= 91) or (93 <= LA28_0 <= 65535) or LA28_0 in {11, 12}) : - alt28 = 2 - - - if alt28 == 1: - # Congress.g:302:12: STRING_ESC - pass - self.mSTRING_ESC() - - - - elif alt28 == 2: - # Congress.g:302:25: ~ ( '\\\\' | '\\r' | '\\n' | '\"' ) - pass - if (0 <= self.input.LA(1) <= 9) or (14 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535) or self.input.LA(1) in {11, 12}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop28 - - - self.match(34) - - - elif alt31 == 3: - # Congress.g:303:7: '\\'\\'\\'' ( STRING_ESC |~ ( '\\\\' ) )* '\\'\\'\\'' - pass - self.match("'''") - - - # Congress.g:303:16: ( STRING_ESC |~ ( '\\\\' ) )* - while True: #loop29 - alt29 = 3 - LA29_0 = self.input.LA(1) - - if (LA29_0 == 39) : - LA29_1 = self.input.LA(2) - - if (LA29_1 == 39) : - LA29_4 = self.input.LA(3) - - if (LA29_4 == 39) : - 
LA29_5 = self.input.LA(4) - - if ((0 <= LA29_5 <= 65535) or LA29_5 in {}) : - alt29 = 2 - - - elif ((0 <= LA29_4 <= 38) or (40 <= LA29_4 <= 65535) or LA29_4 in {}) : - alt29 = 2 - - - elif ((0 <= LA29_1 <= 38) or (40 <= LA29_1 <= 65535) or LA29_1 in {}) : - alt29 = 2 - - - elif (LA29_0 == 92) : - alt29 = 1 - elif ((0 <= LA29_0 <= 38) or (40 <= LA29_0 <= 91) or (93 <= LA29_0 <= 65535) or LA29_0 in {}) : - alt29 = 2 - - - if alt29 == 1: - # Congress.g:303:17: STRING_ESC - pass - self.mSTRING_ESC() - - - - elif alt29 == 2: - # Congress.g:303:30: ~ ( '\\\\' ) - pass - if (0 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop29 - - - self.match("'''") - - - - elif alt31 == 4: - # Congress.g:304:7: '\"\"\"' ( STRING_ESC |~ ( '\\\\' ) )* '\"\"\"' - pass - self.match("\"\"\"") - - - # Congress.g:304:13: ( STRING_ESC |~ ( '\\\\' ) )* - while True: #loop30 - alt30 = 3 - LA30_0 = self.input.LA(1) - - if (LA30_0 == 34) : - LA30_1 = self.input.LA(2) - - if (LA30_1 == 34) : - LA30_4 = self.input.LA(3) - - if (LA30_4 == 34) : - LA30_5 = self.input.LA(4) - - if ((0 <= LA30_5 <= 65535) or LA30_5 in {}) : - alt30 = 2 - - - elif ((0 <= LA30_4 <= 33) or (35 <= LA30_4 <= 65535) or LA30_4 in {}) : - alt30 = 2 - - - elif ((0 <= LA30_1 <= 33) or (35 <= LA30_1 <= 65535) or LA30_1 in {}) : - alt30 = 2 - - - elif (LA30_0 == 92) : - alt30 = 1 - elif ((0 <= LA30_0 <= 33) or (35 <= LA30_0 <= 91) or (93 <= LA30_0 <= 65535) or LA30_0 in {}) : - alt30 = 2 - - - if alt30 == 1: - # Congress.g:304:14: STRING_ESC - pass - self.mSTRING_ESC() - - - - elif alt30 == 2: - # Congress.g:304:27: ~ ( '\\\\' ) - pass - if (0 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop30 - - - self.match("\"\"\"") - - - - - finally: - pass - - # $ANTLR end "SLSTRING" - - - - # $ANTLR start "BYTESTRPREFIX" - def mBYTESTRPREFIX(self, ): - try: - # Congress.g:315:5: ( 'b' | 'B' | 'br' | 'Br' | 'bR' | 'BR' | 'rb' | 'rB' | 'Rb' | 'RB' ) - alt32 = 10 - LA32 = self.input.LA(1) - if LA32 in {98}: - LA32 = self.input.LA(2) - if LA32 in {114}: - alt32 = 3 - elif LA32 in {82}: - alt32 = 5 - else: - alt32 = 1 - - elif LA32 in {66}: - LA32 = self.input.LA(2) - if LA32 in {114}: - alt32 = 4 - elif LA32 in {82}: - alt32 = 6 - else: - alt32 = 2 - - elif LA32 in {114}: - LA32_3 = self.input.LA(2) - - if (LA32_3 == 98) : - alt32 = 7 - elif (LA32_3 == 66) : - alt32 = 8 - else: - nvae = NoViableAltException("", 32, 3, self.input) - - raise nvae - - - elif LA32 in {82}: - LA32_4 = self.input.LA(2) - - if (LA32_4 == 98) : - alt32 = 9 - elif (LA32_4 == 66) : - alt32 = 10 - else: - nvae = NoViableAltException("", 32, 4, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 32, 0, self.input) - - raise nvae - - - if alt32 == 1: - # Congress.g:315:7: 'b' - pass - self.match(98) - - - elif alt32 == 2: - # Congress.g:315:13: 'B' - pass - self.match(66) - - - elif alt32 == 3: - # Congress.g:315:19: 'br' - pass - self.match("br") - - - - elif alt32 == 4: - # Congress.g:315:26: 'Br' - pass - self.match("Br") - - - - elif alt32 == 5: - # Congress.g:315:33: 'bR' - pass - self.match("bR") - - - - elif alt32 == 6: - # Congress.g:315:40: 'BR' - pass - self.match("BR") - - - - elif alt32 == 7: - # 
Congress.g:315:47: 'rb' - pass - self.match("rb") - - - - elif alt32 == 8: - # Congress.g:315:54: 'rB' - pass - self.match("rB") - - - - elif alt32 == 9: - # Congress.g:315:61: 'Rb' - pass - self.match("Rb") - - - - elif alt32 == 10: - # Congress.g:315:68: 'RB' - pass - self.match("RB") - - - - - finally: - pass - - # $ANTLR end "BYTESTRPREFIX" - - - - # $ANTLR start "SLBYTESTRING" - def mSLBYTESTRING(self, ): - try: - # Congress.g:320:5: ( '\\'' ( BYTES_CHAR_SQ | BYTES_ESC )* '\\'' | '\"' ( BYTES_CHAR_DQ | BYTES_ESC )* '\"' | '\\'\\'\\'' ( BYTES_CHAR_SQ | BYTES_TESC )* '\\'\\'\\'' | '\"\"\"' ( BYTES_CHAR_DQ | BYTES_TESC )* '\"\"\"' ) - alt37 = 4 - LA37_0 = self.input.LA(1) - - if (LA37_0 == 39) : - LA37_1 = self.input.LA(2) - - if (LA37_1 == 39) : - LA37_3 = self.input.LA(3) - - if (LA37_3 == 39) : - alt37 = 3 - else: - alt37 = 1 - - elif ((0 <= LA37_1 <= 9) or (14 <= LA37_1 <= 38) or (40 <= LA37_1 <= 127) or LA37_1 in {11, 12}) : - alt37 = 1 - else: - nvae = NoViableAltException("", 37, 1, self.input) - - raise nvae - - - elif (LA37_0 == 34) : - LA37_2 = self.input.LA(2) - - if (LA37_2 == 34) : - LA37_5 = self.input.LA(3) - - if (LA37_5 == 34) : - alt37 = 4 - else: - alt37 = 2 - - elif ((0 <= LA37_2 <= 9) or (14 <= LA37_2 <= 33) or (35 <= LA37_2 <= 127) or LA37_2 in {11, 12}) : - alt37 = 2 - else: - nvae = NoViableAltException("", 37, 2, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 37, 0, self.input) - - raise nvae - - - if alt37 == 1: - # Congress.g:320:7: '\\'' ( BYTES_CHAR_SQ | BYTES_ESC )* '\\'' - pass - self.match(39) - - # Congress.g:320:12: ( BYTES_CHAR_SQ | BYTES_ESC )* - while True: #loop33 - alt33 = 3 - LA33_0 = self.input.LA(1) - - if ((0 <= LA33_0 <= 9) or (14 <= LA33_0 <= 38) or (40 <= LA33_0 <= 91) or (93 <= LA33_0 <= 127) or LA33_0 in {11, 12}) : - alt33 = 1 - elif (LA33_0 == 92) : - alt33 = 2 - - - if alt33 == 1: - # Congress.g:320:13: BYTES_CHAR_SQ - pass - self.mBYTES_CHAR_SQ() - - - - elif alt33 == 2: - # Congress.g:320:29: BYTES_ESC - pass - self.mBYTES_ESC() - - - - else: - break #loop33 - - - self.match(39) - - - elif alt37 == 2: - # Congress.g:321:7: '\"' ( BYTES_CHAR_DQ | BYTES_ESC )* '\"' - pass - self.match(34) - - # Congress.g:321:11: ( BYTES_CHAR_DQ | BYTES_ESC )* - while True: #loop34 - alt34 = 3 - LA34_0 = self.input.LA(1) - - if ((0 <= LA34_0 <= 9) or (14 <= LA34_0 <= 33) or (35 <= LA34_0 <= 91) or (93 <= LA34_0 <= 127) or LA34_0 in {11, 12}) : - alt34 = 1 - elif (LA34_0 == 92) : - alt34 = 2 - - - if alt34 == 1: - # Congress.g:321:12: BYTES_CHAR_DQ - pass - self.mBYTES_CHAR_DQ() - - - - elif alt34 == 2: - # Congress.g:321:28: BYTES_ESC - pass - self.mBYTES_ESC() - - - - else: - break #loop34 - - - self.match(34) - - - elif alt37 == 3: - # Congress.g:322:7: '\\'\\'\\'' ( BYTES_CHAR_SQ | BYTES_TESC )* '\\'\\'\\'' - pass - self.match("'''") - - - # Congress.g:322:16: ( BYTES_CHAR_SQ | BYTES_TESC )* - while True: #loop35 - alt35 = 2 - LA35_0 = self.input.LA(1) - - if (LA35_0 == 39) : - LA35_1 = self.input.LA(2) - - if (LA35_1 == 39) : - LA35_3 = self.input.LA(3) - - if (LA35_3 == 39) : - LA35_4 = self.input.LA(4) - - if ((0 <= LA35_4 <= 91) or (93 <= LA35_4 <= 127) or LA35_4 in {}) : - alt35 = 1 - - - elif ((0 <= LA35_3 <= 38) or (40 <= LA35_3 <= 91) or (93 <= LA35_3 <= 127) or LA35_3 in {}) : - alt35 = 1 - - - elif ((0 <= LA35_1 <= 38) or (40 <= LA35_1 <= 91) or (93 <= LA35_1 <= 127) or LA35_1 in {}) : - alt35 = 1 - - - elif ((0 <= LA35_0 <= 38) or (40 <= LA35_0 <= 91) or (93 <= LA35_0 <= 127) or LA35_0 in {}) : - alt35 = 1 - - - if 
alt35 == 1: - # Congress.g: - pass - if (0 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop35 - - - self.match("'''") - - - - elif alt37 == 4: - # Congress.g:323:7: '\"\"\"' ( BYTES_CHAR_DQ | BYTES_TESC )* '\"\"\"' - pass - self.match("\"\"\"") - - - # Congress.g:323:13: ( BYTES_CHAR_DQ | BYTES_TESC )* - while True: #loop36 - alt36 = 2 - LA36_0 = self.input.LA(1) - - if (LA36_0 == 34) : - LA36_1 = self.input.LA(2) - - if (LA36_1 == 34) : - LA36_3 = self.input.LA(3) - - if (LA36_3 == 34) : - LA36_4 = self.input.LA(4) - - if ((0 <= LA36_4 <= 91) or (93 <= LA36_4 <= 127) or LA36_4 in {}) : - alt36 = 1 - - - elif ((0 <= LA36_3 <= 33) or (35 <= LA36_3 <= 91) or (93 <= LA36_3 <= 127) or LA36_3 in {}) : - alt36 = 1 - - - elif ((0 <= LA36_1 <= 33) or (35 <= LA36_1 <= 91) or (93 <= LA36_1 <= 127) or LA36_1 in {}) : - alt36 = 1 - - - elif ((0 <= LA36_0 <= 33) or (35 <= LA36_0 <= 91) or (93 <= LA36_0 <= 127) or LA36_0 in {}) : - alt36 = 1 - - - if alt36 == 1: - # Congress.g: - pass - if (0 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - else: - break #loop36 - - - self.match("\"\"\"") - - - - - finally: - pass - - # $ANTLR end "SLBYTESTRING" - - - - # $ANTLR start "BYTES_CHAR_SQ" - def mBYTES_CHAR_SQ(self, ): - try: - # Congress.g:328:5: ( '\\u0000' .. '\\u0009' | '\\u000B' .. '\\u000C' | '\\u000E' .. '\\u0026' | '\\u0028' .. '\\u005B' | '\\u005D' .. '\\u007F' ) - # Congress.g: - pass - if (0 <= self.input.LA(1) <= 9) or (14 <= self.input.LA(1) <= 38) or (40 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127) or self.input.LA(1) in {11, 12}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "BYTES_CHAR_SQ" - - - - # $ANTLR start "BYTES_CHAR_DQ" - def mBYTES_CHAR_DQ(self, ): - try: - # Congress.g:337:5: ( '\\u0000' .. '\\u0009' | '\\u000B' .. '\\u000C' | '\\u000E' .. '\\u0021' | '\\u0023' .. '\\u005B' | '\\u005D' .. '\\u007F' ) - # Congress.g: - pass - if (0 <= self.input.LA(1) <= 9) or (14 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127) or self.input.LA(1) in {11, 12}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "BYTES_CHAR_DQ" - - - - # $ANTLR start "BYTES_ESC" - def mBYTES_ESC(self, ): - try: - # Congress.g:346:5: ( '\\\\' '\\u0000' .. '\\u007F' ) - # Congress.g:346:7: '\\\\' '\\u0000' .. '\\u007F' - pass - self.match(92) - - self.matchRange(0, 127) - - - - - finally: - pass - - # $ANTLR end "BYTES_ESC" - - - - # $ANTLR start "BYTES_TESC" - def mBYTES_TESC(self, ): - try: - # Congress.g:352:5: ( '\\u0000' .. '\\u005B' | '\\u005D' .. 
'\\u007F' ) - # Congress.g: - pass - if (0 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 127) or self.input.LA(1) in {}: - self.input.consume() - else: - mse = MismatchedSetException(None, self.input) - self.recover(mse) - raise mse - - - - - - - finally: - pass - - # $ANTLR end "BYTES_TESC" - - - - def mTokens(self): - # Congress.g:1:8: ( COLONMINUS | COMMA | LBRACKET | LPAREN | RBRACKET | RPAREN | T__53 | T__54 | T__55 | T__56 | T__57 | T__58 | NEGATION | EQUAL | SIGN | INT | FLOAT | STRING | ID | COMMENT | WS ) - alt38 = 21 - alt38 = self.dfa38.predict(self.input) - if alt38 == 1: - # Congress.g:1:10: COLONMINUS - pass - self.mCOLONMINUS() - - - - elif alt38 == 2: - # Congress.g:1:21: COMMA - pass - self.mCOMMA() - - - - elif alt38 == 3: - # Congress.g:1:27: LBRACKET - pass - self.mLBRACKET() - - - - elif alt38 == 4: - # Congress.g:1:36: LPAREN - pass - self.mLPAREN() - - - - elif alt38 == 5: - # Congress.g:1:43: RBRACKET - pass - self.mRBRACKET() - - - - elif alt38 == 6: - # Congress.g:1:52: RPAREN - pass - self.mRPAREN() - - - - elif alt38 == 7: - # Congress.g:1:59: T__53 - pass - self.mT__53() - - - - elif alt38 == 8: - # Congress.g:1:65: T__54 - pass - self.mT__54() - - - - elif alt38 == 9: - # Congress.g:1:71: T__55 - pass - self.mT__55() - - - - elif alt38 == 10: - # Congress.g:1:77: T__56 - pass - self.mT__56() - - - - elif alt38 == 11: - # Congress.g:1:83: T__57 - pass - self.mT__57() - - - - elif alt38 == 12: - # Congress.g:1:89: T__58 - pass - self.mT__58() - - - - elif alt38 == 13: - # Congress.g:1:95: NEGATION - pass - self.mNEGATION() - - - - elif alt38 == 14: - # Congress.g:1:104: EQUAL - pass - self.mEQUAL() - - - - elif alt38 == 15: - # Congress.g:1:110: SIGN - pass - self.mSIGN() - - - - elif alt38 == 16: - # Congress.g:1:115: INT - pass - self.mINT() - - - - elif alt38 == 17: - # Congress.g:1:119: FLOAT - pass - self.mFLOAT() - - - - elif alt38 == 18: - # Congress.g:1:125: STRING - pass - self.mSTRING() - - - - elif alt38 == 19: - # Congress.g:1:132: ID - pass - self.mID() - - - - elif alt38 == 20: - # Congress.g:1:135: COMMENT - pass - self.mCOMMENT() - - - - elif alt38 == 21: - # Congress.g:1:143: WS - pass - self.mWS() - - - - - - - - - # lookup tables for DFA #8 - - DFA8_eot = DFA.unpack( - "\3\uffff\1\6\1\uffff\1\6\1\uffff" - ) - - DFA8_eof = DFA.unpack( - "\7\uffff" - ) - - DFA8_min = DFA.unpack( - "\2\56\2\60\1\uffff\1\60\1\uffff" - ) - - DFA8_max = DFA.unpack( - "\1\71\1\145\1\71\1\145\1\uffff\1\145\1\uffff" - ) - - DFA8_accept = DFA.unpack( - "\4\uffff\1\2\1\uffff\1\1" - ) - - DFA8_special = DFA.unpack( - "\7\uffff" - ) - - - DFA8_transition = [ - DFA.unpack("\1\2\1\uffff\12\1"), - DFA.unpack("\1\3\1\uffff\12\1\13\uffff\1\4\37\uffff\1\4"), - DFA.unpack("\12\5"), - DFA.unpack("\12\5\13\uffff\1\4\37\uffff\1\4"), - DFA.unpack(""), - DFA.unpack("\12\5\13\uffff\1\4\37\uffff\1\4"), - DFA.unpack("") - ] - - # class definition for DFA #8 - - class DFA8(DFA): - pass - - - # lookup tables for DFA #23 - - DFA23_eot = DFA.unpack( - "\3\uffff\1\4\1\uffff" - ) - - DFA23_eof = DFA.unpack( - "\5\uffff" - ) - - DFA23_min = DFA.unpack( - "\2\56\1\uffff\1\60\1\uffff" - ) - - DFA23_max = DFA.unpack( - "\2\71\1\uffff\1\71\1\uffff" - ) - - DFA23_accept = DFA.unpack( - "\2\uffff\1\1\1\uffff\1\2" - ) - - DFA23_special = DFA.unpack( - "\5\uffff" - ) - - - DFA23_transition = [ - DFA.unpack("\1\2\1\uffff\12\1"), - DFA.unpack("\1\3\1\uffff\12\1"), - DFA.unpack(""), - DFA.unpack("\12\2"), - DFA.unpack("") - ] - - # class definition for DFA #23 - - class DFA23(DFA): - pass - - - 
# lookup tables for DFA #24 - - DFA24_eot = DFA.unpack( - "\4\uffff" - ) - - DFA24_eof = DFA.unpack( - "\4\uffff" - ) - - DFA24_min = DFA.unpack( - "\2\56\2\uffff" - ) - - DFA24_max = DFA.unpack( - "\1\71\1\145\2\uffff" - ) - - DFA24_accept = DFA.unpack( - "\2\uffff\1\2\1\1" - ) - - DFA24_special = DFA.unpack( - "\4\uffff" - ) - - - DFA24_transition = [ - DFA.unpack("\1\2\1\uffff\12\1"), - DFA.unpack("\1\2\1\uffff\12\1\13\uffff\1\3\37\uffff\1\3"), - DFA.unpack(""), - DFA.unpack("") - ] - - # class definition for DFA #24 - - class DFA24(DFA): - pass - - - # lookup tables for DFA #38 - - DFA38_eot = DFA.unpack( - "\1\uffff\1\35\5\uffff\1\36\1\uffff\5\31\3\uffff\2\46\1\31\1\uffff" - "\4\31\6\uffff\1\47\5\31\1\46\2\uffff\1\46\14\31\2\16\1\47\6\31\1" - "\101\1\31\1\103\1\uffff\1\104\2\uffff" - ) - - DFA38_eof = DFA.unpack( - "\105\uffff" - ) - - DFA38_min = DFA.unpack( - "\1\11\1\55\5\uffff\1\56\1\uffff\1\145\1\170\1\156\1\157\1\117\3" - "\uffff\2\56\1\42\1\uffff\4\42\6\uffff\1\56\1\154\1\145\1\163\1\164" - "\1\124\1\56\2\uffff\1\56\10\42\1\53\1\145\1\143\1\145\3\56\1\164" - "\1\165\1\162\1\145\2\164\1\56\1\145\1\56\1\uffff\1\56\2\uffff" - ) - - DFA38_max = DFA.unpack( - "\1\172\1\55\5\uffff\1\172\1\uffff\1\145\1\170\1\156\1\157\1\117" - "\3\uffff\2\145\1\142\1\uffff\2\162\1\142\1\47\6\uffff\1\172\1\154" - "\1\145\1\163\1\164\1\124\1\145\2\uffff\1\145\10\47\1\71\1\145\1" - "\143\1\145\3\172\1\164\1\165\1\162\1\145\2\164\1\172\1\145\1\172" - "\1\uffff\1\172\2\uffff" - ) - - DFA38_accept = DFA.unpack( - "\2\uffff\1\2\1\3\1\4\1\5\1\6\1\uffff\1\11\5\uffff\1\15\1\16\1\17" - "\3\uffff\1\22\4\uffff\1\23\1\24\1\25\1\1\1\10\1\7\7\uffff\1\20\1" - "\21\31\uffff\1\12\1\uffff\1\14\1\13" - ) - - DFA38_special = DFA.unpack( - "\105\uffff" - ) - - - DFA38_transition = [ - DFA.unpack("\2\33\2\uffff\1\33\22\uffff\1\33\1\16\1\24\1\32\3\uffff" - "\1\24\1\4\1\6\1\uffff\1\20\1\2\1\20\1\7\1\32\1\22\11\21\1\1\1\10" - "\1\uffff\1\17\3\uffff\1\31\1\26\13\31\1\15\3\31\1\27\2\31\1\30\5" - "\31\1\3\1\uffff\1\5\1\uffff\1\31\1\uffff\1\31\1\25\1\31\1\11\1\12" - "\3\31\1\13\4\31\1\14\3\31\1\23\2\31\1\30\5\31"), - DFA.unpack("\1\34"), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack("\1\31\1\uffff\12\37\7\uffff\32\31\4\uffff\1\31\1\uffff" - "\32\31"), - DFA.unpack(""), - DFA.unpack("\1\40"), - DFA.unpack("\1\41"), - DFA.unpack("\1\42"), - DFA.unpack("\1\43"), - DFA.unpack("\1\44"), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack("\1\47\1\uffff\12\45\13\uffff\1\47\37\uffff\1\47"), - DFA.unpack("\1\47\1\uffff\1\50\11\47\13\uffff\1\47\37\uffff\1\47"), - DFA.unpack("\1\24\4\uffff\1\24\32\uffff\1\52\37\uffff\1\51"), - DFA.unpack(""), - DFA.unpack("\1\24\4\uffff\1\24\52\uffff\1\54\37\uffff\1\53"), - DFA.unpack("\1\24\4\uffff\1\24\52\uffff\1\56\37\uffff\1\55"), - DFA.unpack("\1\24\4\uffff\1\24\32\uffff\1\60\37\uffff\1\57"), - DFA.unpack("\1\24\4\uffff\1\24"), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack("\1\31\1\uffff\12\37\7\uffff\4\31\1\61\25\31\4\uffff" - "\1\31\1\uffff\4\31\1\61\25\31"), - DFA.unpack("\1\62"), - DFA.unpack("\1\63"), - DFA.unpack("\1\64"), - DFA.unpack("\1\65"), - DFA.unpack("\1\66"), - DFA.unpack("\1\47\1\uffff\12\45\13\uffff\1\47\37\uffff\1\47"), - DFA.unpack(""), - DFA.unpack(""), - DFA.unpack("\1\47\1\uffff\1\50\11\47\13\uffff\1\47\37\uffff\1\47"), - DFA.unpack("\1\24\4\uffff\1\24"), - DFA.unpack("\1\24\4\uffff\1\24"), - DFA.unpack("\1\24\4\uffff\1\24"), - 
DFA.unpack("\1\24\4\uffff\1\24"), - DFA.unpack("\1\24\4\uffff\1\24"), - DFA.unpack("\1\24\4\uffff\1\24"), - DFA.unpack("\1\24\4\uffff\1\24"), - DFA.unpack("\1\24\4\uffff\1\24"), - DFA.unpack("\1\47\1\uffff\1\47\2\uffff\12\67"), - DFA.unpack("\1\70"), - DFA.unpack("\1\71"), - DFA.unpack("\1\72"), - DFA.unpack("\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - "\32\31"), - DFA.unpack("\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - "\32\31"), - DFA.unpack("\1\31\1\uffff\12\67\7\uffff\32\31\4\uffff\1\31\1\uffff" - "\32\31"), - DFA.unpack("\1\73"), - DFA.unpack("\1\74"), - DFA.unpack("\1\75"), - DFA.unpack("\1\76"), - DFA.unpack("\1\77"), - DFA.unpack("\1\100"), - DFA.unpack("\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - "\32\31"), - DFA.unpack("\1\102"), - DFA.unpack("\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - "\32\31"), - DFA.unpack(""), - DFA.unpack("\1\31\1\uffff\12\31\7\uffff\32\31\4\uffff\1\31\1\uffff" - "\32\31"), - DFA.unpack(""), - DFA.unpack("") - ] - - # class definition for DFA #38 - - class DFA38(DFA): - pass - - - - - - -def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr): - from antlr3.main import LexerMain - main = LexerMain(CongressLexer) - - main.stdin = stdin - main.stdout = stdout - main.stderr = stderr - main.execute(argv) - - - -if __name__ == '__main__': - main(sys.argv) diff --git a/congress/datalog/Python3/CongressParser.py b/congress/datalog/Python3/CongressParser.py deleted file mode 100644 index 38624d17..00000000 --- a/congress/datalog/Python3/CongressParser.py +++ /dev/null @@ -1,2726 +0,0 @@ -# $ANTLR 3.5.2 Congress.g 2015-11-02 17:04:43 - -import sys -from antlr3 import * - -from antlr3.tree import * - - - - -# for convenience in actions -HIDDEN = BaseRecognizer.HIDDEN - -# token types -EOF=-1 -T__53=53 -T__54=54 -T__55=55 -T__56=56 -T__57=57 -T__58=58 -AND=4 -ATOM=5 -BYTESTRPREFIX=6 -BYTES_CHAR_DQ=7 -BYTES_CHAR_SQ=8 -BYTES_ESC=9 -BYTES_TESC=10 -COLONMINUS=11 -COLUMN_NAME=12 -COLUMN_NUMBER=13 -COMMA=14 -COMMENT=15 -DIGIT=16 -EQUAL=17 -EVENT=18 -EXPONENT=19 -FLOAT=20 -FLOAT_EXP=21 -FLOAT_NO_EXP=22 -FLOAT_OBJ=23 -FRAC_PART=24 -HEX_DIGIT=25 -ID=26 -INT=27 -INTEGER_OBJ=28 -INT_PART=29 -LBRACKET=30 -LITERAL=31 -LPAREN=32 -MODAL=33 -NAMED_PARAM=34 -NEGATION=35 -NOT=36 -PROG=37 -RBRACKET=38 -RPAREN=39 -RULE=40 -SIGN=41 -SLBYTESTRING=42 -SLSTRING=43 -STRING=44 -STRING_ESC=45 -STRING_OBJ=46 -STRPREFIX=47 -STRUCTURED_NAME=48 -SYMBOL_OBJ=49 -THEORY=50 -VARIABLE=51 -WS=52 - -# token names -tokenNamesMap = { - 0: "", 1: "", 2: "", 3: "", - -1: "EOF", 53: "T__53", 54: "T__54", 55: "T__55", 56: "T__56", 57: "T__57", - 58: "T__58", 4: "AND", 5: "ATOM", 6: "BYTESTRPREFIX", 7: "BYTES_CHAR_DQ", - 8: "BYTES_CHAR_SQ", 9: "BYTES_ESC", 10: "BYTES_TESC", 11: "COLONMINUS", - 12: "COLUMN_NAME", 13: "COLUMN_NUMBER", 14: "COMMA", 15: "COMMENT", - 16: "DIGIT", 17: "EQUAL", 18: "EVENT", 19: "EXPONENT", 20: "FLOAT", - 21: "FLOAT_EXP", 22: "FLOAT_NO_EXP", 23: "FLOAT_OBJ", 24: "FRAC_PART", - 25: "HEX_DIGIT", 26: "ID", 27: "INT", 28: "INTEGER_OBJ", 29: "INT_PART", - 30: "LBRACKET", 31: "LITERAL", 32: "LPAREN", 33: "MODAL", 34: "NAMED_PARAM", - 35: "NEGATION", 36: "NOT", 37: "PROG", 38: "RBRACKET", 39: "RPAREN", - 40: "RULE", 41: "SIGN", 42: "SLBYTESTRING", 43: "SLSTRING", 44: "STRING", - 45: "STRING_ESC", 46: "STRING_OBJ", 47: "STRPREFIX", 48: "STRUCTURED_NAME", - 49: "SYMBOL_OBJ", 50: "THEORY", 51: "VARIABLE", 52: "WS" -} -Token.registerTokenNamesMap(tokenNamesMap) - -# token names -tokenNames = [ - "", "", "", "", - "AND", "ATOM", 
"BYTESTRPREFIX", "BYTES_CHAR_DQ", "BYTES_CHAR_SQ", "BYTES_ESC", - "BYTES_TESC", "COLONMINUS", "COLUMN_NAME", "COLUMN_NUMBER", "COMMA", - "COMMENT", "DIGIT", "EQUAL", "EVENT", "EXPONENT", "FLOAT", "FLOAT_EXP", - "FLOAT_NO_EXP", "FLOAT_OBJ", "FRAC_PART", "HEX_DIGIT", "ID", "INT", - "INTEGER_OBJ", "INT_PART", "LBRACKET", "LITERAL", "LPAREN", "MODAL", - "NAMED_PARAM", "NEGATION", "NOT", "PROG", "RBRACKET", "RPAREN", "RULE", - "SIGN", "SLBYTESTRING", "SLSTRING", "STRING", "STRING_ESC", "STRING_OBJ", - "STRPREFIX", "STRUCTURED_NAME", "SYMBOL_OBJ", "THEORY", "VARIABLE", - "WS", "'.'", "':'", "';'", "'delete'", "'execute'", "'insert'" -] - - - -class CongressParser(Parser): - grammarFileName = "Congress.g" - api_version = 1 - tokenNames = tokenNames - - def __init__(self, input, state=None, *args, **kwargs): - if state is None: - state = RecognizerSharedState() - - super().__init__(input, state, *args, **kwargs) - - self.dfa5 = self.DFA5( - self, 5, - eot = self.DFA5_eot, - eof = self.DFA5_eof, - min = self.DFA5_min, - max = self.DFA5_max, - accept = self.DFA5_accept, - special = self.DFA5_special, - transition = self.DFA5_transition - ) - - - - - self.delegates = [] - - self._adaptor = None - self.adaptor = CommonTreeAdaptor() - - - - def getTreeAdaptor(self): - return self._adaptor - - def setTreeAdaptor(self, adaptor): - self._adaptor = adaptor - - adaptor = property(getTreeAdaptor, setTreeAdaptor) - - - class prog_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "prog" - # Congress.g:58:1: prog : ( ( statement )+ EOF -> ^( THEORY ( statement )+ ) | EOF ); - def prog(self, ): - retval = self.prog_return() - retval.start = self.input.LT(1) - - - root_0 = None - - EOF2 = None - EOF3 = None - statement1 = None - - EOF2_tree = None - EOF3_tree = None - stream_EOF = RewriteRuleTokenStream(self._adaptor, "token EOF") - stream_statement = RewriteRuleSubtreeStream(self._adaptor, "rule statement") - try: - try: - # Congress.g:59:5: ( ( statement )+ EOF -> ^( THEORY ( statement )+ ) | EOF ) - alt2 = 2 - LA2_0 = self.input.LA(1) - - if (LA2_0 in {COMMENT, ID, NEGATION, 56, 57, 58}) : - alt2 = 1 - elif (LA2_0 == EOF) : - alt2 = 2 - else: - nvae = NoViableAltException("", 2, 0, self.input) - - raise nvae - - - if alt2 == 1: - # Congress.g:59:7: ( statement )+ EOF - pass - # Congress.g:59:7: ( statement )+ - cnt1 = 0 - while True: #loop1 - alt1 = 2 - LA1_0 = self.input.LA(1) - - if (LA1_0 in {COMMENT, ID, NEGATION, 56, 57, 58}) : - alt1 = 1 - - - if alt1 == 1: - # Congress.g:59:7: statement - pass - self._state.following.append(self.FOLLOW_statement_in_prog265) - statement1 = self.statement() - - self._state.following.pop() - stream_statement.add(statement1.tree) - - - - else: - if cnt1 >= 1: - break #loop1 - - eee = EarlyExitException(1, self.input) - raise eee - - cnt1 += 1 - - - EOF2 = self.match(self.input, EOF, self.FOLLOW_EOF_in_prog268) - stream_EOF.add(EOF2) - - - # AST Rewrite - # elements: statement - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 59:22: -> ^( THEORY ( statement )+ ) - # Congress.g:59:25: ^( THEORY ( statement )+ ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - 
self._adaptor.createFromType(THEORY, "THEORY") - , root_1) - - # Congress.g:59:34: ( statement )+ - if not (stream_statement.hasNext()): - raise RewriteEarlyExitException() - - while stream_statement.hasNext(): - self._adaptor.addChild(root_1, stream_statement.nextTree()) - - - stream_statement.reset() - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - elif alt2 == 2: - # Congress.g:60:7: EOF - pass - root_0 = self._adaptor.nil() - - - EOF3 = self.match(self.input, EOF, self.FOLLOW_EOF_in_prog285) - EOF3_tree = self._adaptor.createWithPayload(EOF3) - self._adaptor.addChild(root_0, EOF3_tree) - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "prog" - - - class statement_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "statement" - # Congress.g:65:1: statement : ( formula ( formula_terminator )? -> formula | COMMENT ); - def statement(self, ): - retval = self.statement_return() - retval.start = self.input.LT(1) - - - root_0 = None - - COMMENT6 = None - formula4 = None - formula_terminator5 = None - - COMMENT6_tree = None - stream_formula_terminator = RewriteRuleSubtreeStream(self._adaptor, "rule formula_terminator") - stream_formula = RewriteRuleSubtreeStream(self._adaptor, "rule formula") - try: - try: - # Congress.g:66:5: ( formula ( formula_terminator )? -> formula | COMMENT ) - alt4 = 2 - LA4_0 = self.input.LA(1) - - if (LA4_0 in {ID, NEGATION, 56, 57, 58}) : - alt4 = 1 - elif (LA4_0 == COMMENT) : - alt4 = 2 - else: - nvae = NoViableAltException("", 4, 0, self.input) - - raise nvae - - - if alt4 == 1: - # Congress.g:66:7: formula ( formula_terminator )? - pass - self._state.following.append(self.FOLLOW_formula_in_statement304) - formula4 = self.formula() - - self._state.following.pop() - stream_formula.add(formula4.tree) - - - # Congress.g:66:15: ( formula_terminator )? 
- alt3 = 2 - LA3_0 = self.input.LA(1) - - if (LA3_0 in {53, 55}) : - alt3 = 1 - if alt3 == 1: - # Congress.g:66:15: formula_terminator - pass - self._state.following.append(self.FOLLOW_formula_terminator_in_statement306) - formula_terminator5 = self.formula_terminator() - - self._state.following.pop() - stream_formula_terminator.add(formula_terminator5.tree) - - - - - - # AST Rewrite - # elements: formula - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 66:35: -> formula - self._adaptor.addChild(root_0, stream_formula.nextTree()) - - - - - retval.tree = root_0 - - - - - elif alt4 == 2: - # Congress.g:67:7: COMMENT - pass - root_0 = self._adaptor.nil() - - - COMMENT6 = self.match(self.input, COMMENT, self.FOLLOW_COMMENT_in_statement319) - COMMENT6_tree = self._adaptor.createWithPayload(COMMENT6) - self._adaptor.addChild(root_0, COMMENT6_tree) - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "statement" - - - class formula_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "formula" - # Congress.g:70:1: formula : ( rule | fact | event ); - def formula(self, ): - retval = self.formula_return() - retval.start = self.input.LT(1) - - - root_0 = None - - rule7 = None - fact8 = None - event9 = None - - - try: - try: - # Congress.g:71:5: ( rule | fact | event ) - alt5 = 3 - alt5 = self.dfa5.predict(self.input) - if alt5 == 1: - # Congress.g:71:7: rule - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_rule_in_formula336) - rule7 = self.rule() - - self._state.following.pop() - self._adaptor.addChild(root_0, rule7.tree) - - - - elif alt5 == 2: - # Congress.g:72:7: fact - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_fact_in_formula344) - fact8 = self.fact() - - self._state.following.pop() - self._adaptor.addChild(root_0, fact8.tree) - - - - elif alt5 == 3: - # Congress.g:73:7: event - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_event_in_formula352) - event9 = self.event() - - self._state.following.pop() - self._adaptor.addChild(root_0, event9.tree) - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "formula" - - - class event_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "event" - # Congress.g:86:1: event : event_op LBRACKET rule ( formula_terminator STRING )? RBRACKET -> ^( EVENT event_op rule ( STRING )? 
) ; - def event(self, ): - retval = self.event_return() - retval.start = self.input.LT(1) - - - root_0 = None - - LBRACKET11 = None - STRING14 = None - RBRACKET15 = None - event_op10 = None - rule12 = None - formula_terminator13 = None - - LBRACKET11_tree = None - STRING14_tree = None - RBRACKET15_tree = None - stream_LBRACKET = RewriteRuleTokenStream(self._adaptor, "token LBRACKET") - stream_RBRACKET = RewriteRuleTokenStream(self._adaptor, "token RBRACKET") - stream_STRING = RewriteRuleTokenStream(self._adaptor, "token STRING") - stream_formula_terminator = RewriteRuleSubtreeStream(self._adaptor, "rule formula_terminator") - stream_rule = RewriteRuleSubtreeStream(self._adaptor, "rule rule") - stream_event_op = RewriteRuleSubtreeStream(self._adaptor, "rule event_op") - try: - try: - # Congress.g:87:5: ( event_op LBRACKET rule ( formula_terminator STRING )? RBRACKET -> ^( EVENT event_op rule ( STRING )? ) ) - # Congress.g:87:7: event_op LBRACKET rule ( formula_terminator STRING )? RBRACKET - pass - self._state.following.append(self.FOLLOW_event_op_in_event379) - event_op10 = self.event_op() - - self._state.following.pop() - stream_event_op.add(event_op10.tree) - - - LBRACKET11 = self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_event381) - stream_LBRACKET.add(LBRACKET11) - - - self._state.following.append(self.FOLLOW_rule_in_event383) - rule12 = self.rule() - - self._state.following.pop() - stream_rule.add(rule12.tree) - - - # Congress.g:87:30: ( formula_terminator STRING )? - alt6 = 2 - LA6_0 = self.input.LA(1) - - if (LA6_0 in {53, 55}) : - alt6 = 1 - if alt6 == 1: - # Congress.g:87:31: formula_terminator STRING - pass - self._state.following.append(self.FOLLOW_formula_terminator_in_event386) - formula_terminator13 = self.formula_terminator() - - self._state.following.pop() - stream_formula_terminator.add(formula_terminator13.tree) - - - STRING14 = self.match(self.input, STRING, self.FOLLOW_STRING_in_event388) - stream_STRING.add(STRING14) - - - - - - RBRACKET15 = self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_event392) - stream_RBRACKET.add(RBRACKET15) - - - # AST Rewrite - # elements: STRING, rule, event_op - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 87:68: -> ^( EVENT event_op rule ( STRING )? ) - # Congress.g:87:71: ^( EVENT event_op rule ( STRING )? ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(EVENT, "EVENT") - , root_1) - - self._adaptor.addChild(root_1, stream_event_op.nextTree()) - - self._adaptor.addChild(root_1, stream_rule.nextTree()) - - # Congress.g:87:93: ( STRING )? 
- if stream_STRING.hasNext(): - self._adaptor.addChild(root_1, - stream_STRING.nextNode() - ) - - - stream_STRING.reset(); - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "event" - - - class event_op_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "event_op" - # Congress.g:90:1: event_op : ( 'insert' | 'delete' ); - def event_op(self, ): - retval = self.event_op_return() - retval.start = self.input.LT(1) - - - root_0 = None - - set16 = None - - set16_tree = None - - try: - try: - # Congress.g:91:5: ( 'insert' | 'delete' ) - # Congress.g: - pass - root_0 = self._adaptor.nil() - - - set16 = self.input.LT(1) - - if self.input.LA(1) in {56, 58}: - self.input.consume() - self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set16)) - - self._state.errorRecovery = False - - - else: - mse = MismatchedSetException(None, self.input) - raise mse - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "event_op" - - - class formula_terminator_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "formula_terminator" - # Congress.g:95:1: formula_terminator : ( ';' | '.' ); - def formula_terminator(self, ): - retval = self.formula_terminator_return() - retval.start = self.input.LT(1) - - - root_0 = None - - set17 = None - - set17_tree = None - - try: - try: - # Congress.g:96:5: ( ';' | '.' 
) - # Congress.g: - pass - root_0 = self._adaptor.nil() - - - set17 = self.input.LT(1) - - if self.input.LA(1) in {53, 55}: - self.input.consume() - self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set17)) - - self._state.errorRecovery = False - - - else: - mse = MismatchedSetException(None, self.input) - raise mse - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "formula_terminator" - - - class rule_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "rule" - # Congress.g:100:1: rule : literal_list COLONMINUS literal_list -> ^( RULE literal_list literal_list ) ; - def rule(self, ): - retval = self.rule_return() - retval.start = self.input.LT(1) - - - root_0 = None - - COLONMINUS19 = None - literal_list18 = None - literal_list20 = None - - COLONMINUS19_tree = None - stream_COLONMINUS = RewriteRuleTokenStream(self._adaptor, "token COLONMINUS") - stream_literal_list = RewriteRuleSubtreeStream(self._adaptor, "rule literal_list") - try: - try: - # Congress.g:101:5: ( literal_list COLONMINUS literal_list -> ^( RULE literal_list literal_list ) ) - # Congress.g:101:7: literal_list COLONMINUS literal_list - pass - self._state.following.append(self.FOLLOW_literal_list_in_rule472) - literal_list18 = self.literal_list() - - self._state.following.pop() - stream_literal_list.add(literal_list18.tree) - - - COLONMINUS19 = self.match(self.input, COLONMINUS, self.FOLLOW_COLONMINUS_in_rule474) - stream_COLONMINUS.add(COLONMINUS19) - - - self._state.following.append(self.FOLLOW_literal_list_in_rule476) - literal_list20 = self.literal_list() - - self._state.following.pop() - stream_literal_list.add(literal_list20.tree) - - - # AST Rewrite - # elements: literal_list, literal_list - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 101:44: -> ^( RULE literal_list literal_list ) - # Congress.g:101:47: ^( RULE literal_list literal_list ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(RULE, "RULE") - , root_1) - - self._adaptor.addChild(root_1, stream_literal_list.nextTree()) - - self._adaptor.addChild(root_1, stream_literal_list.nextTree()) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "rule" - - - class literal_list_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "literal_list" - # 
Congress.g:104:1: literal_list : literal ( COMMA literal )* -> ^( AND ( literal )+ ) ; - def literal_list(self, ): - retval = self.literal_list_return() - retval.start = self.input.LT(1) - - - root_0 = None - - COMMA22 = None - literal21 = None - literal23 = None - - COMMA22_tree = None - stream_COMMA = RewriteRuleTokenStream(self._adaptor, "token COMMA") - stream_literal = RewriteRuleSubtreeStream(self._adaptor, "rule literal") - try: - try: - # Congress.g:105:5: ( literal ( COMMA literal )* -> ^( AND ( literal )+ ) ) - # Congress.g:105:7: literal ( COMMA literal )* - pass - self._state.following.append(self.FOLLOW_literal_in_literal_list503) - literal21 = self.literal() - - self._state.following.pop() - stream_literal.add(literal21.tree) - - - # Congress.g:105:15: ( COMMA literal )* - while True: #loop7 - alt7 = 2 - LA7_0 = self.input.LA(1) - - if (LA7_0 == COMMA) : - alt7 = 1 - - - if alt7 == 1: - # Congress.g:105:16: COMMA literal - pass - COMMA22 = self.match(self.input, COMMA, self.FOLLOW_COMMA_in_literal_list506) - stream_COMMA.add(COMMA22) - - - self._state.following.append(self.FOLLOW_literal_in_literal_list508) - literal23 = self.literal() - - self._state.following.pop() - stream_literal.add(literal23.tree) - - - - else: - break #loop7 - - - # AST Rewrite - # elements: literal - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 105:32: -> ^( AND ( literal )+ ) - # Congress.g:105:35: ^( AND ( literal )+ ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(AND, "AND") - , root_1) - - # Congress.g:105:41: ( literal )+ - if not (stream_literal.hasNext()): - raise RewriteEarlyExitException() - - while stream_literal.hasNext(): - self._adaptor.addChild(root_1, stream_literal.nextTree()) - - - stream_literal.reset() - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "literal_list" - - - class literal_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "literal" - # Congress.g:108:1: literal : ( fact -> fact | NEGATION fact -> ^( NOT fact ) ); - def literal(self, ): - retval = self.literal_return() - retval.start = self.input.LT(1) - - - root_0 = None - - NEGATION25 = None - fact24 = None - fact26 = None - - NEGATION25_tree = None - stream_NEGATION = RewriteRuleTokenStream(self._adaptor, "token NEGATION") - stream_fact = RewriteRuleSubtreeStream(self._adaptor, "rule fact") - try: - try: - # Congress.g:109:5: ( fact -> fact | NEGATION fact -> ^( NOT fact ) ) - alt8 = 2 - LA8_0 = self.input.LA(1) - - if (LA8_0 in {ID, 56, 57, 58}) : - alt8 = 1 - elif (LA8_0 == NEGATION) : - alt8 = 2 - else: - nvae = NoViableAltException("", 8, 0, self.input) - - raise nvae - - - if alt8 == 1: - # Congress.g:109:7: fact - pass - 
self._state.following.append(self.FOLLOW_fact_in_literal536) - fact24 = self.fact() - - self._state.following.pop() - stream_fact.add(fact24.tree) - - - # AST Rewrite - # elements: fact - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 109:23: -> fact - self._adaptor.addChild(root_0, stream_fact.nextTree()) - - - - - retval.tree = root_0 - - - - - elif alt8 == 2: - # Congress.g:110:7: NEGATION fact - pass - NEGATION25 = self.match(self.input, NEGATION, self.FOLLOW_NEGATION_in_literal559) - stream_NEGATION.add(NEGATION25) - - - self._state.following.append(self.FOLLOW_fact_in_literal561) - fact26 = self.fact() - - self._state.following.pop() - stream_fact.add(fact26.tree) - - - # AST Rewrite - # elements: fact - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 110:23: -> ^( NOT fact ) - # Congress.g:110:26: ^( NOT fact ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(NOT, "NOT") - , root_1) - - self._adaptor.addChild(root_1, stream_fact.nextTree()) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "literal" - - - class fact_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "fact" - # Congress.g:115:1: fact : ( atom | modal_op LBRACKET atom RBRACKET -> ^( MODAL modal_op atom ) ); - def fact(self, ): - retval = self.fact_return() - retval.start = self.input.LT(1) - - - root_0 = None - - LBRACKET29 = None - RBRACKET31 = None - atom27 = None - modal_op28 = None - atom30 = None - - LBRACKET29_tree = None - RBRACKET31_tree = None - stream_LBRACKET = RewriteRuleTokenStream(self._adaptor, "token LBRACKET") - stream_RBRACKET = RewriteRuleTokenStream(self._adaptor, "token RBRACKET") - stream_atom = RewriteRuleSubtreeStream(self._adaptor, "rule atom") - stream_modal_op = RewriteRuleSubtreeStream(self._adaptor, "rule modal_op") - try: - try: - # Congress.g:116:5: ( atom | modal_op LBRACKET atom RBRACKET -> ^( MODAL modal_op atom ) ) - alt9 = 2 - LA9_0 = self.input.LA(1) - - if (LA9_0 == ID) : - alt9 = 1 - elif (LA9_0 in {56, 57, 58}) : - alt9 = 2 - else: - nvae = NoViableAltException("", 9, 0, self.input) - - raise nvae - - - if alt9 == 1: - # Congress.g:116:7: atom - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_atom_in_fact590) - atom27 = self.atom() - - self._state.following.pop() - self._adaptor.addChild(root_0, atom27.tree) - - - - elif alt9 == 2: - # Congress.g:117:7: modal_op 
LBRACKET atom RBRACKET - pass - self._state.following.append(self.FOLLOW_modal_op_in_fact598) - modal_op28 = self.modal_op() - - self._state.following.pop() - stream_modal_op.add(modal_op28.tree) - - - LBRACKET29 = self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_fact600) - stream_LBRACKET.add(LBRACKET29) - - - self._state.following.append(self.FOLLOW_atom_in_fact602) - atom30 = self.atom() - - self._state.following.pop() - stream_atom.add(atom30.tree) - - - RBRACKET31 = self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_fact604) - stream_RBRACKET.add(RBRACKET31) - - - # AST Rewrite - # elements: atom, modal_op - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 117:39: -> ^( MODAL modal_op atom ) - # Congress.g:117:42: ^( MODAL modal_op atom ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(MODAL, "MODAL") - , root_1) - - self._adaptor.addChild(root_1, stream_modal_op.nextTree()) - - self._adaptor.addChild(root_1, stream_atom.nextTree()) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "fact" - - - class modal_op_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "modal_op" - # Congress.g:120:1: modal_op : ( 'execute' | 'insert' | 'delete' ); - def modal_op(self, ): - retval = self.modal_op_return() - retval.start = self.input.LT(1) - - - root_0 = None - - set32 = None - - set32_tree = None - - try: - try: - # Congress.g:121:5: ( 'execute' | 'insert' | 'delete' ) - # Congress.g: - pass - root_0 = self._adaptor.nil() - - - set32 = self.input.LT(1) - - if self.input.LA(1) in {56, 57, 58}: - self.input.consume() - self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set32)) - - self._state.errorRecovery = False - - - else: - mse = MismatchedSetException(None, self.input) - raise mse - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "modal_op" - - - class atom_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "atom" - # Congress.g:126:1: atom : relation_constant ( LPAREN ( parameter_list )? RPAREN )? -> ^( ATOM relation_constant ( parameter_list )? 
) ; - def atom(self, ): - retval = self.atom_return() - retval.start = self.input.LT(1) - - - root_0 = None - - LPAREN34 = None - RPAREN36 = None - relation_constant33 = None - parameter_list35 = None - - LPAREN34_tree = None - RPAREN36_tree = None - stream_RPAREN = RewriteRuleTokenStream(self._adaptor, "token RPAREN") - stream_LPAREN = RewriteRuleTokenStream(self._adaptor, "token LPAREN") - stream_relation_constant = RewriteRuleSubtreeStream(self._adaptor, "rule relation_constant") - stream_parameter_list = RewriteRuleSubtreeStream(self._adaptor, "rule parameter_list") - try: - try: - # Congress.g:127:5: ( relation_constant ( LPAREN ( parameter_list )? RPAREN )? -> ^( ATOM relation_constant ( parameter_list )? ) ) - # Congress.g:127:7: relation_constant ( LPAREN ( parameter_list )? RPAREN )? - pass - self._state.following.append(self.FOLLOW_relation_constant_in_atom664) - relation_constant33 = self.relation_constant() - - self._state.following.pop() - stream_relation_constant.add(relation_constant33.tree) - - - # Congress.g:127:25: ( LPAREN ( parameter_list )? RPAREN )? - alt11 = 2 - LA11_0 = self.input.LA(1) - - if (LA11_0 == LPAREN) : - alt11 = 1 - if alt11 == 1: - # Congress.g:127:26: LPAREN ( parameter_list )? RPAREN - pass - LPAREN34 = self.match(self.input, LPAREN, self.FOLLOW_LPAREN_in_atom667) - stream_LPAREN.add(LPAREN34) - - - # Congress.g:127:33: ( parameter_list )? - alt10 = 2 - LA10_0 = self.input.LA(1) - - if (LA10_0 in {FLOAT, ID, INT, STRING}) : - alt10 = 1 - if alt10 == 1: - # Congress.g:127:33: parameter_list - pass - self._state.following.append(self.FOLLOW_parameter_list_in_atom669) - parameter_list35 = self.parameter_list() - - self._state.following.pop() - stream_parameter_list.add(parameter_list35.tree) - - - - - - RPAREN36 = self.match(self.input, RPAREN, self.FOLLOW_RPAREN_in_atom672) - stream_RPAREN.add(RPAREN36) - - - - - - # AST Rewrite - # elements: parameter_list, relation_constant - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 127:58: -> ^( ATOM relation_constant ( parameter_list )? ) - # Congress.g:127:61: ^( ATOM relation_constant ( parameter_list )? ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(ATOM, "ATOM") - , root_1) - - self._adaptor.addChild(root_1, stream_relation_constant.nextTree()) - - # Congress.g:127:86: ( parameter_list )? 
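# Illustrative note (not ANTLR output): the parameter_list child is attached
# only when parameters were actually parsed, so a 0-ary atom like "p" or
# "p()" becomes ^(ATOM ^(STRUCTURED_NAME p)), while "p(x, 1)" also gains the
# parameter subtrees ^(VARIABLE x) and ^(INTEGER_OBJ 1).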
- if stream_parameter_list.hasNext(): - self._adaptor.addChild(root_1, stream_parameter_list.nextTree()) - - - stream_parameter_list.reset(); - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "atom" - - - class parameter_list_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "parameter_list" - # Congress.g:130:1: parameter_list : parameter ( COMMA parameter )* -> ( parameter )+ ; - def parameter_list(self, ): - retval = self.parameter_list_return() - retval.start = self.input.LT(1) - - - root_0 = None - - COMMA38 = None - parameter37 = None - parameter39 = None - - COMMA38_tree = None - stream_COMMA = RewriteRuleTokenStream(self._adaptor, "token COMMA") - stream_parameter = RewriteRuleSubtreeStream(self._adaptor, "rule parameter") - try: - try: - # Congress.g:131:5: ( parameter ( COMMA parameter )* -> ( parameter )+ ) - # Congress.g:131:7: parameter ( COMMA parameter )* - pass - self._state.following.append(self.FOLLOW_parameter_in_parameter_list702) - parameter37 = self.parameter() - - self._state.following.pop() - stream_parameter.add(parameter37.tree) - - - # Congress.g:131:17: ( COMMA parameter )* - while True: #loop12 - alt12 = 2 - LA12_0 = self.input.LA(1) - - if (LA12_0 == COMMA) : - alt12 = 1 - - - if alt12 == 1: - # Congress.g:131:18: COMMA parameter - pass - COMMA38 = self.match(self.input, COMMA, self.FOLLOW_COMMA_in_parameter_list705) - stream_COMMA.add(COMMA38) - - - self._state.following.append(self.FOLLOW_parameter_in_parameter_list707) - parameter39 = self.parameter() - - self._state.following.pop() - stream_parameter.add(parameter39.tree) - - - - else: - break #loop12 - - - # AST Rewrite - # elements: parameter - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 131:36: -> ( parameter )+ - # Congress.g:131:39: ( parameter )+ - if not (stream_parameter.hasNext()): - raise RewriteEarlyExitException() - - while stream_parameter.hasNext(): - self._adaptor.addChild(root_0, stream_parameter.nextTree()) - - - stream_parameter.reset() - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "parameter_list" - - - class parameter_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "parameter" - # Congress.g:134:1: parameter : ( term -> term | column_ref EQUAL term -> ^( NAMED_PARAM column_ref term ) ); - def 
parameter(self, ): - retval = self.parameter_return() - retval.start = self.input.LT(1) - - - root_0 = None - - EQUAL42 = None - term40 = None - column_ref41 = None - term43 = None - - EQUAL42_tree = None - stream_EQUAL = RewriteRuleTokenStream(self._adaptor, "token EQUAL") - stream_term = RewriteRuleSubtreeStream(self._adaptor, "rule term") - stream_column_ref = RewriteRuleSubtreeStream(self._adaptor, "rule column_ref") - try: - try: - # Congress.g:135:5: ( term -> term | column_ref EQUAL term -> ^( NAMED_PARAM column_ref term ) ) - alt13 = 2 - LA13 = self.input.LA(1) - if LA13 in {INT}: - LA13_1 = self.input.LA(2) - - if (LA13_1 in {COMMA, RPAREN}) : - alt13 = 1 - elif (LA13_1 == EQUAL) : - alt13 = 2 - else: - nvae = NoViableAltException("", 13, 1, self.input) - - raise nvae - - - elif LA13 in {FLOAT, STRING}: - alt13 = 1 - elif LA13 in {ID}: - LA13_3 = self.input.LA(2) - - if (LA13_3 in {COMMA, RPAREN}) : - alt13 = 1 - elif (LA13_3 == EQUAL) : - alt13 = 2 - else: - nvae = NoViableAltException("", 13, 3, self.input) - - raise nvae - - - else: - nvae = NoViableAltException("", 13, 0, self.input) - - raise nvae - - - if alt13 == 1: - # Congress.g:135:7: term - pass - self._state.following.append(self.FOLLOW_term_in_parameter731) - term40 = self.term() - - self._state.following.pop() - stream_term.add(term40.tree) - - - # AST Rewrite - # elements: term - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 135:12: -> term - self._adaptor.addChild(root_0, stream_term.nextTree()) - - - - - retval.tree = root_0 - - - - - elif alt13 == 2: - # Congress.g:136:7: column_ref EQUAL term - pass - self._state.following.append(self.FOLLOW_column_ref_in_parameter743) - column_ref41 = self.column_ref() - - self._state.following.pop() - stream_column_ref.add(column_ref41.tree) - - - EQUAL42 = self.match(self.input, EQUAL, self.FOLLOW_EQUAL_in_parameter745) - stream_EQUAL.add(EQUAL42) - - - self._state.following.append(self.FOLLOW_term_in_parameter747) - term43 = self.term() - - self._state.following.pop() - stream_term.add(term43.tree) - - - # AST Rewrite - # elements: column_ref, term - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 136:29: -> ^( NAMED_PARAM column_ref term ) - # Congress.g:136:32: ^( NAMED_PARAM column_ref term ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(NAMED_PARAM, "NAMED_PARAM") - , root_1) - - self._adaptor.addChild(root_1, stream_column_ref.nextTree()) - - self._adaptor.addChild(root_1, stream_term.nextTree()) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, 
retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "parameter" - - - class column_ref_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "column_ref" - # Congress.g:139:1: column_ref : ( ID -> ^( COLUMN_NAME ID ) | INT -> ^( COLUMN_NUMBER INT ) ); - def column_ref(self, ): - retval = self.column_ref_return() - retval.start = self.input.LT(1) - - - root_0 = None - - ID44 = None - INT45 = None - - ID44_tree = None - INT45_tree = None - stream_INT = RewriteRuleTokenStream(self._adaptor, "token INT") - stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID") - - try: - try: - # Congress.g:140:5: ( ID -> ^( COLUMN_NAME ID ) | INT -> ^( COLUMN_NUMBER INT ) ) - alt14 = 2 - LA14_0 = self.input.LA(1) - - if (LA14_0 == ID) : - alt14 = 1 - elif (LA14_0 == INT) : - alt14 = 2 - else: - nvae = NoViableAltException("", 14, 0, self.input) - - raise nvae - - - if alt14 == 1: - # Congress.g:140:7: ID - pass - ID44 = self.match(self.input, ID, self.FOLLOW_ID_in_column_ref774) - stream_ID.add(ID44) - - - # AST Rewrite - # elements: ID - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 140:12: -> ^( COLUMN_NAME ID ) - # Congress.g:140:16: ^( COLUMN_NAME ID ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(COLUMN_NAME, "COLUMN_NAME") - , root_1) - - self._adaptor.addChild(root_1, - stream_ID.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - elif alt14 == 2: - # Congress.g:141:7: INT - pass - INT45 = self.match(self.input, INT, self.FOLLOW_INT_in_column_ref793) - stream_INT.add(INT45) - - - # AST Rewrite - # elements: INT - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 141:12: -> ^( COLUMN_NUMBER INT ) - # Congress.g:141:16: ^( COLUMN_NUMBER INT ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(COLUMN_NUMBER, "COLUMN_NUMBER") - , root_1) - - self._adaptor.addChild(root_1, - stream_INT.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "column_ref" - - - class term_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "term" - # Congress.g:144:1: term : ( object_constant | variable ); - def term(self, ): - retval = self.term_return() - retval.start = self.input.LT(1) - - - root_0 = None - - object_constant46 = 
None - variable47 = None - - - try: - try: - # Congress.g:145:5: ( object_constant | variable ) - alt15 = 2 - LA15_0 = self.input.LA(1) - - if (LA15_0 in {FLOAT, INT, STRING}) : - alt15 = 1 - elif (LA15_0 == ID) : - alt15 = 2 - else: - nvae = NoViableAltException("", 15, 0, self.input) - - raise nvae - - - if alt15 == 1: - # Congress.g:145:7: object_constant - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_object_constant_in_term820) - object_constant46 = self.object_constant() - - self._state.following.pop() - self._adaptor.addChild(root_0, object_constant46.tree) - - - - elif alt15 == 2: - # Congress.g:146:7: variable - pass - root_0 = self._adaptor.nil() - - - self._state.following.append(self.FOLLOW_variable_in_term828) - variable47 = self.variable() - - self._state.following.pop() - self._adaptor.addChild(root_0, variable47.tree) - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "term" - - - class object_constant_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "object_constant" - # Congress.g:149:1: object_constant : ( INT -> ^( INTEGER_OBJ INT ) | FLOAT -> ^( FLOAT_OBJ FLOAT ) | STRING -> ^( STRING_OBJ STRING ) ); - def object_constant(self, ): - retval = self.object_constant_return() - retval.start = self.input.LT(1) - - - root_0 = None - - INT48 = None - FLOAT49 = None - STRING50 = None - - INT48_tree = None - FLOAT49_tree = None - STRING50_tree = None - stream_FLOAT = RewriteRuleTokenStream(self._adaptor, "token FLOAT") - stream_INT = RewriteRuleTokenStream(self._adaptor, "token INT") - stream_STRING = RewriteRuleTokenStream(self._adaptor, "token STRING") - - try: - try: - # Congress.g:150:5: ( INT -> ^( INTEGER_OBJ INT ) | FLOAT -> ^( FLOAT_OBJ FLOAT ) | STRING -> ^( STRING_OBJ STRING ) ) - alt16 = 3 - LA16 = self.input.LA(1) - if LA16 in {INT}: - alt16 = 1 - elif LA16 in {FLOAT}: - alt16 = 2 - elif LA16 in {STRING}: - alt16 = 3 - else: - nvae = NoViableAltException("", 16, 0, self.input) - - raise nvae - - - if alt16 == 1: - # Congress.g:150:7: INT - pass - INT48 = self.match(self.input, INT, self.FOLLOW_INT_in_object_constant845) - stream_INT.add(INT48) - - - # AST Rewrite - # elements: INT - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 150:16: -> ^( INTEGER_OBJ INT ) - # Congress.g:150:19: ^( INTEGER_OBJ INT ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(INTEGER_OBJ, "INTEGER_OBJ") - , root_1) - - self._adaptor.addChild(root_1, - stream_INT.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - elif alt16 == 2: - # Congress.g:151:7: FLOAT - pass - FLOAT49 = self.match(self.input, FLOAT, self.FOLLOW_FLOAT_in_object_constant866) - stream_FLOAT.add(FLOAT49) - - - # AST Rewrite - # elements: FLOAT - # 
token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 151:16: -> ^( FLOAT_OBJ FLOAT ) - # Congress.g:151:19: ^( FLOAT_OBJ FLOAT ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(FLOAT_OBJ, "FLOAT_OBJ") - , root_1) - - self._adaptor.addChild(root_1, - stream_FLOAT.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - elif alt16 == 3: - # Congress.g:152:7: STRING - pass - STRING50 = self.match(self.input, STRING, self.FOLLOW_STRING_in_object_constant885) - stream_STRING.add(STRING50) - - - # AST Rewrite - # elements: STRING - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 152:16: -> ^( STRING_OBJ STRING ) - # Congress.g:152:19: ^( STRING_OBJ STRING ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(STRING_OBJ, "STRING_OBJ") - , root_1) - - self._adaptor.addChild(root_1, - stream_STRING.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "object_constant" - - - class variable_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "variable" - # Congress.g:155:1: variable : ID -> ^( VARIABLE ID ) ; - def variable(self, ): - retval = self.variable_return() - retval.start = self.input.LT(1) - - - root_0 = None - - ID51 = None - - ID51_tree = None - stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID") - - try: - try: - # Congress.g:156:5: ( ID -> ^( VARIABLE ID ) ) - # Congress.g:156:7: ID - pass - ID51 = self.match(self.input, ID, self.FOLLOW_ID_in_variable912) - stream_ID.add(ID51) - - - # AST Rewrite - # elements: ID - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 156:10: -> ^( VARIABLE ID ) - # Congress.g:156:13: ^( VARIABLE ID ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(VARIABLE, "VARIABLE") - , root_1) - - self._adaptor.addChild(root_1, - stream_ID.nextNode() - ) - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) 
- self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "variable" - - - class relation_constant_return(ParserRuleReturnScope): - def __init__(self): - super().__init__() - - self.tree = None - - - - - - # $ANTLR start "relation_constant" - # Congress.g:159:1: relation_constant : ID ( ':' ID )* ( SIGN )? -> ^( STRUCTURED_NAME ( ID )+ ( SIGN )? ) ; - def relation_constant(self, ): - retval = self.relation_constant_return() - retval.start = self.input.LT(1) - - - root_0 = None - - ID52 = None - char_literal53 = None - ID54 = None - SIGN55 = None - - ID52_tree = None - char_literal53_tree = None - ID54_tree = None - SIGN55_tree = None - stream_SIGN = RewriteRuleTokenStream(self._adaptor, "token SIGN") - stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID") - stream_54 = RewriteRuleTokenStream(self._adaptor, "token 54") - - try: - try: - # Congress.g:160:5: ( ID ( ':' ID )* ( SIGN )? -> ^( STRUCTURED_NAME ( ID )+ ( SIGN )? ) ) - # Congress.g:160:7: ID ( ':' ID )* ( SIGN )? - pass - ID52 = self.match(self.input, ID, self.FOLLOW_ID_in_relation_constant937) - stream_ID.add(ID52) - - - # Congress.g:160:10: ( ':' ID )* - while True: #loop17 - alt17 = 2 - LA17_0 = self.input.LA(1) - - if (LA17_0 == 54) : - alt17 = 1 - - - if alt17 == 1: - # Congress.g:160:11: ':' ID - pass - char_literal53 = self.match(self.input, 54, self.FOLLOW_54_in_relation_constant940) - stream_54.add(char_literal53) - - - ID54 = self.match(self.input, ID, self.FOLLOW_ID_in_relation_constant942) - stream_ID.add(ID54) - - - - else: - break #loop17 - - - # Congress.g:160:20: ( SIGN )? - alt18 = 2 - LA18_0 = self.input.LA(1) - - if (LA18_0 == SIGN) : - alt18 = 1 - if alt18 == 1: - # Congress.g:160:20: SIGN - pass - SIGN55 = self.match(self.input, SIGN, self.FOLLOW_SIGN_in_relation_constant946) - stream_SIGN.add(SIGN55) - - - - - - # AST Rewrite - # elements: ID, SIGN - # token labels: - # rule labels: retval - # token list labels: - # rule list labels: - # wildcard labels: - retval.tree = root_0 - if retval is not None: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree) - else: - stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) - - - root_0 = self._adaptor.nil() - # 160:26: -> ^( STRUCTURED_NAME ( ID )+ ( SIGN )? ) - # Congress.g:160:29: ^( STRUCTURED_NAME ( ID )+ ( SIGN )? ) - root_1 = self._adaptor.nil() - root_1 = self._adaptor.becomeRoot( - self._adaptor.createFromType(STRUCTURED_NAME, "STRUCTURED_NAME") - , root_1) - - # Congress.g:160:47: ( ID )+ - if not (stream_ID.hasNext()): - raise RewriteEarlyExitException() - - while stream_ID.hasNext(): - self._adaptor.addChild(root_1, - stream_ID.nextNode() - ) - - - stream_ID.reset() - - # Congress.g:160:51: ( SIGN )? 
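# Illustrative note (not ANTLR output): the optional SIGN token, when
# present, becomes the final child of STRUCTURED_NAME, so a signed table
# reference such as "nova:servers+" yields ^(STRUCTURED_NAME nova servers +).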
- if stream_SIGN.hasNext(): - self._adaptor.addChild(root_1, - stream_SIGN.nextNode() - ) - - - stream_SIGN.reset(); - - self._adaptor.addChild(root_0, root_1) - - - - - retval.tree = root_0 - - - - - - retval.stop = self.input.LT(-1) - - - retval.tree = self._adaptor.rulePostProcessing(root_0) - self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) - - - - except RecognitionException as re: - self.reportError(re) - self.recover(self.input, re) - retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) - - finally: - pass - return retval - - # $ANTLR end "relation_constant" - - - - # lookup tables for DFA #5 - - DFA5_eot = DFA.unpack( - "\124\uffff" - ) - - DFA5_eof = DFA.unpack( - "\1\uffff\1\10\4\uffff\1\10\4\uffff\1\10\4\uffff\1\10\10\uffff\1" - "\10\72\uffff" - ) - - DFA5_min = DFA.unpack( - "\1\32\1\13\1\36\1\uffff\1\36\1\32\1\13\1\24\1\uffff\2\32\1\13\4" - "\16\2\13\1\uffff\1\40\2\24\1\32\1\13\1\24\1\13\1\32\1\40\1\24\10" - "\16\1\13\4\16\1\13\1\40\4\16\1\46\5\24\24\16\2\24\10\16" - ) - - DFA5_max = DFA.unpack( - "\2\72\1\36\1\uffff\1\36\1\32\1\72\1\54\1\uffff\1\72\1\32\1\72\4" - "\47\1\72\1\66\1\uffff\1\66\2\54\1\32\1\46\1\54\1\72\1\32\1\46\1" - "\54\10\47\1\66\4\47\1\46\1\66\4\47\1\46\5\54\24\47\2\54\10\47" - ) - - DFA5_accept = DFA.unpack( - "\3\uffff\1\1\4\uffff\1\2\11\uffff\1\3\101\uffff" - ) - - DFA5_special = DFA.unpack( - "\124\uffff" - ) - - - DFA5_transition = [ - DFA.unpack("\1\1\10\uffff\1\3\24\uffff\1\2\1\4\1\2"), - DFA.unpack("\1\3\2\uffff\1\3\1\10\12\uffff\1\10\5\uffff\1\7\2\uffff" - "\1\10\5\uffff\1\6\13\uffff\1\10\1\5\4\10"), - DFA.unpack("\1\11"), - DFA.unpack(""), - DFA.unpack("\1\12"), - DFA.unpack("\1\13"), - DFA.unpack("\1\3\2\uffff\1\3\1\10\12\uffff\1\10\5\uffff\1\7\2\uffff" - "\1\10\21\uffff\1\10\1\uffff\4\10"), - DFA.unpack("\1\15\5\uffff\1\17\1\14\13\uffff\1\20\4\uffff\1\16"), - DFA.unpack(""), - DFA.unpack("\1\21\10\uffff\1\22\24\uffff\3\22"), - DFA.unpack("\1\23"), - DFA.unpack("\1\3\2\uffff\1\3\1\10\12\uffff\1\10\5\uffff\1\7\2\uffff" - "\1\10\5\uffff\1\6\13\uffff\1\10\1\5\4\10"), - DFA.unpack("\1\24\2\uffff\1\25\25\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\2\uffff\1\25\25\uffff\1\20"), - DFA.unpack("\1\3\2\uffff\1\3\1\10\12\uffff\1\10\10\uffff\1\10\21" - "\uffff\1\10\1\uffff\4\10"), - DFA.unpack("\1\22\2\uffff\1\22\21\uffff\1\30\5\uffff\1\31\2\uffff" - "\1\27\14\uffff\1\26"), - DFA.unpack(""), - DFA.unpack("\1\34\5\uffff\1\31\2\uffff\1\33\14\uffff\1\32"), - DFA.unpack("\1\36\5\uffff\1\40\1\35\20\uffff\1\37"), - DFA.unpack("\1\42\5\uffff\1\44\1\41\20\uffff\1\43"), - DFA.unpack("\1\45"), - DFA.unpack("\1\22\2\uffff\1\22\21\uffff\1\30\5\uffff\1\31"), - DFA.unpack("\1\47\5\uffff\1\51\1\46\13\uffff\1\52\4\uffff\1\50"), - DFA.unpack("\1\3\2\uffff\1\3\1\10\12\uffff\1\10\10\uffff\1\10\21" - "\uffff\1\10\1\uffff\4\10"), - DFA.unpack("\1\53"), - DFA.unpack("\1\34\5\uffff\1\31"), - DFA.unpack("\1\55\5\uffff\1\57\1\54\13\uffff\1\60\4\uffff\1\56"), - DFA.unpack("\1\24\2\uffff\1\61\25\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\2\uffff\1\61\25\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\22\2\uffff\1\22\21\uffff\1\30\5\uffff\1\31\2\uffff" - "\1\27\14\uffff\1\26"), - DFA.unpack("\1\62\2\uffff\1\63\25\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - 
DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\62\2\uffff\1\63\25\uffff\1\52"), - DFA.unpack("\1\22\2\uffff\1\22\27\uffff\1\31"), - DFA.unpack("\1\34\5\uffff\1\31\2\uffff\1\33\14\uffff\1\32"), - DFA.unpack("\1\64\2\uffff\1\65\25\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\2\uffff\1\65\25\uffff\1\60"), - DFA.unpack("\1\31"), - DFA.unpack("\1\67\5\uffff\1\71\1\66\20\uffff\1\70"), - DFA.unpack("\1\73\5\uffff\1\75\1\72\20\uffff\1\74"), - DFA.unpack("\1\77\5\uffff\1\101\1\76\20\uffff\1\100"), - DFA.unpack("\1\103\5\uffff\1\105\1\102\20\uffff\1\104"), - DFA.unpack("\1\107\5\uffff\1\111\1\106\20\uffff\1\110"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\24\30\uffff\1\20"), - DFA.unpack("\1\62\2\uffff\1\112\25\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\62\2\uffff\1\112\25\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\64\2\uffff\1\113\25\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\2\uffff\1\113\25\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\115\5\uffff\1\117\1\114\20\uffff\1\116"), - DFA.unpack("\1\121\5\uffff\1\123\1\120\20\uffff\1\122"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\62\30\uffff\1\52"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60"), - DFA.unpack("\1\64\30\uffff\1\60") - ] - - # class definition for DFA #5 - - class DFA5(DFA): - pass - - - - - FOLLOW_statement_in_prog265 = frozenset([15, 26, 35, 56, 57, 58]) - FOLLOW_EOF_in_prog268 = frozenset([1]) - FOLLOW_EOF_in_prog285 = frozenset([1]) - FOLLOW_formula_in_statement304 = frozenset([1, 53, 55]) - FOLLOW_formula_terminator_in_statement306 = frozenset([1]) - FOLLOW_COMMENT_in_statement319 = frozenset([1]) - FOLLOW_rule_in_formula336 = frozenset([1]) - FOLLOW_fact_in_formula344 = frozenset([1]) - FOLLOW_event_in_formula352 = frozenset([1]) - FOLLOW_event_op_in_event379 = frozenset([30]) - FOLLOW_LBRACKET_in_event381 = frozenset([26, 35, 56, 57, 58]) - FOLLOW_rule_in_event383 = frozenset([38, 53, 55]) - FOLLOW_formula_terminator_in_event386 = frozenset([44]) - FOLLOW_STRING_in_event388 = frozenset([38]) - FOLLOW_RBRACKET_in_event392 = frozenset([1]) - FOLLOW_literal_list_in_rule472 = frozenset([11]) - FOLLOW_COLONMINUS_in_rule474 = frozenset([26, 35, 56, 57, 58]) - FOLLOW_literal_list_in_rule476 = frozenset([1]) - FOLLOW_literal_in_literal_list503 = frozenset([1, 14]) - FOLLOW_COMMA_in_literal_list506 = frozenset([26, 35, 56, 57, 58]) - FOLLOW_literal_in_literal_list508 = frozenset([1, 14]) - FOLLOW_fact_in_literal536 = frozenset([1]) - FOLLOW_NEGATION_in_literal559 = frozenset([26, 56, 57, 58]) - FOLLOW_fact_in_literal561 = frozenset([1]) - FOLLOW_atom_in_fact590 = frozenset([1]) - FOLLOW_modal_op_in_fact598 = frozenset([30]) - FOLLOW_LBRACKET_in_fact600 = frozenset([26]) - FOLLOW_atom_in_fact602 = frozenset([38]) - FOLLOW_RBRACKET_in_fact604 = frozenset([1]) - FOLLOW_relation_constant_in_atom664 = frozenset([1, 32]) - FOLLOW_LPAREN_in_atom667 = 
frozenset([20, 26, 27, 39, 44]) - FOLLOW_parameter_list_in_atom669 = frozenset([39]) - FOLLOW_RPAREN_in_atom672 = frozenset([1]) - FOLLOW_parameter_in_parameter_list702 = frozenset([1, 14]) - FOLLOW_COMMA_in_parameter_list705 = frozenset([20, 26, 27, 44]) - FOLLOW_parameter_in_parameter_list707 = frozenset([1, 14]) - FOLLOW_term_in_parameter731 = frozenset([1]) - FOLLOW_column_ref_in_parameter743 = frozenset([17]) - FOLLOW_EQUAL_in_parameter745 = frozenset([20, 26, 27, 44]) - FOLLOW_term_in_parameter747 = frozenset([1]) - FOLLOW_ID_in_column_ref774 = frozenset([1]) - FOLLOW_INT_in_column_ref793 = frozenset([1]) - FOLLOW_object_constant_in_term820 = frozenset([1]) - FOLLOW_variable_in_term828 = frozenset([1]) - FOLLOW_INT_in_object_constant845 = frozenset([1]) - FOLLOW_FLOAT_in_object_constant866 = frozenset([1]) - FOLLOW_STRING_in_object_constant885 = frozenset([1]) - FOLLOW_ID_in_variable912 = frozenset([1]) - FOLLOW_ID_in_relation_constant937 = frozenset([1, 41, 54]) - FOLLOW_54_in_relation_constant940 = frozenset([26]) - FOLLOW_ID_in_relation_constant942 = frozenset([1, 41, 54]) - FOLLOW_SIGN_in_relation_constant946 = frozenset([1]) - - - -def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr): - from antlr3.main import ParserMain - main = ParserMain("CongressLexer", CongressParser) - - main.stdin = stdin - main.stdout = stdout - main.stderr = stderr - main.execute(argv) - - - -if __name__ == '__main__': - main(sys.argv) diff --git a/congress/datalog/Python3/__init__.py b/congress/datalog/Python3/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/datalog/README-Congress.g.txt b/congress/datalog/README-Congress.g.txt deleted file mode 100644 index d3e8806b..00000000 --- a/congress/datalog/README-Congress.g.txt +++ /dev/null @@ -1,11 +0,0 @@ -If you modify the congress/datalog/Congress.g file, you need to use antlr3 -to re-generate the CongressLexer.py and CongressParser.py files with -the following steps: - -1. Make sure a recent version of Java is installed. http://java.com/ -2. Download ANTLR 3.5.2 or another compatible version from http://www.antlr3.org/download/antlr-3.5.2-complete.jar -3. Execute the following commands in shell - -$ cd path/to/congress_repo/congress/datalog -$ java -jar path/to/antlr-3.5.2-complete.jar Congress.g -o Python2 -language Python -$ java -jar path/to/antlr-3.5.2-complete.jar Congress.g -o Python3 -language Python3 diff --git a/congress/datalog/__init__.py b/congress/datalog/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/datalog/analysis.py b/congress/datalog/analysis.py deleted file mode 100644 index 832a7774..00000000 --- a/congress/datalog/analysis.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
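# Illustrative usage sketch (assumed example, not part of the original
# module): ModalIndex below reference-counts (modal, tablename) pairs, so
# repeated add() calls must be matched by as many remove() calls before an
# entry disappears:
#
#     idx = ModalIndex()
#     idx.add('execute', 'nova:servers')
#     idx.add('execute', 'nova:servers')
#     idx.remove('execute', 'nova:servers')
#     assert 'execute' in idx                  # refcount still 1
#     idx.remove('execute', 'nova:servers')
#     assert 'execute' not in idx              # cleaned up at zero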
-# - from __future__ import print_function - from __future__ import division - from __future__ import absolute_import - - # TODO(thinrichs): move algorithms from compile.py that do analysis - # into this file. - - import copy - - - class ModalIndex(object): - def __init__(self): - # Dict mapping modal name to a ref-counted list of tablenames - # Refcounted list of tablenames is a dict from tablename to count - self.index = {} - - def add(self, modal, tablename): - if modal not in self.index: - self.index[modal] = {} - if tablename not in self.index[modal]: - self.index[modal][tablename] = 0 - self.index[modal][tablename] += 1 - - def remove(self, modal, tablename): - if modal not in self.index: - raise KeyError("Modal %s has no entries" % modal) - if tablename not in self.index[modal]: - raise KeyError("Tablename %s for modal %s does not exist" % - (tablename, modal)) - self.index[modal][tablename] -= 1 - self._clean_up(modal, tablename) - - def modals(self): - return self.index.keys() - - def tables(self, modal): - if modal not in self.index: - return [] - return self.index[modal].keys() - - def __isub__(self, other): - changes = [] - for modal in self.index: - if modal not in other.index: - continue - for table in self.index[modal]: - if table not in other.index[modal]: - continue - self.index[modal][table] -= other.index[modal][table] - changes.append((modal, table)) - - for (modal, table) in changes: - self._clean_up(modal, table) - return self - - def __iadd__(self, other): - for modal in other.index: - if modal not in self.index: - self.index[modal] = other.index[modal] - continue - for table in other.index[modal]: - if table not in self.index[modal]: - self.index[modal][table] = other.index[modal][table] - continue - self.index[modal][table] += other.index[modal][table] - return self - - def _clean_up(self, modal, table): - if self.index[modal][table] <= 0: - del self.index[modal][table] - if not len(self.index[modal]): - del self.index[modal] - - def __eq__(self, other): - return self.index == other.index - - def __ne__(self, other): - return not self.__eq__(other) - - def __copy__(self): - new = ModalIndex() - new.index = copy.deepcopy(self.index) - return new - - def __str__(self): - return str(self.index) - - def __contains__(self, modal): - return modal in self.index diff --git a/congress/datalog/arithmetic_solvers.py b/congress/datalog/arithmetic_solvers.py deleted file mode 100644 index 0c3198f9..00000000 --- a/congress/datalog/arithmetic_solvers.py +++ /dev/null @@ -1,644 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
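# Illustrative note (assumed worked example): LpLang below eliminates
# boolean indicator variables with the standard big-M construction cited in
# indicator_to_pure_lp (AIMMS integer-programming tricks, section 7.4).
# For y = (x < 0) with upper bound M on |x|, it emits the pure-LP rows
#
#     -x <= y * M          # forces y = 1 whenever x < 0
#      x <  (1 - y) * M    # forces y = 0 whenever x >= 0
#
# e.g. with M = 10 and x = -3 only y = 1 satisfies both rows, and with
# x = 3 only y = 0 does.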
-# - from __future__ import print_function - from __future__ import division - from __future__ import absolute_import - - - from oslo_log import log as logging - import pulp - import six - - from congress import exception - from functools import reduce - - LOG = logging.getLogger(__name__) - - - class LpLang(object): - """Represent (mostly) linear programs generated from Datalog.""" - MIN_THRESHOLD = .00001 # for converting <= to < - - class Expression(object): - def __init__(self, *args, **meta): - self.args = args - self.meta = meta - - def __ne__(self, other): - return not self.__eq__(other) - - def __eq__(self, other): - if not isinstance(other, LpLang.Expression): - return False - if len(self.args) != len(other.args): - return False - if self.args[0] in ['AND', 'OR']: - return set(self.args) == set(other.args) - comm = ['plus', 'times'] - if self.args[0] == 'ARITH' and self.args[1].lower() in comm: - return set(self.args) == set(other.args) - if self.args[0] in ['EQ', 'NOTEQ']: - return ((self.args[1] == other.args[1] and - self.args[2] == other.args[2]) or - (self.args[1] == other.args[2] and - self.args[2] == other.args[1])) - return self.args == other.args - - def __str__(self): - return "(" + ", ".join(str(x) for x in self.args) + ")" - - def __repr__(self): - args = ", ".join(repr(x) for x in self.args) - meta = str(self.meta) - return "<Expression args: %s, meta: %s>" % (args, meta) - - def __hash__(self): - return hash(tuple([hash(x) for x in self.args])) - - def operator(self): - return self.args[0] - - def arguments(self): - return self.args[1:] - - def tuple(self): - return tuple(self.args) - - @classmethod - def makeVariable(cls, *args, **meta): - return cls.Expression("VAR", *args, **meta) - - @classmethod - def makeBoolVariable(cls, *args, **meta): - meta['type'] = 'bool' - return cls.Expression("VAR", *args, **meta) - - @classmethod - def makeIntVariable(cls, *args, **meta): - meta['type'] = 'int' - return cls.Expression("VAR", *args, **meta) - - @classmethod - def makeOr(cls, *args, **meta): - if len(args) == 1: - return args[0] - return cls.Expression("OR", *args, **meta) - - @classmethod - def makeAnd(cls, *args, **meta): - if len(args) == 1: - return args[0] - return cls.Expression("AND", *args, **meta) - - @classmethod - def makeEqual(cls, arg1, arg2, **meta): - return cls.Expression("EQ", arg1, arg2, **meta) - - @classmethod - def makeNotEqual(cls, arg1, arg2, **meta): - return cls.Expression("NOTEQ", arg1, arg2, **meta) - - @classmethod - def makeArith(cls, *args, **meta): - return cls.Expression("ARITH", *args, **meta) - - @classmethod - def makeExpr(cls, obj): - if isinstance(obj, six.string_types): - return obj - if isinstance(obj, (float, six.integer_types)): - return obj - op = obj[0].upper() - if op == 'VAR': - return cls.makeVariable(*obj[1:]) - if op in ['EQ', 'NOTEQ', 'AND', 'OR']: - args = [cls.makeExpr(x) for x in obj[1:]] - if op == 'EQ': - return cls.makeEqual(*args) - if op == 'NOTEQ': - return cls.makeNotEqual(*args) - if op == 'AND': - return cls.makeAnd(*args) - if op == 'OR': - return cls.makeOr(*args) - raise cls.LpConversionFailure('should never happen') - args = [cls.makeExpr(x) for x in obj[1:]] - return cls.makeArith(obj[0], *args) - - @classmethod - def isConstant(cls, thing): - return (isinstance(thing, six.string_types) or - isinstance(thing, (float, six.integer_types))) - - @classmethod - def isVariable(cls, thing): - return isinstance(thing, cls.Expression) and thing.args[0] == 'VAR' - - @classmethod - def isEqual(cls, thing): - return isinstance(thing, cls.Expression) and
thing.args[0] == 'EQ' - - @classmethod - def isOr(cls, thing): - return isinstance(thing, cls.Expression) and thing.args[0] == 'OR' - - @classmethod - def isAnd(cls, thing): - return isinstance(thing, cls.Expression) and thing.args[0] == 'AND' - - @classmethod - def isNotEqual(cls, thing): - return isinstance(thing, cls.Expression) and thing.args[0] == 'NOTEQ' - - @classmethod - def isArith(cls, thing): - return isinstance(thing, cls.Expression) and thing.args[0] == 'ARITH' - - @classmethod - def isBoolArith(cls, thing): - return (cls.isArith(thing) and - thing.args[1].lower() in ['lteq', 'lt', 'gteq', 'gt', 'equal']) - - @classmethod - def variables(cls, exp): - if cls.isConstant(exp): - return set() - elif cls.isVariable(exp): - return set([exp]) - else: - variables = set() - for arg in exp.arguments(): - variables |= cls.variables(arg) - return variables - - def __init__(self): - # instance variable so tests can be run in parallel - self.fresh_var_counter = 0 # for creating new variables - - def pure_lp(self, exp, bounds): - """Rewrite EXP to a pure LP problem. - - :param exp is an Expression of the form - var = (arith11 ^ ... ^ arith1n) | ... | (arithk1 ^ ... ^ arithkn) - where the degenerate cases are permitted as well. - - Returns a collection of expressions each of the form: - a1*x1 + ... + an*xn [<=, ==, >=] b. - """ - flat, support = self.flatten(exp, indicator=False) - flats = support - flats.append(flat) - result = [] - for flat in flats: - # LOG.info("flat: %s", flat) - no_and_or = self.remove_and_or(flat) - # LOG.info(" without and/or: %s", no_and_or) - no_indicator = self.indicator_to_pure_lp(no_and_or, bounds) - # LOG.info(" without indicator: %s", - # ";".join(str(x) for x in no_indicator)) - result.extend(no_indicator) - return result - - def pure_lp_term(self, exp, bounds): - """Rewrite term exp to a pure LP term. - - :param exp is an Expression of the form - (arith11 ^ ... ^ arith1n) | ... | (arithk1 ^ ... ^ arithkn) - where the degenerate cases are permitted as well. - - Returns (new-exp, support) where new-exp is a term, and support is - a expressions of the following form. - a1*x1 + ... + an*xn [<=, ==, >=] b. - """ - flat, support = self.flatten(exp, indicator=False) - flat_no_andor = self.remove_and_or_term(flat) - results = [] - for s in support: - results.extend(self.pure_lp(s, bounds)) - return flat_no_andor, results - - def remove_and_or(self, exp): - """Translate and/or operators into times/plus arithmetic. - - :param exp is an Expression that takes one of the following forms. - var [!]= term1 ^ ... ^ termn - var [!]= term1 | ... | termn - var [!]= term1 - where termi is an indicator variable. - - Returns an expression equivalent to exp but without any ands/ors. - """ - if self.isConstant(exp) or self.isVariable(exp): - return exp - op = exp.operator().lower() - if op in ['and', 'or']: - return self.remove_and_or_term(exp) - newargs = [self.remove_and_or(arg) for arg in exp.arguments()] - constructor = self.operator_to_constructor(exp.operator()) - return constructor(*newargs) - - def remove_and_or_term(self, exp): - if exp.operator().lower() == 'and': - op = 'times' - else: - op = 'plus' - return self.makeArith(op, *exp.arguments()) - - def indicator_to_pure_lp(self, exp, bounds): - """Translate exp into LP constraints without indicator variable. - - :param exp is an Expression of the form var = arith - :param bounds is a dictionary from variable to its upper bound - - Returns [EXP] if it is of the wrong form. 
Otherwise, translates - into the form y = x < 0, and then returns two constraints where - upper(x) is the upper bound of the expression x: - -x <= y * upper(x) - x < (1 - y) * upper(x) - Taken from section 7.4 of - http://www.aimms.com/aimms/download/manuals/ - aimms3om_integerprogrammingtricks.pdf - """ - # return exp unchanged if exp not of the form <var> = <arith> - # and figure out whether it's <var> = <arith> or <arith> = <var> - if (self.isConstant(exp) or self.isVariable(exp) or - not self.isEqual(exp)): - return [exp] - args = exp.arguments() - - lhs = args[0] - rhs = args[1] - if self.isVariable(lhs) and self.isArith(rhs): - var = lhs - arith = rhs - elif self.isVariable(rhs) and self.isArith(lhs): - var = rhs - arith = lhs - else: - return [exp] - # if arithmetic side is not an inequality, not an indicator var - if not self.isBoolArith(arith): - return [exp] - - # Do the transformation. - x = self.arith_to_lt_zero(arith).arguments()[1] - y = var - LOG.info(" x: %s", x) - upper_x = self.upper_bound(x, bounds) + 1 - LOG.info(" bounds(x): %s", upper_x) - # -x <= y * upper(x) - c1 = self.makeArith( - 'lteq', - self.makeArith('times', -1, x), - self.makeArith('times', y, upper_x)) - # x < (1 - y) * upper(x) - c2 = self.makeArith( - 'lt', - x, - self.makeArith('times', self.makeArith('minus', 1, y), upper_x)) - return [c1, c2] - - def arith_to_lt_zero(self, expr): - """Returns Arith expression equivalent to expr but of the form A < 0. - - :param expr is an Expression - Returns an expression equivalent to expr but of the form A < 0. - """ - if not self.isArith(expr): - raise self.LpConversionFailure( - "arith_to_lt_zero takes Arith expr but received %s" % expr) - args = expr.arguments() - op = args[0].lower() - lhs = args[1] - rhs = args[2] - if op == 'lt': - return LpLang.makeArith( - 'lt', LpLang.makeArith('minus', lhs, rhs), 0) - elif op == 'lteq': - return LpLang.makeArith( - 'lt', - LpLang.makeArith( - 'minus', - LpLang.makeArith('minus', lhs, rhs), - self.MIN_THRESHOLD), - 0) - elif op == 'gt': - return LpLang.makeArith( - 'lt', LpLang.makeArith('minus', rhs, lhs), 0) - elif op == 'gteq': - return LpLang.makeArith( - 'lt', - LpLang.makeArith( - 'minus', - LpLang.makeArith('minus', rhs, lhs), - self.MIN_THRESHOLD), - 0) - else: - raise self.LpConversionFailure( - "unhandled operator %s in %s" % (op, expr)) - - def upper_bound(self, expr, bounds): - """Returns number giving an upper bound on the given expr. - - :param expr is an Expression - :param bounds is a dictionary from tuple versions of variables - to the size of their upper bound. - """ - if self.isConstant(expr): - return expr - if self.isVariable(expr): - t = expr.tuple() - if t not in bounds: - raise self.LpConversionFailure("no bound given for %s" % expr) - return bounds[expr.tuple()] - if not self.isArith(expr): - raise self.LpConversionFailure( - "expression has no bound: %s" % expr) - args = expr.arguments() - op = args[0].lower() - exps = args[1:] - if op == 'times': - f = lambda x, y: x * y - return reduce(f, [self.upper_bound(x, bounds) for x in exps], 1) - if op == 'plus': - f = lambda x, y: x + y - return reduce(f, [self.upper_bound(x, bounds) for x in exps], 0) - if op == 'minus': - return self.upper_bound(exps[0], bounds) - if op == 'div': - raise self.LpConversionFailure("No bound on division %s" % expr) - raise self.LpConversionFailure("Unknown operator for bound: %s" % expr) - - def flatten(self, exp, indicator=True): - """Remove toplevel embedded and/ors by creating new equalities. - - :param exp is an Expression of the form - var = (arith11 ^ ...
^ arith1n) | ... | (arithk1 ^ ... ^ arithkn) - where arithij is either a variable or an arithmetic expression - where the degenerate cases are permitted as well. - - :param indicator controls whether the method Returns - a single variable (with supporting expressions) or it Returns - an expression that has operator with (flat) arguments - - Returns a collection of expressions each of one of the following - forms: - var1 = var2 * ... * varn - var1 = var2 + ... + varn - var1 = arith - - Returns (new-expression, supporting-expressions) - """ - if self.isConstant(exp) or self.isVariable(exp): - return exp, [] - new_args = [] - extras = [] - new_indicator = not (exp.operator().lower() in ['eq', 'noteq']) - for e in exp.arguments(): - newe, extra = self.flatten(e, indicator=new_indicator) - new_args.append(newe) - extras.extend(extra) - constructor = self.operator_to_constructor(exp.operator()) - new_exp = constructor(*new_args) - if indicator: - indic, extra = self.create_intermediate(new_exp) - return indic, extra + extras - return new_exp, extras - - def operator_to_constructor(self, operator): - """Given the operator, return the corresponding constructor.""" - op = operator.lower() - if op == 'eq': - return self.makeEqual - if op == 'noteq': - return self.makeNotEqual - if op == 'var': - return self.makeVariable - if op == 'and': - return self.makeAnd - if op == 'or': - return self.makeOr - if op == 'arith': - return self.makeArith - raise self.LpConversionFailure("Unknown operator: %s" % operator) - - def create_intermediate(self, exp): - """Given expression, create var = expr and return (var, var=expr).""" - if self.isBoolArith(exp) or self.isAnd(exp) or self.isOr(exp): - var = self.freshVar(type='bool') - else: - var = self.freshVar() - equality = self.makeEqual(var, exp) - return var, [equality] - - def freshVar(self, **meta): - var = self.makeVariable('internal', self.fresh_var_counter, **meta) - self.fresh_var_counter += 1 - return var - - class LpConversionFailure(exception.CongressException): - pass - - -class PulpLpLang(LpLang): - """Algorithms for translating LpLang into PuLP library problems.""" - MIN_THRESHOLD = .00001 - - def __init__(self): - # instance variable so tests can be run in parallel - super(PulpLpLang, self).__init__() - self.value_counter = 0 - - def problem(self, optimization, constraints, bounds): - """Return PuLP problem for given optimization and constraints. - - :param optimization is an LpLang.Expression that is either a sum - or product to minimize. - :param constraints is a collection of LpLang.Expression that - each evaluate to true/false (typically equalities) - :param bounds is a dictionary mapping LpLang.Expression variable - tuples to their upper bounds. - - Returns a pulp.LpProblem. - """ - # translate constraints to pure LP - optimization, hard = self.pure_lp_term(optimization, bounds) - for c in constraints: - hard.extend(self.pure_lp(c, bounds)) - LOG.info("* Converted DatalogLP to PureLP *") - LOG.info("optimization: %s", optimization) - LOG.info("constraints: \n%s", "\n".join(str(x) for x in hard)) - - # translate optimization and constraints into PuLP equivalents - variables = {} - values = {} - optimization = self.pulpify(optimization, variables, values) - hard = [self.pulpify(c, variables, values) for c in hard] - - # add them to the problem. 
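# Descriptive note: PuLP overloads "+=" on LpProblem, so the first
# expression added becomes the objective and each subsequent (in)equality
# is appended as a constraint.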
- prob = pulp.LpProblem("VM re-assignment", pulp.LpMinimize) - prob += optimization - for c in hard: - prob += c - - # invert values - return prob, {value: key for key, value in values.items()} - - def pulpify(self, expr, variables, values): - """Return PuLP version of expr. - - :param expr is an Expression of one of the following forms. - arith - arith = arith - arith <= arith - arith >= arith - :param vars is a dictionary from Expression variables to PuLP variables - - Returns a PuLP representation of expr. - """ - # LOG.info("pulpify(%s, %s)", expr, variables) - if self.isConstant(expr): - return expr - elif self.isVariable(expr): - return self._pulpify_variable(expr, variables, values) - elif self.isArith(expr): - args = expr.arguments() - op = args[0] - args = [self.pulpify(arg, variables, values) for arg in args[1:]] - if op == 'times': - return reduce(lambda x, y: x * y, args) - elif op == 'plus': - return reduce(lambda x, y: x + y, args) - elif op == 'div': - return reduce(lambda x, y: x / y, args) - elif op == 'minus': - return reduce(lambda x, y: x - y, args) - elif op == 'lteq': - return (args[0] <= args[1]) - elif op == 'gteq': - return (args[0] >= args[1]) - elif op == 'gt': # pulp makes MIN_THRESHOLD 1 - return (args[0] >= args[1] + self.MIN_THRESHOLD) - elif op == 'lt': # pulp makes MIN_THRESHOLD 1 - return (args[0] + self.MIN_THRESHOLD <= args[1]) - else: - raise self.LpConversionFailure( - "Found unsupported operator %s in %s" % (op, expr)) - else: - args = [self.pulpify(arg, variables, values) - for arg in expr.arguments()] - op = expr.operator().lower() - if op == 'eq': - return (args[0] == args[1]) - elif op == 'noteq': - return (args[0] != args[1]) - else: - raise self.LpConversionFailure( - "Found unsupported operator: %s" % expr) - - def _new_value(self, old, values): - """Create a new value for old and store values[old] = new.""" - if old in values: - return values[old] - new = self.value_counter - self.value_counter += 1 - values[old] = new - return new - - def _pulpify_variable(self, expr, variables, values): - """Translate DatalogLp variable expr into PuLP variable. - - :param expr is an instance of Expression - :param variables is a dictionary from Expressions to pulp variables - :param values is a 1-1 dictionary from strings/floats to integers - representing a mapping of non-integer arguments to variable - names to their integer equivalents. - """ - # pulp mangles variable names that contain certain characters. - # Replace actual args with integers when constructing - # variable names. Includes integers since we don't want to - # have namespace collision problems. 
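# (For instance -- illustrative values, not taken from the module -- a
# variable internal('vm-1', 'host:2') would be emitted under a PuLP name
# like "internal_0_1": each distinct trailing argument is swapped for a
# fresh integer id by _new_value below, sidestepping PuLP's mangling of
# characters such as '-' and ':'.)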
- oldargs = expr.arguments() - args = [oldargs[0]] - for arg in oldargs[1:]: - newarg = self._new_value(arg, values) - args.append(newarg) - # name - name = "_".join([str(x) for x in args]) - # type - typ = expr.meta.get('type', None) - if typ == 'bool': - cat = pulp.LpBinary - elif typ == 'int': - cat = pulp.LpInteger - else: - cat = pulp.LpContinuous - # set bounds - lowbound = expr.meta.get('lowbound', None) - upbound = expr.meta.get('upbound', None) - var = pulp.LpVariable( - name=name, cat=cat, lowBound=lowbound, upBound=upbound) - - # merge with existing variable, if any - if expr in variables: - newvar = self._resolve_var_conflicts(variables[expr], var) - oldvar = variables[expr] - oldvar.cat = newvar.cat - oldvar.lowBound = newvar.lowBound - oldvar.upBound = newvar.upBound - else: - variables[expr] = var - return variables[expr] - - def _resolve_var_conflicts(self, var1, var2): - """Returns variable that combines information from var1 and var2. - - :param meta1 is a pulp.LpVariable - :param meta2 is a pulp.LpVariable - - Returns new pulp.LpVariable representing the conjunction of constraints - from var1 and var2. - Raises LpConversionFailure if the names of var1 and var2 differ. - """ - - def type_lessthan(x, y): - return ((x == pulp.LpBinary and y == pulp.LpInteger) or - (x == pulp.LpBinary and y == pulp.LpContinuous) or - (x == pulp.LpInteger and y == pulp.LpContinuous)) - - if var1.name != var2.name: - raise self.LpConversionFailure( - "Can't resolve variable name conflict: %s and %s" % ( - var1, var2)) - name = var1.name - if type_lessthan(var1.cat, var2.cat): - cat = var1.cat - else: - cat = var2.cat - if var1.lowBound is None: - lowbound = var2.lowBound - elif var2.lowBound is None: - lowbound = var1.lowBound - else: - lowbound = max(var1.lowBound, var2.lowBound) - if var1.upBound is None: - upbound = var2.upBound - elif var2.upBound is None: - upbound = var1.upBound - else: - upbound = min(var1.upBound, var2.upBound) - return pulp.LpVariable( - name=name, lowBound=lowbound, upBound=upbound, cat=cat) diff --git a/congress/datalog/base.py b/congress/datalog/base.py deleted file mode 100644 index 4ab9ba77..00000000 --- a/congress/datalog/base.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import collections - -from oslo_log import log as logging -import six - -from congress import exception - -LOG = logging.getLogger(__name__) - -DATABASE_POLICY_TYPE = 'database' -NONRECURSIVE_POLICY_TYPE = 'nonrecursive' -ACTION_POLICY_TYPE = 'action' -MATERIALIZED_POLICY_TYPE = 'materialized' -DELTA_POLICY_TYPE = 'delta' -DATASOURCE_POLICY_TYPE = 'datasource' - - -class Tracer(object): - def __init__(self): - self.expressions = [] - self.funcs = [LOG.debug] # functions to call to trace - - def trace(self, table): - self.expressions.append(table) - - def is_traced(self, table): - return table in self.expressions or '*' in self.expressions - - def log(self, table, msg, *args, **kwargs): - depth = kwargs.pop("depth", 0) - if kwargs: - raise TypeError("Unexpected keyword arguments: %s" % kwargs) - if self.is_traced(table): - for func in self.funcs: - func(("| " * depth) + msg, *args) - - -class StringTracer(Tracer): - def __init__(self): - super(StringTracer, self).__init__() - self.stream = six.moves.StringIO() - self.funcs.append(self.string_output) - - def string_output(self, msg, *args): - self.stream.write((msg % args) + "\n") - - def get_value(self): - return self.stream.getvalue() - - -############################################################################## -# Logical Building Blocks -############################################################################## - -class Proof(object): - """A single proof. - - Differs semantically from Database's - Proof in that this version represents a proof that spans rules, - instead of just a proof for a single rule. - """ - def __init__(self, root, children): - self.root = root - self.children = children - - def __str__(self): - return self.str_tree(0) - - def str_tree(self, depth): - s = " " * depth - s += str(self.root) - s += "\n" - for child in self.children: - s += child.str_tree(depth + 1) - return s - - def leaves(self): - if len(self.children) == 0: - return [self.root] - result = [] - for child in self.children: - result.extend(child.leaves()) - return result - - -############################################################################## -# Events -############################################################################## - -class EventQueue(object): - def __init__(self): - self.queue = collections.deque() - - def enqueue(self, event): - self.queue.append(event) - - def dequeue(self): - return self.queue.popleft() - - def __len__(self): - return len(self.queue) - - def __str__(self): - return "[" + ",".join([str(x) for x in self.queue]) + "]" - - -############################################################################## -# Abstract Theories -############################################################################## - -class Theory(object): - def __init__(self, name=None, abbr=None, schema=None, theories=None, - id=None, desc=None, owner=None, kind=None): - self.schema = schema - self.theories = theories - self.kind = kind - self.id = id - self.desc = desc - self.owner = owner - - self.tracer = Tracer() - if name is None: - self.name = repr(self) - else: - self.name = name - if abbr is None: - self.abbr = "th" - else: - self.abbr = abbr - maxlength = 6 - if len(self.abbr) > maxlength: - self.trace_prefix = self.abbr[0:maxlength] - else: - self.trace_prefix = self.abbr + " " * (maxlength - len(self.abbr)) - - def set_id(self, id): - self.id = id - - def initialize_tables(self, tablenames, facts): - 
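# (Usage sketch for the tracer machinery above -- illustrative, assuming
# the behavior as written:
#     tracer = StringTracer()
#     tracer.trace('p')                            # trace only table 'p'
#     tracer.log('p', "eval %s", "p(1)", depth=1)
#     tracer.get_value()                           # -> "| eval p(1)\n"
# Registering '*' via trace() turns on tracing for every table.)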
"""initialize_tables - - Event handler for (re)initializing a collection of tables. Clears - tables befores assigning the new table content. - - @facts must be an iterable containing compile.Fact objects. - """ - raise NotImplementedError - - def actual_events(self, events): - """Returns subset of EVENTS that are not noops.""" - actual = [] - for event in events: - if event.insert: - if event.formula not in self: - actual.append(event) - else: - if event.formula in self: - actual.append(event) - return actual - - def debug_mode(self): - tr = Tracer() - tr.trace('*') - self.set_tracer(tr) - - def set_tracer(self, tracer): - self.tracer = tracer - - def get_tracer(self): - return self.tracer - - def log(self, table, msg, *args, **kwargs): - msg = self.trace_prefix + ": " + msg - self.tracer.log(table, msg, *args, **kwargs) - - def policy(self): - """Return a list of the policy statements in this theory.""" - raise NotImplementedError() - - def content(self): - """Return a list of the contents of this theory. - - Maybe rules and/or data. Note: do not change name to CONTENTS, as this - is reserved for a dictionary of stuff used by TopDownTheory. - """ - raise NotImplementedError() - - def tablenames(self, body_only=False, include_builtin=False, - include_modal=True, include_facts=False): - tablenames = set() - for rule in self.policy(): - tablenames |= rule.tablenames( - body_only=body_only, include_builtin=include_builtin, - include_modal=include_modal) - # also include tables in facts - # FIXME: need to conform with intended abstractions - if include_facts and hasattr(self, 'rules'): - tablenames |= set(self.rules.facts.keys()) - return tablenames - - def __str__(self): - return "Theory %s" % self.name - - def content_string(self): - return '\n'.join([str(p) for p in self.content()]) + '\n' - - def get_rule(self, ident): - for p in self.policy(): - if hasattr(p, 'id') and str(p.id) == str(ident): - return p - raise exception.NotFound('rule_id %s is not found.' % ident) - - def arity(self, tablename, modal=None): - """Return the number of columns for the given tablename. - - TABLENAME is of the form : or
. - MODAL is the value of the modal operator. - """ - return NotImplementedError - - def get_attr_dict(self): - '''return dict containing the basic attributes of this theory''' - d = {'id': self.id, - 'name': self.name, - 'abbreviation': self.abbr, - 'description': self.desc, - 'owner_id': self.owner, - 'kind': self.kind} - return d diff --git a/congress/datalog/builtin.py b/congress/datalog/builtin.py deleted file mode 100644 index 780c310f..00000000 --- a/congress/datalog/builtin.py +++ /dev/null @@ -1,412 +0,0 @@ -#! /usr/bin/python -# -# Copyright (c) 2014 IBM, Corp. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -import datetime -import netaddr - -import six -from six.moves import range - -from dateutil import parser as datetime_parser - -BUILTIN_NAMESPACE = 'builtin' - - -class DatetimeBuiltins(object): - - # casting operators (used internally) - @classmethod - def to_timedelta(cls, x): - if isinstance(x, six.string_types): - fields = x.split(":") - num_fields = len(fields) - args = {} - keys = ['seconds', 'minutes', 'hours', 'days', 'weeks'] - for i in range(0, len(fields)): - args[keys[i]] = int(fields[num_fields - 1 - i]) - return datetime.timedelta(**args) - else: - return datetime.timedelta(seconds=x) - - @classmethod - def to_datetime(cls, x): - return datetime_parser.parse(x, ignoretz=True) - - # current time - @classmethod - def now(cls): - return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - # extraction and creation of datetimes - @classmethod - def unpack_time(cls, x): - x = cls.to_datetime(x) - return (x.hour, x.minute, x.second) - - @classmethod - def unpack_date(cls, x): - x = cls.to_datetime(x) - return (x.year, x.month, x.day) - - @classmethod - def unpack_datetime(cls, x): - x = cls.to_datetime(x) - return (x.year, x.month, x.day, x.hour, x.minute, x.second) - - @classmethod - def pack_time(cls, hour, minute, second): - return "{}:{}:{}".format(hour, minute, second) - - @classmethod - def pack_date(cls, year, month, day): - return "{}-{}-{}".format(year, month, day) - - @classmethod - def pack_datetime(cls, year, month, day, hour, minute, second): - return "{}-{}-{} {}:{}:{}".format( - year, month, day, hour, minute, second) - - # extraction/creation convenience function - @classmethod - def extract_date(cls, x): - return str(cls.to_datetime(x).date()) - - @classmethod - def extract_time(cls, x): - return str(cls.to_datetime(x).time()) - - # conversion to seconds - @classmethod - def datetime_to_seconds(cls, x): - since1900 = cls.to_datetime(x) - datetime.datetime(year=1900, - month=1, - day=1) - return int(since1900.total_seconds()) - - # native operations on datetime - @classmethod - def datetime_plus(cls, x, y): - return str(cls.to_datetime(x) + cls.to_timedelta(y)) - - @classmethod - def datetime_minus(cls, x, y): - return str(cls.to_datetime(x) - cls.to_timedelta(y)) - - @classmethod - def datetime_lessthan(cls, x, y): 
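# (Worked examples for the converters above -- illustrative; the rightmost
# field of the timedelta string is seconds, then minutes, hours, days, weeks:
#     to_timedelta("2:30:15")  -> timedelta(hours=2, minutes=30, seconds=15)
#     to_timedelta(90)         -> timedelta(seconds=90)
#     unpack_date("2014-07-01 10:30:00")          -> (2014, 7, 1)
#     datetime_plus("2014-07-01 10:30:00", 3600)  -> "2014-07-01 11:30:00"
# )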
- return cls.to_datetime(x) < cls.to_datetime(y) - - @classmethod - def datetime_lessthanequal(cls, x, y): - return cls.to_datetime(x) <= cls.to_datetime(y) - - @classmethod - def datetime_greaterthan(cls, x, y): - return cls.to_datetime(x) > cls.to_datetime(y) - - @classmethod - def datetime_greaterthanequal(cls, x, y): - return cls.to_datetime(x) >= cls.to_datetime(y) - - @classmethod - def datetime_equal(cls, x, y): - return cls.to_datetime(x) == cls.to_datetime(y) - - -class NetworkAddressBuiltins(object): - @classmethod - def ips_equal(cls, ip1, ip2): - return netaddr.IPAddress(ip1) == netaddr.IPAddress(ip2) - - @classmethod - def ips_lessthan(cls, ip1, ip2): - return netaddr.IPAddress(ip1) < netaddr.IPAddress(ip2) - - @classmethod - def ips_lessthan_equal(cls, ip1, ip2): - return netaddr.IPAddress(ip1) <= netaddr.IPAddress(ip2) - - @classmethod - def ips_greaterthan(cls, ip1, ip2): - return netaddr.IPAddress(ip1) > netaddr.IPAddress(ip2) - - @classmethod - def ips_greaterthan_equal(cls, ip1, ip2): - return netaddr.IPAddress(ip1) >= netaddr.IPAddress(ip2) - - @classmethod - def networks_equal(cls, cidr1, cidr2): - return netaddr.IPNetwork(cidr1) == netaddr.IPNetwork(cidr2) - - @classmethod - def networks_overlap(cls, cidr1, cidr2): - cidr1_obj = netaddr.IPNetwork(cidr1) - cidr2_obj = netaddr.IPNetwork(cidr2) - return (cidr1_obj.first <= cidr2_obj.first <= cidr1_obj.last or - cidr1_obj.first <= cidr2_obj.last <= cidr1_obj.last) - - @classmethod - def ip_in_network(cls, ip, cidr): - cidr_obj = netaddr.IPNetwork(cidr) - ip_obj = netaddr.IPAddress(ip) - return ip_obj in cidr_obj - - -# the registry for builtins -_builtin_map = { - 'comparison': [ - {'func': 'lt(x,y)', 'num_inputs': 2, 'code': lambda x, y: x < y}, - {'func': 'lteq(x,y)', 'num_inputs': 2, 'code': lambda x, y: x <= y}, - {'func': 'equal(x,y)', 'num_inputs': 2, 'code': lambda x, y: x == y}, - {'func': 'gt(x,y)', 'num_inputs': 2, 'code': lambda x, y: x > y}, - {'func': 'gteq(x,y)', 'num_inputs': 2, 'code': lambda x, y: x >= y}, - {'func': 'max(x,y,z)', 'num_inputs': 2, - 'code': lambda x, y: max(x, y)}], - 'arithmetic': [ - {'func': 'plus(x,y,z)', 'num_inputs': 2, 'code': lambda x, y: x + y}, - {'func': 'minus(x,y,z)', 'num_inputs': 2, 'code': lambda x, y: x - y}, - {'func': 'mul(x,y,z)', 'num_inputs': 2, 'code': lambda x, y: x * y}, - {'func': 'div(x,y,z)', 'num_inputs': 2, 'code': lambda x, y: - ((x // y) if (type(x) == int and type(y) == int) else (x / y))}, - {'func': 'float(x,y)', 'num_inputs': 1, 'code': lambda x: float(x)}, - {'func': 'int(x,y)', 'num_inputs': 1, 'code': lambda x: int(x)}], - 'string': [ - {'func': 'concat(x,y,z)', 'num_inputs': 2, 'code': lambda x, y: x + y}, - {'func': 'len(x, y)', 'num_inputs': 1, 'code': lambda x: len(x)}], - 'datetime': [ - {'func': 'now(x)', 'num_inputs': 0, - 'code': DatetimeBuiltins.now}, - {'func': 'unpack_date(x, year, month, day)', 'num_inputs': 1, - 'code': DatetimeBuiltins.unpack_date}, - {'func': 'unpack_time(x, hours, minutes, seconds)', 'num_inputs': 1, - 'code': DatetimeBuiltins.unpack_time}, - {'func': 'unpack_datetime(x, y, m, d, h, i, s)', 'num_inputs': 1, - 'code': DatetimeBuiltins.unpack_datetime}, - {'func': 'pack_time(hours, minutes, seconds, result)', 'num_inputs': 3, - 'code': DatetimeBuiltins.pack_time}, - {'func': 'pack_date(year, month, day, result)', 'num_inputs': 3, - 'code': DatetimeBuiltins.pack_date}, - {'func': 'pack_datetime(y, m, d, h, i, s, result)', 'num_inputs': 6, - 'code': DatetimeBuiltins.pack_datetime}, - {'func': 'extract_date(x, y)', 
'num_inputs': 1, - 'code': DatetimeBuiltins.extract_date}, - {'func': 'extract_time(x, y)', 'num_inputs': 1, - 'code': DatetimeBuiltins.extract_time}, - {'func': 'datetime_to_seconds(x, y)', 'num_inputs': 1, - 'code': DatetimeBuiltins.datetime_to_seconds}, - {'func': 'datetime_plus(x,y,z)', 'num_inputs': 2, - 'code': DatetimeBuiltins.datetime_plus}, - {'func': 'datetime_minus(x,y,z)', 'num_inputs': 2, - 'code': DatetimeBuiltins.datetime_minus}, - {'func': 'datetime_lt(x,y)', 'num_inputs': 2, - 'code': DatetimeBuiltins.datetime_lessthan}, - {'func': 'datetime_lteq(x,y)', 'num_inputs': 2, - 'code': DatetimeBuiltins.datetime_lessthanequal}, - {'func': 'datetime_gt(x,y)', 'num_inputs': 2, - 'code': DatetimeBuiltins.datetime_greaterthan}, - {'func': 'datetime_gteq(x,y)', 'num_inputs': 2, - 'code': DatetimeBuiltins.datetime_greaterthanequal}, - {'func': 'datetime_equal(x,y)', 'num_inputs': 2, - 'code': DatetimeBuiltins.datetime_equal}], - 'netaddr': [ - {'func': 'ips_equal(x,y)', 'num_inputs': 2, - 'code': NetworkAddressBuiltins.ips_equal}, - {'func': 'ips_lt(x,y)', 'num_inputs': 2, - 'code': NetworkAddressBuiltins.ips_lessthan}, - {'func': 'ips_lteq(x,y)', 'num_inputs': 2, - 'code': NetworkAddressBuiltins.ips_lessthan_equal}, - {'func': 'ips_gt(x,y)', 'num_inputs': 2, - 'code': NetworkAddressBuiltins.ips_greaterthan}, - {'func': 'ips_gteq(x,y)', 'num_inputs': 2, - 'code': NetworkAddressBuiltins.ips_greaterthan_equal}, - {'func': 'networks_equal(x,y)', 'num_inputs': 2, - 'code': NetworkAddressBuiltins.networks_equal}, - {'func': 'networks_overlap(x,y)', 'num_inputs': 2, - 'code': NetworkAddressBuiltins.networks_overlap}, - {'func': 'ip_in_network(x,y)', 'num_inputs': 2, - 'code': NetworkAddressBuiltins.ip_in_network}] - } - - -class CongressBuiltinPred(object): - - def __init__(self, name, arglist, num_inputs, code): - self.predname = name - self.predargs = arglist - self.num_inputs = num_inputs - self.code = code - self.num_outputs = len(arglist) - num_inputs - - def string_to_pred(self, predstring): - try: - self.predname = predstring.split('(')[0] - self.predargs = predstring.split('(')[1].split(')')[0].split(',') - except Exception: - print("Unexpected error in parsing predicate string") - - def __str__(self): - return self.predname + '(' + ",".join(self.predargs) + ')' - - -class CongressBuiltinCategoryMap(object): - - def __init__(self, start_builtin_map): - self.categorydict = dict() - self.preddict = dict() - for key, value in start_builtin_map.items(): - self.categorydict[key] = [] - for predtriple in value: - pred = self.dict_predtriple_to_pred(predtriple) - self.categorydict[key].append(pred) - self.sync_with_predlist(pred.predname, pred, key, 'add') - - def mapequal(self, othercbc): - if self.categorydict == othercbc.categorydict: - return True - else: - return False - - def dict_predtriple_to_pred(self, predtriple): - ncode = predtriple['code'] - ninputs = predtriple['num_inputs'] - nfunc = predtriple['func'] - nfunc_pred = nfunc.split("(")[0] - nfunc_arglist = nfunc.split("(")[1].split(")")[0].split(",") - pred = CongressBuiltinPred(nfunc_pred, nfunc_arglist, ninputs, ncode) - return pred - - def add_map(self, newmap): - for key, value in newmap.items(): - if key not in self.categorydict: - self.categorydict[key] = [] - for predtriple in value: - pred = self.dict_predtriple_to_pred(predtriple) - if not self.builtin_is_registered(pred): - self.categorydict[key].append(pred) - self.sync_with_predlist(pred.predname, pred, key, 'add') - - def delete_map(self, newmap): - for key, value 
in newmap.items(): - for predtriple in value: - predtotest = self.dict_predtriple_to_pred(predtriple) - for pred in self.categorydict[key]: - if pred.predname == predtotest.predname: - if pred.num_inputs == predtotest.num_inputs: - self.categorydict[key].remove(pred) - self.sync_with_predlist(pred.predname, - pred, key, 'del') - if self.categorydict[key] == []: - del self.categorydict[key] - - def sync_with_predlist(self, predname, pred, category, operation): - if operation == 'add': - self.preddict[predname] = [pred, category] - if operation == 'del': - if predname in self.preddict: - del self.preddict[predname] - - def delete_builtin(self, category, name, inputs): - if category not in self.categorydict: - self.categorydict[category] = [] - for pred in self.categorydict[category]: - if pred.num_inputs == inputs and pred.predname == name: - self.categorydict[category].remove(pred) - self.sync_with_predlist(name, pred, category, 'del') - - def get_category_name(self, predname, predinputs): - if predname in self.preddict: - if self.preddict[predname][0].num_inputs == predinputs: - return self.preddict[predname][1] - return None - - def exists_category(self, category): - return category in self.categorydict - - def insert_category(self, category): - self.categorydict[category] = [] - - def delete_category(self, category): - if category in self.categorydict: - categorypreds = self.categorydict[category] - for pred in categorypreds: - self.sync_with_predlist(pred.predname, pred, category, 'del') - del self.categorydict[category] - - def insert_to_category(self, category, pred): - if category in self.categorydict: - self.categorydict[category].append(pred) - self.sync_with_predlist(pred.predname, pred, category, 'add') - else: - assert False, "Category does not exist" - - def delete_from_category(self, category, pred): - if category in self.categorydict: - self.categorydict[category].remove(pred) - self.sync_with_predlist(pred.predname, pred, category, 'del') - else: - assert False, "Category does not exist" - - def delete_all_in_category(self, category): - if category in self.categorydict: - categorypreds = self.categorydict[category] - for pred in categorypreds: - self.sync_with_predlist(pred.predname, pred, category, 'del') - self.categorydict[category] = [] - else: - assert False, "Category does not exist" - - def builtin_is_registered(self, predtotest): - """Given a CongressBuiltinPred, check if it has been registered.""" - pname = predtotest.predname - if pname in self.preddict: - if self.preddict[pname][0].num_inputs == predtotest.num_inputs: - return True - return False - - def is_builtin(self, table, arity=None): - """Given a Tablename and arity, check if it is a builtin.""" - # Note: for now we grandfather in old builtin tablenames but will - # deprecate those tablenames in favor of builtin:tablename - if ((table.service == BUILTIN_NAMESPACE and - table.table in self.preddict) or - table.table in self.preddict): # grandfather - if not arity: - return True - if len(self.preddict[table.table][0].predargs) == arity: - return True - return False - - def builtin(self, table): - """Return a CongressBuiltinPred for given Tablename or None.""" - if not isinstance(table, six.string_types): - table = table.table - if table in self.preddict: - return self.preddict[table][0] - return None - - def list_available_builtins(self): - """Print out the list of builtins, by category.""" - for key, value in self.categorydict.items(): - predlist = self.categorydict[key] - for pred in predlist: - print(str(pred)) - - -# a
Singleton that serves as the entry point for builtin functionality -builtin_registry = CongressBuiltinCategoryMap(_builtin_map) diff --git a/congress/datalog/compile.py b/congress/datalog/compile.py deleted file mode 100644 index 68f2e805..00000000 --- a/congress/datalog/compile.py +++ /dev/null @@ -1,2326 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import argparse -import collections -import copy -import functools - -import six -from six.moves import range - -from oslo_log import log as logging -from oslo_utils import uuidutils - -from congress.datalog import analysis -from congress.datalog import base -from congress.datalog import builtin - -# set up appropriate antlr paths per python version and import runtime -# import appropriate Lexer & Parser per python version -import os -import sys -_congressDir = os.path.dirname( - os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -if six.PY2: - sys.path.append(_congressDir + - "/antlr3runtime/Python/") - from congress.datalog.Python2 import CongressLexer - from congress.datalog.Python2 import CongressParser -else: - sys.path.append(_congressDir + - "/antlr3runtime/Python3/") - from congress.datalog.Python3 import CongressLexer - from congress.datalog.Python3 import CongressParser -import antlr3 - -from congress.datalog import utility -from congress import exception -from congress import utils - -LOG = logging.getLogger(__name__) - -PERMITTED_MODALS = ['execute'] - - -############################################################################## -# Internal representation of policy language -############################################################################## - - -class Schema(object): - """Meta-data about a collection of tables.""" - def __init__(self, dictionary=None, complete=False): - if dictionary is None: - self.map = {} - self.count = {} - elif isinstance(dictionary, Schema): - self.map = dict(dictionary.map) - self.count = dictionary.count - else: - self.map = dictionary - self.count = None - # whether to assume there is an entry in this schema for - # every permitted table - self.complete = complete - - def __contains__(self, tablename): - return tablename in self.map - - @classmethod - def col(self, cols): - # For Datasource tables, columns would be in the format - - # {'name': 'colname', 'desc': 'description'} - if len(cols) and isinstance(cols[0], dict): - return [x['name'] for x in cols] - else: - return [x for x in cols] - - def columns(self, tablename): - """Returns the list of column names for the given TABLENAME. - - Return None if the tablename's columns are unknown. - """ - if tablename not in self.map.keys(): - return - cols = self.map[tablename] - return Schema.col(cols) - - def arity(self, tablename): - """Returns the number of columns for the given TABLENAME. - - Return None if TABLENAME is unknown. 
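Illustrative example: with a schema built from {'servers': ['id', 'name']},
columns('servers') returns ['id', 'name'] and arity('servers') returns 2;
unknown tablenames yield None from both methods.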
- """ - if tablename in self.map: - return len(self.map[tablename]) - - def update(self, item, is_insert): - """Returns the schema change of this update. - - Return schema change. - """ - if self.count is None: - return None - if isinstance(item, Fact): - tablename, tablelen = item.table, len(item) - th = None - elif isinstance(item, Literal): - tablename, tablelen = item.table.table, len(item.arguments) - th = item.table.service - else: - raise exception.PolicyException( - "Schema cannot update item: %r" % item) - - schema_change = None - if is_insert: - if tablename in self: - self.count[tablename] += 1 - schema_change = (tablename, None, True, th) - else: - self.count[tablename] = 1 - val = ["Col"+str(i) for i in range(0, tablelen)] - self.map[tablename] = val - schema_change = (tablename, val, True, th) - else: - if tablename not in self: - LOG.warning("Attempt to delete a nonexistent rule: %s", item) - elif self.count[tablename] > 1: - self.count[tablename] -= 1 - schema_change = (tablename, None, False, th) - else: - schema_change = (tablename, self.map[tablename], False, th) - del self.count[tablename] - del self.map[tablename] - return schema_change - - def revert(self, change): - """Revert change made by update. - - Return None - """ - if change is None: - return - - inserted = change[2] - tablename = change[0] - val = change[1] - - if inserted: - if self.count[tablename] > 1: - self.count[tablename] -= 1 - else: - del self.map[tablename] - del self.count[tablename] - else: - if tablename in self.count: - self.count[tablename] += 1 - else: - assert val is not None - self.map[tablename] = val - self.count[tablename] = 1 - - def column_number(self, tablename, column): - """Returns the 0-indexed position of the given COLUMN for TABLENAME. - - Returns None if TABLENAME or COLUMNNAME are unknown. - Returns COLUMN if it is a number. - """ - table_columns = self.columns(tablename) - if table_columns is None: - return - - if isinstance(column, six.integer_types): - if column > len(table_columns): - return - return column - try: - return table_columns.index(column) - except ValueError: - return - - def column_name(self, tablename, column): - """Returns name for given COLUMN or None if it is unknown.""" - table_columns = self.columns(tablename) - if table_columns is None: - return - if isinstance(column, six.string_types): - if column in table_columns: - return column - return - try: - return self.map[tablename][column] - except IndexError: - return - - def __str__(self): - schemas = [] - for table, columns in self.map.items(): - cols = ",".join(str(x) for x in columns) - schemas.append("schema[%s(%s)]" % (table, cols)) - return " ".join(schemas) - - def __len__(self): - return len(self.map) - - -class Term(object): - """Represents the union of Variable and ObjectConstant. - - Should only be instantiated via factory method. - """ - def __init__(self): - assert False, "Cannot instantiate Term directly--use factory method" - - @staticmethod - def create_from_python(value, force_var=False): - """Create Variable or ObjectConstants. - - To create variable, FORCE_VAR needs to be true. There is currently - no way to avoid this since variables are strings. 
- """ - if isinstance(value, Term): - return value - elif force_var: - return Variable(str(value)) - elif isinstance(value, six.string_types): - return ObjectConstant(value, ObjectConstant.STRING) - elif isinstance(value, six.integer_types): - return ObjectConstant(value, ObjectConstant.INTEGER) - elif isinstance(value, float): - return ObjectConstant(value, ObjectConstant.FLOAT) - else: - assert False, "No Term corresponding to {}".format(repr(value)) - - -@functools.total_ordering -class Variable (Term): - """Represents a term without a fixed value.""" - - SORT_RANK = 1 - __slots__ = ['name', 'location', '_hash'] - - def __init__(self, name, location=None): - assert isinstance(name, six.string_types) - self.name = name - self.location = location - self._hash = None - - def __str__(self): - return str(self.name) - - def __lt__(self, other): - if self.SORT_RANK < other.SORT_RANK: - return self.SORT_RANK < other.SORT_RANK - return self.name < other.name - - def __eq__(self, other): - return isinstance(other, Variable) and self.name == other.name - - def __ne__(self, other): - return not self == other - - def __repr__(self): - # Use repr to hash rule--can't include location - return "Variable(name={})".format(repr(self.name)) - - def __hash__(self): - if self._hash is None: - self._hash = hash(('Variable', hash(self.name))) - return self._hash - - def is_variable(self): - return True - - def is_object(self): - return False - - -@functools.total_ordering -class ObjectConstant (Term): - """Represents a term with a fixed value.""" - STRING = 'STRING' - FLOAT = 'FLOAT' - INTEGER = 'INTEGER' - SORT_RANK = 2 - __slots__ = ['name', 'type', 'location', '_hash'] - - def __init__(self, name, type, location=None): - assert(type in [self.STRING, self.FLOAT, self.INTEGER]) - self.name = name - self.type = type - self.location = location - self._hash = None - - def __str__(self): - if self.type == ObjectConstant.STRING: - return '"' + str(self.name) + '"' - else: - return str(self.name) - - def __repr__(self): - # Use repr to hash rule--can't include location - return "ObjectConstant(name={}, type={})".format( - repr(self.name), repr(self.type)) - - def __hash__(self): - if self._hash is None: - self._hash = hash(('ObjectConstant', hash(self.name), - hash(self.type))) - return self._hash - - def __lt__(self, other): - if self.SORT_RANK != other.SORT_RANK: - return self.SORT_RANK < other.SORT_RANK - if self.name != other.name: - return self.name < other.name - return self.type < other.type - - def __eq__(self, other): - return (isinstance(other, ObjectConstant) and - self.name == other.name and - self.type == other.type) - - def __ne__(self, other): - return not self == other - - def is_variable(self): - return False - - def is_object(self): - return True - - -@functools.total_ordering -class Fact (tuple): - """Represent a Fact (a ground literal) - - Use this class to represent a fact such as Foo(1,2,3). While one could - use a Rule to represent the same fact, this Fact datastructure is more - memory efficient than a Rule object since this Fact stores the information - as a native tuple, containing native values like ints and strings. Notes - that this subclasses from tuple. 
- """ - SORT_RANK = 3 - - def __new__(cls, table, values): - return super(Fact, cls).__new__(cls, values) - - def __init__(self, table, values): - self.table = table - - def __lt__(self, other): - if self.SORT_RANK != other.SORT_RANK: - return self.SORT_RANK < other.SORT_RANK - if self.table != other.table: - return self.table < other.table - return super(Fact, self).__lt__(other) - - def __eq__(self, other): - if self.SORT_RANK != other.SORT_RANK: - return False - if self.table != other.table: - return False - return super(Fact, self).__eq__(other) - - def __hash__(self): - return hash((self.SORT_RANK, self.table, super(Fact, self).__hash__())) - - -@functools.total_ordering -class Tablename(object): - SORT_RANK = 4 - __slots__ = ['service', 'table', 'modal', '_hash'] - - def __init__(self, table=None, service=None, modal=None): - self.table = table - self.service = service - self.modal = modal - self._hash = None - - @classmethod - def create_from_tablename(cls, tablename, service=None, use_modules=True): - # if use_modules is True, - # break full tablename up into 2 pieces. Example: "nova:servers:cpu" - # self.theory = "nova" - # self.table = "servers:cpu" - if service is None and use_modules: - (service, tablename) = cls.parse_service_table(tablename) - return cls(service=service, table=tablename) - - @classmethod - def parse_service_table(cls, tablename): - """Given tablename returns (service, name).""" - pieces = tablename.split(':') - if len(pieces) == 1: - table = pieces[0] - service = None - else: - service = pieces[0] - table = ':'.join(pieces[1:]) - return service, table - - @classmethod - def build_service_table(cls, service, table): - """Return string service:table.""" - return str(service) + ":" + str(table) - - def global_tablename(self, prefix=None): - pieces = [x for x in [prefix, self.service, self.table] - if x is not None] - return ":".join(pieces) - - def matches(self, service, table, modal): - if (service == self.service and table == self.table and - modal == self.modal): - return True - self_service, self_table = self.parse_service_table(self.table) - return (service == self_service and - table == self_table and - modal == self.modal) - - def __copy__(self): - return Tablename( - table=self.table, modal=self.modal, service=self.service) - - def __lt__(self, other): - if self.SORT_RANK != other.SORT_RANK: - return self.SORT_RANK < other.SORT_RANK - if self.modal != other.modal: - return self.modal < other.modal - if self.service != other.service: - # manually handle None cases for py3 compat - if (self.service is None): - return True - if (other.service is None): - return False - return self.service < other.service - if self.table != other.table: - return self.table < other.table - return False - - def __eq__(self, other): - return (isinstance(other, Tablename) and - self.table == other.table and - self.service == other.service and - self.modal == other.modal) - - def same(self, other, default_service): - """Equality but where default_service is used for None service.""" - if self.table != other.table: - return False - if self.modal != other.modal: - return False - selfservice = self.service or default_service - otherservice = other.service or default_service - return selfservice == otherservice - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - if self._hash is None: - self._hash = hash(('Tablename', - hash(self.service), - hash(self.table), - hash(self.modal))) - return self._hash - - def __str__(self): - return ":".join([x for x 
in [self.modal, self.service, self.table] - if x is not None]) - - def __repr__(self): - return "Tablename(table=%s, service=%s, modal=%s)" % ( - self.table, self.service, self.modal) - - def name(self, default_service=None): - """Compute string name with default service.""" - service = self.service or default_service - if service is None: - return self.table - return service + ":" + self.table - - def invert_update(self): - """Invert the update. - - If end of table name is + or -, return a copy after switching - the copy's sign. - Does not make a copy if table name does not end in + or -. - """ - if self.table.endswith('+'): - suffix = '-' - elif self.table.endswith('-'): - suffix = '+' - else: - return self, False - - new = copy.copy(self) - new.table = self.table[:-1] + suffix - return new, True - - def drop_update(self): - """Drop the update. - - If end of table name is + or -, return a copy without the sign. - If table name does not end in + or -, make no copy. - """ - if self.table.endswith('+') or self.table.endswith('-'): - new = copy.copy(self) - new.table = new.table[:-1] - return new, True - else: - return self, False - - def make_update(self, is_insert=True): - """Turn the tablename into a +/- update.""" - new = copy.copy(self) - if is_insert: - new.table = new.table + "+" - else: - new.table = new.table + "-" - return new, True - - def is_update(self): - return self.table.endswith('+') or self.table.endswith('-') - - def drop_service(self): - self.service = None - - -@functools.total_ordering -class Literal (object): - """Represents a possibly negated atomic statement, e.g. p(a, 17, b).""" - SORT_RANK = 5 - __slots__ = ['table', 'arguments', 'location', 'negated', '_hash', - 'id', 'name', 'comment', 'original_str', 'named_arguments'] - - def __init__(self, table, arguments, location=None, negated=False, - use_modules=True, id_=None, name=None, comment=None, - original_str=None, named_arguments=None): - if isinstance(table, Tablename): - self.table = table - else: - self.table = Tablename.create_from_tablename( - table, use_modules=use_modules) - self.arguments = arguments - self.location = location - self.negated = negated - self._hash = None - self.id = id_ - self.name = name - self.comment = comment - self.original_str = original_str - if named_arguments is None: - self.named_arguments = collections.OrderedDict() - else: - # Python3: explicitly split out the integer names from others - self.named_arguments = collections.OrderedDict( - sorted([(n, o) - for n, o in named_arguments.items() if - isinstance(n, six.integer_types)]) - + - sorted([(n, o) - for n, o in named_arguments.items() if - not isinstance(n, six.integer_types)]) - ) - - def __copy__(self): - # use_modules=False so that we get exactly what we started - # with - newone = Literal(self.table, self.arguments, self.location, - self.negated, False, self.id, - self.name, self.comment, self.original_str, - self.named_arguments) - return newone - - def set_id(self, id): - self.id = id - - def set_name(self, name): - self.name = name - - def set_comment(self, comment): - self.comment = comment - - def set_original_str(self, original_str): - self.original_str = original_str - - @classmethod - def create_from_table_tuple(cls, table, tuple): - """Create Literal from table and tuple. - - TABLE is a string tablename. - TUPLE is a python list representing a row, e.g. - [17, "string", 3.14]. Returns the corresponding Literal. 
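For instance (illustrative), create_from_table_tuple('p', [17, "a"])
builds the literal p(17, "a").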
- """ - return cls(table, [Term.create_from_python(x) for x in tuple]) - - @classmethod - def create_from_iter(cls, list): - """Create Literal from list. - - LIST is a python list representing an atom, e.g. - ['p', 17, "string", 3.14]. Returns the corresponding Literal. - """ - arguments = [] - for i in range(1, len(list)): - arguments.append(Term.create_from_python(list[i])) - return cls(list[0], arguments) - - def __str__(self): - args = ", ".join([str(x) for x in self.arguments]) - named = ", ".join("{}={}".format(key, val) - for key, val in self.named_arguments.items()) - if len(args) > 0: - if len(named): - args += "," + named - else: - args = named - s = "{}({})".format(self.tablename(), args) - if self.table.modal is not None: - s = "{}[{}]".format(self.table.modal, s) - if self.negated: - s = "not " + s - return s - - def pretty_str(self): - return self.__str__() - - def __lt__(self, other): - if self.SORT_RANK != other.SORT_RANK: - return self.SORT_RANK < other.SORT_RANK - if self.table != other.table: - return self.table < other.table - if self.negated != other.negated: - return self.negated < other.negated - if len(self.arguments) != len(other.arguments): - return len(self.arguments) < len(other.arguments) - if len(self.named_arguments) != len(other.named_arguments): - return len(self.named_arguments) < len(other.named_arguments) - # final case - # explicitly convert OrderedDict to list for comparison - - def od_list(input): - return ( - list(input.items()) if isinstance( - input, collections.OrderedDict) - else input) - - return (self.arguments < other.arguments or - od_list(self.named_arguments) < od_list(other.named_arguments)) - - def __eq__(self, other): - return (isinstance(other, Literal) and - self.table == other.table and - self.negated == other.negated and - len(self.arguments) == len(other.arguments) and - self.arguments == other.arguments and - self.named_arguments == other.named_arguments) - - def __ne__(self, other): - return not self == other - - def __repr__(self): - named = ",".join("%r: %r" % (key, value) - for key, value in self.named_arguments.items()) - named = "{" + named + "}" - args = ",".join(repr(arg) for arg in self.arguments) - args = "[" + args + "]" - return ("Literal(table={}, arguments={}, negated={}, " - "named_arguments={})").format( - repr(self.table), args, repr(self.negated), named) - - def __hash__(self): - if self._hash is None: - args = tuple([hash(a) for a in self.arguments]) - named = tuple([(hash(key), hash(value)) - for key, value in self.named_arguments.items()]) - self._hash = hash(('Literal', - hash(self.table), - args, - hash(self.negated), - named)) - return self._hash - - def is_negated(self): - return self.negated - - def is_atom(self): - return not self.negated - - def is_rule(self): - return False - - def variable_names(self): - """Return variable names in arguments. Ignores named_arguments.""" - return set([x.name for x in self.arguments if x.is_variable()]) - - def variables(self): - """Return variables in arguments. Ignores named_arguments.""" - return set([x for x in self.arguments if x.is_variable()]) - - def is_ground(self): - """Return True if all args are non-vars. Ignores named_arguments.""" - return all(not arg.is_variable() for arg in self.arguments) - - def plug(self, binding, caller=None): - """Assumes domain of BINDING is Terms. 
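For example (illustrative), plugging the binding {Variable('x'): 17}
into p(x, 1) yields p(17, 1).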
Ignores named_arguments.""" - new = copy.copy(self) - if isinstance(binding, dict): - args = [] - for arg in self.arguments: - if arg in binding: - args.append(Term.create_from_python(binding[arg])) - else: - args.append(arg) - new.arguments = args - return new - else: - args = [Term.create_from_python(binding.apply(arg, caller)) - for arg in self.arguments] - new.arguments = args - return new - - def argument_names(self): - """Return names of all arguments. Ignores named_arguments.""" - return tuple([arg.name for arg in self.arguments]) - - def complement(self): - """Copies SELF and inverts is_negated.""" - new = copy.copy(self) - new.negated = not new.negated - return new - - def make_positive(self): - """Return handle to self or copy of self based on positive check. - - Either returns SELF if is_negated() is false or - returns copy of SELF where is_negated() is set to false. - """ - if self.negated: - new = copy.copy(self) - new.negated = False - return new - else: - return self - - def invert_update(self): - return self._modify_table(lambda x: x.invert_update()) - - def drop_update(self): - return self._modify_table(lambda x: x.drop_update()) - - def make_update(self, is_insert=True): - return self._modify_table(lambda x: x.make_update(is_insert=is_insert)) - - def _modify_table(self, func): - """Apply func to self.table and return a copy that uses the result.""" - newtable, is_different = func(self.table) - if is_different: - new = copy.copy(self) - new.table = newtable - return new - return self - - def is_update(self): - return self.table.is_update() - - def is_builtin(self, check_arguments=True): - if check_arguments: - return builtin.builtin_registry.is_builtin( - self.table, len(self.arguments)) - else: - return builtin.builtin_registry.is_builtin( - self.table) - - def tablename(self, default_service=None): - return self.table.name(default_service) - - def theory_name(self): - return self.table.service - - def drop_theory(self): - """Destructively sets the theory to None.""" - self._hash = None - self.table.drop_service() - return self - - def eliminate_column_references_and_pad_positional( - self, theories, default_theory=None, index=0, prefix=''): - """Expand column references to positional args and pad positional args. - - Expand column references to traditional datalog positional args. - Also pad positional args if too few are provided. - Returns a new literal. If the literal has no column references and - no schema is found for its table, the literal is returned unchanged. - """ - # TODO(ekcs): remove unused parameter: index - # corner cases - if len(self.named_arguments) > 0: - theory = literal_theory(self, theories, default_theory) - if theory is None or theory.schema is None: - raise exception.IncompleteSchemaException( - "Literal %s uses named arguments, but the " - "schema is unknown."
% self) - if theory.kind != base.DATASOURCE_POLICY_TYPE: # eventually remove - raise exception.PolicyException( - "Literal {} uses column references, but '{}' does not " - "reference a datasource policy.".format(self, theory.name)) - schema = theory.schema - if self.table.table not in schema: - raise exception.IncompleteSchemaException( - "Literal {} uses unknown table {}.".format( - str(self), str(self.table.table))) - - # check if named arguments conflict with positional or named args - errors = [] - term_index = {} - for col, arg in self.named_arguments.items(): - if isinstance(col, six.string_types): # column name - index = schema.column_number(self.table.table, col) - if index is None: - errors.append(exception.PolicyException( - "In literal {} column name {} does not " - "exist".format(str(self), col))) - continue - if index < len(self.arguments): - errors.append(exception.PolicyException( - "In literal {} column name {} references position " - "{}, which is already provided by " - "position.".format(str(self), col, index))) - if index in self.named_arguments: - errors.append(exception.PolicyException( - "In literal {} column name {} references position " - "{}, which is also referenced by number.))".format( - str(self), col, index))) - if index in term_index: - # should have already caught this case above - errors.append(exception.PolicyException( - "In literal {}, column name {} references " - "position {}, which already has reference " - "{}".format(str(self), col, index, - str(term_index[index])))) - term_index[index] = arg - else: # column number - if col >= schema.arity(self.table.table): - errors.append(exception.PolicyException( - "In literal {} column index {} is too " - "large".format(str(self), col))) - if col < len(self.arguments): - errors.append(exception.PolicyException( - "In literal {} column index {} " - " is already provided by position.".format( - str(self), col))) - name = schema.column_name(self.table.table, col) - if name in self.named_arguments: - errors.append(exception.PolicyException( - "In literal {} column index {} references column " - "{}, which is also referenced by name.))".format( - str(self), col, name))) - if col in term_index: - # should have already caught this case above - errors.append(exception.PolicyException( - "In literal {} column index {} already has a " - "reference {}".format( - str(self), col, str(term_index[col])))) - term_index[col] = arg - if errors: - raise exception.PolicyException( - " ".join(str(err) for err in errors)) - else: - theory = literal_theory(self, theories, default_theory) - if theory is None or theory.schema is None: - return self - schema = theory.schema - if self.table.table not in schema: - return self - term_index = {} - - # turn reference args into position args - position_args = list(self.arguments) # copy the original list - for i in range(len(position_args), schema.arity(self.table.table)): - term = term_index.get(i, None) - if term is None: - term = Variable("%s%s" % (prefix, i)) - position_args.append(term) - newlit = self.__copy__() - newlit.named_arguments = collections.OrderedDict() - newlit.arguments = position_args - return newlit - - -@functools.total_ordering -class Rule(object): - """Represents a rule, e.g. 
p(x) :- q(x).""" - - SORT_RANK = 6 - __slots__ = ['heads', 'head', 'body', 'location', '_hash', 'id', 'name', - 'comment', 'original_str'] - - def __init__(self, head, body, location=None, id=None, name=None, - comment=None, original_str=None): - # self.head is self.heads[0] - # Keep self.head around since a rule with multiple - # heads is not used by reasoning algorithms. - # Most code ignores self.heads entirely. - if is_literal(head): - self.heads = [head] - self.head = head - else: - self.heads = head - self.head = self.heads[0] - - self.body = body - self.location = location - self._hash = None - self.id = id or uuidutils.generate_uuid() - self.name = name - self.comment = comment - self.original_str = original_str - - def __copy__(self): - newone = Rule(self.head, self.body, self.location, self.id, - self.name, self.comment, self.original_str) - return newone - - def set_id(self, id): - self.id = id - - def set_name(self, name): - self.name = name - - def set_comment(self, comment): - self.comment = comment - - def set_original_str(self, original_str): - self.original_str = original_str - - def __str__(self): - if len(self.body) == 0: - return " ".join([str(atom) for atom in self.heads]) - return "{} :- {}".format( - ", ".join([str(atom) for atom in self.heads]), - ", ".join([str(lit) for lit in self.body])) - - def pretty_str(self): - if len(self.body) == 0: - return self.__str__() - else: - return "{} :- \n {}".format( - ", ".join([str(atom) for atom in self.heads]), - ",\n ".join([str(lit) for lit in self.body])) - - def __lt__(self, other): - if self.SORT_RANK != other.SORT_RANK: - return self.SORT_RANK < other.SORT_RANK - if len(self.heads) != len(other.heads): - return len(self.heads) < len(other.heads) - if len(self.body) != len(other.body): - return len(self.body) < len(other.body) - x = sorted(self.heads) - y = sorted(other.heads) - if x != y: - return x < y - x = sorted(self.body) - y = sorted(other.body) - return x < y - - def __eq__(self, other): - return (isinstance(other, Rule) and - len(self.heads) == len(other.heads) and - len(self.body) == len(other.body) and - sorted(self.heads) == sorted(other.heads) and - sorted(self.body) == sorted(other.body)) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "Rule(head={}, body={}, location={})".format( - "[" + ",".join(repr(arg) for arg in self.heads) + "]", - "[" + ",".join(repr(arg) for arg in self.body) + "]", - repr(self.location)) - - def __hash__(self): - # won't properly treat a positive literal and an atom as the same - if self._hash is None: - self._hash = hash(('Rule', - tuple([hash(h) for h in sorted(self.heads)]), - tuple([hash(b) for b in sorted(self.body)]))) - return self._hash - - def is_atom(self): - return False - - def is_rule(self): - return True - - def tablename(self, theory=None): - return self.head.tablename(theory) - - def theory_name(self): - return self.head.theory_name() - - def drop_theory(self): - """Destructively sets the theory to None in all heads.""" - for head in self.heads: - head.drop_theory() - self._hash = None - return self - - def tablenames(self, theory=None, body_only=False, include_builtin=False, - include_modal=True): - """Return all the tablenames occurring in this rule.""" - result = set() - if not body_only: - for lit in self.heads: - if include_modal or not lit.table.modal: - result.add(lit.tablename(theory)) - for lit in self.body: - if include_builtin or not lit.is_builtin(): - result.add(lit.tablename(theory)) - return result - - 
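# (A small construction sketch, equivalent to parsing "p(x) :- q(x), r(x)";
# illustrative only:
#     head = Literal('p', [Variable('x')])
#     body = [Literal('q', [Variable('x')]), Literal('r', [Variable('x')])]
#     rule = Rule(head, body)
#     str(rule)   # -> 'p(x) :- q(x), r(x)'
# )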
def variables(self): - vs = set() - for lit in self.heads: - vs |= lit.variables() - for lit in self.body: - vs |= lit.variables() - return vs - - def variable_names(self): - vs = set() - for lit in self.heads: - vs |= lit.variable_names() - for lit in self.body: - vs |= lit.variable_names() - return vs - - def plug(self, binding, caller=None): - newheads = self.plug_heads(binding, caller) - newbody = self.plug_body(binding, caller) - return Rule(newheads, newbody) - - def plug_body(self, binding, caller=None): - return [lit.plug(binding, caller=caller) for lit in self.body] - - def plug_heads(self, binding, caller=None): - return [atom.plug(binding, caller=caller) for atom in self.heads] - - def invert_update(self): - new = copy.copy(self) - new.heads = [atom.invert_update() for atom in self.heads] - new.head = new.heads[0] - return new - - def drop_update(self): - new = copy.copy(self) - new.heads = [atom.drop_update() for atom in self.heads] - new.head = new.heads[0] - return new - - def make_update(self, is_insert=True): - new = copy.copy(self) - new.heads = [atom.make_update(is_insert) for atom in self.heads] - new.head = new.heads[0] - return new - - def is_update(self): - return self.head.is_update() - - def eliminate_column_references_and_pad_positional( - self, theories, default_theory=None): - """Return version of SELF /w col refs removed and pos args padded. - - All column references removed. Positional args padded up to required - length. - Throws exception if RULE is inconsistent with schemas. - """ - pre = self._unused_variable_prefix() - heads = [] - for i in range(0, len(self.heads)): - heads.append( - self.heads[i].eliminate_column_references_and_pad_positional( - theories, default_theory=default_theory, - index=i, prefix='%s%s' % (pre, i))) - - body = [] - sorted_lits = sorted(self.body) - lit_rank = {} # associate each literal with sort rank w/in body - for i in range(0, len(sorted_lits)): - lit_rank[sorted_lits[i]] = i - - for i in range(0, len(self.body)): - body.append( - self.body[i].eliminate_column_references_and_pad_positional( - theories, default_theory=default_theory, - index=i, prefix='%s%s' % (pre, lit_rank[self.body[i]]))) - - return Rule(heads, body, self.location, name=self.name, - comment=self.comment, original_str=self.original_str) - - def _unused_variable_prefix(self): - """Get unused variable prefix. - - Returns variable prefix (string) that is used by no other variable - in the rule. 
- """ - variables = self.variable_names() - found = False - prefix = "x_" - while not found: - if next((var for var in variables if var.startswith(prefix)), - False): - prefix += "_" - else: - found = True - return prefix - - -class Event(object): - """Represents a change to a formula.""" - - __slots__ = ['formula', 'proofs', 'insert', 'target'] - - def __init__(self, formula=None, insert=True, proofs=None, target=None): - if proofs is None: - proofs = [] - self.formula = formula - self.proofs = proofs - self.insert = insert - self.target = target - - def is_insert(self): - return self.insert - - def tablename(self, default_theory=None): - return self.formula.tablename(default_theory) - - def __str__(self): - if self.insert: - text = "insert" - else: - text = "delete" - if self.target is None: - target = "" - else: - target = " for {}".format(str(self.target)) - return "{}[{}]{}".format( - text, str(self.formula), target) - - def lstr(self): - return self.__str__() + " with proofs " + utility.iterstr(self.proofs) - - def __hash__(self): - return hash("Event(formula={}, proofs={}, insert={}".format( - str(self.formula), str(self.proofs), str(self.insert))) - - def __eq__(self, other): - return (self.formula == other.formula and - self.proofs == other.proofs and - self.insert == other.insert) - - def __ne__(self, other): - return not self.__eq__(other) - - -def formulas_to_string(formulas): - """Convert formulas to string. - - Takes an iterable of compiler sentence objects and returns a - string representing that iterable, which the compiler will parse - into the original iterable. - - """ - if formulas is None: - return "None" - return " ".join([str(formula) for formula in formulas]) - - -def is_update(x): - """Returns T iff x is a formula or tablename representing an update.""" - if isinstance(x, six.string_types): - return x.endswith('+') or x.endswith('-') - elif is_atom(x): - return is_update(x.table) - elif is_regular_rule(x): - return is_update(x.head.table) - else: - return False - - -def is_result(x): - """Check if x is result representation. - - Returns T iff x is a formula or tablename representing the result of - an action invocation. - """ - if isinstance(x, six.string_types): - return x == 'result' - elif is_atom(x): - return is_update(x.table) - elif is_rule(x): - return is_update(x.head.table) - else: - return False - - -def is_recursive(x): - """Check for recursive. - - X can be either a Graph or a list of rules. - Returns T iff the list of rules RULES has a table defined in Terms - of itself. - """ - if isinstance(x, utility.Graph): - return x.has_cycle() - return RuleDependencyGraph(x).has_cycle() - - -def stratification(rules): - """Stratify the rules. - - Returns a dictionary from table names to an integer representing - the strata to which the table is assigned or None if the rules - are not stratified. - """ - return RuleDependencyGraph(rules).stratification([True]) - - -def is_stratified(rules): - """Check if rules are stratified. - - Returns T iff the list of rules RULES has no table defined in terms - of its negated self. - """ - return stratification(rules) is not None - - -class RuleDependencyGraph(utility.BagGraph): - """A Graph representing the table dependencies of rules. - - Creates a Graph that includes one node for each table and an edge - if there is some rule with u in the head and v in the body. - THEORY is the name of the theory to be used for any literal whose - theory is None. 
- INCLUDE_ATOMS is a boolean controlling whether atoms should contribute - to nodes. - SELECT_HEAD is a function that returns True for those head literals - that should be included in the graph. - SELECT_BODY is a function that returns True for those body literals - that should be included in the graph. - HEAD_TO_BODY controls whether edges are oriented from the tables in - the head toward the tables in the body, or vice versa. - """ - def __init__(self, formulas=None, theory=None, include_atoms=True, - select_head=None, select_body=None, head_to_body=True): - super(RuleDependencyGraph, self).__init__() - # direction of edges - self.head_to_body = head_to_body - # dict from modal name to set of tablenames appearing in rule head - # with that modal (with refcounts) - self.modal_index = analysis.ModalIndex() - # insert formulas - if formulas: - for formula in formulas: - self.formula_insert( - formula, - theory=theory, - include_atoms=include_atoms, - select_head=select_head, - select_body=select_body) - - def formula_update(self, events, - include_atoms=True, select_head=None, select_body=None): - """Modify graph with inserts/deletes in EVENTS. - - Returns list of changes. - """ - changes = [] - for event in events: - theory = event.target - nodes, edges, modals = self.formula_nodes_edges( - event.formula, - theory=theory, - include_atoms=include_atoms, - select_head=select_head, - select_body=select_body) - if event.insert: - for node in nodes: - self.add_node(node) - changes.append(('node', node, True)) - for (src, dst, label) in edges: - self.add_edge(src, dst, label) - changes.append(('edge', src, dst, label, True)) - self.modal_index += modals - changes.append(('modal', modals, True)) - else: - for node in nodes: - self.delete_node(node) - changes.append(('node', node, False)) - for (src, dst, label) in edges: - self.delete_edge(src, dst, label) - changes.append(('edge', src, dst, label, False)) - self.modal_index -= modals - changes.append(('modal', modals, False)) - return changes - - def undo_changes(self, changes): - """Reverse the given changes. - - Each change is either ('node', <node>, <is-insert>) or - ('edge', <src>, <dst>, <label>, <is-insert>).
/rows - - To update the 'events' table, the request body should be in the - following style. The request will replace all rows in the table with the - body, which means that updating the table with [] will clear it. - One {} object in the list represents one row of the table. - - request body: - [ - { - "time": "2016-02-22T11:48:55Z", - "type": "compute.host.down", - "details": { - "hostname": "compute1", - "status": "down", - "monitor": "zabbix1", - "monitor_event_id": "111" - } - }, - ..... - ] - """ - - value_trans = {'translation-type': 'VALUE'} - - def safe_id(x): - if isinstance(x, six.string_types): - return x - try: - return x['id'] - except Exception: - return str(x) - - def flatten_events(row_events): - flatten = [] - for event in row_events: - details = event.pop('details') - for k, v in details.items(): - event[k] = v - flatten.append(event) - return flatten - - events_translator = { - 'translation-type': 'HDICT', - 'table-name': 'events', - 'selector-type': 'DICT_SELECTOR', - 'objects-extract-fn': flatten_events, - 'field-translators': - ({'fieldname': 'time', 'translator': value_trans}, - {'fieldname': 'type', 'translator': value_trans}, - {'fieldname': 'hostname', 'translator': value_trans}, - {'fieldname': 'status', 'translator': value_trans}, - {'fieldname': 'monitor', 'translator': value_trans}, - {'fieldname': 'monitor_event_id', 'translator': value_trans},) - } - - TRANSLATORS = [events_translator] - - def __init__(self, name='', args=None): - super(DoctorDriver, self).__init__(name, args=args) - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'doctor' - result['description'] = ('Datasource driver that allows external ' - 'systems to push data in accordance with ' - 'OPNFV Doctor Inspector southbound interface ' - 'specification.') - result['config'] = {'persist_data': constants.OPTIONAL} - return result diff --git a/congress/datasources/glancev2_driver.py b/congress/datasources/glancev2_driver.py deleted file mode 100644 index af687945..00000000 --- a/congress/datasources/glancev2_driver.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import glanceclient.v2.client as glclient -from oslo_log import log as logging - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - -LOG = logging.getLogger(__name__) - - -class GlanceV2Driver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - - IMAGES = "images" - TAGS = "tags" - - value_trans = {'translation-type': 'VALUE'} - images_translator = { - 'translation-type': 'HDICT', - 'table-name': IMAGES, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'UUID of image', - 'translator': value_trans}, - {'fieldname': 'status', 'desc': 'The image status', - 'translator': value_trans}, - {'fieldname': 'name', - 'desc': 'Image Name', 'translator': value_trans}, - {'fieldname': 'container_format', - 'desc': 'The container format of image', - 'translator': value_trans}, - {'fieldname': 'created_at', - 'desc': 'The date and time when the resource was created', - 'translator': value_trans}, - {'fieldname': 'updated_at', - 'desc': 'The date and time when the resource was updated.', - 'translator': value_trans}, - {'fieldname': 'disk_format', - 'desc': 'The disk format of the image.', - 'translator': value_trans}, - {'fieldname': 'owner', - 'desc': 'The ID of the owner or tenant of the image', - 'translator': value_trans}, - {'fieldname': 'protected', - 'desc': 'Indicates whether the image can be deleted.', - 'translator': value_trans}, - {'fieldname': 'min_ram', - 'desc': 'minimum amount of RAM in MB required to boot the image', - 'translator': value_trans}, - {'fieldname': 'min_disk', - 'desc': 'minimum disk size in GB required to boot the image', - 'translator': value_trans}, - {'fieldname': 'checksum', 'desc': 'Hash of the image data used', - 'translator': value_trans}, - {'fieldname': 'size', - 'desc': 'The size of the image data, in bytes.', - 'translator': value_trans}, - {'fieldname': 'file', - 'desc': 'URL for the virtual machine image file', - 'translator': value_trans}, - {'fieldname': 'kernel_id', 'desc': 'kernel id', - 'translator': value_trans}, - {'fieldname': 'ramdisk_id', 'desc': 'ramdisk id', - 'translator': value_trans}, - {'fieldname': 'schema', - 'desc': 'URL for schema of the virtual machine image', - 'translator': value_trans}, - {'fieldname': 'visibility', 'desc': 'The image visibility', - 'translator': value_trans}, - {'fieldname': 'tags', - 'translator': {'translation-type': 'LIST', - 'table-name': TAGS, - 'val-col': 'tag', - 'val-col-desc': 'List of image tags', - 'parent-key': 'id', - 'parent-col-name': 'image_id', - 'parent-key-desc': 'UUID of image', - 'translator': value_trans}})} - - TRANSLATORS = [images_translator] - - def __init__(self, name='', args=None): - super(GlanceV2Driver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - self.creds = args - session = ds_utils.get_keystone_session(self.creds) - self.glance = glclient.Client(session=session) - self.add_executable_client_methods(self.glance, 'glanceclient.v2.') - self.initialize_update_methods() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'glancev2' - result['description'] = ('Datasource driver that interfaces with ' - 'OpenStack Images aka Glance.') - result['config'] = ds_utils.get_openstack_required_config() - 
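# lazy_tables is an optional config knob (note constants.OPTIONAL below);
# the intent, as an assumption from its use across these drivers, is to let
# deployers defer refreshing the listed tables until they are actually needed.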
result['config']['lazy_tables'] = constants.OPTIONAL - result['secret'] = ['password'] - return result - - def initialize_update_methods(self): - images_method = lambda: self._translate_images( - {'images': self.glance.images.list()}) - self.add_update_method(images_method, self.images_translator) - - @ds_utils.update_state_on_changed(IMAGES) - def _translate_images(self, obj): - """Translate the images represented by OBJ into tables.""" - LOG.debug("IMAGES: %s", str(dict(obj))) - row_data = GlanceV2Driver.convert_objs( - obj['images'], GlanceV2Driver.images_translator) - return row_data - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.glance, action, action_args) diff --git a/congress/datasources/heatv1_driver.py b/congress/datasources/heatv1_driver.py deleted file mode 100644 index 6489d85c..00000000 --- a/congress/datasources/heatv1_driver.py +++ /dev/null @@ -1,245 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import heatclient.v1.client as heatclient -from oslo_log import log as logging - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - -LOG = logging.getLogger(__name__) - - -class HeatV1Driver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - - STACKS = "stacks" - STACKS_LINKS = "stacks_links" - DEPLOYMENTS = "deployments" - DEPLOYMENT_OUTPUT_VALUES = "deployment_output_values" - RESOURCES = "resources" - RESOURCES_LINKS = "resources_links" - EVENTS = "events" - EVENTS_LINKS = "events_links" - - # TODO(thinrichs): add snapshots - value_trans = {'translation-type': 'VALUE'} - stacks_links_translator = { - 'translation-type': 'HDICT', - 'table-name': STACKS_LINKS, - 'parent-key': 'id', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'href', 'translator': value_trans}, - {'fieldname': 'rel', 'translator': value_trans})} - - stacks_translator = { - 'translation-type': 'HDICT', - 'table-name': STACKS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'stack_name', 'translator': value_trans}, - {'fieldname': 'description', 'translator': value_trans}, - {'fieldname': 'creation_time', 'translator': value_trans}, - {'fieldname': 'updated_time', 'translator': value_trans}, - {'fieldname': 'stack_status', 'translator': value_trans}, - {'fieldname': 'stack_status_reason', 'translator': value_trans}, - {'fieldname': 'stack_owner', 'translator': value_trans}, - {'fieldname': 'parent', 'translator': value_trans}, - {'fieldname': 'links', 'translator': stacks_links_translator})} - - 
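# A hand-rolled illustrative sketch (not Congress's convert_objs) of what the
# stacks_links child translator above produces conceptually: parent-key 'id'
# ties each nested link back to its stack, yielding one stacks_links row per
# element of the 'links' list. The exact column order is an assumption here.
stack = {'id': 'abc123', 'stack_name': 'demo',
         'links': [{'href': 'http://heat/v1/stacks/abc123', 'rel': 'self'}]}
links_rows = [(stack['id'], link['href'], link['rel'])
              for link in stack['links']]
# links_rows == [('abc123', 'http://heat/v1/stacks/abc123', 'self')]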
deployments_output_values_translator = { - 'translation-type': 'HDICT', - 'table-name': DEPLOYMENT_OUTPUT_VALUES, - 'parent-key': 'id', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'deploy_stdout', 'translator': value_trans}, - {'fieldname': 'deploy_stderr', 'translator': value_trans}, - {'fieldname': 'deploy_status_code', 'translator': value_trans}, - {'fieldname': 'result', 'translator': value_trans})} - - software_deployment_translator = { - 'translation-type': 'HDICT', - 'table-name': DEPLOYMENTS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'status', 'translator': value_trans}, - {'fieldname': 'server_id', 'translator': value_trans}, - {'fieldname': 'config_id', 'translator': value_trans}, - {'fieldname': 'action', 'translator': value_trans}, - {'fieldname': 'status_reason', 'translator': value_trans}, - {'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'output_values', - 'translator': deployments_output_values_translator})} - - resources_links_translator = { - 'translation-type': 'HDICT', - 'table-name': RESOURCES_LINKS, - 'parent-key': 'physical_resource_id', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'href', 'translator': value_trans}, - {'fieldname': 'rel', 'translator': value_trans})} - - resources_translator = { - 'translation-type': 'HDICT', - 'table-name': RESOURCES, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'physical_resource_id', 'translator': value_trans}, - {'fieldname': 'logical_resource_id', 'translator': value_trans}, - {'fieldname': 'stack_id', 'translator': value_trans}, - {'fieldname': 'resource_name', 'translator': value_trans}, - {'fieldname': 'resource_type', 'translator': value_trans}, - {'fieldname': 'creation_time', 'translator': value_trans}, - {'fieldname': 'updated_time', 'translator': value_trans}, - {'fieldname': 'resource_status', 'translator': value_trans}, - {'fieldname': 'resource_status_reason', 'translator': value_trans}, - {'fieldname': 'links', 'translator': resources_links_translator})} - - events_links_translator = { - 'translation-type': 'HDICT', - 'table-name': EVENTS_LINKS, - 'parent-key': 'id', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'href', 'translator': value_trans}, - {'fieldname': 'rel', 'translator': value_trans})} - - events_translator = { - 'translation-type': 'HDICT', - 'table-name': EVENTS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'physical_resource_id', 'translator': value_trans}, - {'fieldname': 'logical_resource_id', 'translator': value_trans}, - {'fieldname': 'stack_id', 'translator': value_trans}, - {'fieldname': 'resource_name', 'translator': value_trans}, - {'fieldname': 'event_time', 'translator': value_trans}, - {'fieldname': 'resource_status', 'translator': value_trans}, - {'fieldname': 'resource_status_reason', 'translator': value_trans}, - {'fieldname': 'links', 'translator': events_links_translator})} - - TRANSLATORS = [stacks_translator, software_deployment_translator, - resources_translator, events_translator] - - def __init__(self, name='', args=None): - super(HeatV1Driver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - self.creds = args - session = ds_utils.get_keystone_session(self.creds) - endpoint = session.get_endpoint(service_type='orchestration', - interface='publicURL') - 
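# hand both the shared auth session and the resolved public orchestration
# endpoint to the Heat client, which expects the endpoint explicitly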
self.heat = heatclient.Client(session=session, endpoint=endpoint) - self.initialize_update_methods() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'heat' - result['description'] = ('Datasource driver that interfaces with' - ' OpenStack orchestration aka heat.') - result['config'] = ds_utils.get_openstack_required_config() - result['config']['lazy_tables'] = constants.OPTIONAL - result['secret'] = ['password'] - return result - - def initialize_update_methods(self): - stacks_method = lambda: self._translate_stacks( - {'stacks': self.heat.stacks.list()}) - self.add_update_method(stacks_method, self.stacks_translator) - - resources_method = lambda: self._translate_resources( - self._get_resources(self.heat.stacks.list())) - self.add_update_method(resources_method, self.resources_translator) - - events_method = lambda: self._translate_events( - self._get_events(self.heat.stacks.list())) - self.add_update_method(events_method, self.events_translator) - - deployments_method = lambda: self._translate_software_deployment( - {'deployments': self.heat.software_deployments.list()}) - self.add_update_method(deployments_method, - self.software_deployment_translator) - - def _get_resources(self, stacks): - rval = [] - for stack in stacks: - resources = self.heat.resources.list(stack.id) - for resource in resources: - resource = resource.to_dict() - resource['stack_id'] = stack.id - rval.append(resource) - return {'resources': rval} - - def _get_events(self, stacks): - rval = [] - for stack in stacks: - events = self.heat.events.list(stack.id) - for event in events: - event = event.to_dict() - event['stack_id'] = stack.id - rval.append(event) - return {'events': rval} - - @ds_utils.update_state_on_changed(STACKS) - def _translate_stacks(self, obj): - """Translate the stacks represented by OBJ into tables.""" - LOG.debug("STACKS: %s", str(dict(obj))) - row_data = HeatV1Driver.convert_objs( - obj['stacks'], HeatV1Driver.stacks_translator) - return row_data - - @ds_utils.update_state_on_changed(DEPLOYMENTS) - def _translate_software_deployment(self, obj): - """Translate the stacks represented by OBJ into tables.""" - LOG.debug("Software Deployments: %s", str(dict(obj))) - row_data = HeatV1Driver.convert_objs( - obj['deployments'], HeatV1Driver.software_deployment_translator) - return row_data - - @ds_utils.update_state_on_changed(RESOURCES) - def _translate_resources(self, obj): - """Translate the resources represented by OBJ into tables.""" - LOG.debug("Resources: %s", str(dict(obj))) - row_data = HeatV1Driver.convert_objs( - obj['resources'], HeatV1Driver.resources_translator) - return row_data - - @ds_utils.update_state_on_changed(EVENTS) - def _translate_events(self, obj): - """Translate the events represented by OBJ into tables.""" - LOG.debug("Events: %s", str(dict(obj))) - row_data = HeatV1Driver.convert_objs( - obj['events'], HeatV1Driver.events_translator) - return row_data - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.heat, action, action_args) diff --git a/congress/datasources/ironic_driver.py b/congress/datasources/ironic_driver.py deleted file mode 100644 index 866b2a04..00000000 --- a/congress/datasources/ironic_driver.py +++ /dev/null @@ -1,221 +0,0 @@ -# Copyright (c) 2015 Intel Corporation. 
All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from ironicclient import client -import six - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - - -class IronicDriver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - CHASSISES = "chassises" - NODES = "nodes" - NODE_PROPERTIES = "node_properties" - PORTS = "ports" - DRIVERS = "drivers" - ACTIVE_HOSTS = "active_hosts" - - # This is the most common per-value translator, so define it once here. - value_trans = {'translation-type': 'VALUE'} - - def safe_id(x): - if isinstance(x, six.string_types): - return x - try: - return x['id'] - except KeyError: - return str(x) - - def safe_port_extra(x): - try: - return x['vif_port_id'] - except KeyError: - return "" - - chassises_translator = { - 'translation-type': 'HDICT', - 'table-name': CHASSISES, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'uuid', 'col': 'id', 'translator': value_trans}, - {'fieldname': 'created_at', 'translator': value_trans}, - {'fieldname': 'updated_at', 'translator': value_trans})} - - nodes_translator = { - 'translation-type': 'HDICT', - 'table-name': NODES, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'uuid', 'col': 'id', - 'desc': '', 'translator': value_trans}, - {'fieldname': 'chassis_uuid', 'desc': '', - 'col': 'owner_chassis', 'translator': value_trans}, - {'fieldname': 'power_state', 'desc': '', - 'translator': value_trans}, - {'fieldname': 'maintenance', 'desc': '', - 'translator': value_trans}, - {'fieldname': 'properties', 'desc': '', - 'translator': - {'translation-type': 'HDICT', - 'table-name': NODE_PROPERTIES, - 'parent-key': 'id', - 'parent-col-name': 'properties', - 'selector-type': 'DICT_SELECTOR', - 'in-list': False, - 'field-translators': - ({'fieldname': 'memory_mb', - 'translator': value_trans}, - {'fieldname': 'cpu_arch', - 'translator': value_trans}, - {'fieldname': 'local_gb', - 'translator': value_trans}, - {'fieldname': 'cpus', - 'translator': value_trans})}}, - {'fieldname': 'driver', 'translator': value_trans}, - {'fieldname': 'instance_uuid', 'col': 'running_instance', - 'translator': value_trans}, - {'fieldname': 'created_at', 'translator': value_trans}, - {'fieldname': 'provision_updated_at', 'translator': value_trans}, - {'fieldname': 'updated_at', 'translator': value_trans})} - - ports_translator = { - 'translation-type': 'HDICT', - 'table-name': PORTS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'uuid', 'col': 'id', 'translator': value_trans}, - {'fieldname': 'node_uuid', 'col': 'owner_node', - 'translator': value_trans}, - {'fieldname': 'address', 'col': 'mac_address', - 'translator': value_trans}, - {'fieldname': 'extra', 'col': 'vif_port_id', 'translator': - 
{'translation-type': 'VALUE', - 'extract-fn': safe_port_extra}}, - {'fieldname': 'created_at', 'translator': value_trans}, - {'fieldname': 'updated_at', 'translator': value_trans})} - - drivers_translator = { - 'translation-type': 'HDICT', - 'table-name': DRIVERS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'hosts', 'translator': - {'translation-type': 'LIST', - 'table-name': ACTIVE_HOSTS, - 'parent-key': 'name', - 'parent-col-name': 'name', - 'val-col': 'hosts', - 'translator': - {'translation-type': 'VALUE'}}})} - - TRANSLATORS = [chassises_translator, nodes_translator, ports_translator, - drivers_translator] - - def __init__(self, name='', args=None): - super(IronicDriver, self).__init__(name, args) - datasource_driver.ExecutionDriver.__init__(self) - self.creds = self.get_ironic_credentials(args) - session = ds_utils.get_keystone_session(self.creds) - self.ironic_client = client.get_client( - api_version=self.creds.get('api_version', '1'), session=session) - self.add_executable_client_methods(self.ironic_client, - 'ironicclient.v1.') - self.initialize_update_methods() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'ironic' - result['description'] = ('Datasource driver that interfaces with ' - 'OpenStack bare metal aka ironic.') - result['config'] = ds_utils.get_openstack_required_config() - result['config']['lazy_tables'] = constants.OPTIONAL - result['secret'] = ['password'] - return result - - def get_ironic_credentials(self, creds): - d = {} - d['api_version'] = '1' - d['insecure'] = False - # save a copy to renew auth token - d['username'] = creds['username'] - d['password'] = creds['password'] - d['auth_url'] = creds['auth_url'] - d['tenant_name'] = creds['tenant_name'] - # ironicclient.get_client() uses different names - d['os_username'] = creds['username'] - d['os_password'] = creds['password'] - d['os_auth_url'] = creds['auth_url'] - d['os_tenant_name'] = creds['tenant_name'] - return d - - def initialize_update_methods(self): - chassises_method = lambda: self._translate_chassises( - self.ironic_client.chassis.list(detail=True, limit=0)) - self.add_update_method(chassises_method, self.chassises_translator) - - nodes_method = lambda: self._translate_nodes( - self.ironic_client.node.list(detail=True, limit=0)) - self.add_update_method(nodes_method, self.nodes_translator) - - ports_method = lambda: self._translate_ports( - self.ironic_client.port.list(detail=True, limit=0)) - self.add_update_method(ports_method, self.ports_translator) - - drivers_method = lambda: self._translate_drivers( - self.ironic_client.driver.list()) - self.add_update_method(drivers_method, self.drivers_translator) - - @ds_utils.update_state_on_changed(CHASSISES) - def _translate_chassises(self, obj): - row_data = IronicDriver.convert_objs(obj, - IronicDriver.chassises_translator) - return row_data - - @ds_utils.update_state_on_changed(NODES) - def _translate_nodes(self, obj): - row_data = IronicDriver.convert_objs(obj, - IronicDriver.nodes_translator) - return row_data - - @ds_utils.update_state_on_changed(PORTS) - def _translate_ports(self, obj): - row_data = IronicDriver.convert_objs(obj, - IronicDriver.ports_translator) - return row_data - - @ds_utils.update_state_on_changed(DRIVERS) - def _translate_drivers(self, obj): - row_data = IronicDriver.convert_objs(obj, - IronicDriver.drivers_translator) - return row_data - - def execute(self, action, action_args): - 
"""Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.ironic_client, action, action_args) diff --git a/congress/datasources/keystone_driver.py b/congress/datasources/keystone_driver.py deleted file mode 100644 index 2f2aa9a1..00000000 --- a/congress/datasources/keystone_driver.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import keystoneclient.v2_0.client - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - - -class KeystoneDriver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - # Table names - USERS = "users" - ROLES = "roles" - TENANTS = "tenants" - - # This is the most common per-value translator, so define it once here. - value_trans = {'translation-type': 'VALUE'} - - users_translator = { - 'translation-type': 'HDICT', - 'table-name': USERS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'username', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'enabled', 'translator': value_trans}, - {'fieldname': 'tenantId', 'translator': value_trans}, - {'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'email', 'translator': value_trans})} - - roles_translator = { - 'translation-type': 'HDICT', - 'table-name': ROLES, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans})} - - tenants_translator = { - 'translation-type': 'HDICT', - 'table-name': TENANTS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'enabled', 'translator': value_trans}, - {'fieldname': 'description', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'id', 'translator': value_trans})} - - TRANSLATORS = [users_translator, roles_translator, tenants_translator] - - def __init__(self, name='', args=None): - super(KeystoneDriver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - self.creds = self.get_keystone_credentials_v2(args) - self.client = keystoneclient.v2_0.client.Client(**self.creds) - self.add_executable_client_methods(self.client, - 'keystoneclient.v2_0.client') - self.initialize_update_methods() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'keystone' - result['description'] = ('Datasource driver that interfaces with ' - 'keystone.') - result['config'] = ds_utils.get_openstack_required_config() - result['config']['lazy_tables'] = 
constants.OPTIONAL - result['secret'] = ['password'] - return result - - def get_keystone_credentials_v2(self, args): - creds = args - d = {} - d['version'] = '2' - d['username'] = creds['username'] - d['password'] = creds['password'] - d['auth_url'] = creds['auth_url'] - d['tenant_name'] = creds['tenant_name'] - return d - - def initialize_update_methods(self): - users_method = lambda: self._translate_users(self.client.users.list()) - self.add_update_method(users_method, self.users_translator) - - roles_method = lambda: self._translate_roles(self.client.roles.list()) - self.add_update_method(roles_method, self.roles_translator) - - tenants_method = lambda: self._translate_tenants( - self.client.tenants.list()) - self.add_update_method(tenants_method, self.tenants_translator) - - @ds_utils.update_state_on_changed(USERS) - def _translate_users(self, obj): - row_data = KeystoneDriver.convert_objs(obj, - KeystoneDriver.users_translator) - return row_data - - @ds_utils.update_state_on_changed(ROLES) - def _translate_roles(self, obj): - row_data = KeystoneDriver.convert_objs(obj, - KeystoneDriver.roles_translator) - return row_data - - @ds_utils.update_state_on_changed(TENANTS) - def _translate_tenants(self, obj): - row_data = KeystoneDriver.convert_objs( - obj, KeystoneDriver.tenants_translator) - return row_data - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.client, action, action_args) diff --git a/congress/datasources/keystonev3_driver.py b/congress/datasources/keystonev3_driver.py deleted file mode 100644 index 463e9ecc..00000000 --- a/congress/datasources/keystonev3_driver.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright (c) 2016 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from keystoneclient.v3 import client - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - - -class KeystoneV3Driver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - # Table names - USERS = "users" - ROLES = "roles" - PROJECTS = "projects" - DOMAINS = "domains" - - # This is the most common per-value translator, so define it once here. 
- value_trans = {'translation-type': 'VALUE'} - - users_translator = { - 'translation-type': 'HDICT', - 'table-name': USERS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'The ID for the user.', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'username, unique within domain', - 'translator': value_trans}, - {'fieldname': 'enabled', 'desc': 'user is enabled or not', - 'translator': value_trans}, - {'fieldname': 'default_project_id', - 'desc': 'ID of the default project for the user', - 'translator': value_trans}, - {'fieldname': 'domain_id', - 'desc': 'The ID of the domain for the user.', - 'translator': value_trans})} - - roles_translator = { - 'translation-type': 'HDICT', - 'table-name': ROLES, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'role ID', 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'role name', - 'translator': value_trans})} - - projects_translator = { - 'translation-type': 'HDICT', - 'table-name': PROJECTS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'enabled', 'desc': 'project is enabled or not', - 'translator': value_trans}, - {'fieldname': 'description', 'desc': 'project description', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'project name', - 'translator': value_trans}, - {'fieldname': 'domain_id', - 'desc': 'The ID of the domain for the project', - 'translator': value_trans}, - {'fieldname': 'id', 'desc': 'ID for the project', - 'translator': value_trans})} - - domains_translator = { - 'translation-type': 'HDICT', - 'table-name': DOMAINS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'enabled', 'desc': 'domain is enabled or disabled', - 'translator': value_trans}, - {'fieldname': 'description', 'desc': 'domain description', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'domain name', - 'translator': value_trans}, - {'fieldname': 'id', 'desc': 'domain ID', - 'translator': value_trans})} - - TRANSLATORS = [users_translator, roles_translator, projects_translator, - domains_translator] - - def __init__(self, name='', args=None): - super(KeystoneV3Driver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - self.creds = args - session = ds_utils.get_keystone_session(args) - self.client = client.Client(session=session) - self.add_executable_client_methods(self.client, - 'keystoneclient.v3.client') - self.initialize_update_methods() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'keystonev3' - result['description'] = ('Datasource driver that interfaces with ' - 'keystone.') - result['config'] = ds_utils.get_openstack_required_config() - result['config']['lazy_tables'] = constants.OPTIONAL - result['secret'] = ['password'] - return result - - def initialize_update_methods(self): - users_method = lambda: self._translate_users(self.client.users.list()) - self.add_update_method(users_method, self.users_translator) - - roles_method = lambda: self._translate_roles(self.client.roles.list()) - self.add_update_method(roles_method, self.roles_translator) - - projects_method = lambda: self._translate_projects( - self.client.projects.list()) - self.add_update_method(projects_method, self.projects_translator) - - domains_method = lambda: self._translate_domains( - self.client.domains.list()) - self.add_update_method(domains_method, self.domains_translator) - - 
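# A minimal standalone sketch (hypothetical names, not the Congress
# PollingDataSourceDriver API) of the registration pattern used by
# initialize_update_methods above: each table contributes a zero-argument
# callable, and a poll cycle simply invokes every registered callable.
class TinyPoller(object):
    def __init__(self):
        self._updaters = []

    def add_update_method(self, method):
        self._updaters.append(method)

    def poll(self):
        for method in self._updaters:
            method()

poller = TinyPoller()
refreshed = []
poller.add_update_method(lambda: refreshed.append('users'))
poller.poll()
assert refreshed == ['users']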
@ds_utils.update_state_on_changed(USERS) - def _translate_users(self, obj): - row_data = KeystoneV3Driver.convert_objs( - obj, KeystoneV3Driver.users_translator) - return row_data - - @ds_utils.update_state_on_changed(ROLES) - def _translate_roles(self, obj): - row_data = KeystoneV3Driver.convert_objs( - obj, KeystoneV3Driver.roles_translator) - return row_data - - @ds_utils.update_state_on_changed(PROJECTS) - def _translate_projects(self, obj): - row_data = KeystoneV3Driver.convert_objs( - obj, KeystoneV3Driver.projects_translator) - return row_data - - @ds_utils.update_state_on_changed(DOMAINS) - def _translate_domains(self, obj): - row_data = KeystoneV3Driver.convert_objs( - obj, KeystoneV3Driver.domains_translator) - return row_data - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.client, action, action_args) diff --git a/congress/datasources/monasca_driver.py b/congress/datasources/monasca_driver.py deleted file mode 100644 index c9269a3a..00000000 --- a/congress/datasources/monasca_driver.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) 2015 Cisco. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import datetime - -import keystoneclient.v3.client as ksclient -from monascaclient import client as monasca_client -from oslo_log import log as logging - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - -LOG = logging.getLogger(__name__) - - -# TODO(thinrichs): figure out how to move even more of this boilerplate -# into DataSourceDriver. E.g. change all the classes to Driver instead of -# NeutronDriver, CeilometerDriver, etc. and move the d6instantiate function -# to DataSourceDriver. 
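# The boilerplate the TODO above laments includes the execute() dispatch that
# every driver repeats verbatim; a shared helper could host it once. A
# hypothetical sketch (generic_execute is not an existing Congress name):
def generic_execute(driver, client, action, action_args):
    # prefer a method defined on the driver itself
    func = getattr(driver, action, None)
    if func and driver.is_executable(func):
        func(action_args)
    else:
        # otherwise fall back to a raw client API call
        driver._execute_api(client, action, action_args)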
-class MonascaDriver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - - METRICS = "metrics" - DIMENSIONS = "dimensions" - STATISTICS = "statistics" - DATA = "statistics.data" - # TODO(fabiog): add events and logs when fully supported in Monasca - # EVENTS = "events" - # LOGS = "logs" - - value_trans = {'translation-type': 'VALUE'} - - metric_translator = { - 'translation-type': 'HDICT', - 'table-name': METRICS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'dimensions', - 'translator': {'translation-type': 'VDICT', - 'table-name': DIMENSIONS, - 'id-col': 'id', - 'key-col': 'key', 'val-col': 'value', - 'translator': value_trans}}) - } - - statistics_translator = { - 'translation-type': 'HDICT', - 'table-name': STATISTICS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'statistics', - 'translator': {'translation-type': 'LIST', - 'table-name': DATA, - 'id-col': 'name', - 'val-col': 'value_col', - 'translator': value_trans}}) - } - - TRANSLATORS = [metric_translator, statistics_translator] - - def __init__(self, name='', args=None): - super(MonascaDriver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - self.creds = args - if not self.creds.get('project_name'): - self.creds['project_name'] = self.creds['tenant_name'] - - if not self.creds.get('poll_time'): - # set default polling time to 1hr - self.creds['poll_time'] = 3600 - - # Monasca uses Keystone V3 - self.creds['auth_url'] = self.creds['auth_url'].replace("v2.0", "v3") - self.keystone = ksclient.Client(**self.creds) - self.creds['token'] = self.keystone.auth_token - - if not self.creds.get('endpoint'): - # if the endpoint not defined retrieved it from keystone catalog - self.creds['endpoint'] = self.keystone.service_catalog.url_for( - service_type='monitoring', endpoint_type='publicURL') - - self.monasca = monasca_client.Client('2_0', **self.creds) - self.add_executable_client_methods(self.monasca, 'monascaclient.') - self.initialize_update_methods() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'monasca' - result['description'] = ('Datasource driver that interfaces with ' - 'monasca.') - result['config'] = ds_utils.get_openstack_required_config() - result['config']['lazy_tables'] = constants.OPTIONAL - result['secret'] = ['password'] - return result - - def initialize_update_methods(self): - metrics_method = lambda: self._translate_metric( - self.monasca.metrics.list()) - self.add_update_method(metrics_method, self.metric_translator) - - statistics_method = self.update_statistics - self.add_update_method(statistics_method, self.statistics_translator) - - def update_statistics(self): - today = datetime.datetime.now() - yesterday = datetime.timedelta(hours=24) - start_from = datetime.datetime.isoformat(today-yesterday) - - for metric in self.monasca.metrics.list_names(): - LOG.debug("Monasca statistics for metric %s", metric['name']) - _query_args = dict( - start_time=start_from, - name=metric['name'], - statistics='avg', - period=int(self.creds['poll_time']), - merge_metrics='true') - statistics = self.monasca.metrics.list_statistics( - **_query_args) - self._translate_statistics(statistics) - - @ds_utils.update_state_on_changed(METRICS) - def _translate_metric(self, obj): - """Translate the metrics 
represented by OBJ into tables.""" - LOG.debug("METRIC: %s", str(obj)) - - row_data = MonascaDriver.convert_objs(obj, - self.metric_translator) - return row_data - - @ds_utils.update_state_on_changed(STATISTICS) - def _translate_statistics(self, obj): - """Translate the metrics represented by OBJ into tables.""" - - LOG.debug("STATISTICS: %s", str(obj)) - - row_data = MonascaDriver.convert_objs(obj, - self.statistics_translator) - return row_data - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.monasca, action, action_args) diff --git a/congress/datasources/murano_classes.py b/congress/datasources/murano_classes.py deleted file mode 100644 index 7fef7ddd..00000000 --- a/congress/datasources/murano_classes.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) 2015 Hewlett-Packard. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_log import log as logging - -logger = logging.getLogger(__name__) - - -class IOMuranoObject(object): - name = 'io.murano.Object' - - @classmethod - def is_class_type(cls, name): - if name == cls.name: - return True - else: - return False - - @classmethod - def get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - return [cls.name] - - -class IOMuranoEnvironment(IOMuranoObject): - name = 'io.murano.Environment' - - @classmethod - def get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - types = IOMuranoObject.get_parent_types() - types.append(cls.name) - return types - - -class IOMuranoResourcesInstance(IOMuranoObject): - name = 'io.murano.resources.Instance' - - @classmethod - def get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - types = IOMuranoObject.get_parent_types() - types.append(cls.name) - return types - - -class IOMuranoResourcesLinuxInstance(IOMuranoResourcesInstance): - name = 'io.murano.resources.LinuxInstance' - - @classmethod - def get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - types = IOMuranoResourcesInstance.get_parent_types() - types.append(cls.name) - return types - - -class IOMuranoResourcesLinuxMuranoInstance(IOMuranoResourcesLinuxInstance): - name = 'io.murano.resources.LinuxMuranoInstance' - - @classmethod - def get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - types = IOMuranoResourcesLinuxInstance.get_parent_types() - types.append(cls.name) - return types - - -class IOMuranoResourcesWindowsInstance(IOMuranoResourcesInstance): - name = 'io.murano.resources.WindowsInstance' - - @classmethod - def 
get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - types = IOMuranoResourcesInstance.get_parent_types() - types.append(cls.name) - return types - - -class IOMuranoResourcesNetwork(IOMuranoObject): - name = 'io.murano.resources.Network' - - @classmethod - def get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - types = IOMuranoObject.get_parent_types() - types.append(cls.name) - return types - - -class IOMuranoResourcesNeutronNetwork(IOMuranoResourcesNetwork): - name = 'io.murano.resources.NeutronNetwork' - - @classmethod - def get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - types = IOMuranoResourcesNetwork.get_parent_types() - types.append(cls.name) - return types - - -class IOMuranoApplication(IOMuranoObject): - name = 'io.murano.Application' - - @classmethod - def get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - types = IOMuranoObject.get_parent_types() - types.append(cls.name) - return types - - -class IOMuranoApps(IOMuranoApplication): - # This is a common class for all applications - # name should be set to actual apps type before use - # (e.g io.murano.apps.apache.ApacheHttpServer) - name = None - - @classmethod - def get_parent_types(cls, class_name=None): - if class_name and not cls.is_class_type(class_name): - return [] - types = IOMuranoApplication.get_parent_types() - types.append(cls.name) - return types diff --git a/congress/datasources/murano_driver.py b/congress/datasources/murano_driver.py deleted file mode 100644 index 6c2000c3..00000000 --- a/congress/datasources/murano_driver.py +++ /dev/null @@ -1,510 +0,0 @@ -# Copyright (c) 2015 Hewlett-Packard. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import inspect - -import muranoclient.client -from muranoclient.common import exceptions as murano_exceptions -from oslo_log import log as logging -from oslo_utils import uuidutils -import six - -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils -from congress.datasources import murano_classes -from congress import utils - - -logger = logging.getLogger(__name__) - - -class MuranoDriver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - OBJECTS = "objects" - PARENT_TYPES = "parent_types" - PROPERTIES = "properties" - RELATIONSHIPS = "relationships" - CONNECTED = "connected" - STATES = "states" - ACTIONS = "actions" - UNUSED_PKG_PROPERTIES = ['id', 'owner_id', 'description'] - UNUSED_ENV_PROPERTIES = ['id', 'tenant_id'] - APPS_TYPE_PREFIXES = ['io.murano.apps', 'io.murano.databases'] - - def __init__(self, name='', args=None): - super(MuranoDriver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - self.creds = args - session = datasource_utils.get_keystone_session(self.creds) - client_version = "1" - self.murano_client = muranoclient.client.Client( - client_version, session=session, endpoint_type='publicURL', - service_type='application-catalog') - self.add_executable_client_methods( - self.murano_client, - 'muranoclient.v1.') - logger.debug("Successfully created murano_client") - - self.action_call_returns = [] - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'murano' - result['description'] = ('Datasource driver that interfaces with ' - 'murano') - result['config'] = datasource_utils.get_openstack_required_config() - result['secret'] = ['password'] - return result - - def update_from_datasource(self): - """Called when it is time to pull new data from this datasource. - - Sets self.state[tablename] = - for every tablename exported by this datasource. - """ - self.state[self.STATES] = set() - self.state[self.OBJECTS] = set() - self.state[self.PROPERTIES] = set() - self.state[self.PARENT_TYPES] = set() - self.state[self.RELATIONSHIPS] = set() - self.state[self.CONNECTED] = set() - self.state[self.ACTIONS] = dict() - - # Workaround for 401 error issue - try: - # Moves _translate_packages above translate_services to - # make use of properties table in translate_services - logger.debug("Murano grabbing packages") - packages = self.murano_client.packages.list() - self._translate_packages(packages) - - logger.debug("Murano grabbing environments") - environments = self.murano_client.environments.list() - self._translate_environments(environments) - self._translate_services(environments) - self._translate_deployments(environments) - self._translate_connected() - except murano_exceptions.HTTPException: - raise - - @classmethod - def get_schema(cls): - """Returns a dictionary of table schema. - - The dictionary mapping tablenames to the list of column names - for that table. Both tablenames and columnnames are strings. - """ - d = {} - d[cls.OBJECTS] = ('object_id', 'owner_id', 'type') - # parent_types include not only the type of object's immediate - # parent but also all of its ancestors and its own type. The - # additional info helps writing better datalog rules. 
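# e.g. an io.murano.resources.LinuxMuranoInstance object yields parent_types
# rows for LinuxInstance, Instance and io.murano.Object as well as its own
# type (see the murano_classes hierarchy above)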
- d[cls.PARENT_TYPES] = ('id', 'parent_type') - d[cls.PROPERTIES] = ('owner_id', 'name', 'value') - d[cls.RELATIONSHIPS] = ('source_id', 'target_id', 'name') - d[cls.CONNECTED] = ('source_id', 'target_id') - d[cls.STATES] = ('id', 'state') - return d - - def _translate_environments(self, environments): - """Translate the environments into tables. - - Assigns self.state[tablename] for all those TABLENAMEs - generated from environments - """ - logger.debug("_translate_environments: %s", environments) - if not environments: - return - self.state[self.STATES] = set() - if self.OBJECTS not in self.state: - self.state[self.OBJECTS] = set() - if self.PROPERTIES not in self.state: - self.state[self.PROPERTIES] = set() - if self.PARENT_TYPES not in self.state: - self.state[self.PARENT_TYPES] = set() - if self.RELATIONSHIPS not in self.state: - self.state[self.RELATIONSHIPS] = set() - if self.CONNECTED not in self.state: - self.state[self.CONNECTED] = set() - - env_type = 'io.murano.Environment' - for env in environments: - self.state[self.OBJECTS].add( - (env.id, env.tenant_id, env_type)) - self.state[self.STATES].add((env.id, env.status)) - parent_types = self._get_parent_types(env_type) - self._add_parent_types(env.id, parent_types) - for key, value in env.to_dict().items(): - if key in self.UNUSED_ENV_PROPERTIES: - continue - self._add_properties(env.id, key, value) - - def _translate_services(self, environments): - """Translate the environment services into tables. - - Assigns self.state[tablename] for all those TABLENAMEs - generated from services - """ - logger.debug("Murano grabbing environments services") - if not environments: - return - for env in environments: - services = self.murano_client.services.list(env.id) - self._translate_environment_services(services, env.id) - - def _translate_environment_services(self, services, env_id): - """Translate the environment services into tables. - - Assigns self.state[tablename] for all those TABLENAMEs - generated from services - """ - - # clean actions for given environment - if self.ACTIONS not in self.state: - self.state[self.ACTIONS] = dict() - env_actions = self.state[self.ACTIONS][env_id] = set() - - if not services: - return - for s in services: - s_dict = s.to_dict() - s_id = s_dict['?']['id'] - s_type = s_dict['?']['type'] - self.state[self.OBJECTS].add((s_id, env_id, s_type)) - for key, value in s_dict.items(): - if key in ['instance', '?']: - continue - self._add_properties(s_id, key, value) - self._add_relationships(s_id, key, value) - - parent_types = self._get_parent_types(s_type) - self._add_parent_types(s_id, parent_types) - self._add_relationships(env_id, 'services', s_id) - self._translate_service_action(s_dict, env_actions) - - if 'instance' not in s_dict: - continue - # populate service instance - si_dict = s.instance - si_id = si_dict['?']['id'] - si_type = si_dict['?']['type'] - self.state[self.OBJECTS].add((si_id, s_id, si_type)) - - for key, value in si_dict.items(): - if key in ['?']: - continue - self._add_properties(si_id, key, value) - if key not in ['image']: - # there's no murano image object in the environment, - # therefore glance 'image' relationship is irrelevant - # at this point. 
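# note: _add_relationships filters on its own; only UUID-like string
# values actually become (source_id, target_id, name) edges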
- self._add_relationships(si_id, key, value) - # There's a relationship between the service and instance - self._add_relationships(s_id, 'instance', si_id) - - parent_types = self._get_parent_types(si_type) - self._add_parent_types(si_id, parent_types) - self._translate_service_action(si_dict, env_actions) - - def _translate_service_action(self, obj_dict, env_actions): - """Translates environment's object actions to env_actions structure. - - env_actions: [(obj_id, action_id, action_name, enabled)] - :param obj_dict: object dictionary - :param env_actions: set of environment actions - """ - obj_id = obj_dict['?']['id'] - if '_actions' in obj_dict['?']: - o_actions = obj_dict['?']['_actions'] - if not o_actions: - return - for action_id, action_value in o_actions.items(): - action_name = action_value.get('name', '') - enabled = action_value.get('enabled', False) - action = (obj_id, action_id, action_name, enabled) - env_actions.add(action) - # TODO(tranldt): support action arguments. - # If action arguments are included in '_actions', - # they can be populated into tables. - - def _translate_deployments(self, environments): - """Translate the environment deployments into tables. - - Assigns self.state[tablename] for all those TABLENAMEs - generated from deployments - """ - if not environments: - return - for env in environments: - deployments = self.murano_client.deployments.list(env.id) - self._translate_environment_deployments(deployments, env.id) - - def _translate_environment_deployments(self, deployments, env_id): - """Translate the environment deployments into tables. - - Assigns self.state[tablename] for all those TABLENAMEs - generated from deployments - """ - if not deployments: - return - for d in deployments: - if 'defaultNetworks' not in d.description: - continue - default_networks = d.description['defaultNetworks'] - net_id = None - if 'environment' in default_networks: - net_id = default_networks['environment']['?']['id'] - net_type = default_networks['environment']['?']['type'] - self.state[self.OBJECTS].add((net_id, env_id, net_type)) - - parent_types = self._get_parent_types(net_type) - self._add_parent_types(net_id, parent_types) - - for key, value in default_networks['environment'].items(): - if key in ['?']: - continue - self._add_properties(net_id, key, value) - - if not net_id: - continue - self._add_relationships(env_id, 'defaultNetworks', net_id) - for key, value in default_networks.items(): - if key in ['environment']: - # data from environment already populated - continue - new_key = 'defaultNetworks.' + key - self._add_properties(net_id, new_key, value) - # services from deployment are not of interest because the same - # info is obtained from services API - - def _translate_packages(self, packages): - """Translate the packages into tables. 
- - Assigns self.state[tablename] for all those TABLENAMEs - generated from packages/applications - """ - # packages is a generator type - if not packages: - return - if self.OBJECTS not in self.state: - self.state[self.OBJECTS] = set() - if self.PROPERTIES not in self.state: - self.state[self.PROPERTIES] = set() - - for pkg in packages: - logger.debug("pkg=%s", pkg.to_dict()) - pkg_type = pkg.type - if pkg.type == 'Application': - pkg_type = 'io.murano.Application' - self.state[self.OBJECTS].add((pkg.id, pkg.owner_id, pkg_type)) - - for key, value in pkg.to_dict().items(): - if key in self.UNUSED_PKG_PROPERTIES: - continue - self._add_properties(pkg.id, key, value) - - def _add_properties(self, obj_id, key, value): - """Add a set of (obj_id, key, value) to properties table. - - :param obj_id: uuid of object - :param key: property name. For the case value is a list, the - same key is used for multiple values. - :param value: property value. If value is a dict, the nested - properties will be mapped using dot notation. - """ - if value is None or value == '': - return - if isinstance(value, dict): - for k, v in value.items(): - new_key = key + "." + k - self._add_properties(obj_id, new_key, v) - elif isinstance(value, list): - if len(value) == 0: - return - for item in value: - self.state[self.PROPERTIES].add( - (obj_id, key, utils.value_to_congress(item))) - else: - self.state[self.PROPERTIES].add( - (obj_id, key, utils.value_to_congress(value))) - - def _add_relationships(self, obj_id, key, value): - """Add a set of (obj_id, value, key) to relationships table. - - :param obj_id: source uuid - :param key: relationship name - :param value: target uuid - """ - if (not isinstance(value, six.string_types) or - not uuidutils.is_uuid_like(value)): - return - logger.debug("Relationship: source = %s, target = %s, rel_name = %s" - % (obj_id, value, key)) - self.state[self.RELATIONSHIPS].add((obj_id, value, key)) - - def _transitive_closure(self): - """Computes transitive closure on a directed graph. - - In other words computes reachability within the graph. - E.g. {(1, 2), (2, 3)} -> {(1, 2), (2, 3), (1, 3)} - (1, 3) was added because there is path from 1 to 3 in the graph. - """ - closure = self.state[self.CONNECTED] - while True: - # Attempts to discover new transitive relations - # by joining 2 subsequent relations/edges within the graph. - new_relations = {(x, w) for x, y in closure - for q, w in closure if q == y} - # Creates union with already discovered relations. - closure_until_now = closure | new_relations - # If no new relations were discovered in last cycle - # the computation is finished. - if closure_until_now == closure: - self.state[self.CONNECTED] = closure - break - closure = closure_until_now - - def _add_connected(self, source_id, target_id): - """Looks up the target_id in objects and add links to connected table. - - Adds sets of (source_id, target_id) to connected table along - with its indirections. 
- :param source_id: source uuid - :param target_id: target uuid - """ - for row in self.state[self.OBJECTS]: - if row[1] == target_id: - self.state[self.CONNECTED].add((row[1], row[0])) - self.state[self.CONNECTED].add((source_id, row[0])) - self.state[self.CONNECTED].add((source_id, target_id)) - - def _translate_connected(self): - """Translates relationships table into connected table.""" - for row in self.state[self.RELATIONSHIPS]: - self._add_connected(row[0], row[1]) - self._transitive_closure() - - def _add_parent_types(self, obj_id, parent_types): - """Add sets of (obj_id, parent_type) to parent_types table. - - :param obj_id: uuid of object - :param parent_types: list of parent type string - """ - if parent_types: - for p_type in parent_types: - self.state[self.PARENT_TYPES].add((obj_id, p_type)) - - def _get_package_type(self, class_name): - """Determine whether obj_type is an Application or Library. - - :param class_name: service/application class name - e.g. io.murano.apps.linux.Telnet. - :return: - package type (e.g. 'Application') if found. - - None if no package type found. - """ - pkg_type = None - if self.PROPERTIES in self.state: - idx_uuid = 0 - idx_value = 2 - uuid = None - for row in self.state[self.PROPERTIES]: - if 'class_definitions' in row and class_name in row: - uuid = row[idx_uuid] - break - if uuid: - for row in self.state[self.PROPERTIES]: - if 'type' in row and uuid == row[idx_uuid]: - pkg_type = row[idx_value] - - # If the package is removed after deployed, its properties - # are not known and so above search will fail. In that case - # let's check for class_name prefix as the last resort. - if not pkg_type: - for prefix in self.APPS_TYPE_PREFIXES: - if prefix in class_name: - pkg_type = 'Application' - break - return pkg_type - - def _get_parent_types(self, obj_type): - """Get class types of all OBJ_TYPE's parents including itself. - - Look up the hierarchy of OBJ_TYPE and return types of all its - ancestor including its own type. - :param obj_type: - """ - class_types = [] - p = lambda x: inspect.isclass(x) - g = inspect.getmembers(murano_classes, p) - for name, cls in g: - logger.debug("%s: %s" % (name, cls)) - if (cls is murano_classes.IOMuranoApps and - self._get_package_type(obj_type) == 'Application'): - cls.name = obj_type - if 'get_parent_types' in dir(cls): - class_types = cls.get_parent_types(obj_type) - if class_types: - break - return class_types - - def _call_murano_action(self, environment_id, object_id, action_name): - """Invokes action of object in Murano environment. 
- - :param environment_id: uuid - :param object_id: uuid - :param action_name: string - """ - # get action id using object_id, env_id and action name - logger.debug("Requested Murano action invoke %s on %s in %s", - action_name, object_id, environment_id) - if (not self.state[self.ACTIONS] or - environment_id not in self.state[self.ACTIONS]): - logger.warning('Datasource "%s" found no actions for ' - 'environment "%s"', self.name, environment_id) - return - env_actions = self.state[self.ACTIONS][environment_id] - for env_action in env_actions: - ea_obj_id, ea_action_id, ea_action_name, ea_enabled = env_action - if (object_id == ea_obj_id and action_name == ea_action_name - and ea_enabled): - logger.debug("Invoking Murano action_id = %s, action_name %s", - ea_action_id, ea_action_name) - # TODO(tranldt): support action arguments - task_id = self.murano_client.actions.call(environment_id, - ea_action_id) - logger.debug("Murano action invoked %s - task id %s", - ea_action_id, task_id) - self.action_call_returns.append(task_id) - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - logger.info("%s:: executing %s on %s", self.name, action, action_args) - self.action_call_returns = [] - positional_args = action_args.get('positional', []) - logger.debug('Processing action execution: action = %s, ' - 'positional args = %s', action, positional_args) - try: - env_id = positional_args[0] - obj_id = positional_args[1] - action_name = positional_args[2] - self._call_murano_action(env_id, obj_id, action_name) - except Exception as e: - logger.exception(str(e)) diff --git a/congress/datasources/neutron_driver.py b/congress/datasources/neutron_driver.py deleted file mode 100644 index 38e5813b..00000000 --- a/congress/datasources/neutron_driver.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import neutronclient.v2_0.client -from oslo_log import log as logging - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils - -LOG = logging.getLogger(__name__) - - -class NeutronDriver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - - NETWORKS = "networks" - NETWORKS_SUBNETS = "networks.subnets" - PORTS = "ports" - PORTS_ADDR_PAIRS = "ports.address_pairs" - PORTS_SECURITY_GROUPS = "ports.security_groups" - PORTS_BINDING_CAPABILITIES = "ports.binding_capabilities" - PORTS_FIXED_IPS = "ports.fixed_ips" - PORTS_FIXED_IPS_GROUPS = "ports.fixed_ips_groups" - PORTS_EXTRA_DHCP_OPTS = "ports.extra_dhcp_opts" - ROUTERS = "routers" - ROUTERS_EXTERNAL_GATEWAYS = "routers.external_gateways" - SECURITY_GROUPS = "security_groups" - - # This is the most common per-value translator, so define it once here. 
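Before the translator definitions that follow, a simplified model of what an HDICT translator built from per-field VALUE translators accomplishes; this is a sketch, not Congress's actual convert_objs, and it ignores the nested LIST/VDICT sub-tables handled below:

```python
# Simplified HDICT-with-VALUE-fields model: pluck each listed fieldname
# out of the source dict to form one flat table row.
def hdict_to_row(obj, field_translators):
    return tuple(obj.get(ft['fieldname']) for ft in field_translators)

fields = ({'fieldname': 'id'}, {'fieldname': 'name'}, {'fieldname': 'status'})
network = {'id': 'net-1', 'name': 'private', 'status': 'ACTIVE'}
print(hdict_to_row(network, fields))  # ('net-1', 'private', 'ACTIVE')
```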
- value_trans = {'translation-type': 'VALUE'} - - networks_translator = { - 'translation-type': 'HDICT', - 'table-name': NETWORKS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'status', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'subnets', 'col': 'subnet_group_id', - 'translator': {'translation-type': 'LIST', - 'table-name': 'networks.subnets', - 'id-col': 'subnet_group_id', - 'val-col': 'subnet', - 'translator': value_trans}}, - {'fieldname': 'provider:physical_network', - 'translator': value_trans}, - {'fieldname': 'admin_state_up', 'translator': value_trans}, - {'fieldname': 'tenant_id', 'translator': value_trans}, - {'fieldname': 'provider:network_type', 'translator': value_trans}, - {'fieldname': 'router:external', 'translator': value_trans}, - {'fieldname': 'shared', 'translator': value_trans}, - {'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'provider:segmentation_id', - 'translator': value_trans})} - - ports_translator = { - 'translation-type': 'HDICT', - 'table-name': PORTS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'allowed_address_pairs', - 'col': 'allowed_address_pairs_id', - 'translator': {'translation-type': 'LIST', - 'table-name': PORTS_ADDR_PAIRS, - 'id-col': 'allowed_address_pairs_id', - 'val-col': 'address', - 'translator': value_trans}}, - {'fieldname': 'security_groups', - 'col': 'security_groups_id', - 'translator': {'translation-type': 'LIST', - 'table-name': PORTS_SECURITY_GROUPS, - 'id-col': 'security_groups_id', - 'val-col': 'security_group_id', - 'translator': value_trans}}, - {'fieldname': 'extra_dhcp_opts', - 'col': 'extra_dhcp_opt_group_id', - 'translator': {'translation-type': 'LIST', - 'table-name': PORTS_EXTRA_DHCP_OPTS, - 'id-col': 'extra_dhcp_opt_group_id', - 'val-col': 'dhcp_opt', - 'translator': value_trans}}, - {'fieldname': 'binding:capabilities', - 'col': 'binding:capabilities_id', - 'translator': {'translation-type': 'VDICT', - 'table-name': PORTS_BINDING_CAPABILITIES, - 'id-col': 'binding:capabilities_id', - 'key-col': 'key', 'val-col': 'value', - 'translator': value_trans}}, - {'fieldname': 'status', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'admin_state_up', 'translator': value_trans}, - {'fieldname': 'network_id', 'translator': value_trans}, - {'fieldname': 'tenant_id', 'translator': value_trans}, - {'fieldname': 'binding:vif_type', 'translator': value_trans}, - {'fieldname': 'device_owner', 'translator': value_trans}, - {'fieldname': 'mac_address', 'translator': value_trans}, - - {'fieldname': 'fixed_ips', - 'col': 'fixed_ips', - 'translator': {'translation-type': 'LIST', - 'table-name': PORTS_FIXED_IPS_GROUPS, - 'id-col': 'fixed_ips_group_id', - 'val-col': 'fixed_ip_id', - 'translator': {'translation-type': 'VDICT', - 'table-name': PORTS_FIXED_IPS, - 'id-col': 'fixed_ip_id', - 'key-col': 'key', - 'val-col': 'value', - 'translator': value_trans}}}, - {'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'device_id', 'translator': value_trans}, - {'fieldname': 'binding:host_id', 'translator': value_trans})} - - routers_translator = { - 'translation-type': 'HDICT', - 'table-name': ROUTERS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'status', 'translator': value_trans}, - {'fieldname': 'external_gateway_info', - 'translator': {'translation-type': 'VDICT', - 'table-name': ROUTERS_EXTERNAL_GATEWAYS, - 'id-col': 
'external_gateway_info',
-                         'key-col': 'key', 'val-col': 'value',
-                         'translator': value_trans}},
-         {'fieldname': 'networks', 'translator': value_trans},
-         {'fieldname': 'name', 'translator': value_trans},
-         {'fieldname': 'admin_state_up', 'translator': value_trans},
-         {'fieldname': 'tenant_id', 'translator': value_trans},
-         {'fieldname': 'id', 'translator': value_trans})}
-
-    security_groups_translator = {
-        'translation-type': 'HDICT',
-        'table-name': SECURITY_GROUPS,
-        'selector-type': 'DICT_SELECTOR',
-        'field-translators':
-        ({'fieldname': 'tenant_id', 'translator': value_trans},
-         {'fieldname': 'name', 'translator': value_trans},
-         {'fieldname': 'description', 'translator': value_trans},
-         {'fieldname': 'id', 'translator': value_trans})}
-
-    TRANSLATORS = [networks_translator, ports_translator, routers_translator,
-                   security_groups_translator]
-
-    def __init__(self, name='', args=None):
-        super(NeutronDriver, self).__init__(name, args=args)
-        datasource_driver.ExecutionDriver.__init__(self)
-        self.creds = self.get_neutron_credentials(args)
-        self.neutron = neutronclient.v2_0.client.Client(**self.creds)
-        self.initialize_update_methods()
-        self._init_end_start_poll()
-
-    @staticmethod
-    def get_datasource_info():
-        result = {}
-        result['id'] = 'neutron'
-        result['description'] = ('This driver is deprecated; do not use it')
-        result['config'] = datasource_utils.get_openstack_required_config()
-        result['config']['lazy_tables'] = constants.OPTIONAL
-        result['secret'] = ['password']
-        return result
-
-    def get_neutron_credentials(self, creds):
-        d = {}
-        d['username'] = creds['username']
-        d['tenant_name'] = creds['tenant_name']
-        d['password'] = creds['password']
-        d['auth_url'] = creds['auth_url']
-        return d
-
-    def initialize_update_methods(self):
-        networks_method = lambda: self._translate_networks(
-            self.neutron.list_networks())
-        self.add_update_method(networks_method, self.networks_translator)
-
-        ports_method = lambda: self._translate_ports(self.neutron.list_ports())
-        self.add_update_method(ports_method, self.ports_translator)
-
-        routers_method = lambda: self._translate_routers(
-            self.neutron.list_routers())
-        self.add_update_method(routers_method, self.routers_translator)
-
-        security_method = lambda: self._translate_security_groups(
-            self.neutron.list_security_groups())
-        self.add_update_method(security_method,
-                               self.security_groups_translator)
-
-    @datasource_utils.update_state_on_changed(NETWORKS)
-    def _translate_networks(self, obj):
-        """Translate the networks represented by OBJ into tables.
-
-        Assigns self.state[tablename] for all those TABLENAMEs
-        generated from OBJ: NETWORKS, NETWORKS_SUBNETS
-        """
-        LOG.debug("NETWORKS: %s", dict(obj))
-
-        row_data = NeutronDriver.convert_objs(obj['networks'],
-                                              self.networks_translator)
-        return row_data
-
-    @datasource_utils.update_state_on_changed(PORTS)
-    def _translate_ports(self, obj):
-        """Translate the ports represented by OBJ into tables.
-
-        Assigns self.state[tablename] for all those TABLENAMEs
-        generated from OBJ: PORTS, PORTS_ADDR_PAIRS,
-        PORTS_SECURITY_GROUPS, PORTS_BINDING_CAPABILITIES,
-        PORTS_FIXED_IPS, PORTS_FIXED_IPS_GROUPS,
-        PORTS_EXTRA_DHCP_OPTS.
-        """
-        LOG.debug("PORTS: %s", obj)
-
-        row_data = NeutronDriver.convert_objs(obj['ports'],
-                                              self.ports_translator)
-        return row_data
-
-    @datasource_utils.update_state_on_changed(ROUTERS)
-    def _translate_routers(self, obj):
-        """Translates the routers represented by OBJ into a single table.
-
-        Assigns self.state[ROUTERS] to that table.
- """ - LOG.debug("ROUTERS: %s", dict(obj)) - - row_data = NeutronDriver.convert_objs(obj['routers'], - self.routers_translator) - return row_data - - @datasource_utils.update_state_on_changed(SECURITY_GROUPS) - def _translate_security_groups(self, obj): - LOG.debug("SECURITY_GROUPS: %s", dict(obj)) - - row_data = NeutronDriver.convert_objs(obj['security_groups'], - self.security_groups_translator) - return row_data - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.neutron, action, action_args) - -# Sample Mapping -# Network : -# ======== -# -# json -# ------ -# {u'status': u'ACTIVE', u'subnets': -# [u'4cef03d0-1d02-40bb-8c99-2f442aac6ab0'], -# u'name':u'test-network', u'provider:physical_network': None, -# u'admin_state_up': True, -# u'tenant_id': u'570fe78a1dc54cffa053bd802984ede2', -# u'provider:network_type': u'gre', -# u'router:external': False, u'shared': False, u'id': -# u'240ff9df-df35-43ae-9df5-27fae87f2492', -# u'provider:segmentation_id': 4} -# -# tuple -# ----- -# -# Networks : (u'ACTIVE', 'cdca5538-ae2d-11e3-92c1-bcee7bdf8d69', -# u'vova_network', None, -# True, u'570fe78a1dc54cffa053bd802984ede2', u'gre', 'False', 'False', -# u'1e3bc4fe-85c2-4b04-9b7f-ee40239787ef', 7) -# -# Networks and subnets -# ('cdcaa1a0-ae2d-11e3-92c1-bcee7bdf8d69', -# u'4cef03d0-1d02-40bb-8c99-2f442aac6ab0') -# -# -# Ports -# ====== -# json -# ---- -# {u'status': u'ACTIVE', -# u'binding:host_id': u'havana', u'name': u'', -# u'allowed_address_pairs': [], -# u'admin_state_up': True, u'network_id': -# u'240ff9df-df35-43ae-9df5-27fae87f2492', -# u'tenant_id': u'570fe78a1dc54cffa053bd802984ede2', -# u'extra_dhcp_opts': [], -# u'binding:vif_type': u'ovs', u'device_owner': -# u'network:router_interface', -# u'binding:capabilities': {u'port_filter': True}, -# u'mac_address': u'fa:16:3e:ab:90:df', -# u'fixed_ips': [{u'subnet_id': -# u'4cef03d0-1d02-40bb-8c99-2f442aac6ab0', -# u'ip_address': u'90.0.0.1'}], u'id': -# u'0a2ce569-85a8-45ec-abb3-0d4b34ff69ba',u'security_groups': [], -# u'device_id': u'864e4acf-bf8e-4664-8cf7-ad5daa95681e'}, -# tuples -# ------- -# Ports [(u'ACTIVE', u'havana', u'', -# '6425751e-ae2c-11e3-bba1-bcee7bdf8d69', 'True', -# u'240ff9df-df35-43ae-9df5-27fae87f2492', -# u'570fe78a1dc54cffa053bd802984ede2', -# '642579e2-ae2c-11e3-bba1-bcee7bdf8d69', u'ovs', -# u'network:router_interface', '64257dac-ae2c-11e3-bba1-bcee7bdf8d69', -# u'fa:16:3e:ab:90:df', -# '64258126-ae2c-11e3-bba1-bcee7bdf8d69', -# u'0a2ce569-85a8-45ec-abb3-0d4b34ff69ba', -# '64258496-ae2c-11e3-bba1-bcee7bdf8d69', -# u'864e4acf-bf8e-4664-8cf7-ad5daa95681e') -# -# Ports and Address Pairs -# [('6425751e-ae2c-11e3-bba1-bcee7bdf8d69', '') -# Ports and Security Groups -# [('64258496-ae2c-11e3-bba1-bcee7bdf8d69', '') -# Ports and Binding Capabilities [ -# ('64257dac-ae2c-11e3-bba1-bcee7bdf8d69',u'port_filter','True') -# Ports and Fixed IPs [('64258126-ae2c-11e3-bba1-bcee7bdf8d69', -# u'subnet_id',u'4cef03d0-1d02-40bb-8c99-2f442aac6ab0'), -# ('64258126-ae2c-11e3-bba1-bcee7bdf8d69', u'ip_address', -# u'90.0.0.1') -# -# Ports and Extra dhcp opts [ -# ('642579e2-ae2c-11e3-bba1-bcee7bdf8d69', '') diff --git a/congress/datasources/neutronv2_driver.py b/congress/datasources/neutronv2_driver.py deleted file mode 100644 index ef1869b4..00000000 --- a/congress/datasources/neutronv2_driver.py +++ /dev/null @@ -1,468 +0,0 @@ 
-#!/usr/bin/env python -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -import neutronclient.v2_0.client -from oslo_log import log as logging - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - -LOG = logging.getLogger(__name__) - - -class NeutronV2Driver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - - NETWORKS = 'networks' - FIXED_IPS = 'fixed_ips' - SECURITY_GROUP_PORT_BINDINGS = 'security_group_port_bindings' - PORTS = 'ports' - ALLOCATION_POOLS = 'allocation_pools' - DNS_NAMESERVERS = 'dns_nameservers' - HOST_ROUTES = 'host_routes' - SUBNETS = 'subnets' - EXTERNAL_FIXED_IPS = 'external_fixed_ips' - EXTERNAL_GATEWAY_INFOS = 'external_gateway_infos' - ROUTERS = 'routers' - SECURITY_GROUP_RULES = 'security_group_rules' - SECURITY_GROUPS = 'security_groups' - FLOATING_IPS = 'floating_ips' - - # This is the most common per-value translator, so define it once here. - value_trans = {'translation-type': 'VALUE'} - - floating_ips_translator = { - 'translation-type': 'HDICT', - 'table-name': FLOATING_IPS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'The UUID of the floating IP address', - 'translator': value_trans}, - {'fieldname': 'router_id', 'desc': 'UUID of router', - 'translator': value_trans}, - {'fieldname': 'tenant_id', 'desc': 'Tenant ID', - 'translator': value_trans}, - {'fieldname': 'floating_network_id', - 'desc': 'The UUID of the network associated with floating IP', - 'translator': value_trans}, - {'fieldname': 'fixed_ip_address', - 'desc': 'Fixed IP address associated with floating IP address', - 'translator': value_trans}, - {'fieldname': 'floating_ip_address', - 'desc': 'The floating IP address', 'translator': value_trans}, - {'fieldname': 'port_id', 'desc': 'UUID of port', - 'translator': value_trans}, - {'fieldname': 'status', 'desc': 'The floating IP status', - 'translator': value_trans})} - - networks_translator = { - 'translation-type': 'HDICT', - 'table-name': NETWORKS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'Network ID', - 'translator': value_trans}, - {'fieldname': 'tenant_id', 'desc': 'Tenant ID', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'Network name', - 'translator': value_trans}, - {'fieldname': 'status', 'desc': 'Network status', - 'translator': value_trans}, - {'fieldname': 'admin_state_up', - 'desc': 'Administrative state of the network (true/false)', - 'translator': value_trans}, - {'fieldname': 'shared', - 'desc': 'Indicates if network is shared across all tenants', - 'translator': value_trans})} - - ports_fixed_ips_translator = { - 'translation-type': 'HDICT', - 'table-name': FIXED_IPS, - 'parent-key': 'id', - 'parent-col-name': 
'port_id', - 'parent-key-desc': 'UUID of Port', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'ip_address', - 'desc': 'The IP addresses for the port', - 'translator': value_trans}, - {'fieldname': 'subnet_id', - 'desc': 'The UUID of the subnet to which the port is attached', - 'translator': value_trans})} - - ports_security_groups_translator = { - 'translation-type': 'LIST', - 'table-name': SECURITY_GROUP_PORT_BINDINGS, - 'parent-key': 'id', - 'parent-col-name': 'port_id', - 'parent-key-desc': 'UUID of port', - 'val-col': 'security_group_id', - 'val-col-desc': 'UUID of security group', - 'translator': value_trans} - - ports_translator = { - 'translation-type': 'HDICT', - 'table-name': PORTS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'UUID of port', - 'translator': value_trans}, - {'fieldname': 'tenant_id', 'desc': 'tenant ID', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'port name', - 'translator': value_trans}, - {'fieldname': 'network_id', 'desc': 'UUID of attached network', - 'translator': value_trans}, - {'fieldname': 'mac_address', 'desc': 'MAC address of the port', - 'translator': value_trans}, - {'fieldname': 'admin_state_up', - 'desc': 'Administrative state of the port', - 'translator': value_trans}, - {'fieldname': 'status', 'desc': 'Port status', - 'translator': value_trans}, - {'fieldname': 'device_id', - 'desc': 'The UUID of the device that uses this port', - 'translator': value_trans}, - {'fieldname': 'device_owner', - 'desc': 'The UUID of the entity that uses this port', - 'translator': value_trans}, - {'fieldname': 'fixed_ips', - 'desc': 'The IP addresses for the port', - 'translator': ports_fixed_ips_translator}, - {'fieldname': 'security_groups', - 'translator': ports_security_groups_translator})} - - subnets_allocation_pools_translator = { - 'translation-type': 'HDICT', - 'table-name': ALLOCATION_POOLS, - 'parent-key': 'id', - 'parent-col-name': 'subnet_id', - 'parent-key-desc': 'UUID of subnet', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'start', - 'desc': 'The start address for the allocation pools', - 'translator': value_trans}, - {'fieldname': 'end', - 'desc': 'The end address for the allocation pools', - 'translator': value_trans})} - - subnets_dns_nameservers_translator = { - 'translation-type': 'LIST', - 'table-name': DNS_NAMESERVERS, - 'parent-key': 'id', - 'parent-col-name': 'subnet_id', - 'parent-key-desc': 'UUID of subnet', - 'val-col': 'dns_nameserver', - 'val-col-desc': 'The DNS server', - 'translator': value_trans} - - subnets_routes_translator = { - 'translation-type': 'HDICT', - 'table-name': HOST_ROUTES, - 'parent-key': 'id', - 'parent-col-name': 'subnet_id', - 'parent-key-desc': 'UUID of subnet', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'destination', - 'desc': 'The destination for static route', - 'translator': value_trans}, - {'fieldname': 'nexthop', - 'desc': 'The next hop for the destination', - 'translator': value_trans})} - - subnets_translator = { - 'translation-type': 'HDICT', - 'table-name': SUBNETS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'UUID of subnet', - 'translator': value_trans}, - {'fieldname': 'tenant_id', 'desc': 'tenant ID', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'subnet name', - 'translator': value_trans}, - {'fieldname': 'network_id', 'desc': 
'UUID of attached network', - 'translator': value_trans}, - {'fieldname': 'ip_version', - 'desc': 'The IP version, which is 4 or 6', - 'translator': value_trans}, - {'fieldname': 'cidr', 'desc': 'The CIDR', - 'translator': value_trans}, - {'fieldname': 'gateway_ip', 'desc': 'The gateway IP address', - 'translator': value_trans}, - {'fieldname': 'enable_dhcp', 'desc': 'Is DHCP is enabled or not', - 'translator': value_trans}, - {'fieldname': 'ipv6_ra_mode', 'desc': 'The IPv6 RA mode', - 'translator': value_trans}, - {'fieldname': 'ipv6_address_mode', - 'desc': 'The IPv6 address mode', 'translator': value_trans}, - {'fieldname': 'allocation_pools', - 'translator': subnets_allocation_pools_translator}, - {'fieldname': 'dns_nameservers', - 'translator': subnets_dns_nameservers_translator}, - {'fieldname': 'host_routes', - 'translator': subnets_routes_translator})} - - external_fixed_ips_translator = { - 'translation-type': 'HDICT', - 'table-name': EXTERNAL_FIXED_IPS, - 'parent-key': 'router_id', - 'parent-col-name': 'router_id', - 'parent-key-desc': 'UUID of router', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'subnet_id', 'desc': 'UUID of the subnet', - 'translator': value_trans}, - {'fieldname': 'ip_address', 'desc': 'IP Address', - 'translator': value_trans})} - - routers_external_gateway_infos_translator = { - 'translation-type': 'HDICT', - 'table-name': EXTERNAL_GATEWAY_INFOS, - 'parent-key': 'id', - 'parent-col-name': 'router_id', - 'parent-key-desc': 'UUID of router', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'network_id', 'desc': 'Network ID', - 'translator': value_trans}, - {'fieldname': 'enable_snat', - 'desc': 'current Source NAT status for router', - 'translator': value_trans}, - {'fieldname': 'external_fixed_ips', - 'translator': external_fixed_ips_translator})} - - routers_translator = { - 'translation-type': 'HDICT', - 'table-name': ROUTERS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'uuid of the router', - 'translator': value_trans}, - {'fieldname': 'tenant_id', 'desc': 'tenant ID', - 'translator': value_trans}, - {'fieldname': 'status', 'desc': 'router status', - 'translator': value_trans}, - {'fieldname': 'admin_state_up', - 'desc': 'administrative state of router', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'router name', - 'translator': value_trans}, - {'fieldname': 'distributed', - 'desc': "indicates if it's distributed router ", - 'translator': value_trans}, - {'fieldname': 'external_gateway_info', - 'translator': routers_external_gateway_infos_translator})} - - security_group_rules_translator = { - 'translation-type': 'HDICT', - 'table-name': SECURITY_GROUP_RULES, - 'parent-key': 'id', - 'parent-col-name': 'security_group_id', - 'parent-key-desc': 'uuid of security group', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'id', 'desc': 'The UUID of the security group rule', - 'translator': value_trans}, - {'fieldname': 'tenant_id', 'desc': 'tenant ID', - 'translator': value_trans}, - {'fieldname': 'remote_group_id', - 'desc': 'remote group id to associate with security group rule', - 'translator': value_trans}, - {'fieldname': 'direction', - 'desc': 'Direction in which the security group rule is applied', - 'translator': value_trans}, - {'fieldname': 'ethertype', 'desc': 'IPv4 or IPv6', - 'translator': value_trans}, - {'fieldname': 'protocol', - 'desc': 'protocol that is matched 
by the security group rule.', - 'translator': value_trans}, - {'fieldname': 'port_range_min', - 'desc': 'Min port number in the range', - 'translator': value_trans}, - {'fieldname': 'port_range_max', - 'desc': 'Max port number in the range', - 'translator': value_trans}, - {'fieldname': 'remote_ip_prefix', - 'desc': 'Remote IP prefix to be associated', - 'translator': value_trans})} - - security_group_translator = { - 'translation-type': 'HDICT', - 'table-name': SECURITY_GROUPS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'The UUID for the security group', - 'translator': value_trans}, - {'fieldname': 'tenant_id', 'desc': 'Tenant ID', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'The security group name', - 'translator': value_trans}, - {'fieldname': 'description', 'desc': 'security group description', - 'translator': value_trans}, - {'fieldname': 'security_group_rules', - 'translator': security_group_rules_translator})} - - TRANSLATORS = [networks_translator, ports_translator, subnets_translator, - routers_translator, security_group_translator, - floating_ips_translator] - - def __init__(self, name='', args=None): - super(NeutronV2Driver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - self.creds = args - session = ds_utils.get_keystone_session(self.creds) - self.neutron = neutronclient.v2_0.client.Client(session=session) - self.add_executable_method('update_resource_attrs', - [{'name': 'resource_type', - 'description': 'resource type (e.g. ' + - 'port, network, subnet)'}, - {'name': 'id', - 'description': 'ID of the resource'}, - {'name': 'attr1', - 'description': 'attribute name to ' + - 'update (e.g. admin_state_up)'}, - {'name': 'attr1-value', - 'description': 'updated attr1 value'}, - {'name': 'attrN', - 'description': 'attribute name to ' + - 'update'}, - {'name': 'attrN-value', - 'description': 'updated attrN value'}], - "A wrapper for update_()") - self.add_executable_client_methods(self.neutron, - 'neutronclient.v2_0.client') - self.initialize_update_methods() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'neutronv2' - result['description'] = ('Datasource driver that interfaces with ' - 'OpenStack Networking aka Neutron.') - result['config'] = ds_utils.get_openstack_required_config() - result['config']['lazy_tables'] = constants.OPTIONAL - result['secret'] = ['password'] - return result - - def initialize_update_methods(self): - networks_method = lambda: self._translate_networks( - self.neutron.list_networks()) - self.add_update_method(networks_method, self.networks_translator) - - subnets_method = lambda: self._translate_subnets( - self.neutron.list_subnets()) - self.add_update_method(subnets_method, self.subnets_translator) - - ports_method = lambda: self._translate_ports(self.neutron.list_ports()) - self.add_update_method(ports_method, self.ports_translator) - - routers_method = lambda: self._translate_routers( - self.neutron.list_routers()) - self.add_update_method(routers_method, self.routers_translator) - - security_method = lambda: self._translate_security_groups( - self.neutron.list_security_groups()) - self.add_update_method(security_method, - self.security_group_translator) - - floatingips_method = lambda: self._translate_floating_ips( - self.neutron.list_floatingips()) - self.add_update_method(floatingips_method, - self.floating_ips_translator) - - @ds_utils.update_state_on_changed(FLOATING_IPS) - def 
_translate_floating_ips(self, obj): - LOG.debug("floating_ips: %s", dict(obj)) - - row_data = NeutronV2Driver.convert_objs(obj['floatingips'], - self.floating_ips_translator) - return row_data - - @ds_utils.update_state_on_changed(NETWORKS) - def _translate_networks(self, obj): - LOG.debug("networks: %s", dict(obj)) - - row_data = NeutronV2Driver.convert_objs(obj['networks'], - self.networks_translator) - return row_data - - @ds_utils.update_state_on_changed(PORTS) - def _translate_ports(self, obj): - LOG.debug("ports: %s", obj) - row_data = NeutronV2Driver.convert_objs(obj['ports'], - self.ports_translator) - return row_data - - @ds_utils.update_state_on_changed(SUBNETS) - def _translate_subnets(self, obj): - LOG.debug("subnets: %s", obj) - row_data = NeutronV2Driver.convert_objs(obj['subnets'], - self.subnets_translator) - return row_data - - @ds_utils.update_state_on_changed(ROUTERS) - def _translate_routers(self, obj): - LOG.debug("routers: %s", obj) - row_data = NeutronV2Driver.convert_objs(obj['routers'], - self.routers_translator) - return row_data - - @ds_utils.update_state_on_changed(SECURITY_GROUPS) - def _translate_security_groups(self, obj): - LOG.debug("security_groups: %s", obj) - row_data = NeutronV2Driver.convert_objs(obj['security_groups'], - self.security_group_translator) - return row_data - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.neutron, action, action_args) - - def update_resource_attrs(self, args): - positional_args = args.get('positional', []) - if not positional_args or len(positional_args) < 4: - LOG.error('Args for update_resource_attrs() must contain resource ' - 'type, resource ID and pairs of key-value attributes to ' - 'update') - return - - resource_type = positional_args.pop(0) - resource_id = positional_args.pop(0) - action = 'update_%s' % resource_type - update_attrs = self._convert_args(positional_args) - body = {resource_type: update_attrs} - - action_args = {'named': {resource_type: resource_id, - 'body': body}} - self._execute_api(self.neutron, action, action_args) diff --git a/congress/datasources/nova_driver.py b/congress/datasources/nova_driver.py deleted file mode 100644 index 40e423ee..00000000 --- a/congress/datasources/nova_driver.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
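As a footnote to neutronv2's update_resource_attrs() above: the positional list it decodes is resource type, resource id, then alternating attribute name/value pairs. `_convert_args` is Congress's own helper; the stand-in below only sketches the pairing behaviour:

```python
# Sketch of the positional-args convention update_resource_attrs() expects.
def convert_args(pairs):
    return dict(zip(pairs[0::2], pairs[1::2]))

positional = ['port', 'port-uuid-1', 'admin_state_up', False]
resource_type, resource_id = positional[0], positional[1]
body = {resource_type: convert_args(positional[2:])}
print(body)  # {'port': {'admin_state_up': False}}
```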
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import novaclient.client -from oslo_log import log as logging -import six - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - -LOG = logging.getLogger(__name__) - - -class NovaDriver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - SERVERS = "servers" - FLAVORS = "flavors" - HOSTS = "hosts" - SERVICES = 'services' - AVAILABILITY_ZONES = "availability_zones" - - # This is the most common per-value translator, so define it once here. - value_trans = {'translation-type': 'VALUE'} - - def safe_id(x): - if isinstance(x, six.string_types): - return x - try: - return x['id'] - except Exception: - return str(x) - - servers_translator = { - 'translation-type': 'HDICT', - 'table-name': SERVERS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'The UUID for the server', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'Name of the server', - 'translator': value_trans}, - {'fieldname': 'hostId', 'col': 'host_id', - 'desc': 'The UUID for the host', 'translator': value_trans}, - {'fieldname': 'status', 'desc': 'The server status', - 'translator': value_trans}, - {'fieldname': 'tenant_id', 'desc': 'The tenant ID', - 'translator': value_trans}, - {'fieldname': 'user_id', - 'desc': 'The user ID of the user who owns the server', - 'translator': value_trans}, - {'fieldname': 'image', 'col': 'image_id', - 'desc': 'Name or ID of image', - 'translator': {'translation-type': 'VALUE', - 'extract-fn': safe_id}}, - {'fieldname': 'flavor', 'col': 'flavor_id', - 'desc': 'ID of the flavor', - 'translator': {'translation-type': 'VALUE', - 'extract-fn': safe_id}}, - {'fieldname': 'OS-EXT-AZ:availability_zone', 'col': 'zone', - 'desc': 'The availability zone of host', - 'translator': value_trans}, - {'fieldname': 'OS-EXT-SRV-ATTR:hypervisor_hostname', - 'desc': ('The hostname of hypervisor where the server is ' - 'running'), - 'col': 'host_name', 'translator': value_trans})} - - flavors_translator = { - 'translation-type': 'HDICT', - 'table-name': FLAVORS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'desc': 'ID of the flavor', - 'translator': value_trans}, - {'fieldname': 'name', 'desc': 'Name of the flavor', - 'translator': value_trans}, - {'fieldname': 'vcpus', 'desc': 'Number of vcpus', - 'translator': value_trans}, - {'fieldname': 'ram', 'desc': 'Memory size in MB', - 'translator': value_trans}, - {'fieldname': 'disk', 'desc': 'Disk size in GB', - 'translator': value_trans}, - {'fieldname': 'ephemeral', 'desc': 'Ephemeral space size in GB', - 'translator': value_trans}, - {'fieldname': 'rxtx_factor', 'desc': 'RX/TX factor', - 'translator': value_trans})} - - hosts_translator = { - 'translation-type': 'HDICT', - 'table-name': HOSTS, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'host_name', 'desc': 'Name of host', - 'translator': value_trans}, - {'fieldname': 'service', 'desc': 'Enabled service', - 'translator': value_trans}, - {'fieldname': 'zone', 'desc': 'The availability zone of host', - 'translator': value_trans})} - - services_translator = { - 'translation-type': 'HDICT', - 'table-name': SERVICES, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'col': 'service_id', 'desc': 'Service ID', - 'translator': 
value_trans}, - {'fieldname': 'binary', 'desc': 'Service binary', - 'translator': value_trans}, - {'fieldname': 'host', 'desc': 'Host Name', - 'translator': value_trans}, - {'fieldname': 'zone', 'desc': 'Availability Zone', - 'translator': value_trans}, - {'fieldname': 'status', 'desc': 'Status of service', - 'translator': value_trans}, - {'fieldname': 'state', 'desc': 'State of service', - 'translator': value_trans}, - {'fieldname': 'updated_at', 'desc': 'Last updated time', - 'translator': value_trans}, - {'fieldname': 'disabled_reason', 'desc': 'Disabled reason', - 'translator': value_trans})} - - availability_zones_translator = { - 'translation-type': 'HDICT', - 'table-name': AVAILABILITY_ZONES, - 'selector-type': 'DOT_SELECTOR', - 'field-translators': - ({'fieldname': 'zoneName', 'col': 'zone', - 'desc': 'Availability zone name', 'translator': value_trans}, - {'fieldname': 'zoneState', 'col': 'state', - 'desc': 'Availability zone state', - 'translator': value_trans})} - - TRANSLATORS = [servers_translator, flavors_translator, hosts_translator, - services_translator, availability_zones_translator] - - def __init__(self, name='', args=None): - super(NovaDriver, self).__init__(name, args) - datasource_driver.ExecutionDriver.__init__(self) - self.creds = args - session = ds_utils.get_keystone_session(self.creds) - self.nova_client = novaclient.client.Client( - version=self.creds.get('api_version', '2'), session=session) - self.add_executable_method('servers_set_meta', - [{'name': 'server', - 'description': 'server id'}, - {'name': 'meta-key1', - 'description': 'meta key 1'}, - {'name': 'meta-value1', - 'description': 'value for meta key1'}, - {'name': 'meta-keyN', - 'description': 'meta key N'}, - {'name': 'meta-valueN', - 'description': 'value for meta keyN'}], - "A wrapper for servers.set_meta()") - self.add_executable_client_methods(self.nova_client, 'novaclient.v2.') - self.initialize_update_methods() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'nova' - result['description'] = ('Datasource driver that interfaces with ' - 'OpenStack Compute aka nova.') - result['config'] = ds_utils.get_openstack_required_config() - result['config']['api_version'] = constants.OPTIONAL - result['config']['lazy_tables'] = constants.OPTIONAL - result['secret'] = ['password'] - return result - - def initialize_update_methods(self): - servers_method = lambda: self._translate_servers( - self.nova_client.servers.list( - detailed=True, search_opts={"all_tenants": 1})) - self.add_update_method(servers_method, self.servers_translator) - - flavors_method = lambda: self._translate_flavors( - self.nova_client.flavors.list()) - self.add_update_method(flavors_method, self.flavors_translator) - - hosts_method = lambda: self._translate_hosts( - self.nova_client.hosts.list()) - self.add_update_method(hosts_method, self.hosts_translator) - - services_method = lambda: self._translate_services( - self.nova_client.services.list()) - self.add_update_method(services_method, self.services_translator) - - az_method = lambda: self._translate_availability_zones( - self.nova_client.availability_zones.list()) - self.add_update_method(az_method, self.availability_zones_translator) - - @ds_utils.update_state_on_changed(SERVERS) - def _translate_servers(self, obj): - row_data = NovaDriver.convert_objs(obj, NovaDriver.servers_translator) - return row_data - - @ds_utils.update_state_on_changed(FLAVORS) - def _translate_flavors(self, obj): - row_data = 
NovaDriver.convert_objs(obj, NovaDriver.flavors_translator) - return row_data - - @ds_utils.update_state_on_changed(HOSTS) - def _translate_hosts(self, obj): - row_data = NovaDriver.convert_objs(obj, NovaDriver.hosts_translator) - return row_data - - @ds_utils.update_state_on_changed(SERVICES) - def _translate_services(self, obj): - row_data = NovaDriver.convert_objs(obj, NovaDriver.services_translator) - return row_data - - @ds_utils.update_state_on_changed(AVAILABILITY_ZONES) - def _translate_availability_zones(self, obj): - row_data = NovaDriver.convert_objs( - obj, - NovaDriver.availability_zones_translator) - return row_data - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.nova_client, action, action_args) - - # "action" methods - to be used with "execute" - def servers_set_meta(self, args): - """A wrapper for servers.set_meta(). - - 'execute[p(x)]' doesn't take optional args at the moment. - Therefore, this function translates the positional ARGS - to optional args and call the servers.set_meta() api. - :param args: expected server ID and pairs of meta - data in positional args such as: - {'positional': ['server_id', 'meta1', 'value1', 'meta2', 'value2']} - - Usage: - execute[nova.servers_set_meta(svr_id, meta1, val1, meta2, val2) :- - triggering_table(id) - """ - action = 'servers.set_meta' - positional_args = args.get('positional', []) - if not positional_args: - LOG.error('Args not found for servers_set_meta()') - return - - # Strip off the server_id before converting meta data pairs - server_id = positional_args.pop(0) - meta_data = self._convert_args(positional_args) - - action_args = {'named': {'server': server_id, - 'metadata': meta_data}} - self._execute_api(self.nova_client, action, action_args) diff --git a/congress/datasources/plexxi_driver.py b/congress/datasources/plexxi_driver.py deleted file mode 100644 index 4858afd5..00000000 --- a/congress/datasources/plexxi_driver.py +++ /dev/null @@ -1,647 +0,0 @@ -# Copyright (c) 2014 Marist SDN Innovation lab Joint with Plexxi Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
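The nova driver's servers_set_meta() above performs an analogous translation: the flat positional list documented in its docstring must become the named `server` and `metadata` arguments that novaclient's servers.set_meta() takes. A minimal sketch of that step:

```python
# servers_set_meta() receives ['<server_id>', 'k1', 'v1', 'k2', 'v2', ...]
# and must hand novaclient a named 'server' plus a 'metadata' dict.
def build_set_meta_args(positional):
    server_id, pairs = positional[0], positional[1:]
    metadata = dict(zip(pairs[0::2], pairs[1::2]))
    return {'named': {'server': server_id, 'metadata': metadata}}

print(build_set_meta_args(['srv-1', 'owner', 'alice', 'env', 'prod']))
# {'named': {'server': 'srv-1', 'metadata': {'owner': 'alice', 'env': 'prod'}}}
```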
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -try: - from plexxi.core.api.binding import AffinityGroup - from plexxi.core.api.binding import Job - from plexxi.core.api.binding import PhysicalPort - from plexxi.core.api.binding import PlexxiSwitch - from plexxi.core.api.binding import VirtualizationHost - from plexxi.core.api.binding import VirtualMachine - from plexxi.core.api.binding import VirtualSwitch - from plexxi.core.api.binding import VmwareVirtualMachine - from plexxi.core.api.session import CoreSession -except ImportError: - pass - -from oslo_config import cfg -from oslo_log import log as logging -import requests - -from congress.datasources import constants -from congress.datasources import datasource_driver - -LOG = logging.getLogger(__name__) - - -class PlexxiDriver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - HOSTS = "hosts" - HOST_MACS = HOSTS + '.macs' - HOST_GUESTS = HOSTS + '.guests' - VMS = "vms" - VM_MACS = VMS + '.macs' - AFFINITIES = "affinities" - VSWITCHES = "vswitches" - VSWITCHES_MACS = VSWITCHES + '.macs' - VSWITCHES_HOSTS = VSWITCHES + '.hosts' - PLEXXISWITCHES = "plexxiswitches" - PLEXXISWITCHES_MACS = PLEXXISWITCHES + '.macs' - PORTS = "ports" - NETWORKLINKS = "networklinks" - - def __init__(self, name='', args=None, session=None): - super(PlexxiDriver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - self.exchange = session - self.creds = args - self.raw_state = {} - try: - self.unique_names = self.string_to_bool(args['unique_names']) - except KeyError: - LOG.warning("unique_names has not been configured, " - "defaulting to False.") - self.unique_names = False - port = str(cfg.CONF.bind_port) - host = str(cfg.CONF.bind_host) - self.headers = {'content-type': 'application/json'} - self.name_cooldown = False - self.api_address = "http://" + host + ":" + port + "/v1" - self.name_rule_needed = True - if str(cfg.CONF.auth_strategy) == 'keystone': - if 'keystone_pass' not in args: - LOG.error("Keystone is enabled, but a password was not " + - "provided. All automated API calls are disabled") - self.unique_names = False - self.name_rule_needed = False - elif 'keystone_user' not in args: - LOG.error("Keystone is enabled, but a username was not " + - "provided. All automated API calls are disabled") - self.unique_names = False - self.name_rule_needed = False - else: - self.keystone_url = str(cfg.CONF.keystone_authtoken.auth_uri) - self.keystoneauth() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'plexxi' - result['description'] = ('Datasource driver that interfaces with ' - 'PlexxiCore.') - result['config'] = {'auth_url': constants.REQUIRED, # PlexxiCore url - 'username': constants.REQUIRED, - 'password': constants.REQUIRED, - 'poll_time': constants.OPTIONAL, - 'tenant_name': constants.REQUIRED, - 'unique_names': constants.OPTIONAL, - 'keystone_pass': constants.OPTIONAL, - 'keystone_user': constants.OPTIONAL} - result['secret'] = ['password'] - return result - - def update_from_datasource(self): - """Called when it is time to pull new data from this datasource. - - Pulls lists of objects from PlexxiCore, if the data does not match - the correspondig table in the driver's raw state or has not yet been - added to the state, the driver calls methods to parse this data. 
-
-        Once all data has been updated, sets
-        self.state[tablename] = <set of tuples>
-        for every tablename exported by PlexxiCore.
-        """
-
-        # Initialize instance variables that get set during update
-        self.hosts = []
-        self.mac_list = []
-        self.guest_list = []
-        self.plexxi_switches = []
-        self.affinities = []
-        self.vswitches = []
-        self.vms = []
-        self.vm_macs = []
-        self.ports = []
-        self.network_links = []
-
-        if self.exchange is None:
-            self.connect_to_plexxi()
-
-        # Get host data from PlexxiCore
-        hosts = VirtualizationHost.getAll(session=self.exchange)
-        if (self.HOSTS not in self.state or
-                hosts != self.raw_state[self.HOSTS]):
-            self._translate_hosts(hosts)
-            self.raw_state[self.HOSTS] = hosts
-        else:
-            self.hosts = self.state[self.HOSTS]
-            self.mac_list = self.state[self.HOST_MACS]
-            self.guest_list = self.state[self.HOST_GUESTS]
-
-        # Get PlexxiSwitch Data from PlexxiCore
-        plexxiswitches = PlexxiSwitch.getAll(session=self.exchange)
-        if (self.PLEXXISWITCHES not in self.state or
-                plexxiswitches != self.raw_state[self.PLEXXISWITCHES]):
-            self._translate_pswitches(plexxiswitches)
-            self.raw_state[self.PLEXXISWITCHES] = plexxiswitches
-        else:
-            self.plexxi_switches = self.state[self.PLEXXISWITCHES]
-
-        # Get affinity data from PlexxiCore
-        affinities = AffinityGroup.getAll(session=self.exchange)
-        if (self.AFFINITIES not in self.state or
-                affinities != self.raw_state[self.AFFINITIES]):
-            if AffinityGroup.getCount(session=self.exchange) == 0:
-                self.state[self.AFFINITIES] = ['No Affinities found']
-            else:
-                self._translate_affinites(affinities)
-            self.raw_state[self.AFFINITIES] = affinities
-        else:
-            self.affinities = self.state[self.AFFINITIES]
-
-        # Get vswitch data from PlexxiCore
-        vswitches = VirtualSwitch.getAll(session=self.exchange)
-        if (self.VSWITCHES not in self.state or
-                vswitches != self.raw_state[self.VSWITCHES]):
-            self._translate_vswitches(vswitches)
-            self.raw_state[self.VSWITCHES] = vswitches
-        else:
-            self.vswitches = self.state[self.VSWITCHES]
-
-        # Get virtual machine data from PlexxiCore
-        vms = VirtualMachine.getAll(session=self.exchange)
-        if (self.VMS not in self.state or
-                vms != self.raw_state[self.VMS]):
-            self._translate_vms(vms)
-            self.raw_state[self.VMS] = set(vms)
-        else:
-            self.vms = self.state[self.VMS]
-            self.vm_macs = self.state[self.VM_MACS]
-        # Get port data from PlexxiCore
-        ports = PhysicalPort.getAll(session=self.exchange)
-        if (self.PORTS not in self.state or
-                ports != self.raw_state[self.PORTS]):
-            self._translate_ports(ports)
-            self.raw_state[self.PORTS] = set(ports)
-        else:
-            self.ports = self.state[self.PORTS]
-            self.network_links = self.state[self.NETWORKLINKS]
-
-        LOG.debug("Setting Plexxi State")
-        self.state = {}
-        self.state[self.HOSTS] = set(self.hosts)
-        self.state[self.HOST_MACS] = set(self.mac_list)
-        self.state[self.HOST_GUESTS] = set(self.guest_list)
-        self.state[self.PLEXXISWITCHES] = set(self.plexxi_switches)
-        self.state[self.PLEXXISWITCHES_MACS] = set(self.ps_macs)
-        self.state[self.AFFINITIES] = set(self.affinities)
-        self.state[self.VSWITCHES] = set(self.vswitches)
-        self.state[self.VSWITCHES_MACS] = set(self.vswitch_macs)
-        self.state[self.VSWITCHES_HOSTS] = set(self.vswitch_hosts)
-        self.state[self.VMS] = set(self.vms)
-        self.state[self.VM_MACS] = set(self.vm_macs)
-        self.state[self.PORTS] = set(self.ports)
-        self.state[self.NETWORKLINKS] = set(self.network_links)
-
-        # Create Rules
-        if self.name_rule_needed is True:
-            if self.name_rule_check() is True:
-                self.name_rule_create()
-            else:
-                self.name_rule_needed = False
-        # Act on
Policy - if self.unique_names is True: - if not self.name_cooldown: - self.name_response() - else: - self.name_cooldown = False - - @classmethod - def get_schema(cls): - """Creates a table schema for incoming data from PlexxiCore. - - Returns a dictionary map of tablenames corresponding to column names - for that table. Both tableNames and columnnames are strings. - """ - - d = {} - d[cls.HOSTS] = ("uuid", "name", "mac_count", "vmcount") - d[cls.HOST_MACS] = ("Host_uuid", "Mac_Address") - d[cls.HOST_GUESTS] = ("Host_uuid", "VM_uuid") - d[cls.VMS] = ("uuid", "name", "host_uuid", "ip", "mac_count") - d[cls.VM_MACS] = ("vmID", "Mac_Address") - d[cls.AFFINITIES] = ("uuid", "name") - d[cls.VSWITCHES] = ("uuid", "host_count", "vnic_count") - d[cls.VSWITCHES_MACS] = ("vswitch_uuid", "Mac_Address") - d[cls.VSWITCHES_HOSTS] = ("vswitch_uuid", "hostuuid") - d[cls.PLEXXISWITCHES] = ("uuid", "ip", "status") - d[cls.PLEXXISWITCHES_MACS] = ("Switch_uuid", "Mac_Address") - d[cls.PORTS] = ("uuid", "name") - d[cls.NETWORKLINKS] = ("uuid", "name", "port_uuid", "start_uuid", - "start_name", "stop_uuid", "stop_name") - return d - - def _translate_hosts(self, hosts): - """Translates data about Hosts from PlexxiCore for Congress. - - Responsible for the states 'hosts','hosts.macs' and 'hosts.guests' - """ - - row_keys = self.get_column_map(self.HOSTS) - hostlist = [] - maclist = [] - vm_uuids = [] - for host in hosts: - row = ['None'] * (max(row_keys.values()) + 1) - hostID = host.getForeignUuid() - row[row_keys['uuid']] = hostID - row[row_keys['name']] = host.getName() - pnics = host.getPhysicalNetworkInterfaces() - if pnics: - for pnic in pnics: - mac = str(pnic.getMacAddress()) - tuple_mac = (hostID, mac) - maclist.append(tuple_mac) - mac_count = len(maclist) - if (mac_count > 0): - row[row_keys['mac_count']] = mac_count - vmCount = host.getVirtualMachineCount() - row[row_keys['vmcount']] = vmCount - if vmCount != 0: - vms = host.getVirtualMachines() - for vm in vms: - tuple_vmid = (hostID, vm.getForeignUuid()) - vm_uuids.append(tuple_vmid) - hostlist.append(tuple(row)) - self.hosts = hostlist - self.mac_list = maclist - self.guest_list = vm_uuids - - def _translate_pswitches(self, plexxi_switches): - """Translates data on Plexxi Switches from PlexxiCore for Congress. - - Responsible for state 'Plexxi_swtiches' and 'Plexxi_switches.macs' - """ - - row_keys = self.get_column_map(self.PLEXXISWITCHES) - pslist = [] - maclist = [] - for switch in plexxi_switches: - row = ['None'] * (max(row_keys.values()) + 1) - psuuid = str(switch.getUuid()) - row[row_keys['uuid']] = psuuid - psip = str(switch.getIpAddress()) - row[row_keys['ip']] = psip - psstatus = str(switch.getStatus()) - row[row_keys['status']] = psstatus - pnics = switch.getPhysicalNetworkInterfaces() - for pnic in pnics: - mac = str(pnic.getMacAddress()) - macrow = [psuuid, mac] - maclist.append(tuple(macrow)) - pslist.append(tuple(row)) - self.plexxi_switches = pslist - self.ps_macs = maclist - - def _translate_affinites(self, affinites): - """Translates data about affinites from PlexxiCore for Congress. 
- - Responsible for state 'affinities' - """ - - row_keys = self.get_column_map(self.AFFINITIES) - affinitylist = [] - for affinity in affinites: - row = ['None'] * (max(row_keys.values()) + 1) - uuid = str(affinity.getUuid()) - row[row_keys['uuid']] = uuid - row[row_keys['name']] = affinity.getName() - affinitylist.append(tuple(row)) - self.affinities = affinitylist - - def _translate_vswitches(self, vswitches): - """Translates data about vswitches from PlexxiCore for Congress. - - Responsible for states vswitchlist,vswitch_macs,vswitch_hosts - """ - - # untested - row_keys = self.get_column_map(self.VSWITCHES) - vswitchlist = [] - tuple_macs = [] - vswitch_host_list = [] - for vswitch in vswitches: - row = ['None'] * (max(row_keys.values()) + 1) - vswitchID = vswitch.getForeignUuid() - row[row_keys['uuid']] = vswitchID - vSwitchHosts = vswitch.getVirtualizationHosts() - try: - host_count = len(vSwitchHosts) - except TypeError: - host_count = 0 - row[row_keys['host_count']] = host_count - if host_count != 0: - for host in vSwitchHosts: - hostuuid = host.getForeignUuid() - hostrow = [vswitchID, hostuuid] - vswitch_host_list.append(tuple(hostrow)) - vswitch_vnics = vswitch.getVirtualNetworkInterfaces() - try: - vnic_count = len(vswitch_vnics) - except TypeError: - vnic_count = 0 - row[row_keys['vnic_count']] = vnic_count - if vnic_count != 0: - for vnic in vswitch_vnics: - mac = vnic.getMacAddress() - macrow = [vswitchID, str(mac)] - tuple_macs.append(tuple(macrow)) - vswitchlist.append(tuple(row)) - self.vswitches = vswitchlist - self.vswitch_macs = tuple_macs - self.vswitch_hosts = vswitch_host_list - - def _translate_vms(self, vms): - """Translate data on VMs from PlexxiCore for Congress. - - Responsible for states 'vms' and 'vms.macs' - """ - - row_keys = self.get_column_map(self.VMS) - vmlist = [] - maclist = [] - for vm in vms: - row = ['None'] * (max(row_keys.values()) + 1) - vmID = vm.getForeignUuid() - row[row_keys['uuid']] = vmID - vmName = vm.getName() - row[row_keys['name']] = vmName - try: - vmhost = vm.getVirtualizationHost() - vmhostuuid = vmhost.getForeignUuid() - row[row_keys['host_uuid']] = vmhostuuid - except AttributeError: - LOG.debug("The host for " + vmName + " could not be found") - vmIP = vm.getIpAddress() - if vmIP: - row[row_keys['ip']] = vmIP - vmVnics = vm.getVirtualNetworkInterfaces() - mac_count = 0 - for vnic in vmVnics: - mac = str(vnic.getMacAddress()) - tuple_mac = (vmID, mac) - maclist.append(tuple_mac) - mac_count += 1 - row[row_keys['mac_count']] = mac_count - vmlist.append(tuple(row)) - self.vms = vmlist - self.vm_macs = maclist - - def _translate_ports(self, ports): - """Translate data about ports from PlexxiCore for Congress. 
-
-        Responsible for the states 'ports' and 'ports.links'.
-        """
-
-        row_keys = self.get_column_map(self.PORTS)
-        link_keys = self.get_column_map(self.NETWORKLINKS)
-        port_list = []
-        link_list = []
-        for port in ports:
-            row = ['None'] * (max(row_keys.values()) + 1)
-            portID = str(port.getUuid())
-            row[row_keys['uuid']] = portID
-            portName = str(port.getName())
-            row[row_keys['name']] = portName
-            links = port.getNetworkLinks()
-            if links:
-                for link in links:
-                    link_row = self._translate_network_link(link, link_keys,
-                                                            portID)
-                    link_list.append(tuple(link_row))
-            port_list.append(tuple(row))
-        self.ports = port_list
-        self.network_links = link_list
-
-    def _translate_network_link(self, link, row_keys, sourcePortUuid):
-        """Translates data about network links from PlexxiCore for Congress.
-
-        Subfunction of _translate_ports; each call handles one network link
-        attached to a port. Directly responsible for the state of
-        'ports.links'.
-        """
-
-        row = ['None'] * (max(row_keys.values()) + 1)
-        linkID = str(link.getUuid())
-        row[row_keys['uuid']] = linkID
-        row[row_keys['port_uuid']] = sourcePortUuid
-        linkName = str(link.getName())
-        row[row_keys['name']] = linkName
-        linkStartObj = link.getStartNetworkInterface()
-        linkStartName = str(linkStartObj.getName())
-        row[row_keys['start_name']] = linkStartName
-        linkStartUuid = str(linkStartObj.getUuid())
-        row[row_keys['start_uuid']] = linkStartUuid
-        linkStopObj = link.getStopNetworkInterface()
-        linkStopUuid = str(linkStopObj.getUuid())
-        row[row_keys['stop_uuid']] = linkStopUuid
-        linkStopName = str(linkStopObj.getName())
-        row[row_keys['stop_name']] = linkStopName
-        return row
-
-    def string_to_bool(self, string):
-        """Used for parsing boolean variables stated in datasources.conf."""
-
-        return string.strip().lower() in ['true', 'yes', 'on']
-
-    def connect_to_plexxi(self):
-        """Connect to PlexxiCore.
-
-        Create a CoreSession connecting Congress to PlexxiCore using
-        credentials provided in datasources.conf.
-        """
-
-        if 'auth_url' not in self.creds:
-            LOG.error("Plexxi url not supplied. Could not start Plexxi " +
-                      "connection driver")
-        if 'username' not in self.creds:
-            LOG.error("Plexxi username not supplied. Could not start " +
-                      "Plexxi connection driver")
-        if 'password' not in self.creds:
-            LOG.error("Plexxi password not supplied. Could not start " +
                      "Plexxi connection driver")
-        try:
-            self.exchange = CoreSession.connect(
-                baseUrl=self.creds['auth_url'],
-                allowUntrusted=True,
-                username=self.creds['username'],
-                password=self.creds['password'])
-        except requests.exceptions.HTTPError as error:
-            if (int(error.response.status_code) == 401 or
-                    int(error.response.status_code) == 403):
-                msg = ("Incorrect username/password combination. Passed-in " +
-                       "username was " + self.creds['username'])
-                raise Exception(requests.exceptions.HTTPError(msg))
-            else:
-                raise Exception(requests.exceptions.HTTPError(error))
-
-        except requests.exceptions.ConnectionError:
-            msg = ("Cannot connect to PlexxiCore at " +
-                   self.creds['auth_url'] + " with the username " +
-                   self.creds['username'])
-            raise Exception(requests.exceptions.ConnectionError(msg))
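# The connect_to_plexxi() method above and keystoneauth() below pull their
# settings from self.creds. A datasources.conf fragment along these lines
# is assumed (the key names come from the lookups in this file; the section
# name and all values are illustrative, and unique_names is shown only as
# an example of a boolean option parsed by string_to_bool()):
#
#     [plexxi]
#     auth_url = https://plexxicore.example.org/PlexxiCore/api
#     username = admin
#     password = secret
#     tenant_name = demo
#     keystone_user = congress
#     keystone_pass = secret
#     unique_names = True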
-    def keystoneauth(self):
-        """Acquire a keystone auth token for API calls.
-
-        Called when Congress is running with keystone as the authentication
-        method. This provides the driver a keystone token that is then
-        placed in the header of API calls made to Congress.
-        """
-        try:
-            authreq = {
-                "auth": {
-                    "tenantName": self.creds['tenant_name'],
-                    "passwordCredentials": {
-                        "username": self.creds['keystone_user'],
-                        "password": self.creds['keystone_pass']
-                    }
-                }
-            }
-            headers = {'content-type': 'application/json',
-                       'accept': 'application/json'}
-            request = requests.post(url=self.keystone_url + '/v2.0/tokens',
-                                    data=json.dumps(authreq),
-                                    headers=headers)
-            response = request.json()
-            token = response['access']['token']['id']
-            self.headers['X-Auth-Token'] = token
-        except Exception:
-            LOG.exception("Could not authenticate with keystone. " +
-                          "All automated API calls have been disabled")
-            self.unique_names = False
-            self.name_rule_needed = False
-
-    def name_rule_check(self):
-        """Checks whether a RepeatedNames rule already exists.
-
-        This method is used to prevent the driver from recreating additional
-        RepeatedNames tables each time Congress is restarted.
-        """
-        try:
-            table = requests.get(self.api_address + "/policies/" +
-                                 "plexxi/rules",
-                                 headers=self.headers)
-            result = json.loads(table.text)
-            for entry in result['results']:
-                if entry['name'] == "RepeatedNames":
-                    return False
-            return True
-        except Exception:
-            LOG.exception("An error has occurred when accessing the " +
-                          "Congress API. All automated API calls have " +
-                          "been disabled.")
-            self.unique_names = False
-            self.name_rule_needed = False
-            return False
-
-    def name_rule_create(self):
-        """Creates the RepeatedName table for the unique-names policy.
-
-        The RepeatedName table contains the name and Plexxi UUID of
-        VMs that have the same name in the Plexxi table and the Nova table.
-        """
-        try:
-            datasources = self.node.get_datasources()
-            for datasource in datasources:
-                if datasource['driver'] == 'nova':
-                    repeated_name_rule = ('{"rule": "RepeatedName' +
-                                          '(vname,pvuuid):-' + self.name +
-                                          ':vms(0=pvuuid,1=vname),' +
-                                          datasource['name'] +
-                                          ':servers(1=vname)",' +
-                                          '"name": "RepeatedNames"}')
-                    requests.post(url=self.api_address +
-                                  '/policies/plexxi/rules',
-                                  data=repeated_name_rule,
-                                  headers=self.headers)
-                    self.name_rule_needed = False
-                    break
-        except Exception:
-            LOG.exception("Could not create RepeatedName table")
-
-    def name_response(self):
-        """Checks for any entries in the RepeatedName table.
-
-        For every entry found in the RepeatedName table, the corresponding
-        VM is renamed with a 'Conflict-' prefix in PlexxiCore.
-        """
-
-        vmname = False
-        vmuuid = False
-        json_response = []
-        self.name_cooldown = True
-        try:
-            plexxivms = VmwareVirtualMachine.getAll(session=self.exchange)
-            table = requests.get(self.api_address + "/policies/" +
-                                 "plexxi/tables/RepeatedName/rows",
-                                 headers=self.headers)
-            if table.text == "Authentication required":
-                self.keystoneauth()
-                table = requests.get(self.api_address + "/policies/" +
-                                     "plexxi/tables/RepeatedName/rows",
-                                     headers=self.headers)
-            json_response = json.loads(table.text)
-
-            for row in json_response['results']:
-                vmname = row['data'][0]
-                vmuuid = row['data'][1]
-                if vmname and vmuuid:
-                    for plexxivm in plexxivms:
-                        if (plexxivm.getForeignUuid() == vmuuid):
-                            new_vm_name = "Conflict-" + vmname
-                            desc = ("Congress has found a VM with the same " +
-                                    "name on the nova network. 
This vm " + - "will now be renamed to " + new_vm_name) - job_name = (" Congress Driver:Changing virtual" + - "machine, " + vmname + "\'s name") - changenamejob = Job.create(name=job_name, - description=desc + ".", - session=self.exchange) - changenamejob.begin() - plexxivm.setName(new_vm_name) - changenamejob.commit() - LOG.info(desc + " in PlexxiCore.") - except Exception: - LOG.exception("error in name_response") - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - # TODO(aimeeu) - # The 'else' block is where we would execute native Plexxi actions - # (actions implemented in the Plexxi libraries). However, that is - # hard to do because of the rest of the way the driver is written. - # The question for the 'else' block is whether it's worth exposing - # all the native Plexxi actions. See comments in review - # https://review.openstack.org/#/c/335539/ diff --git a/congress/datasources/push_driver.py b/congress/datasources/push_driver.py deleted file mode 100644 index 5831d7e3..00000000 --- a/congress/datasources/push_driver.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2016 NTT All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import datetime -from oslo_log import log as logging - -from congress.datasources import constants -from congress.datasources import datasource_driver - -LOG = logging.getLogger(__name__) - - -class PushDriver(datasource_driver.PushedDataSourceDriver): - """A DataSource Driver for pushing tuples of data. - - To use this driver, run the following API: - - PUT /v1/data-sources//tables/
/rows - - Still a work in progress, but intent is to allow a request body - to be any list of lists where the internal lists all have - the same number of elements. - - request body: - [ [1,2], [3,4] ] - """ - - def __init__(self, name='', args=None): - super(PushDriver, self).__init__(name, args=args) - self._table_deps['data'] = ['data'] - - @classmethod - def get_schema(cls): - schema = {} - # Hardcode the tables for now. Later, create the tables on the fly. - # May be as easy as deleting the following line. - schema['data'] = [] - return schema - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'push' - result['description'] = ('Datasource driver that allows external ' - 'systems to push data.') - # TODO(masa): Remove the REQUIRED config once python-congressclient - # has been able to retrieve non-dict object in config fields at - # $ openstack congress datasource list command - result['config'] = {'description': constants.REQUIRED, - 'persist_data': constants.OPTIONAL} - return result - - def update_entire_data(self, table_id, objs): - LOG.info('update %s table in %s datasource', table_id, self.name) - tablename = 'data' # hard code - self.prior_state = dict(self.state) - self._update_state(tablename, - [tuple([table_id, tuple(x)]) for x in objs]) - LOG.debug('publish a new state %s in %s', - self.state[tablename], tablename) - self.publish(tablename, self.state[tablename]) - self.number_of_updates += 1 - self.last_updated_time = datetime.datetime.now() diff --git a/congress/datasources/swift_driver.py b/congress/datasources/swift_driver.py deleted file mode 100644 index 691021f8..00000000 --- a/congress/datasources/swift_driver.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) 2014 Montavista Software, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
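A note on using the PushDriver above: rows are pushed in over the Congress
REST API. A minimal client-side sketch follows, assuming a datasource named
'mypush' created from this driver, the hardcoded 'data' table, and the
default Congress API port 1789 (keystone token handling omitted):

    import json

    import requests

    rows = [[1, 2], [3, 4]]  # any list of equal-length lists
    requests.put(
        'http://127.0.0.1:1789/v1/data-sources/mypush/tables/data/rows',
        headers={'Content-Type': 'application/json'},
        data=json.dumps(rows))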
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_log import log as logging -import swiftclient.service - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - -LOG = logging.getLogger(__name__) - - -class SwiftDriver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - - CONTAINERS = "containers" - OBJECTS = "objects" - - value_trans = {'translation-type': 'VALUE'} - - containers_translator = { - 'translation-type': 'HDICT', - 'table-name': CONTAINERS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'count', 'translator': value_trans}, - {'fieldname': 'bytes', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans})} - - objects_translator = { - 'translation-type': 'HDICT', - 'table-name': OBJECTS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'bytes', 'translator': value_trans}, - {'fieldname': 'last_modified', 'translator': value_trans}, - {'fieldname': 'hash', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'content_type', 'translator': value_trans}, - {'fieldname': 'container_name', 'translator': value_trans})} - - TRANSLATORS = [containers_translator, objects_translator] - - def __init__(self, name='', args=None): - if args is None: - args = self.empty_credentials() - super(SwiftDriver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - options = self.get_swift_credentials_v1(args) - # TODO(ramineni): Enable v3 support - options['os_auth_url'] = options['os_auth_url'].replace('v3', 'v2.0') - self.swift_service = swiftclient.service.SwiftService(options) - self.add_executable_client_methods(self.swift_service, - 'swiftclient.service') - self.initialize_update_methods() - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - # TODO(zhenzanz): This is verified with keystoneauth for swift. - # Do we need to support other Swift auth systems? - # http://docs.openstack.org/developer/swift/overview_auth.html - result = {} - result['id'] = 'swift' - result['description'] = ('Datasource driver that interfaces with ' - 'swift.') - result['config'] = ds_utils.get_openstack_required_config() - result['config']['lazy_tables'] = constants.OPTIONAL - result['secret'] = ['password'] - return result - - def get_swift_credentials_v1(self, creds): - # Check swiftclient/service.py _default_global_options for more - # auth options. But these 4 options seem to be enough. 
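# For illustration only (typical keystone v2.0 values, assumed rather than
# taken from this file), the dict built below comes out shaped like:
#     {'os_username': 'admin', 'os_password': 'secret',
#      'os_tenant_name': 'demo',
#      'os_auth_url': 'http://keystone.example.org:5000/v2.0'}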
- options = {} - options['os_username'] = creds['username'] - options['os_password'] = creds['password'] - options['os_tenant_name'] = creds['tenant_name'] - options['os_auth_url'] = creds['auth_url'] - return options - - def initialize_update_methods(self): - containers_method = lambda: self._translate_containers( - self._get_containers_and_objects(container=True)) - self.add_update_method(containers_method, self.containers_translator) - - objects_method = lambda: self._translate_objects( - self._get_containers_and_objects(objects=True)) - self.add_update_method(objects_method, self.objects_translator) - - def _get_containers_and_objects(self, container=False, objects=False): - container_list = self.swift_service.list() - cont_list = [] - objects = [] - containers = [] - LOG.debug("Swift obtaining containers List") - for stats in container_list: - containers = stats['listing'] - for item in containers: - cont_list.append(item['name']) - if container: - return containers - LOG.debug("Swift obtaining objects List") - for container in cont_list: - object_list = self.swift_service.list(container) - for items in object_list: - item_list = items['listing'] - for obj in item_list: - obj['container_name'] = container - for obj in item_list: - objects.append(obj) - if objects: - return objects - return containers, objects - - @ds_utils.update_state_on_changed(CONTAINERS) - def _translate_containers(self, obj): - """Translate the containers represented by OBJ into tables.""" - row_data = SwiftDriver.convert_objs(obj, - self.containers_translator) - return row_data - - @ds_utils.update_state_on_changed(OBJECTS) - def _translate_objects(self, obj): - """Translate the objects represented by OBJ into tables.""" - row_data = SwiftDriver.convert_objs(obj, - self.objects_translator) - return row_data - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.swift_service, action, action_args) diff --git a/congress/datasources/vCenter_driver.py b/congress/datasources/vCenter_driver.py deleted file mode 100644 index f30e3c4e..00000000 --- a/congress/datasources/vCenter_driver.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright (c) 2014 Marist SDN Innovation lab Joint with Plexxi Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
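# The HDICT translators declared below follow the same convention as the
# Swift driver above: each field-translator selects one key from a source
# dict, in declaration order. As a rough sketch with invented values, the
# vms_translator maps
#     {'name': 'vm1', 'uuid': 'u-1', 'host_uuid': 'h-1', ...}
# to the row ('vm1', 'u-1', 'h-1', ...) in the 'vms' table.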
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_log import log as logging -from oslo_vmware import api -from oslo_vmware import vim_util - -from congress.datasources import constants -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils as ds_utils - - -LOG = logging.getLogger(__name__) - - -class VCenterDriver(datasource_driver.PollingDataSourceDriver, - datasource_driver.ExecutionDriver): - - HOSTS = "hosts" - HOST_DNS = "host.DNS_IPs" - HOST_PNICS = "host.PNICs" - HOST_VNICS = "host.VNICs" - VMS = "vms" - - value_trans = {'translation-type': 'VALUE'} - - vms_translator = { - 'translation-type': 'HDICT', - 'table-name': VMS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'uuid', 'translator': value_trans}, - {'fieldname': 'host_uuid', 'translator': value_trans}, - {'fieldname': 'pathName', 'translator': value_trans}, - {'fieldname': 'status', 'translator': value_trans}, - {'fieldname': 'CpuDemand', 'translator': value_trans}, - {'fieldname': 'CpuUsage', 'translator': value_trans}, - {'fieldname': 'memorySizeMB', 'translator': value_trans}, - {'fieldname': 'MemoryUsage', 'translator': value_trans}, - {'fieldname': 'committedStorage', 'translator': value_trans}, - {'fieldname': 'uncommittedStorage', 'translator': value_trans}, - {'fieldname': 'annotation', 'translator': value_trans})} - - pnic_translator = { - 'translation-type': 'HDICT', - 'table-name': HOST_PNICS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'host_uuid', 'translator': value_trans}, - {'fieldname': 'device', 'translator': value_trans}, - {'fieldname': 'mac', 'translator': value_trans}, - {'fieldname': 'ipAddress', 'translator': value_trans}, - {'fieldname': 'subnetMask', 'translator': value_trans})} - - vnic_translator = { - 'translation-type': 'HDICT', - 'table-name': HOST_VNICS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'host_uuid', 'translator': value_trans}, - {'fieldname': 'device', 'translator': value_trans}, - {'fieldname': 'mac', 'translator': value_trans}, - {'fieldname': 'portgroup', 'translator': value_trans}, - {'fieldname': 'ipAddress', 'translator': value_trans}, - {'fieldname': 'subnetMask', 'translator': value_trans})} - - hosts_translator = { - 'translation-type': 'HDICT', - 'table-name': HOSTS, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'uuid', 'translator': value_trans}, - {'fieldname': HOST_DNS, 'col': 'Host:DNS_id', - 'translator': {'translation-type': 'LIST', - 'table-name': HOST_DNS, - 'id-col': 'Host:DNS_id', - 'val-col': 'DNS_IPs', - 'translator': value_trans}})} - - TRANSLATORS = [hosts_translator, pnic_translator, vnic_translator, - vms_translator] - - def __init__(self, name='', args=None, session=None): - if args is None: - args = self.empty_credentials() - else: - args['tenant_name'] = None - super(VCenterDriver, self).__init__(name, args=args) - datasource_driver.ExecutionDriver.__init__(self) - try: - self.max_VMs = int(args['max_vms']) - except (KeyError, ValueError): - LOG.warning("max_vms has not been configured, " - " defaulting to 999.") - self.max_VMs = 999 - try: - self.max_Hosts = int(args['max_hosts']) - except (KeyError, ValueError): - LOG.warning("max_hosts has not been configured, " - "defaulting to 999.") - self.max_Hosts 
= 999 - self.hosts = None - self.creds = args - self.session = session - if session is None: - self.session = api.VMwareAPISession(self.creds['auth_url'], - self.creds['username'], - self.creds['password'], - 10, 1, - create_session=True) - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'vcenter' - result['description'] = ('Datasource driver that interfaces with ' - 'vcenter') - result['config'] = {'auth_url': constants.REQUIRED, - 'username': constants.REQUIRED, - 'password': constants.REQUIRED, - 'poll_time': constants.OPTIONAL, - 'max_vms': constants.OPTIONAL, - 'max_hosts': constants.OPTIONAL} - result['secret'] = ['password'] - - return result - - def update_from_datasource(self): - """Called when it is time to pull new data from this datasource. - - Pulls lists of objects from vCenter, if the data does not match - the correspondig table in the driver's raw state or has not yet been - added to the state, the driver calls methods to parse this data. - """ - - hosts, pnics, vnics = self._get_hosts_and_nics() - self._translate_hosts(hosts) - self._translate_pnics(pnics) - self._translate_vnics(vnics) - - vms = self._get_vms() - self._translate_vms(vms) - - @ds_utils.update_state_on_changed(HOSTS) - def _translate_hosts(self, hosts): - """Translate the host data from vCenter.""" - - row_data = VCenterDriver.convert_objs(hosts, - VCenterDriver.hosts_translator) - return row_data - - @ds_utils.update_state_on_changed(HOST_PNICS) - def _translate_pnics(self, pnics): - """Translate the host pnics data from vCenter.""" - - row_data = VCenterDriver.convert_objs(pnics, - VCenterDriver.pnic_translator) - return row_data - - @ds_utils.update_state_on_changed(HOST_VNICS) - def _translate_vnics(self, vnics): - """Translate the host vnics data from vCenter.""" - - row_data = VCenterDriver.convert_objs(vnics, - VCenterDriver.vnic_translator) - return row_data - - def _get_hosts_and_nics(self): - """Convert vCenter host object to simple format. - - First the raw host data acquired from vCenter is parsed and - organized into a simple format that can be read by congress - translators. This creates three lists, hosts, pnics and vnics. - These lists are then parsed by congress translators to create tables. 
- """ - rawhosts = self._get_hosts_from_vcenter() - hosts = [] - pnics = [] - vnics = [] - for host in rawhosts['objects']: - h = {} - h['vCenter_id'] = host.obj['value'] - for prop in host['propSet']: - if prop.name == "hardware.systemInfo.uuid": - h['uuid'] = prop.val - break - for prop in host['propSet']: - if prop.name == "name": - h['name'] = prop.val - continue - if prop.name == "config.network.dnsConfig.address": - try: - h[self.HOST_DNS] = prop.val.string - except AttributeError: - h[self.HOST_DNS] = ["No DNS IP adddresses configured"] - continue - if prop.name == "config.network.pnic": - for pnic in prop.val.PhysicalNic: - p = {} - p['host_uuid'] = h['uuid'] - p['mac'] = pnic['mac'] - p['device'] = pnic['device'] - p['ipAddress'] = pnic['spec']['ip']['ipAddress'] - p['subnetMask'] = pnic['spec']['ip']['subnetMask'] - pnics.append(p) - if prop.name == "config.network.vnic": - for vnic in prop.val.HostVirtualNic: - v = {} - v['host_uuid'] = h['uuid'] - v['device'] = vnic['device'] - v['portgroup'] = vnic['portgroup'] - v['mac'] = vnic['spec']['mac'] - v['ipAddress'] = vnic['spec']['ip']['ipAddress'] - v['subnetMask'] = vnic['spec']['ip']['subnetMask'] - vnics.append(v) - hosts.append(h) - # cached the hosts for vms - self.hosts = hosts - return hosts, pnics, vnics - - @ds_utils.update_state_on_changed(VMS) - def _translate_vms(self, vms): - """Translate the VM data from vCenter.""" - - row_data = VCenterDriver.convert_objs(vms, - VCenterDriver.vms_translator) - return row_data - - def _get_vms(self): - rawvms = self._get_vms_from_vcenter() - vms = [] - for vm in rawvms['objects']: - v = {} - for prop in vm['propSet']: - if prop.name == "name": - v['name'] = prop.val - continue - if prop.name == "config.uuid": - v['uuid'] = prop.val - continue - if prop.name == "config.annotation": - v['annotation'] = prop.val - continue - if prop.name == "summary.config.vmPathName": - v['pathName'] = prop.val - continue - if prop.name == "summary.config.memorySizeMB": - v['memorySizeMB'] = prop.val - continue - if prop.name == "summary.quickStats": - v['MemoryUsage'] = prop.val['guestMemoryUsage'] - v['CpuDemand'] = prop.val['overallCpuDemand'] - v['CpuUsage'] = prop.val['overallCpuUsage'] - continue - if prop.name == "summary.overallStatus": - v['status'] = prop.val - if prop.name == "summary.storage": - v['committedStorage'] = prop.val['committed'] - v['uncommittedStorage'] = prop.val['uncommitted'] - continue - if prop.name == 'runtime.host': - for host in self.hosts: - if host['vCenter_id'] == prop.val['value']: - v['host_uuid'] = host['uuid'] - continue - continue - vms.append(v) - return vms - - def _get_hosts_from_vcenter(self): - """Called to pull host data from vCenter - - """ - - dataFields = ['name', - 'hardware.systemInfo.uuid', - 'config.network.dnsConfig.address', - 'config.network.pnic', - 'config.network.vnic'] - return self.session.invoke_api(vim_util, 'get_objects', - self.session.vim, 'HostSystem', - self.max_Hosts, dataFields) - - def _get_vms_from_vcenter(self): - """Called to pull VM data from vCenter - - """ - - dataFields = ['name', - 'config.uuid', - 'config.annotation', - 'summary.config.vmPathName', - 'runtime.host', - 'summary.config.memorySizeMB', - 'summary.quickStats', - 'summary.overallStatus', - 'summary.storage'] - return self.session.invoke_api(vim_util, 'get_objects', - self.session.vim, 'VirtualMachine', - self.max_VMs, dataFields) - - def execute(self, action, action_args): - """Overwrite ExecutionDriver.execute().""" - # action can be written as a method or an 
API call. - func = getattr(self, action, None) - if func and self.is_executable(func): - func(action_args) - else: - self._execute_api(self.session, action, action_args) diff --git a/congress/db/__init__.py b/congress/db/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/db/api.py b/congress/db/api.py deleted file mode 100644 index 31d20b34..00000000 --- a/congress/db/api.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_config import cfg -from oslo_db.sqlalchemy import session - -_FACADE = None - - -def _create_facade_lazily(): - global _FACADE - - if _FACADE is None: - _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True) - - return _FACADE - - -def get_engine(): - """Helper method to grab engine.""" - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(autocommit=True, expire_on_commit=False, make_new=False): - """Helper method to grab session.""" - if make_new: # do not reuse existing facade - facade = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True) - else: - facade = _create_facade_lazily() - return facade.get_session(autocommit=autocommit, - expire_on_commit=expire_on_commit) - - -def get_locking_session(): - """Obtain db_session that works with table locking - - supported backends: MySQL and PostgreSQL - return default session if backend not supported (eg. 
sqlite) - """ - if is_mysql() or is_postgres(): - db_session = get_session( - autocommit=False, - # to prevent implicit new transactions, - # which UNLOCKS in MySQL - expire_on_commit=False, # need to UNLOCK after commit - make_new=True) # brand new facade avoids interference - - else: # unsupported backend for locking (eg sqlite), return default - db_session = get_session() - - return db_session - - -def lock_tables(session, tables): - """Write-lock tables for supported backends: MySQL and PostgreSQL""" - session.begin(subtransactions=True) - if is_mysql(): # Explicitly LOCK TABLES for MySQL - session.execute('SET autocommit=0') - session.execute('LOCK TABLES {}'.format( - ','.join([table + ' WRITE' for table in tables]))) - elif is_postgres(): # Explicitly LOCK TABLE for Postgres - session.execute('BEGIN TRANSACTION') - for table in tables: - session.execute('LOCK TABLE {} IN EXCLUSIVE MODE'.format(table)) - - -def commit_unlock_tables(session): - """Commit and unlock tables for supported backends: MySQL and PostgreSQL""" - session.execute('COMMIT') # execute COMMIT on DB backend - session.commit() - # because sqlalchemy session does not guarantee - # exact boundary correspondence to DB backend transactions - # We must guarantee DB commits transaction before UNLOCK - - # unlock - if is_mysql(): - session.execute('UNLOCK TABLES') - # postgres automatically releases lock at transaction end - - -def rollback_unlock_tables(session): - """Rollback and unlock tables - - supported backends: MySQL and PostgreSQL - """ - # unlock - if is_mysql(): - session.execute('UNLOCK TABLES') - - # postgres automatically releases lock at transaction end - - session.rollback() - - -def is_mysql(): - """Return true if and only if database backend is mysql""" - return (cfg.CONF.database.connection is not None and - (cfg.CONF.database.connection.split(':/')[0] == 'mysql' or - cfg.CONF.database.connection.split('+')[0] == 'mysql')) - - -def is_postgres(): - """Return true if and only if database backend is postgres""" - return (cfg.CONF.database.connection is not None and - (cfg.CONF.database.connection.split(':/')[0] == 'postgresql' or - cfg.CONF.database.connection.split('+')[0] == 'postgresql')) - - -def is_sqlite(): - """Return true if and only if database backend is sqlite""" - return (cfg.CONF.database.connection is not None and - (cfg.CONF.database.connection.split(':/')[0] == 'sqlite' or - cfg.CONF.database.connection.split('+')[0] == 'sqlite')) diff --git a/congress/db/datasources.py b/congress/db/datasources.py deleted file mode 100644 index 10348bc4..00000000 --- a/congress/db/datasources.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
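The locking helpers in congress/db/api.py above are meant to be used as a
unit; a minimal sketch of the intended call pattern (table names
illustrative):

    from congress.db import api as db

    session = db.get_locking_session()
    db.lock_tables(session, ['policies', 'policy_rules'])
    try:
        # ... modify rows through `session` here ...
        db.commit_unlock_tables(session)
    except Exception:
        db.rollback_unlock_tables(session)
        raise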
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -import sqlalchemy as sa -from sqlalchemy.orm import exc as db_exc - -from congress.db import api as db -from congress.db import db_ds_table_data as table_data -from congress.db import model_base - - -class Datasource(model_base.BASE, model_base.HasId): - __tablename__ = 'datasources' - - name = sa.Column(sa.String(255), unique=True) - driver = sa.Column(sa.String(255)) - config = sa.Column(sa.Text(), nullable=False) - description = sa.Column(sa.Text(), nullable=True) - enabled = sa.Column(sa.Boolean, default=True) - - def __init__(self, id_, name, driver, config, description, - enabled=True): - self.id = id_ - self.name = name - self.driver = driver - self.config = json.dumps(config) - self.description = description - self.enabled = enabled - - -def add_datasource(id_, name, driver, config, description, - enabled, session=None): - session = session or db.get_session() - with session.begin(subtransactions=True): - datasource = Datasource( - id_=id_, - name=name, - driver=driver, - config=config, - description=description, - enabled=enabled) - session.add(datasource) - return datasource - - -def delete_datasource(id_, session=None): - session = session or db.get_session() - return session.query(Datasource).filter( - Datasource.id == id_).delete() - - -def delete_datasource_with_data(id_, session=None): - session = session or db.get_session() - with session.begin(subtransactions=True): - deleted = delete_datasource(id_, session) - table_data.delete_ds_table_data(id_, session=session) - return deleted - - -def get_datasource_name(name_or_id, session=None): - session = session or db.get_session() - datasource_obj = get_datasource(name_or_id, session) - if datasource_obj is not None: - return datasource_obj.name - return name_or_id - - -def get_datasource(name_or_id, session=None): - db_object = (get_datasource_by_name(name_or_id, session) or - get_datasource_by_id(name_or_id, session)) - - return db_object - - -def get_datasource_by_id(id_, session=None): - session = session or db.get_session() - try: - return (session.query(Datasource). - filter(Datasource.id == id_). - one()) - except db_exc.NoResultFound: - pass - - -def get_datasource_by_name(name, session=None): - session = session or db.get_session() - try: - return (session.query(Datasource). - filter(Datasource.name == name). - one()) - except db_exc.NoResultFound: - pass - - -def get_datasources(session=None, deleted=False): - session = session or db.get_session() - return (session.query(Datasource). - all()) diff --git a/congress/db/db_ds_table_data.py b/congress/db/db_ds_table_data.py deleted file mode 100644 index 14abdb89..00000000 --- a/congress/db/db_ds_table_data.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2016 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
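A short sketch of how the datasource helpers above fit together (the uuid
and config values are invented, and an initialized database is assumed):

    import uuid

    from congress.db import datasources

    ds = datasources.add_datasource(
        id_=str(uuid.uuid4()), name='nova1', driver='nova',
        config={'username': 'admin'}, description='demo datasource',
        enabled=True)
    assert datasources.get_datasource_by_name('nova1').id == ds.id
    datasources.delete_datasource_with_data(ds.id)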
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -import sqlalchemy as sa -from sqlalchemy.orm import exc as db_exc - -from congress.db import api as db -from congress.db import model_base - - -class DSTableData(model_base.BASE): - __tablename__ = 'dstabledata' - - ds_id = sa.Column(sa.String(36), nullable=False, primary_key=True) - tablename = sa.Column(sa.String(255), nullable=False, primary_key=True) - # choose long length compatible with MySQL, SQLite, Postgres - tabledata = sa.Column(sa.Text(), nullable=False) - - -def store_ds_table_data(ds_id, tablename, tabledata, session=None): - session = session or db.get_session() - tabledata = _json_encode_table_data(tabledata) - with session.begin(subtransactions=True): - new_row = session.merge(DSTableData( - ds_id=ds_id, - tablename=tablename, - tabledata=tabledata)) - return new_row - - -def delete_ds_table_data(ds_id, tablename=None, session=None): - session = session or db.get_session() - if tablename is None: - return session.query(DSTableData).filter( - DSTableData.ds_id == ds_id).delete() - else: - return session.query(DSTableData).filter( - DSTableData.ds_id == ds_id, - DSTableData.tablename == tablename).delete() - - -def get_ds_table_data(ds_id, tablename=None, session=None): - session = session or db.get_session() - try: - if tablename is None: - rows = session.query(DSTableData).filter( - DSTableData.ds_id == ds_id) - return_list = [] - for row in rows: - return_list.append( - {'tablename': row.tablename, - 'tabledata': _json_decode_table_data(row.tabledata)}) - return return_list - else: - return _json_decode_table_data(session.query(DSTableData).filter( - DSTableData.ds_id == ds_id, - DSTableData.tablename == tablename).one().tabledata) - except db_exc.NoResultFound: - pass - - -def _json_encode_table_data(tabledata): - tabledata = list(tabledata) - for i in range(0, len(tabledata)): - tabledata[i] = list(tabledata[i]) - return json.dumps(tabledata) - - -def _json_decode_table_data(json_tabledata): - tabledata = json.loads(json_tabledata) - for i in range(0, len(tabledata)): - tabledata[i] = tuple(tabledata[i]) - return set(tabledata) diff --git a/congress/db/db_library_policies.py b/congress/db/db_library_policies.py deleted file mode 100644 index 4f79375d..00000000 --- a/congress/db/db_library_policies.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) 2017 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
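The helpers above round-trip a set of tuples through JSON text; for example
(ids invented, database assumed initialized):

    from congress.db import db_ds_table_data as table_data

    rows = {('vm1', 'ACTIVE'), ('vm2', 'ERROR')}
    table_data.store_ds_table_data('ds-1', 'servers', rows)
    assert table_data.get_ds_table_data('ds-1', 'servers') == rows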
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -from oslo_db import exception as oslo_db_exc -import sqlalchemy as sa -from sqlalchemy.orm import exc as db_exc - -from congress.db import api as db -from congress.db import model_base - - -class LibraryPolicy(model_base.BASE, model_base.HasId): - __tablename__ = 'library_policies' - - name = sa.Column(sa.String(255), nullable=False, unique=True) - abbreviation = sa.Column(sa.String(5), nullable=False) - description = sa.Column(sa.Text(), nullable=False) - kind = sa.Column(sa.Text(), nullable=False) - rules = sa.Column(sa.Text(), nullable=False) - - def to_dict(self, include_rules=True, json_rules=False): - """From a given library policy, return a policy dict. - - Args: - include_rules (bool, optional): include policy rules in return - dictionary. Defaults to False. - """ - if not include_rules: - d = {'id': self.id, - 'name': self.name, - 'abbreviation': self.abbreviation, - 'description': self.description, - 'kind': self.kind} - else: - d = {'id': self.id, - 'name': self.name, - 'abbreviation': self.abbreviation, - 'description': self.description, - 'kind': self.kind, - 'rules': (self.rules if json_rules - else json.loads(self.rules))} - return d - - -def add_policy(policy_dict, session=None): - session = session or db.get_session() - try: - with session.begin(subtransactions=True): - new_row = LibraryPolicy( - name=policy_dict['name'], - abbreviation=policy_dict['abbreviation'], - description=policy_dict['description'], - kind=policy_dict['kind'], - rules=json.dumps(policy_dict['rules'])) - session.add(new_row) - return new_row - except oslo_db_exc.DBDuplicateEntry: - raise KeyError( - "Policy with name %s already exists" % policy_dict['name']) - - -def replace_policy(id_, policy_dict, session=None): - session = session or db.get_session() - try: - with session.begin(subtransactions=True): - new_row = LibraryPolicy( - id=id_, - name=policy_dict['name'], - abbreviation=policy_dict['abbreviation'], - description=policy_dict['description'], - kind=policy_dict['kind'], - rules=json.dumps(policy_dict['rules'])) - session.query(LibraryPolicy).filter( - LibraryPolicy.id == id_).one().update( - new_row.to_dict(include_rules=True, json_rules=True)) - return new_row - except db_exc.NoResultFound: - raise KeyError('No policy found with policy id %s' % id_) - - -def delete_policy(id_, session=None): - session = session or db.get_session() - return session.query(LibraryPolicy).filter( - LibraryPolicy.id == id_).delete() - - -def delete_policies(session=None): - session = session or db.get_session() - return session.query(LibraryPolicy).delete() - - -def get_policy(id_, session=None): - session = session or db.get_session() - try: - return session.query(LibraryPolicy).filter( - LibraryPolicy.id == id_).one() - except db_exc.NoResultFound: - raise KeyError('No policy found with policy id %s' % id_) - - -def get_policies(session=None): - session = session or db.get_session() - return (session.query(LibraryPolicy).all()) diff --git a/congress/db/db_policy_rules.py b/congress/db/db_policy_rules.py deleted file mode 100644 index 3da2ff0d..00000000 --- a/congress/db/db_policy_rules.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - - -from oslo_db import exception as oslo_db_exc -import sqlalchemy as sa -from sqlalchemy.orm import exc as db_exc - - -from congress.db import api as db -from congress.db import model_base - - -class Policy(model_base.BASE, model_base.HasId, model_base.HasAudit): - __tablename__ = 'policies' - - # name is a human-readable string, so it can be referenced in policy - name = sa.Column(sa.String(255), nullable=False, unique=True) - abbreviation = sa.Column(sa.String(5), nullable=False) - description = sa.Column(sa.Text(), nullable=False) - owner = sa.Column(sa.Text(), nullable=False) - kind = sa.Column(sa.Text(), nullable=False) - - def __init__(self, id_, name, abbreviation, description, owner, kind, - deleted=False): - self.id = id_ - self.name = name - self.abbreviation = abbreviation - self.description = description - self.owner = owner - self.kind = kind - self.deleted = is_soft_deleted(id_, deleted) - - def to_dict(self): - """From a given database policy, return a policy dict.""" - d = {'id': self.id, - 'name': self.name, - 'abbreviation': self.abbreviation, - 'description': self.description, - 'owner_id': self.owner, - 'kind': self.kind} - return d - - -class PolicyDeleted(model_base.BASE, model_base.HasId, model_base.HasAudit): - __tablename__ = 'policiesdeleted' - - # name is a human-readable string, so it can be referenced in policy - name = sa.Column(sa.String(255), nullable=False) - abbreviation = sa.Column(sa.String(5), nullable=False) - description = sa.Column(sa.Text(), nullable=False) - owner = sa.Column(sa.Text(), nullable=False) - kind = sa.Column(sa.Text(), nullable=False) - - # overwrite some columns from HasAudit to stop value auto-generation - created_at = sa.Column(sa.DateTime, nullable=False) - updated_at = sa.Column(sa.DateTime, nullable=True) - - def __init__(self, policy_obj): - '''Initialize a PolicyDeleted object by copying a Policy object. 
- - Args: - policy_obj: a Policy object - ''' - self.id = policy_obj.id - self.name = policy_obj.name - self.abbreviation = policy_obj.abbreviation - self.description = policy_obj.description - self.owner = policy_obj.owner - self.kind = policy_obj.kind - self.deleted = policy_obj.deleted - self.created_at = policy_obj.created_at - self.updated_at = policy_obj.updated_at - - -def add_policy(id_, name, abbreviation, description, owner, kind, - deleted=False, session=None): - if session: - # IMPORTANT: if session provided, do not interrupt existing transaction - # with BEGIN which can drop db locks and change desired transaction - # boundaries for proper commit and rollback - try: - policy = Policy(id_, name, abbreviation, description, owner, - kind, deleted) - session.add(policy) - return policy - except oslo_db_exc.DBDuplicateEntry: - raise KeyError("Policy with name %s already exists" % name) - - # else - session = db.get_session() - try: - with session.begin(subtransactions=True): - policy = Policy(id_, name, abbreviation, description, owner, - kind, deleted) - session.add(policy) - return policy - except oslo_db_exc.DBDuplicateEntry: - raise KeyError("Policy with name %s already exists" % name) - - -def delete_policy(id_, session=None): - session = session or db.get_session() - with session.begin(subtransactions=True): - # delete all rules for that policy from database - policy = get_policy_by_id(id_, session=session) - for rule in get_policy_rules(policy.name, session=session): - delete_policy_rule(rule.id, session=session) - - policy_deleted = PolicyDeleted(policy) - session.add(policy_deleted) - - # hard delete policy in Policy table - session.query(Policy).filter(Policy.id == id_).delete() - - # soft delete policy in PolicyDeleted table - return session.query(PolicyDeleted).filter( - PolicyDeleted.id == id_).soft_delete() - - -def get_policy_by_id(id_, session=None, deleted=False): - session = session or db.get_session() - try: - return (session.query(Policy). - filter(Policy.id == id_). - filter(Policy.deleted == is_soft_deleted(id_, deleted)). - one()) - except db_exc.NoResultFound: - pass - - -def get_policy_by_name(name, session=None, deleted=False): - session = session or db.get_session() - try: - return (session.query(Policy). - filter(Policy.name == name). - filter(Policy.deleted == is_soft_deleted(name, deleted)). - one()) - except db_exc.NoResultFound: - pass - - -def get_policy(name_or_id, session=None, deleted=False): - # Try to retrieve policy either by id or name - db_object = (get_policy_by_id(name_or_id, session, deleted) or - get_policy_by_name(name_or_id, session, deleted)) - if not db_object: - raise KeyError("Policy Name or ID '%s' does not exist" % (name_or_id)) - return db_object - - -def get_policies(session=None, deleted=False): - session = session or db.get_session() - return (session.query(Policy). - filter(Policy.deleted == ''). - all()) - - -def policy_name(name_or_id, session=None): - session = session or db.get_session() - try: - ans = (session.query(Policy). - filter(Policy.deleted == ''). - filter(Policy.id == name_or_id). - one()) - except db_exc.NoResultFound: - return name_or_id - return ans.name - - -class PolicyRule(model_base.BASE, model_base.HasId, model_base.HasAudit): - - __tablename__ = "policy_rules" - - # TODO(thinrichs): change this so instead of storing the policy name - # we store the policy's ID. 
Nontrivial since we often have the - # policy's name but not the ID; looking up the ID from the name - # outside of this class leads to race conditions, which means - # this class ought to be modified so that add/delete/etc. supports - # either name or ID as input. - rule = sa.Column(sa.Text(), nullable=False) - policy_name = sa.Column(sa.Text(), nullable=False) - comment = sa.Column(sa.String(255), nullable=False) - name = sa.Column(sa.String(255)) - - def __init__(self, id, policy_name, rule, comment, deleted=False, - rule_name=""): - self.id = id - self.policy_name = policy_name - self.rule = rule - # FIXME(arosen) we should not be passing None for comment here. - self.comment = comment or "" - self.deleted = is_soft_deleted(id, deleted) - self.name = rule_name - - def to_dict(self): - d = {'rule': self.rule, - 'id': self.id, - 'comment': self.comment, - 'name': self.name} - return d - - -def add_policy_rule(id, policy_name, rule, comment, deleted=False, - rule_name="", session=None): - if session: - # IMPORTANT: if session provided, do not interrupt existing transaction - # with BEGIN which can drop db locks and change desired transaction - # boundaries for proper commit and rollback - policy_rule = PolicyRule(id, policy_name, rule, comment, - deleted, rule_name=rule_name) - session.add(policy_rule) - return policy_rule - - # else - session = db.get_session() - with session.begin(subtransactions=True): - policy_rule = PolicyRule(id, policy_name, rule, comment, - deleted, rule_name=rule_name) - session.add(policy_rule) - return policy_rule - - -def delete_policy_rule(id, session=None): - """Specify either the ID or the NAME, and that policy is deleted.""" - session = session or db.get_session() - return session.query(PolicyRule).filter(PolicyRule.id == id).soft_delete() - - -def get_policy_rule(id, policy_name, session=None, deleted=False): - session = session or db.get_session() - rule_query = (session.query(PolicyRule). - filter(PolicyRule.id == id). - filter(PolicyRule.deleted == is_soft_deleted(id, deleted))) - if policy_name: - rule_query = (rule_query. - filter(PolicyRule.policy_name == policy_name)) - try: - return rule_query.one() - except db_exc.NoResultFound: - pass - - -def get_policy_rules(policy_name=None, session=None, - deleted=False): - session = session or db.get_session() - rule_query = session.query(PolicyRule) - if not deleted: - rule_query = rule_query.filter(PolicyRule.deleted == '') - else: - rule_query = rule_query.filter(PolicyRule.deleted != '') - if policy_name: - rule_query = rule_query.filter(PolicyRule.policy_name == policy_name) - return rule_query.all() - - -def is_soft_deleted(uuid, deleted): - return '' if deleted is False else uuid diff --git a/congress/db/migration/README b/congress/db/migration/README deleted file mode 100644 index 71706bfb..00000000 --- a/congress/db/migration/README +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -The migrations in the alembic/versions contain the changes needed to migrate -from older Congress releases to newer versions. A migration occurs by executing -a script that details the changes needed to upgrade/downgrade the database. The -migration scripts are ordered so that multiple scripts can run sequentially to -update the database. The scripts are executed by Congress's migration wrapper -which uses the Alembic library to manage the migration. Congress supports -migration from Juno or later. - - -If you are a deployer or developer and want to migrate from Juno to Kilo -or later you must first add version tracking to the database: - -$ congress-db-manage --config-file /path/to/congress.conf stamp initial_db - -You can then upgrade to the latest database version via: -$ congress-db-manage --config-file /path/to/congress.conf upgrade head - -To check the current database version: -$ congress-db-manage --config-file /path/to/congress.conf current - -To create a script to run the migration offline: -$ congress-db-manage --config-file /path/to/congress.conf upgrade head --sql - -To run the offline migration between specific migration versions: -$ congress-db-manage --config-file /path/to/congress.conf \ - upgrade : --sql - -Upgrade the database incrementally: -$ congress-db-manage --config-file /path/to/congress.conf \ - upgrade --delta <# of revs> - -Downgrade the database by a certain number of revisions: -$ congress-db-manage --config-file /path/to/congress.conf \ - downgrade --delta <# of revs> - - -DEVELOPERS: -A database migration script is required when you submit a change to Congress -that alters the database model definition. The migration script is a special -python file that includes code to update/downgrade the database to match the -changes in the model definition. Alembic will execute these scripts in order to -provide a linear migration path between revision. The congress-db-manage command -can be used to generate migration template for you to complete. The operations -in the template are those supported by the Alembic migration library. - -$ congress-db-manage --config-file /path/to/congress.conf \ -revision -m "description of revision" --autogenerate - -This generates a prepopulated template with the changes needed to match the -database state with the models. You should inspect the autogenerated template -to ensure that the proper models have been altered. - -In rare circumstances, you may want to start with an empty migration template -and manually author the changes necessary for an upgrade/downgrade. You can -create a blank file via: - -$ congress-db-manage --config-file /path/to/congress.conf \ - revision -m "description of revision" - -The migration timeline should remain linear so that there is a clear path when -upgrading/downgrading. To verify that the timeline does not branch, you can -run this command: -$ congress-db-manage --config-file /path/to/congress.conf check_migration - -If the migration path does branch, you can find the branch point via: -$ congress-db-manage --config-file /path/to/congress.conf history diff --git a/congress/db/migration/__init__.py b/congress/db/migration/__init__.py deleted file mode 100644 index 3238c493..00000000 --- a/congress/db/migration/__init__.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import functools - -from alembic import context -from alembic import op -import sqlalchemy as sa - - -def skip_if_offline(func): - """Decorator for skipping migrations in offline mode.""" - @functools.wraps(func) - def decorator(*args, **kwargs): - if context.is_offline_mode(): - return - return func(*args, **kwargs) - - return decorator - - -def raise_if_offline(func): - """Decorator for raising if a function is called in offline mode.""" - @functools.wraps(func) - def decorator(*args, **kwargs): - if context.is_offline_mode(): - raise RuntimeError(_("%s cannot be called while in offline mode") % - func.__name__) - return func(*args, **kwargs) - - return decorator - - -@raise_if_offline -def schema_has_table(table_name): - """Check whether the specified table exists in the current schema. - - This method cannot be executed in offline mode. - """ - bind = op.get_bind() - insp = sa.engine.reflection.Inspector.from_engine(bind) - return table_name in insp.get_table_names() - - -@raise_if_offline -def schema_has_column(table_name, column_name): - """Check whether the specified column exists in the current schema. - - This method cannot be executed in offline mode. - """ - bind = op.get_bind() - insp = sa.engine.reflection.Inspector.from_engine(bind) - # first check that the table exists - if not schema_has_table(table_name): - return - # check whether column_name exists in table columns - return column_name in [column['name'] for column in - insp.get_columns(table_name)] - - -@raise_if_offline -def alter_column_if_exists(table_name, column_name, **kwargs): - """Alter a column only if it exists in the schema.""" - if schema_has_column(table_name, column_name): - op.alter_column(table_name, column_name, **kwargs) - - -@raise_if_offline -def drop_table_if_exists(table_name): - if schema_has_table(table_name): - op.drop_table(table_name) - - -@raise_if_offline -def rename_table_if_exists(old_table_name, new_table_name): - if schema_has_table(old_table_name): - op.rename_table(old_table_name, new_table_name) - - -def alter_enum(table, column, enum_type, nullable): - bind = op.get_bind() - engine = bind.engine - if engine.name == 'postgresql': - values = {'table': table, - 'column': column, - 'name': enum_type.name} - op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values) - enum_type.create(bind, checkfirst=False) - op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO " - "old_%(column)s" % values) - op.add_column(table, sa.Column(column, enum_type, nullable=nullable)) - op.execute("UPDATE %(table)s SET %(column)s = " - "old_%(column)s::text::%(name)s" % values) - op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values) - op.execute("DROP TYPE old_%(name)s" % values) - else: - op.alter_column(table, column, type_=enum_type, - existing_nullable=nullable) - - -def create_table_if_not_exist_psql(table_name, values): - if op.get_bind().engine.dialect.server_version_info < (9, 1, 0): - op.execute("CREATE LANGUAGE plpgsql") - op.execute("CREATE OR 
REPLACE FUNCTION execute(TEXT) RETURNS VOID AS $$"
-               "BEGIN EXECUTE $1; END;"
-               "$$ LANGUAGE plpgsql STRICT;")
-    op.execute("CREATE OR REPLACE FUNCTION table_exist(TEXT) RETURNS bool as "
-               "$$ SELECT exists(select 1 from pg_class where relname=$1);"
-               "$$ language sql STRICT;")
-    op.execute("SELECT execute($$CREATE TABLE %(name)s %(columns)s $$) "
-               "WHERE NOT table_exist(%(name)r);" %
-               {'name': table_name,
-                'columns': values})
diff --git a/congress/db/migration/alembic.ini b/congress/db/migration/alembic.ini
deleted file mode 100644
index 32c7a637..00000000
--- a/congress/db/migration/alembic.ini
+++ /dev/null
@@ -1,52 +0,0 @@
-# A generic, single database configuration.
-
-[alembic]
-# path to migration scripts
-script_location = %(here)s/alembic
-
-# template used to generate migration files
-# file_template = %%(rev)s_%%(slug)s
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# default to an empty string because the Congress migration cli will
-# extract the correct value and set it programmatically before alembic is
-# fully invoked.
-sqlalchemy.url =
-
-# Logging configuration
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = INFO
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
diff --git a/congress/db/migration/alembic_migrations/__init__.py b/congress/db/migration/alembic_migrations/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/congress/db/migration/alembic_migrations/env.py b/congress/db/migration/alembic_migrations/env.py
deleted file mode 100644
index 772b855b..00000000
--- a/congress/db/migration/alembic_migrations/env.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from logging import config as logging_config
-
-from alembic import context
-from oslo_config import cfg
-from oslo_db.sqlalchemy import session
-import sqlalchemy as sa
-from sqlalchemy import event
-
-from congress.db.migration.models import head  # noqa
-from congress.db import model_base
-
-
-MYSQL_ENGINE = None
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-congress_config = config.congress_config
-
-# Interpret the config file for Python logging.
-# This line sets up the loggers.
-logging_config.fileConfig(config.config_file_name) - -# set the target for 'autogenerate' support -target_metadata = model_base.BASE.metadata - - -def set_mysql_engine(): - try: - mysql_engine = congress_config.command.mysql_engine - except cfg.NoSuchOptError: - mysql_engine = None - - global MYSQL_ENGINE - MYSQL_ENGINE = (mysql_engine or - model_base.BASE.__table_args__['mysql_engine']) - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with either a URL - or an Engine. - - Calls to context.execute() here emit the given string to the - script output. - - """ - set_mysql_engine() - - kwargs = dict() - if congress_config.database.connection: - kwargs['url'] = congress_config.database.connection - else: - kwargs['dialect_name'] = congress_config.database.engine - context.configure(**kwargs) - - with context.begin_transaction(): - context.run_migrations() - - -@event.listens_for(sa.Table, 'after_parent_attach') -def set_storage_engine(target, parent): - if MYSQL_ENGINE: - target.kwargs['mysql_engine'] = MYSQL_ENGINE - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - set_mysql_engine() - engine = session.create_engine(congress_config.database.connection) - - connection = engine.connect() - context.configure( - connection=connection, - target_metadata=target_metadata - ) - - try: - with context.begin_transaction(): - context.run_migrations() - finally: - connection.close() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/congress/db/migration/alembic_migrations/policy_rules_init_opts.py b/congress/db/migration/alembic_migrations/policy_rules_init_opts.py deleted file mode 100644 index 24f1e9b8..00000000 --- a/congress/db/migration/alembic_migrations/policy_rules_init_opts.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
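A brief aside on the ``after_parent_attach`` hook in env.py above: the listener stamps every SQLAlchemy ``Table`` with a MySQL storage engine at the moment it attaches to its ``MetaData``. A self-contained sketch of the same mechanism (table name illustrative; engine hardcoded to InnoDB for the example)::

    import sqlalchemy as sa
    from sqlalchemy import event

    @event.listens_for(sa.Table, 'after_parent_attach')
    def set_storage_engine(target, parent):
        # fires once per Table, as it is attached to a MetaData
        target.kwargs['mysql_engine'] = 'InnoDB'

    metadata = sa.MetaData()
    t = sa.Table('example', metadata,
                 sa.Column('id', sa.Integer, primary_key=True))
    print(t.kwargs['mysql_engine'])  # InnoDB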
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'policy_rules', - sa.Column('id', sa.String(length=36), nullable=False), - sa.Column('rule', sa.Text, nullable=False), - sa.Column('comment', sa.String(length=255), nullable=False), - sa.Column('policy_name', sa.String(length=255), nullable=False), - sa.Column('deleted', sa.String(length=36), server_default="", - nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=False), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('id')) - - -def downgrade(): - op.drop_table('policy_rules') diff --git a/congress/db/migration/alembic_migrations/script.py.mako b/congress/db/migration/alembic_migrations/script.py.mako deleted file mode 100644 index fb56a455..00000000 --- a/congress/db/migration/alembic_migrations/script.py.mako +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright ${create_date.year} OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - - -def upgrade(): - ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "pass"} diff --git a/congress/db/migration/alembic_migrations/versions/01e78af70b91_add_datasource_data_persistence.py b/congress/db/migration/alembic_migrations/versions/01e78af70b91_add_datasource_data_persistence.py deleted file mode 100644 index f1f98469..00000000 --- a/congress/db/migration/alembic_migrations/versions/01e78af70b91_add_datasource_data_persistence.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""add datasource data persistence - -Revision ID: 01e78af70b91 -Revises: 3cee191c4f84 -Create Date: 2016-07-29 17:02:40.026610 - -""" - -# revision identifiers, used by Alembic. 
-revision = '01e78af70b91' -down_revision = '3cee191c4f84' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - if op.get_bind().engine.dialect.name == 'mysql': - # NOTE: Specify a length long enough to choose MySQL - # LONGTEXT - text_type = sa.Text(length=1000000000) - else: - text_type = sa.Text() - - op.create_table( - 'dstabledata', - sa.Column('ds_id', sa.String(length=36), nullable=False), - sa.Column('tablename', sa.String(length=255), nullable=False), - sa.Column('tabledata', text_type, nullable=False), - sa.PrimaryKeyConstraint('ds_id', 'tablename'), - mysql_engine='InnoDB') - - -def downgrade(): - op.drop_table('dstabledata') diff --git a/congress/db/migration/alembic_migrations/versions/3cee191c4f84_add_datasources.py b/congress/db/migration/alembic_migrations/versions/3cee191c4f84_add_datasources.py deleted file mode 100644 index eb6820bd..00000000 --- a/congress/db/migration/alembic_migrations/versions/3cee191c4f84_add_datasources.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""add_datasources - -Revision ID: 3cee191c4f84 -Revises: 56e86d51ec62 -Create Date: 2015-02-05 13:30:04.272571 - -""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -# revision identifiers, used by Alembic. -revision = '3cee191c4f84' -down_revision = '56e86d51ec62' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'datasources', - sa.Column('id', sa.String(length=36), nullable=False), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('driver', sa.String(length=255), nullable=True), - sa.Column('config', sa.Text(), nullable=False), - sa.Column('description', sa.Text(), nullable=True), - sa.Column('enabled', sa.Boolean(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('name'), - mysql_engine='InnoDB') - - -def downgrade(): - op.drop_table('datasources') diff --git a/congress/db/migration/alembic_migrations/versions/532e9e1f0f3a_add_policy.py b/congress/db/migration/alembic_migrations/versions/532e9e1f0f3a_add_policy.py deleted file mode 100644 index e96e585c..00000000 --- a/congress/db/migration/alembic_migrations/versions/532e9e1f0f3a_add_policy.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
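Worth spelling out the LONGTEXT note from 01e78af70b91 above: SQLAlchemy renders ``sa.Text(length=N)`` as ``TEXT(N)`` on MySQL, and MySQL itself then chooses the smallest TEXT variant able to hold N characters, so a sufficiently large length lands on LONGTEXT. A small illustration (expected output in comments)::

    import sqlalchemy as sa
    from sqlalchemy.dialects import mysql

    # rendered as TEXT(1000000000); MySQL stores this as LONGTEXT
    print(sa.Text(length=1000000000).compile(dialect=mysql.dialect()))
    # rendered as plain TEXT
    print(sa.Text().compile(dialect=mysql.dialect()))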
-# - -"""add_policy - -Revision ID: 532e9e1f0f3a -Revises: initial_db -Create Date: 2014-12-18 14:52:20.402861 - -""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -# revision identifiers, used by Alembic. -revision = '532e9e1f0f3a' -down_revision = 'initial_db' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table('policies', - sa.Column('id', sa.String(length=36), nullable=False), - sa.Column('created_at', sa.DateTime(), nullable=False), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', sa.String(length=36), - server_default='', nullable=True), - sa.Column('name', sa.Text(), nullable=False), - sa.Column('abbreviation', sa.String(length=5), - nullable=False), - sa.Column('description', sa.Text(), nullable=False), - sa.Column('owner', sa.Text(), nullable=False), - sa.Column('kind', sa.Text(), nullable=False), - sa.PrimaryKeyConstraint('id'), - mysql_engine='InnoDB') - - -def downgrade(): - op.drop_table('policies') diff --git a/congress/db/migration/alembic_migrations/versions/56e86d51ec62_add_name_attribute_to_policy_rules.py b/congress/db/migration/alembic_migrations/versions/56e86d51ec62_add_name_attribute_to_policy_rules.py deleted file mode 100644 index 101c5a32..00000000 --- a/congress/db/migration/alembic_migrations/versions/56e86d51ec62_add_name_attribute_to_policy_rules.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Add name attribute to policy rules - -Revision ID: 56e86d51ec62 -Revises: 532e9e1f0f3a -Create Date: 2015-01-14 13:08:53.945019 - -""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -# revision identifiers, used by Alembic. 
-revision = '56e86d51ec62'
-down_revision = '532e9e1f0f3a'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.add_column('policy_rules',
-                  sa.Column('name', sa.String(length=255), nullable=True))
-
-
-def downgrade():
-    op.drop_column('policy_rules', 'name')
diff --git a/congress/db/migration/alembic_migrations/versions/HEAD b/congress/db/migration/alembic_migrations/versions/HEAD
deleted file mode 100644
index 03e779e5..00000000
--- a/congress/db/migration/alembic_migrations/versions/HEAD
+++ /dev/null
@@ -1 +0,0 @@
-c0125080d572
\ No newline at end of file
diff --git a/congress/db/migration/alembic_migrations/versions/__init__.py b/congress/db/migration/alembic_migrations/versions/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/congress/db/migration/alembic_migrations/versions/aabe895bbd4d_poliy_name_uniqueness.py b/congress/db/migration/alembic_migrations/versions/aabe895bbd4d_poliy_name_uniqueness.py
deleted file mode 100644
index 1e0d4f92..00000000
--- a/congress/db/migration/alembic_migrations/versions/aabe895bbd4d_poliy_name_uniqueness.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright 2016 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""policy name uniqueness
-
-Revision ID: aabe895bbd4d
-Revises: 01e78af70b91
-Create Date: 2016-11-04 13:55:05.064012
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'aabe895bbd4d'
-down_revision = '01e78af70b91'
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    op.create_table('policiesdeleted',
-                    sa.Column('id', sa.String(length=36), nullable=False),
-                    sa.Column('created_at', sa.DateTime(), nullable=False),
-                    sa.Column('updated_at', sa.DateTime(), nullable=True),
-                    sa.Column('deleted_at', sa.DateTime(), nullable=True),
-                    sa.Column('deleted', sa.String(length=36),
-                              server_default='', nullable=True),
-                    sa.Column('name', sa.String(length=255), nullable=False),
-                    sa.Column('abbreviation', sa.String(length=5),
-                              nullable=False),
-                    sa.Column('description', sa.Text(), nullable=False),
-                    sa.Column('owner', sa.Text(), nullable=False),
-                    sa.Column('kind', sa.Text(), nullable=False),
-                    sa.PrimaryKeyConstraint('id'),
-                    mysql_engine='InnoDB')
-
-    # copy all rows from table:policies to table:policiesdeleted;
-    # table:policiesdeleted is used as temporary space while we recreate
-    # table:policies with the right column:name type to support index/unique.
-    # recreate the table rather than ALTER to generically support most
-    # backends
-    try:
-        op.execute(
-            "INSERT INTO policiesdeleted "
-            "SELECT * FROM policies")
-    except Exception:
-        # if copying the rows fails (likely because a name is longer than
-        # 255), stop the upgrade and clean up
-        op.drop_table('policiesdeleted')
-        raise
-
-    # drop and recreate table:policies
-    op.drop_table('policies')
-    op.create_table('policies',
-                    sa.Column('id', sa.String(length=36), nullable=False),
-                    sa.Column('created_at', sa.DateTime(), nullable=False),
-                    sa.Column('updated_at', sa.DateTime(), nullable=True),
-                    sa.Column('deleted_at', sa.DateTime(), nullable=True),
-                    sa.Column('deleted', sa.String(length=36),
-                              server_default='', nullable=True),
-                    sa.Column('name', sa.String(length=255), nullable=False),
-                    sa.Column('abbreviation', sa.String(length=5),
-                              nullable=False),
-                    sa.Column('description', sa.Text(), nullable=False),
-                    sa.Column('owner', sa.Text(), nullable=False),
-                    sa.Column('kind', sa.Text(), nullable=False),
-                    sa.PrimaryKeyConstraint('id'),
-                    mysql_engine='InnoDB')
-
-    # copy non-(soft)deleted rows back into table:policies
-    op.execute("INSERT INTO policies "
-               "SELECT * FROM policiesdeleted WHERE deleted = ''")
-
-    # delete non-(soft)deleted rows from table:policiesdeleted
-    op.execute("DELETE FROM policiesdeleted WHERE deleted = ''")
-
-    op.create_unique_constraint(None, 'policies', ['name'])
-
-
-def downgrade():
-    # drop and recreate table:policies with the right column:name type,
-    # using table:policiesdeleted as temporary work space
-
-    op.execute("INSERT INTO policiesdeleted SELECT * FROM policies")
-    op.drop_table('policies')
-    # op.drop_constraint(None, 'policies', type_='unique')
-    op.create_table('policies',
-                    sa.Column('id', sa.String(length=36), nullable=False),
-                    sa.Column('created_at', sa.DateTime(), nullable=False),
-                    sa.Column('updated_at', sa.DateTime(), nullable=True),
-                    sa.Column('deleted_at', sa.DateTime(), nullable=True),
-                    sa.Column('deleted', sa.String(length=36),
-                              server_default='', nullable=True),
-                    sa.Column('name', sa.Text(), nullable=False),
-                    sa.Column('abbreviation', sa.String(length=5),
-                              nullable=False),
-                    sa.Column('description', sa.Text(), nullable=False),
-                    sa.Column('owner', sa.Text(), nullable=False),
-                    sa.Column('kind', sa.Text(), nullable=False),
-                    sa.PrimaryKeyConstraint('id'),
-                    mysql_engine='InnoDB')
-
-    # move all rows (deleted or not) into table:policies
-    op.execute("INSERT INTO policies SELECT * FROM policiesdeleted")
-    op.drop_table('policiesdeleted')
diff
--git a/congress/db/migration/alembic_migrations/versions/c0125080d572_policy_library.py b/congress/db/migration/alembic_migrations/versions/c0125080d572_policy_library.py deleted file mode 100644 index d6563b69..00000000 --- a/congress/db/migration/alembic_migrations/versions/c0125080d572_policy_library.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2017 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""policy library - -Revision ID: c0125080d572 -Revises: aabe895bbd4d -Create Date: 2017-06-21 13:20:14.529313 - -""" - -# revision identifiers, used by Alembic. -revision = 'c0125080d572' -down_revision = 'aabe895bbd4d' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'library_policies', - sa.Column('id', sa.String(length=36), nullable=False), - sa.Column('name', sa.String(length=255), nullable=False, unique=True), - sa.Column('abbreviation', sa.String(length=5), nullable=False), - sa.Column('description', sa.Text(), nullable=False), - sa.Column('kind', sa.Text(), nullable=False), - sa.Column('rules', sa.Text(), nullable=False), - sa.PrimaryKeyConstraint('id'), - mysql_engine='InnoDB' - ) - - -def downgrade(): - op.drop_table('library_policies') diff --git a/congress/db/migration/alembic_migrations/versions/inital_db.py b/congress/db/migration/alembic_migrations/versions/inital_db.py deleted file mode 100644 index f089080a..00000000 --- a/congress/db/migration/alembic_migrations/versions/inital_db.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""initial_db - -Revision ID: initial_db -Revises: None - -""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -# revision identifiers, used by Alembic. -revision = 'initial_db' -down_revision = None - -from congress.db.migration.alembic_migrations import policy_rules_init_opts - - -def upgrade(): - policy_rules_init_opts.upgrade() - - -def downgrade(): - policy_rules_init_opts.downgrade() diff --git a/congress/db/migration/cli.py b/congress/db/migration/cli.py deleted file mode 100644 index 9993b39c..00000000 --- a/congress/db/migration/cli.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import os - -from alembic import command as alembic_command -from alembic import config as alembic_config -from alembic import script as alembic_script -from alembic import util as alembic_util -from oslo_config import cfg - -HEAD_FILENAME = 'HEAD' - - -_db_opts = [ - cfg.StrOpt('connection', - deprecated_name='sql_connection', - default='', - secret=True, - help=_('URL to database')), - cfg.StrOpt('engine', - default='', - help=_('Database engine')), -] - -CONF = cfg.ConfigOpts() -CONF.register_cli_opts(_db_opts, 'database') - - -def do_alembic_command(config, cmd, *args, **kwargs): - try: - getattr(alembic_command, cmd)(config, *args, **kwargs) - except alembic_util.CommandError as e: - alembic_util.err(str(e)) - - -def do_check_migration(config, cmd): - do_alembic_command(config, 'branches') - validate_head_file(config) - - -def do_upgrade_downgrade(config, cmd): - if not CONF.command.revision and not CONF.command.delta: - raise SystemExit(_('You must provide a revision or relative delta')) - - revision = CONF.command.revision - - if CONF.command.delta: - sign = '+' if CONF.command.name == 'upgrade' else '-' - revision = sign + str(CONF.command.delta) - else: - revision = CONF.command.revision - - do_alembic_command(config, cmd, revision, sql=CONF.command.sql) - - -def do_stamp(config, cmd): - do_alembic_command(config, cmd, - CONF.command.revision, - sql=CONF.command.sql) - - -def do_revision(config, cmd): - do_alembic_command(config, cmd, - message=CONF.command.message, - autogenerate=CONF.command.autogenerate, - sql=CONF.command.sql) - update_head_file(config) - - -def validate_head_file(config): - script = alembic_script.ScriptDirectory.from_config(config) - if len(script.get_heads()) > 1: - alembic_util.err(_('Timeline branches unable to generate timeline')) - - head_path = os.path.join(script.versions, HEAD_FILENAME) - if (os.path.isfile(head_path) and - open(head_path).read().strip() == script.get_current_head()): - return - else: - alembic_util.err(_('HEAD file does not match migration timeline head')) - - -def update_head_file(config): - script = alembic_script.ScriptDirectory.from_config(config) - if len(script.get_heads()) > 1: - alembic_util.err(_('Timeline branches unable to generate timeline')) - - head_path = os.path.join(script.versions, HEAD_FILENAME) - with open(head_path, 'w+') as f: - f.write(script.get_current_head()) - - -def add_command_parsers(subparsers): - for name in ['current', 'history', 'branches']: - parser = subparsers.add_parser(name) - parser.set_defaults(func=do_alembic_command) - - parser = subparsers.add_parser('check_migration') - parser.set_defaults(func=do_check_migration) - - for name in ['upgrade', 'downgrade']: - parser = subparsers.add_parser(name) - parser.add_argument('--delta', type=int) - parser.add_argument('--sql', action='store_true') - parser.add_argument('revision', nargs='?') - parser.add_argument('--mysql-engine', - default='', - help='Change MySQL storage engine of current ' - 'existing tables') - 
parser.set_defaults(func=do_upgrade_downgrade)
-
-    parser = subparsers.add_parser('stamp')
-    parser.add_argument('--sql', action='store_true')
-    parser.add_argument('revision')
-    parser.set_defaults(func=do_stamp)
-
-    parser = subparsers.add_parser('revision')
-    parser.add_argument('-m', '--message')
-    parser.add_argument('--autogenerate', action='store_true')
-    parser.add_argument('--sql', action='store_true')
-    parser.set_defaults(func=do_revision)
-
-
-command_opt = cfg.SubCommandOpt('command',
-                                title='Command',
-                                help=_('Available commands'),
-                                handler=add_command_parsers)
-
-CONF.register_cli_opt(command_opt)
-
-
-def main():
-    config = alembic_config.Config(
-        os.path.join(os.path.dirname(__file__), 'alembic.ini')
-    )
-    config.set_main_option('script_location',
-                           'congress.db.migration:alembic_migrations')
-    # attach the Congress conf to the Alembic conf
-    config.congress_config = CONF
-
-    CONF()
-    # TODO(gongysh) enable logging
-    CONF.command.func(config, CONF.command.name)
diff --git a/congress/db/migration/models/__init__.py b/congress/db/migration/models/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/congress/db/migration/models/head.py b/congress/db/migration/models/head.py
deleted file mode 100644
index a27fcf10..00000000
--- a/congress/db/migration/models/head.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-This module provides all database models at the current HEAD.
-
-Its purpose is to create metadata comparable with the current database
-schema. Based on this comparison, the database can be healed with a healing
-migration.
-
-"""
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from congress.db import datasources  # noqa
-from congress.db import db_policy_rules  # noqa
-from congress.db import model_base
-
-
-def get_metadata():
-    return model_base.BASE.metadata
diff --git a/congress/db/model_base.py b/congress/db/model_base.py
deleted file mode 100644
index 6f7e2b6b..00000000
--- a/congress/db/model_base.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
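For context on the cli.py module above: it builds on oslo.config's ``SubCommandOpt``, where a handler populates argparse subparsers and the parsed subcommand's attributes surface under ``CONF.command``. A self-contained sketch with a hypothetical ``greet`` command (names are illustrative, not Congress code)::

    from oslo_config import cfg

    CONF = cfg.ConfigOpts()

    def do_greet(conf):
        print('hello,', conf.command.target)

    def add_command_parsers(subparsers):
        parser = subparsers.add_parser('greet')   # subcommand name
        parser.add_argument('target')             # positional argument
        parser.set_defaults(func=do_greet)

    CONF.register_cli_opt(cfg.SubCommandOpt('command',
                                            title='Command',
                                            handler=add_command_parsers))

    CONF(['greet', 'world'])   # parse argv-style arguments
    CONF.command.func(CONF)    # prints: hello, world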
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import datetime
-
-from oslo_db.sqlalchemy import models
-from oslo_utils import uuidutils
-import sqlalchemy as sa
-from sqlalchemy.ext import declarative
-from sqlalchemy import orm
-
-
-class CongressBase(models.ModelBase):
-    """Base class for Congress Models."""
-
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-
-    @declarative.declared_attr
-    def __tablename__(cls):
-        # NOTE(jkoelker) use the pluralized name of the class as the table
-        return cls.__name__.lower() + 's'
-
-    def __iter__(self):
-        self._i = iter(orm.object_mapper(self).columns)
-        return self
-
-    def next(self):
-        n = next(self._i).name
-        return n, getattr(self, n)
-
-    # Python 3 spells the iterator protocol __next__; alias it so
-    # iteration works on both interpreters.
-    __next__ = next
-
-    def __repr__(self):
-        """sqlalchemy based automatic __repr__ method."""
-        items = ['%s=%r' % (col.name, getattr(self, col.name))
-                 for col in self.__table__.columns]
-        return "<%s.%s[object at %x] {%s}>" % (self.__class__.__module__,
-                                               self.__class__.__name__,
-                                               id(self), ', '.join(items))
-
-
-class HasTenant(object):
-    """Tenant mixin, add to subclasses that have a tenant."""
-
-    # NOTE(jkoelker) tenant_id is just a free form string ;(
-    tenant_id = sa.Column(sa.String(255))
-
-
-class HasId(object):
-    """id mixin, add to subclasses that have an id."""
-
-    id = sa.Column(sa.String(36),
-                   primary_key=True,
-                   default=uuidutils.generate_uuid)
-
-
-# TODO(arosen) move this somewhere better later...
-def _get_date():
-    return datetime.datetime.now()
-
-
-class HasAudit(object):
-    created_at = sa.Column(sa.DateTime, default=_get_date, nullable=False)
-    updated_at = sa.Column(sa.DateTime, onupdate=_get_date, nullable=True)
-    deleted_at = sa.Column(sa.DateTime, nullable=True)
-    deleted = sa.Column(sa.String(length=36),
-                        server_default='', default='', nullable=True)
-
-
-BASE = declarative.declarative_base(cls=CongressBase)
diff --git a/congress/dse2/README.rst b/congress/dse2/README.rst
deleted file mode 100644
index 18d0f66f..00000000
--- a/congress/dse2/README.rst
+++ /dev/null
@@ -1,96 +0,0 @@
-
-.. _dse2:
-
-==========================================
-Data Services Engine v2 (DSE2) Development
-==========================================
-
-1. DSE2 Design Overview
-=======================
-
-The work in this folder supports the dist-cross-process-dse blueprint.
-In brief, the goal of this work is to support Congress components running
-across multiple processes. A new DSE framework is being created to
-leverage oslo.messaging for component communication.
-
-The distribution work is being done in parallel to the existing DSE.
-We aim to create an optional startup flag to select the new framework,
-such that existing functionality is preserved during development. When
-the new framework is deemed ready, a small commit will change the
-default runtime to the new framework, and deprecate the old DSE.
-
-
-2. Current Development Status
-=============================
-
-Since the DSE provides the conduit for Congress component communication,
-it is critical that it is robust and well tested. The core testing
-should be focused on the DSE component only, and should not require external
-configuration or dependencies.
-
-All DSE2 work is currently being done in a standalone dse2 folder.
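Stepping back to the model_base module above for a moment, a short sketch with a hypothetical model shows how the pieces compose: subclassing ``BASE`` plus the mixins yields an auto-pluralized table name and auto-populated id and audit columns (imports assume the congress package is importable)::

    from congress.db import model_base

    class ExamplePolicy(model_base.BASE, model_base.HasId,
                        model_base.HasAudit):
        # columns come from the mixins:
        # id, created_at, updated_at, deleted_at, deleted
        pass

    # naive pluralization: class name lowercased plus 's'
    print(ExamplePolicy.__tablename__)  # examplepolicys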
-
-* data_service.py:
-    * Status:
-        * DataServiceInfo created
-        * DataService skeleton created
-    * Next Steps:
-        * Expose DataService RPC endpoints to DataServiceInfo
-        * Add table pub/sub to DataService
-        * Add policy management methods to DataService
-* dse_node.py:
-    * Status:
-        * DseNode created; supports node and service RPC
-    * Next Steps:
-        * Integrate control bus and validate peer discovery
-* control_bus.py:
-    * Status:
-        * DseNodeControlBus basic discovery of peers
-    * Next Steps:
-        * Robustness
-
-
-3. Running the tests
-====================
-
-The current code snapshot is intentionally decoupled from the project
-testing framework. To run, developers can set up a virtual environment
-that contains the project dependencies:
-
-Configure Rabbit for testing
-----------------------------
-
-* Install rabbitmq (e.g. apt-get install rabbitmq-server)
-* Add testing user:
-    # rabbitmqctl add_user testing test
-    # rabbitmqctl set_permissions -p / testing '.*' '.*' '.*'
-
-Setting up a testing virtual environment
-----------------------------------------
-
-  $ virtualenv dsetest
-  $ echo <path to congress dir> > \
-      dsetest/lib/python2.7/site-packages/congress.pth  # Add congress PYTHONPATH
-  $ . dsetest/bin/activate
-  $ pip install --upgrade pip
-  $ pip install -r <congress dir>/requirements.txt
-  $ pip install -r <congress dir>/test-requirements.txt
-  $ pip install oslo.messaging
-
-Running the DSE2 tests
-----------------------
-
-* Ensure you are in the virtual env configured above
-  $ . dsetest/bin/activate  # Run to join the virtualenv if not already
-
-* Change to the dse2 directory
-  $ cd congress/dse2
-
-* Run the data_service tests:
-  $ python test_data_service.py
-
-* Run the dse_node test using the 'fake' oslo.messaging driver
-  $ python test_dse_node.py --fake
-
-* Run the dse_node test using the 'rabbit' oslo.messaging driver
-  $ python test_dse_node.py --rabbit
diff --git a/congress/dse2/__init__.py b/congress/dse2/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/congress/dse2/control_bus.py b/congress/dse2/control_bus.py
deleted file mode 100644
index 7304c140..00000000
--- a/congress/dse2/control_bus.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright (c) 2016 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
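Before the control_bus module below, one convention worth illustrating: its heartbeats JSON-encode Python sets by downgrading them to lists (sets are not JSON-serializable), and the receiving side rebuilds sets from those lists. A standalone round-trip sketch of that idea (names are illustrative)::

    import json

    class SetEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj, set):
                return sorted(obj)  # lists serialize; sort for determinism
            return json.JSONEncoder.default(self, obj)

    payload = {'subscribed_tables': {'svc1': {'t1', 't2'}}}
    wire = json.dumps(payload, cls=SetEncoder)

    decoded = json.loads(wire)
    for target in decoded['subscribed_tables']:
        decoded['subscribed_tables'][target] = set(
            decoded['subscribed_tables'][target])
    assert decoded == payload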
-# - -import functools -import json -import time - -import eventlet -eventlet.monkey_patch() - -from oslo_log import log as logging - -from congress.dse2 import data_service - -LOG = logging.getLogger() - - -def drop_cast_echos(wrapped): - @functools.wraps(wrapped) - def wrapper(rpc_endpoint, message_context, *args, **kwargs): - node = rpc_endpoint.dse_bus.node - if message_context['node_id'] == node.node_id: - LOG.trace("<%s> Ignoring my echo", node.node_id) - return - return wrapped(rpc_endpoint, message_context, *args, **kwargs) - return wrapper - - -class HeartbeatEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, set): - return list(obj) - # Let the base class default method handle all other cases - return json.JSONEncoder.default(self, obj) - - -class _DseControlBusEndpoint(object): - def __init__(self, dse_bus): - self.dse_bus = dse_bus - - @drop_cast_echos - def accept_heartbeat(self, client_ctxt, args): - LOG.debug("<%s> Accepted heartbeat: context=%s, args='%s'", - self.dse_bus.node.node_id, client_ctxt, args) - hb = json.loads(args) - # convert dict to set - for target in hb['subscribed_tables']: - hb['subscribed_tables'][target] = set( - hb['subscribed_tables'][target]) - peer_id = client_ctxt['node_id'] - new_status = { - 'node_id': peer_id, - 'instance': client_ctxt['instance'], - 'services': hb['services'], - 'subscribed_tables': hb['subscribed_tables'] - } - - old_status = self.dse_bus.peers.get(peer_id) - if old_status: - # TODO(pballand): validate instance, services - LOG.trace("<%s> Refreshed peer '%s' with services %s", - self.dse_bus.node.node_id, peer_id, - [s['service_id'] for s in new_status['services']]) - else: - LOG.info("<%s> New peer '%s' with services %s", - self.dse_bus.node.node_id, peer_id, - [s['service_id'] for s in new_status['services']]) - self.dse_bus.peers[peer_id] = new_status - - # TODO(pballand): handle time going backwards - self.dse_bus.peers[peer_id]['last_hb_time'] = time.time() - - # Note(thread-safety): blocking function - @drop_cast_echos - def list_services(self, client_ctxt): - LOG.debug("<%s> Peer '%s' requested updated service list", - self.dse_bus.node.node_id, client_ctxt['node_id']) - # Note(thread-safety): blocking call - self.dse_bus._publish_heartbeat() - - -class DseNodeControlBus(data_service.DataService): - """Maintain DSE connection for a DseNode. - - The DSE maintains a common directory of data services and their - corresponding exported tables and RPCs. This control bus maintains - this view using oslo.messaging RPC primitives. - """ - HEARTBEAT_INTERVAL = 1 - - def __init__(self, node): - self.node = node - self.control_bus_ep = _DseControlBusEndpoint(self) - self.peers = {} - super(DseNodeControlBus, self).__init__('_control_bus') - - def rpc_endpoints(self): - return [self.control_bus_ep] - - # Note(thread-safety): blocking function - def _publish_heartbeat(self): - args = json.dumps( - {'services': [s.info.to_dict() - for s in self.node.get_services(True)], - # FIXME(ekcs): suppress subscriber details for each subscribed - # table to avoid unnecessary network traffic. Only binary - # information needed over HB. 
- 'subscribed_tables': self.node.subscriptions}, - cls=HeartbeatEncoder) - # Note(thread-safety): blocking call - self.node.broadcast_service_rpc(self.service_id, 'accept_heartbeat', - {'args': args}) - - def _call_heartbeat_callbacks(self): - for service in self.node.get_services(): - heartbeat_callbacks = service.heartbeat_callbacks.values() - for f in heartbeat_callbacks: - if not service._running: - break - # Note(thread-safety): potentially blocking call - f() - - # Note(thread-safety): blocking function - def _heartbeat_loop(self): - while self._running: - self._publish_heartbeat() - self.node._update_tables_with_subscriber() - self._call_heartbeat_callbacks() - eventlet.sleep(self.HEARTBEAT_INTERVAL) - - # Note(thread-safety): blocking function - def _refresh_peers(self): - # Request immediate status refresh from peers - LOG.debug("<%s> Requesting service list from all peers", - self.node.node_id) - self.node.broadcast_service_rpc(self.service_id, 'list_services') - - # Note(thread-safety): blocking function - def start(self): - if self._running: - LOG.debug('control bus on %s already started.' % self.node.node_id) - return - - LOG.debug("<%s> Starting DSE control bus", self.node.node_id) - super(DseNodeControlBus, self).start() - - # TODO(pballand): ensure I am not currently running - # Add an instance UUID to the node status, have timeout on nodes - self._refresh_peers() - - # TODO(pballand): before enabling self, check if my node ID is - # already present (no consensus service, so use timeout heuristic) - self._heartbeat_thread = eventlet.spawn(self._heartbeat_loop) - - def stop(self): - LOG.debug("<%s> Stopping DSE control bus", self.node.node_id) - super(DseNodeControlBus, self).stop() - eventlet.greenthread.kill(self._heartbeat_thread) - - def dse_status(self): - """Return latest observation of DSE status.""" - # TODO(pballand): include node status [JOINING, JOINED] - return {'peers': self.peers} diff --git a/congress/dse2/data_service.py b/congress/dse2/data_service.py deleted file mode 100644 index 215a7293..00000000 --- a/congress/dse2/data_service.py +++ /dev/null @@ -1,513 +0,0 @@ -# Copyright (c) 2016 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import copy -import six # thirdparty import first because needed in import of Queue/queue -import time -if six.PY2: - import Queue as queue_package -else: - import queue as queue_package - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils as json - -from congress import exception - -LOG = logging.getLogger(__name__) - - -class DataServiceInfo(object): - """Metadata for DataService on the DSE. - - Attributes: - service_id: The ID of the service. - node_id: The ID of the node the service is running on. - published_tables: List of table IDs published by this service. - subscribed_tables: List of table IDs this service subscribes to. - rpc_endpoints_info: List of RPC endpoints exposed by this service. 
- """ - MARSHALL_ATTRS = set(['service_id', 'node_id', 'published_tables', - 'subscribed_tables', 'rpc_endpoints_info']) - - def __init__(self, service_id=None, node_id=None, published_tables=None, - subscribed_tables=None, rpc_endpoints_info=None): - self.service_id = service_id - self.node_id = node_id - self.published_tables = published_tables or [] - self.subscribed_tables = subscribed_tables or [] - self.rpc_endpoints_info = rpc_endpoints_info or [] - - def __str__(self): - return self.__class__.__name__ + "<%s>" % self.service_id - - def __repr__(self): - return self.__class__.__name__ + "(%s)" % dict.__repr__(vars(self)) - - @classmethod - def from_json(cls, json_string): - return json.loads(json_string, object_hook=cls.from_dict) - - @classmethod - def from_dict(cls, raw_dict): - provided_keys = set(raw_dict.keys()) - if provided_keys != cls.MARSHALL_ATTRS: - missing = cls.MARSHALL_ATTRS - provided_keys - malformed = provided_keys - cls.MARSHALL_ATTRS - msg = "Cannot construct %s from input:" % cls.__name__ - if missing: - msg += " Missing keys: %s" % list(missing) - if malformed: - msg += " Malformed keys: %s" % list(malformed) - raise KeyError(msg) - ret = DataServiceInfo() - for n in cls.MARSHALL_ATTRS: - setattr(ret, n, raw_dict[n]) - return ret - - def to_dict(self): - return dict([(k, getattr(self, k)) for k in self.MARSHALL_ATTRS]) - - def to_json(self): - return json.dumps(self.to_dict()) - - -class DataService(object): - """A unit of data and business logic that interfaces with the DSE. - - A DataService may publish tables, subscribe to tables, and/or expose - RPCs on the DSE. DataService instances are bound to a DseNode which is - used for all inter-service communication. - - Attributes: - service_id: A unique ID of the service. - _published_tables_with_subscriber: A set of tables published by self - that has subscribers - """ - - # TODO(pballand): make default methods for pub/subscribed tables - def __init__(self, service_id): - # Note(ekcs): temporary setting to disable use of diffs and sequencing - # to avoid muddying the process of a first dse2 system test. - # TODO(ekcs,dse2): remove when differential update is standard - self.always_snapshot = True - self.service_id = service_id - self.node = None - self._rpc_server = None - self._target = None - self._rpc_endpoints = [DataServiceEndPoints(self)] - self._running = False - self._published_tables_with_subscriber = set() - - # data structures for sequenced data updates for reliable pub-sub - # msg queues for msgs to be processed - # each queue is a priority queue prioritizing lowest seqnum - self.msg_queues = {} # {publisher -> {table -> msg queue}} - # oldest queued msg receipt time - self.oldest_queue_times = {} # {publisher -> {table -> receipt time}} - # last received & processed seqnum - self.receiver_seqnums = {} # {publisher -> {table -> seqnum}} - # last sent seqnum - self.sender_seqnums = {} # {table -> seqnum} - # last published data - self._last_published_data = {} # {table -> data} - - # custom functions to execute on each heartbeat - # must be 0-ary function - # {id -> function} - self.heartbeat_callbacks = {} - - def add_rpc_endpoint(self, endpt): - self._rpc_endpoints.append(endpt) - - def rpc_endpoints(self): - """Return list of RPC endpoint objects to be exposed for this service. - - A DataService may include zero or more RPC endpoints to be exposed - by the DseNode. Each endpoint object must be compatible with the - oslo.messaging RPC Server. 
- """ - return self._rpc_endpoints - - @property - def status(self): - return "OK" - - @property - def info(self): - # TODO(pballand): populate rpc_endpoints_info from rpc_endpoints - return DataServiceInfo( - service_id=self.service_id, - node_id=self.node.node_id if self.node else None, - published_tables=None, - subscribed_tables=self._published_tables_with_subscriber, - rpc_endpoints_info=None, - ) - - def start(self): - """Start the DataService. - - This method is called by a DseNode before any RPCs are invoked. - """ - assert self.node is not None - if not self._running: - self._rpc_server.start() - self._running = True - - def stop(self): - """Stop the DataService. - - This method is called by a DseNode when the DataService instance is - no longer needed. No RPCs will invoked on stopped DataServices. - """ - assert self.node is not None - self._rpc_server.stop() - self._running = False - - # Note(thread-safety): blocking function - def wait(self): - """Wait for processing to complete. - - After a call to stop(), the DataService may have some outstanding work - that has not yet completed. The wait() method blocks until all - DataService processing is complete. - """ - assert self.node is not None - self._rpc_server.wait() - - # Note(thread-safety): blocking function - def rpc(self, service, action, kwargs=None, timeout=None, local=False, - retry=None): - return self.node.invoke_service_rpc( - service, action, kwargs, timeout=timeout, local=local, retry=retry) - - # Will be removed once the reference of node exists in api - # Note(thread-safety): blocking function - def get_datasources(self, filter_secret=False): - return self.node.get_datasources(filter_secret) - - def is_valid_service(self, service_id): - return self.node.is_valid_service(service_id) - - # Will be removed once the reference of node exists in api - # Note(thread-safety): blocking function - def get_datasource(self, datasource_id): - return self.node.get_datasource(datasource_id) - - # Will be removed once the reference of node exists in api - # Note(thread-safety): blocking function - def get_drivers_info(self, *args): - return self.node.get_drivers_info(*args) - - # Will be removed once the reference of node exists in api - # Note(thread-safety): blocking function - def get_driver_info(self, *args): - return self.node.get_driver_info(*args) - - # Will be removed once the reference of node exists in api - # Note(thread-safety): blocking function - def get_driver_schema(self, *args): - return self.node.get_driver_schema(*args) - - # Will be removed once the reference of node exists in api - # Note(thread-safety): blocking function - def make_datasource_dict(self, *args, **kwargs): - return self.node.make_datasource_dict(*args, **kwargs) - - # Note(thread-safety): blocking function - def publish(self, table, data, use_snapshot=True): - if self.always_snapshot: - self.node.publish_table(self.service_id, table, data) - return - - def get_differential_and_set_last_published_data(): - if table in self._last_published_data: - to_add = list( - set(data) - set(self._last_published_data[table])) - to_del = list( - set(self._last_published_data[table]) - set(data)) - self._last_published_data[table] = data - else: - self._last_published_data[table] = data - to_add = data - to_del = [] - return [to_add, to_del] - - def increment_get_seqnum(): - if table not in self.sender_seqnums: - self.sender_seqnums[table] = 0 - else: - self.sender_seqnums[table] = self.sender_seqnums[table] + 1 - return self.sender_seqnums[table] - - if not 
use_snapshot: - data = get_differential_and_set_last_published_data() - if len(data[0]) == 0 and len(data[1]) == 0: - return - - seqnum = increment_get_seqnum() - self.node.publish_table_sequenced( - self.service_id, table, data, use_snapshot, seqnum) - - # Note(thread-safety): blocking function - def subscribe(self, service, table): - try: - if self.always_snapshot: - # Note(thread-safety): blocking call - data = self.node.subscribe_table( - self.service_id, service, table) - self.receive_data(service, table, data, is_snapshot=True) - return - # Note(thread-safety): blocking call - (seqnum, data) = self.node.subscribe_table( - self.service_id, service, table) - self.receive_data_sequenced( - service, table, data, seqnum, is_snapshot=True) - except exception.NotFound: - LOG.info("Service '%s' unresponsive. '%s:%s' subscribed but data " - "not yet initialized.", service, service, table) - - def unsubscribe(self, service, table): - # Note(thread-safety): it is important to make sure there are no - # blocking calls in modifying the msg_queues and related - # data structures. - # Otherwise, concurrency bugs will likely occur when the - # periodic task _check_resub_all interrupts and modifies - # the same data structures. - self.node.unsubscribe_table(self.service_id, service, table) - self._clear_msg_queue(service, table) - self._clear_receiver_seqnum(service, table) - - def _clear_msg_queue(self, publisher, table): - # Note(thread-safety): it is important to make sure there are no - # blocking calls in modifying the msg_queues and related - # data structures. - # Otherwise, concurrency bugs will likely occur when the - # periodic task _check_resub_all interrupts and modifies - # the same data structures. - if publisher in self.msg_queues: - if table in self.msg_queues[publisher]: - del self.msg_queues[publisher][table] - del self.oldest_queue_times[publisher][table] - - def _clear_receiver_seqnum(self, publisher, table): - # Note(thread-safety): it is important to make sure there are no - # blocking calls in modifying the msg_queues and related - # data structures. - # Otherwise, concurrency bugs will likely occur when the - # periodic task _check_resub_all interrupts and modifies - # the same data structures. - if publisher in self.receiver_seqnums: - if table in self.receiver_seqnums[publisher]: - del self.receiver_seqnums[publisher][table] - - def receive_data_sequenced( - self, publisher, table, data, seqnum, is_snapshot=False, - receipt_time=None): - """Method called when sequenced publication data arrives.""" - # Note(thread-safety): it is important to make sure there are no - # blocking calls in modifying the msg_queues and related - # data structures. - # Otherwise, concurrency bugs will likely occur when the - # periodic task _check_resub_all interrupts and modifies - # the same data structures. 
- # TODO(ekcs): allow opting out of sequenced processing (per table) - def set_seqnum(): - if publisher not in self.receiver_seqnums: - self.receiver_seqnums[publisher] = {} - self.receiver_seqnums[publisher][table] = seqnum - - def clear_msg_queue(): - self._clear_msg_queue(publisher, table) - - def add_to_msg_queue(): - if publisher not in self.msg_queues: - self.msg_queues[publisher] = {} - self.oldest_queue_times[publisher] = {} - if table not in self.msg_queues[publisher]: - self.msg_queues[publisher][table] = \ - queue_package.PriorityQueue() - # set oldest queue time on first msg only, others are newer - self.oldest_queue_times[publisher][table] = receipt_time - self.msg_queues[publisher][table].put_nowait( - (seqnum, is_snapshot, data, receipt_time)) - assert self.msg_queues[publisher][table].qsize() > 0 - - def process_queued_msg(): - def update_oldest_time(): - '''Set oldest time to the receipt time of oldest item in queue. - - Called after removing the previous oldest item from a queue. - If queue is empty, corresponding oldest time is set to None. - ''' - try: - # remove and then add back to priority queue to get the - # receipt time of the next oldest message - # (peek not available in python standard library queues) - s, i, d, t = self.msg_queues[publisher][table].get_nowait() - self.msg_queues[publisher][table].put_nowait((s, i, d, t)) - self.oldest_queue_times[publisher][table] = t - except queue_package.Empty: - # set oldest time to None if queue empty - self.oldest_queue_times[publisher][table] = None - - try: - s, i, d, t = self.msg_queues[publisher][table].get_nowait() - update_oldest_time() - self.receive_data_sequenced(publisher, table, d, s, i, t) - except queue_package.Empty: - pass - except KeyError: - pass - - if receipt_time is None: - receipt_time = time.time() - - # if no seqnum process immediately - if seqnum is None: - self.receive_data(publisher, table, data, is_snapshot) - - # if first data update received on this table - elif (publisher not in self.receiver_seqnums or - table not in self.receiver_seqnums[publisher]): - if is_snapshot: - # set sequence number and process data - set_seqnum() - self.receive_data(publisher, table, data, is_snapshot) - process_queued_msg() - else: - # queue - add_to_msg_queue() - - # if re-initialization - elif seqnum == 0: # initial snapshot or reset - # set sequence number and process data - set_seqnum() - clear_msg_queue() - self.receive_data(publisher, table, data, is_snapshot) - - else: - # if seqnum is old, ignore - if seqnum <= self.receiver_seqnums[publisher][table]: - process_queued_msg() - - # if seqnum next, process all in sequence - elif seqnum == self.receiver_seqnums[publisher][table] + 1: - set_seqnum() - self.receive_data(publisher, table, data, is_snapshot) - process_queued_msg() - - # if seqnum future, queue for future - elif seqnum > self.receiver_seqnums[publisher][table] + 1: - add_to_msg_queue() - - def receive_data(self, publisher, table, data, is_snapshot=True): - """Method called when publication data arrives. - - Instances will override this method. 
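-
-        For snapshot publications, data is the full table contents; for
-        differential publications, it is a pair [to_add, to_del], as
-        produced by publish() above.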
- """ - if is_snapshot: - data = self.node.to_set_of_tuples(data) - else: - data = (self.node.to_set_of_tuples(data[0]), - self.node.to_set_of_tuples(data[1])) - - self.last_msg = {} - self.last_msg['data'] = data - self.last_msg['publisher'] = publisher - self.last_msg['table'] = table - - if not hasattr(self, 'receive_data_history'): - self.receive_data_history = [] - self.receive_data_history.append(self.last_msg) - - def subscription_list(self): - """Method that returns subscription list. - - It returns list of tuple that represents the service's subscription. - The tuple forms following format: - (service_id, table_name). - """ - result = [] - subscription = self.node.get_subscription(self.service_id) - for target, tables in subscription.items(): - result.extend([(target, t) for t in tables]) - return result - - def subscriber_list(self): - """Method that returns subscribers list. - - It returns list of services subscribed to this service data. - """ - return self.node.get_subscribers(self.service_id) - - def get_last_published_data_with_seqnum(self, table): - """Method that returns the current seqnum & data for given table.""" - if table not in self.sender_seqnums: - self.sender_seqnums[table] = 0 - self._last_published_data[table] = self.get_snapshot(table) - return (self.sender_seqnums[table], self._last_published_data[table]) - - def get_snapshot(self, table): - """Method that returns the current data for the given table. - - Should be overridden. - """ - raise NotImplementedError( - "get_snapshot is not implemented in the '%s' class." % - self.service_id) - - def check_resub_all(self): - '''Check all subscriptions for long missing update and resubscribe.''' - def check_resub(publisher, table): - if (publisher in self.oldest_queue_times and - table in self.oldest_queue_times[publisher] and - self.oldest_queue_times[publisher][table] is not None and - (time.time() - self.oldest_queue_times[publisher][table] - > cfg.CONF.dse.time_to_resub)): - self.unsubscribe(publisher, table) - self.subscribe(publisher, table) - return True - else: - return False - - copy_oldest_queue_times = copy.copy(self.oldest_queue_times) - for publisher in copy_oldest_queue_times: - copy_oldest_queue_times_publisher = copy.copy( - copy_oldest_queue_times[publisher]) - for table in copy_oldest_queue_times_publisher: - check_resub(publisher, table) - - -class DataServiceEndPoints (object): - def __init__(self, service): - self.service = service - - def get_snapshot(self, context, table): - """Function called on a node when an RPC request is sent.""" - try: - return self.service.get_snapshot(table) - except AttributeError: - pass - - def get_last_published_data_with_seqnum(self, context, table): - """Function called on a node when an RPC request is sent.""" - try: - return self.service.get_last_published_data_with_seqnum(table) - except AttributeError: - pass - - def ping(self, client_ctxt, **args): - """Echo args""" - return args diff --git a/congress/dse2/datasource_manager.py b/congress/dse2/datasource_manager.py deleted file mode 100644 index 2d83d3bd..00000000 --- a/congress/dse2/datasource_manager.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) 2016 . All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-
-from congress.api import base as api_base
-from congress.db import datasources as datasources_db
-from congress.dse2 import data_service
-from congress import exception
-from congress.synchronizer import datasource_synchronizer
-
-LOG = logging.getLogger(__name__)
-
-
-class DSManagerService(data_service.DataService):
-    """A proxy service to datasource managing methods in dse_node."""
-    def __init__(self, service_id):
-        super(DSManagerService, self).__init__(service_id)
-        self.synchronizer = None
-        self.add_rpc_endpoint(DSManagerEndpoints(self))
-
-    def register_synchronizer(self):
-        self.synchronizer = datasource_synchronizer.DatasourceSynchronizer(
-            self.node)
-        self.synchronizer.start()
-
-    def start(self):
-        super(DSManagerService, self).start()
-        self.register_synchronizer()
-
-    def stop(self):
-        if self.synchronizer:
-            self.synchronizer.stop()
-        super(DSManagerService, self).stop()
-
-    # Note(thread-safety): blocking function
-    def add_datasource(self, item, deleted=False, update_db=True):
-        req = self.make_datasource_dict(item)
-
-        # check that the request has valid information
-        self.node.validate_create_datasource(req)
-        if (len(req['name']) == 0 or req['name'][0] == '_'):
-            raise exception.InvalidDatasourceName(value=req['name'])
-
-        new_id = req['id']
-        LOG.debug("adding datasource %s", req['name'])
-        if update_db:
-            LOG.debug("updating db")
-            try:
-                # Note(thread-safety): blocking call
-                datasource = datasources_db.add_datasource(
-                    id_=req['id'],
-                    name=req['name'],
-                    driver=req['driver'],
-                    config=req['config'],
-                    description=req['description'],
-                    enabled=req['enabled'])
-            except db_exc.DBDuplicateEntry:
-                raise exception.DatasourceNameInUse(value=req['name'])
-            except db_exc.DBError:
-                LOG.exception('Creating a new datasource failed.')
-                raise exception.DatasourceCreationError(value=req['name'])
-
-            new_id = datasource['id']
-        try:
-            self.synchronizer.sync_datasource(req['name'])
-            # immediately sync policies on the local PE if present;
-            # otherwise wait for the regularly scheduled sync
-            engine = self.node.service_object(api_base.ENGINE_SERVICE_ID)
-            if engine is not None:
-                engine.synchronizer.sync_one_policy(req['name'])
-            # TODO(dse2): also broadcast to all PE nodes to sync
-        except exception.DataServiceError:
-            LOG.exception('The datasource service has already been '
-                          'created on the node')
-        except Exception:
-            LOG.exception(
-                'Unexpected exception encountered while registering '
-                'new datasource %s.', req['name'])
-            if update_db:
-                datasources_db.delete_datasource(req['id'])
-            msg = ("Datasource service %s creation failed."
-                   % req['name'])
-            raise exception.DatasourceCreationError(message=msg)
-
-        new_item = dict(item)
-        new_item['id'] = new_id
-        return self.node.make_datasource_dict(new_item)
-
-    # Note(thread-safety): blocking function
-    def delete_datasource(self, datasource, update_db=True):
-        LOG.debug("Deleting datasource %s", datasource['name'])
-        datasource_id = datasource['id']
-        if update_db:
-            # Note(thread-safety): blocking call
-            result = datasources_db.delete_datasource_with_data(datasource_id)
-            if not result:
-                raise exception.DatasourceNotFound(id=datasource_id)
-
-        # Note(thread-safety): blocking call
-        try:
-            self.synchronizer.sync_datasource(datasource['name'])
-            # if a local PE exists, sync it
-            engine = self.node.service_object(api_base.ENGINE_SERVICE_ID)
-            if engine:
-                engine.synchronizer.sync_one_policy(datasource['name'])
-        except Exception:
-            msg = ('failed to synchronize_datasource after '
                   'deleting datasource: %s' % datasource_id)
-            LOG.exception(msg)
-            raise exception.DataServiceError(msg)
-
-
-class DSManagerEndpoints(object):
-
-    def __init__(self, service):
-        self.ds_manager = service
-
-    def add_datasource(self, context, items):
-        return self.ds_manager.add_datasource(items)
-
-    def delete_datasource(self, context, datasource):
-        return self.ds_manager.delete_datasource(datasource)
diff --git a/congress/dse2/dse_node.py b/congress/dse2/dse_node.py
deleted file mode 100644
index aad15d3c..00000000
--- a/congress/dse2/dse_node.py
+++ /dev/null
@@ -1,730 +0,0 @@
-# Copyright (c) 2016 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import json
-import six
-
-
-import eventlet
-eventlet.monkey_patch()  # for using oslo.messaging w/ eventlet executor
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-import oslo_messaging as messaging
-from oslo_messaging import exceptions as messaging_exceptions
-from oslo_messaging.rpc import dispatcher
-from oslo_utils import importutils
-from oslo_utils import strutils
-from oslo_utils import uuidutils
-
-from congress.datalog import compile as datalog_compile
-from congress.datasources import constants
-from congress.db import datasources as datasources_db
-from congress.dse2 import control_bus
-from congress import exception
-
-
-LOG = logging.getLogger(__name__)
-
-
-class DseNode(object):
-    """Addressable entity participating on the DSE message bus.
-
-    The Data Services Engine (DSE) is composed of one or more DseNode
-    instances that each may run one or more DataService instances. All
-    communication between data services uses the DseNode interface.
-
-    Attributes:
-        node_id: The unique ID of this node on the DSE.
-        messaging_config: Configuration options for the message bus. See
-            oslo.messaging for more details.
-        node_rpc_endpoints: List of object instances exposing a remotely
-            invokable interface.
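-        partition_id: Identifier of the DSE partition (bus) this node
-            joins; only nodes sharing a partition_id can communicate.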
- """ - RPC_VERSION = '1.0' - EXCHANGE = 'congress' - CONTROL_TOPIC = 'congress-control' - SERVICE_TOPIC_PREFIX = 'congress-service-' - - def node_rpc_target(self, namespace=None, server=None, fanout=False): - return messaging.Target(exchange=self.EXCHANGE, - topic=self._add_partition(self.CONTROL_TOPIC), - version=self.RPC_VERSION, - namespace=namespace, - server=server, - fanout=fanout) - - def service_rpc_target(self, service_id, namespace=None, server=None, - fanout=False): - topic = self._add_partition(self.SERVICE_TOPIC_PREFIX + service_id) - return messaging.Target(exchange=self.EXCHANGE, - topic=topic, - version=self.RPC_VERSION, - namespace=namespace, - server=server, - fanout=fanout) - - def _add_partition(self, topic, partition_id=None): - """Create a seed-specific version of an oslo-messaging topic.""" - partition_id = partition_id or self.partition_id - if partition_id is None: - return topic - return topic + "-" + str(partition_id) - - def __init__(self, messaging_config, node_id, node_rpc_endpoints, - partition_id=None): - # Note(ekcs): temporary setting to disable use of diffs and sequencing - # to avoid muddying the process of a first dse2 system test. - # TODO(ekcs,dse2): remove when differential update is standard - self.always_snapshot = False - self.messaging_config = messaging_config - self.node_id = node_id - self.node_rpc_endpoints = node_rpc_endpoints - # unique identifier shared by all nodes that can communicate - self.partition_id = partition_id or cfg.CONF.dse.bus_id or "bus" - self.node_rpc_endpoints.append(DseNodeEndpoints(self)) - self._running = False - self._services = [] - # uuid to help recognize node_id clash - self.instance = uuidutils.generate_uuid() - # TODO(dse2): add detection and logging/rectifying for node_id clash? - access_policy = dispatcher.DefaultRPCAccessPolicy - self.context = self._message_context() - self.transport = messaging.get_transport( - self.messaging_config, - allowed_remote_exmods=[exception.__name__, dispatcher.__name__, - db_exc.__name__, ]) - self._rpctarget = self.node_rpc_target(self.node_id, self.node_id) - self._rpc_server = messaging.get_rpc_server( - self.transport, self._rpctarget, self.node_rpc_endpoints, - executor='eventlet', access_policy=access_policy) - - # # keep track of what publisher/tables local services subscribe to - # subscribers indexed by publisher and table: - # {publisher_id -> - # {table_name -> set_of_subscriber_ids}} - self.subscriptions = {} - - # Note(ekcs): A little strange that _control_bus starts before self? 
-        self._control_bus = control_bus.DseNodeControlBus(self)
-        self.register_service(self._control_bus)
-        # load configured drivers
-        self.loaded_drivers = self.load_drivers()
-        self.periodic_tasks = None
-        self.sync_thread = None
-        self.start()
-
-    def __del__(self):
-        self.stop()
-        self.wait()
-
-    def __repr__(self):
-        return self.__class__.__name__ + "<%s>" % self.node_id
-
-    def _message_context(self):
-        return {'node_id': self.node_id, 'instance': str(self.instance)}
-
-    # Note(thread-safety): blocking function
-    def register_service(self, service):
-        assert service.node is None
-        if self.service_object(service.service_id):
-            msg = ('Service %s already exists on the node %s'
-                   % (service.service_id, self.node_id))
-            raise exception.DataServiceError(msg)
-        access_policy = dispatcher.DefaultRPCAccessPolicy
-        service.always_snapshot = self.always_snapshot
-        service.node = self
-        self._services.append(service)
-        service._target = self.service_rpc_target(service.service_id,
-                                                  server=self.node_id)
-        service._rpc_server = messaging.get_rpc_server(
-            self.transport, service._target, service.rpc_endpoints(),
-            executor='eventlet', access_policy=access_policy)
-
-        if self._running:
-            service.start()
-
-        LOG.debug('<%s> Service %s RPC Server listening on %s',
-                  self.node_id, service.service_id, service._target)
-
-    # Note(thread-safety): blocking function
-    def unregister_service(self, service_id=None, uuid_=None):
-        """Unregister service from DseNode matching on service_id or uuid_
-
-        Only one should be supplied. No-op if no matching service is found.
-        """
-        LOG.debug("unregistering service %s on node %s", service_id,
-                  self.node_id)
-        service = self.service_object(service_id=service_id, uuid_=uuid_)
-        if service is not None:
-            self._services.remove(service)
-            service.stop()
-            # Note(thread-safety): blocking call
-            service.wait()
-
-    def get_services(self, hidden=False):
-        """Return all local service objects."""
-        if hidden:
-            return self._services
-        return [s for s in self._services if s.service_id[0] != '_']
-
-    def get_global_service_names(self, hidden=False):
-        """Return the names of all services on all nodes."""
-        services = self.get_services(hidden=hidden)
-        local_services = [s.service_id for s in services]
-        # Also check services registered on other nodes
-        peer_nodes = self.dse_status()['peers']
-        peer_services = []
-        for node in peer_nodes.values():
-            peer_services.extend(
-                [srv['service_id'] for srv in node['services']])
-        return set(local_services + peer_services)
-
-    def service_object(self, service_id=None, uuid_=None):
-        """Return the service object requested.
-
-        Search by service_id or uuid_ (only one should be supplied).
-        None if not found.
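-
-        Example (illustrative service_id):
-            node.service_object(service_id='my-service')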
- """ - if service_id is not None: - if uuid_ is not None: - raise TypeError('service_object() cannot accept both args ' - 'service_id and uuid_') - for s in self._services: - if s.service_id == service_id: - return s - elif uuid_ is not None: - for s in self._services: - if getattr(s, 'ds_id', None) == uuid_: - return s - else: - raise TypeError('service_object() requires service_id or ' - 'uuid_ argument, but neither is given.') - return None - - def start(self): - LOG.debug("<%s> DSE Node '%s' starting with %s sevices...", - self.node_id, self.node_id, len(self._services)) - - # Start Node RPC server - self._rpc_server.start() - LOG.debug('<%s> Node RPC Server listening on %s', - self.node_id, self._rpctarget) - - # Start Service RPC server(s) - for s in self._services: - s.start() - LOG.debug('<%s> Service %s RPC Server listening on %s', - self.node_id, s.service_id, s._target) - - self._running = True - - def stop(self): - if self._running is False: - return - - LOG.info("Stopping DSE node '%s'", self.node_id) - for s in self._services: - s.stop() - self._rpc_server.stop() - self._running = False - - # Note(thread-safety): blocking function - def wait(self): - for s in self._services: - # Note(thread-safety): blocking call - s.wait() - # Note(thread-safety): blocking call - self._rpc_server.wait() - - def dse_status(self): - """Return latest observation of DSE status.""" - return self._control_bus.dse_status() - - def is_valid_service(self, service_id): - return service_id in self.get_global_service_names(hidden=True) - - # Note(thread-safety): blocking function - def invoke_node_rpc(self, node_id, method, kwargs=None, timeout=None): - """Invoke RPC method on a DSE Node. - - Args: - node_id: The ID of the node on which to invoke the call. - method: The method name to call. - kwargs: A dict of method arguments. - - Returns: - The result of the method invocation. - - Raises: MessagingTimeout, RemoteError, MessageDeliveryFailure - """ - if kwargs is None: - kwargs = {} - target = self.node_rpc_target(server=node_id) - LOG.trace("<%s> Invoking RPC '%s' on %s", self.node_id, method, target) - client = messaging.RPCClient(self.transport, target, timeout=timeout) - return client.call(self.context, method, **kwargs) - - # Note(thread-safety): blocking function - def broadcast_node_rpc(self, method, kwargs=None): - """Invoke RPC method on all DSE Nodes. - - Args: - method: The method name to call. - kwargs: A dict of method arguments. - - Returns: - None - Methods are invoked asynchronously and results are dropped. - - Raises: RemoteError, MessageDeliveryFailure - """ - if kwargs is None: - kwargs = {} - target = self.node_rpc_target(fanout=True) - LOG.trace("<%s> Casting RPC '%s' on %s", self.node_id, method, target) - client = messaging.RPCClient(self.transport, target) - client.cast(self.context, method, **kwargs) - - # Note(thread-safety): blocking function - def invoke_service_rpc( - self, service_id, method, kwargs=None, timeout=None, local=False, - retry=None): - """Invoke RPC method on a DSE Service. - - Args: - service_id: The ID of the data service on which to invoke the call. - method: The method name to call. - kwargs: A dict of method arguments. - - Returns: - The result of the method invocation. 
-
-        Raises: MessagingTimeout, RemoteError, MessageDeliveryFailure
-        """
-        if kwargs is None:
-            kwargs = {}
-        target = self.node_rpc_target(server=node_id)
-        LOG.trace("<%s> Invoking RPC '%s' on %s", self.node_id, method, target)
-        client = messaging.RPCClient(self.transport, target, timeout=timeout)
-        return client.call(self.context, method, **kwargs)
-
-    # Note(thread-safety): blocking function
-    def broadcast_node_rpc(self, method, kwargs=None):
-        """Invoke an RPC method on all DSE Nodes.
-
-        Args:
-            method: The method name to call.
-            kwargs: A dict of method arguments.
-
-        Returns:
-            None - Methods are invoked asynchronously and results are dropped.
-
-        Raises: RemoteError, MessageDeliveryFailure
-        """
-        if kwargs is None:
-            kwargs = {}
-        target = self.node_rpc_target(fanout=True)
-        LOG.trace("<%s> Casting RPC '%s' on %s", self.node_id, method, target)
-        client = messaging.RPCClient(self.transport, target)
-        client.cast(self.context, method, **kwargs)
-
-    # Note(thread-safety): blocking function
-    def invoke_service_rpc(
-            self, service_id, method, kwargs=None, timeout=None, local=False,
-            retry=None):
-        """Invoke an RPC method on a DSE Service.
-
-        Args:
-            service_id: The ID of the data service on which to invoke the
-                call.
-            method: The method name to call.
-            kwargs: A dict of method arguments.
-
-        Returns:
-            The result of the method invocation.
-
-        Raises: MessagingTimeout, RemoteError, MessageDeliveryFailure,
-            NotFound
-        """
-        target = self.service_rpc_target(
-            service_id, server=(self.node_id if local else None))
-        LOG.trace("<%s> Preparing to invoke RPC '%s' on %s",
-                  self.node_id, method, target)
-        client = messaging.RPCClient(self.transport, target, timeout=timeout,
-                                     retry=retry)
-        if not self.is_valid_service(service_id):
-            try:
-                # First ping the destination to fail fast if unresponsive
-                LOG.trace("<%s> Checking responsiveness before invoking RPC "
-                          "'%s' on %s", self.node_id, method, target)
-                client.prepare(timeout=cfg.CONF.dse.ping_timeout).call(
-                    self.context, 'ping')
-            except (messaging_exceptions.MessagingTimeout,
-                    messaging_exceptions.MessageDeliveryFailure):
-                msg = "service '%s' could not be found"
-                raise exception.NotFound(msg % service_id)
-        if kwargs is None:
-            kwargs = {}
-        try:
-            LOG.trace(
-                "<%s> Invoking RPC '%s' on %s", self.node_id, method, target)
-            result = client.call(self.context, method, **kwargs)
-        except dispatcher.NoSuchMethod:
-            msg = "Method %s not supported for datasource %s"
-            LOG.exception(msg, method, service_id)
-            raise exception.BadRequest(msg % (method, service_id))
-        except (messaging_exceptions.MessagingTimeout,
-                messaging_exceptions.MessageDeliveryFailure):
-            msg = "Request to service '%s' timed out"
-            raise exception.NotFound(msg % service_id)
-        LOG.trace("<%s> RPC call returned: %s", self.node_id, result)
-        return result
-
-    # Note(thread-safety): blocking function
-    def broadcast_service_rpc(self, service_id, method, kwargs=None):
-        """Invoke an RPC method on all instances of service_id.
-
-        Args:
-            service_id: The ID of the data service on which to invoke the
-                call.
-            method: The method name to call.
-            kwargs: A dict of method arguments.
-
-        Returns:
-            None - Methods are invoked asynchronously and results are dropped.
-
-        Raises: RemoteError, MessageDeliveryFailure
-        """
-        if kwargs is None:
-            kwargs = {}
-        if not self.is_valid_service(service_id):
-            msg = "service '%s' is not a registered service"
-            raise exception.NotFound(msg % service_id)
-
-        target = self.service_rpc_target(service_id, fanout=True)
-        LOG.trace("<%s> Casting RPC '%s' on %s", self.node_id, method, target)
-        client = messaging.RPCClient(self.transport, target)
-        client.cast(self.context, method, **kwargs)
-
-    # Note(ekcs): non-sequenced publish retained to simplify rollout of dse2
-    # to be replaced by handle_publish_sequenced
-    # Note(thread-safety): blocking function
-    def publish_table(self, publisher, table, data):
-        """Publish table data to all nodes on the bus.
-
-        Args:
-            publisher: The ID of the service publishing the table.
-            table: The name of the table being published.
-            data: The published table contents.
-
-        Returns:
-            None - Publications are sent asynchronously and results are
-            dropped.
-
-        Raises: RemoteError, MessageDeliveryFailure
-        """
-        LOG.trace("<%s> Publishing from '%s' table %s: %s",
-                  self.node_id, publisher, table, data)
-        self.broadcast_node_rpc(
-            "handle_publish",
-            {'publisher': publisher, 'table': table, 'data': data})
-
-    # Note(thread-safety): blocking function
-    def publish_table_sequenced(
-            self, publisher, table, data, is_snapshot, seqnum):
-        """Publish sequenced table data to all nodes on the bus.
-
-        Args:
-            publisher: The ID of the service publishing the table.
-            table: The name of the table being published.
-            data: The published contents (snapshot or differential).
-            is_snapshot: Whether data is a full snapshot or a differential.
-            seqnum: The sequence number used to order this publication.
-
-        Returns:
-            None - Publications are sent asynchronously and results are
-            dropped.
-
-        Raises: RemoteError, MessageDeliveryFailure
-        """
-        LOG.trace("<%s> Publishing from '%s' table %s: %s",
-                  self.node_id, publisher, table, data)
-        self.broadcast_node_rpc(
-            "handle_publish_sequenced",
-            {'publisher': publisher, 'table': table,
-             'data': data, 'is_snapshot': is_snapshot, 'seqnum': seqnum})
-
-    def table_subscribers(self, publisher, table):
-        """List services on this node that subscribe to publisher/table."""
-        return self.subscriptions.get(
-            publisher, {}).get(table, [])
-
-    # Note(thread-safety): blocking function
-    def subscribe_table(self, subscriber, publisher, table):
-        """Prepare local service to receive publications from target/table."""
-        # data structure: {service -> {target -> set-of-tables}}
-        LOG.trace("subscribing %s to %s:%s", subscriber, publisher, table)
-        if publisher not in self.subscriptions:
-            self.subscriptions[publisher] = {}
-        if table not in self.subscriptions[publisher]:
-            self.subscriptions[publisher][table] = set()
-        self.subscriptions[publisher][table].add(subscriber)
-
-        # oslo returns [] instead of set(), so handle that case directly
-        if self.always_snapshot:
-            # Note(thread-safety): blocking call
-            snapshot = self.invoke_service_rpc(
-                publisher, "get_snapshot", {'table': table})
-            return self.to_set_of_tuples(snapshot)
-        else:
-            # Note(thread-safety): blocking call
-            snapshot_seqnum = self.invoke_service_rpc(
-                publisher, "get_last_published_data_with_seqnum",
-                {'table': table})
-            return snapshot_seqnum
-
-    def get_subscription(self, service_id):
-        """Return publisher/tables subscribed to by service: service_id
-
-        Return data structure:
-            {publisher_id -> set of tables}
-        """
-        result = {}
-        for publisher in self.subscriptions:
-            for table in self.subscriptions[publisher]:
-                if service_id in self.subscriptions[publisher][table]:
-                    try:
-                        result[publisher].add(table)
-                    except KeyError:
-                        result[publisher] = set([table])
-        return result
-
-    def get_subscribers(self, service_id):
-        """List of services subscribed to this service."""
-
-        result = set()
-        tables = self.subscriptions.get(service_id, None)
-        if not tables:
-            # no subscribers
-            return []
-
-        for t in tables:
-            result = result | self.subscriptions[service_id][t]
-
-        return list(result)
-
-    def to_set_of_tuples(self, snapshot):
-        try:
-            return set([tuple(x) for x in snapshot])
-        except TypeError:
-            return snapshot
-
-    def unsubscribe_table(self, subscriber, publisher, table):
-        """Remove subscription for local service to target/table."""
-        if publisher not in self.subscriptions:
-            return False
-        if table not in self.subscriptions[publisher]:
-            return False
-        self.subscriptions[publisher][table].discard(subscriber)
-        if len(self.subscriptions[publisher][table]) == 0:
-            del self.subscriptions[publisher][table]
-        if len(self.subscriptions[publisher]) == 0:
-            del self.subscriptions[publisher]
-
-    def _update_tables_with_subscriber(self):
-        # not thread-safe: assumes each dseNode is single-threaded
-        peers = self.dse_status()['peers']
-        for s in self.get_services():
-            sid = s.service_id
-            # first, include subscriptions within the node, if any
-            tables_with_subs = set(self.subscriptions.get(sid, {}))
-            # then add subscriptions from other nodes
-            for peer_id in peers:
-                if sid in peers[peer_id]['subscribed_tables']:
-                    tables_with_subs |= peers[
-                        peer_id]['subscribed_tables'][sid]
-            # call DataService hooks
-            if hasattr(s, 'on_first_subs'):
-                added = tables_with_subs - s._published_tables_with_subscriber
-                if len(added) > 0:
-                    s.on_first_subs(added)
-            if hasattr(s, 'on_no_subs'):
-                removed = \
-                    s._published_tables_with_subscriber - tables_with_subs
-                if len(removed) > 0:
-                    s.on_no_subs(removed)
-            s._published_tables_with_subscriber = tables_with_subs
-
-    # Driver CRUD. Maybe belongs in a subclass of DseNode?
-    # Note(thread-safety): blocking function?
-    def load_drivers(self):
-        """Load all configured drivers and check for name conflicts."""
-        result = {}
-        for driver_path in cfg.CONF.drivers:
-            # Note(thread-safety): blocking call?
-            obj = importutils.import_class(driver_path)
-            driver = obj.get_datasource_info()
-            if driver['id'] in result:
-                raise exception.BadConfig(_("There is a driver already "
-                                            "loaded with the driver name "
-                                            "of %s") % driver['id'])
-            driver['module'] = driver_path
-            result[driver['id']] = driver
-        return result
-
-    def get_driver_info(self, driver_name):
-        driver = self.loaded_drivers.get(driver_name)
-        if not driver:
-            raise exception.DriverNotFound(id=driver_name)
-        return driver
-
-    def get_drivers_info(self):
-        return self.loaded_drivers
-
-    def get_driver_schema(self, drivername):
-        driver = self.get_driver_info(drivername)
-        # Note(thread-safety): blocking call?
-        obj = importutils.import_class(driver['module'])
-        return obj.get_schema()
-
-    # Datasource CRUD. Maybe belongs in a subclass of DseNode?
-    # Note(thread-safety): blocking function
-    def get_datasource(self, id_):
-        """Return the requested datasource."""
-        # Note(thread-safety): blocking call
-        result = datasources_db.get_datasource(id_)
-        if not result:
-            raise exception.DatasourceNotFound(id=id_)
-        return self.make_datasource_dict(result)
-
-    # Note(thread-safety): blocking function
-    def get_datasources(self, filter_secret=False):
-        """Return the created datasources as recorded in the DB.
-
-        This returns what datasources the database contains, not the
-        datasources that this server instance is running.
-        """
-        results = []
-        for datasource in datasources_db.get_datasources():
-            result = self.make_datasource_dict(datasource)
-            if filter_secret:
-                # driver_info knows which fields should be secret
-                driver_info = self.get_driver_info(result['driver'])
-                try:
-                    for hide_field in driver_info['secret']:
-                        result['config'][hide_field] = "<hidden>"
-                except KeyError:
-                    pass
-            results.append(result)
-        return results
-
-    def delete_missing_driver_datasources(self):
-        removed = 0
-        for datasource in datasources_db.get_datasources():
-            try:
-                self.get_driver_info(datasource.driver)
-            except exception.DriverNotFound:
-                datasources_db.delete_datasource_with_data(datasource.id)
-                removed += 1
-                LOG.debug("Datasource driver '%s' not found, deleting the "
-                          "datasource '%s' from DB", datasource.driver,
-                          datasource.name)
-
-        LOG.info("Datasource cleanup completed, removed %d datasources",
-                 removed)
-
-    def make_datasource_dict(self, req, fields=None):
-        result = {'id': req.get('id') or uuidutils.generate_uuid(),
-                  'name': req.get('name'),
-                  'driver': req.get('driver'),
-                  'description': req.get('description'),
-                  'type': None,
-                  'enabled': req.get('enabled', True)}
-        # NOTE(arosen): we store the config as a string in the db so
-        # here we serialize it back when returning it.
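-        # For example (illustrative values), a config stored in the DB as
-        # the string '{"username": "admin"}' is returned below as the
-        # dict {'username': 'admin'}.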
-        if isinstance(req.get('config'), six.string_types):
-            result['config'] = json.loads(req['config'])
-        else:
-            result['config'] = req.get('config')
-
-        return self._fields(result, fields)
-
-    def _fields(self, resource, fields):
-        if fields:
-            return dict(((key, item) for key, item in resource.items()
-                         if key in fields))
-        return resource
-
-    def validate_create_datasource(self, req):
-        name = req['name']
-        if not datalog_compile.string_is_servicename(name):
-            raise exception.InvalidDatasourceName(value=name)
-        driver = req['driver']
-        config = req['config'] or {}
-        for loaded_driver in self.loaded_drivers.values():
-            if loaded_driver['id'] == driver:
-                specified_options = set(config.keys())
-                valid_options = set(loaded_driver['config'].keys())
-                # Check that all the specified options passed in are
-                # valid configuration options that the driver exposes.
-                invalid_options = specified_options - valid_options
-                if invalid_options:
-                    raise exception.InvalidDriverOption(
-                        invalid_options=invalid_options)
-
-                # check that all the required options are passed in
-                required_options = set(
-                    [k for k, v in loaded_driver['config'].items()
-                     if v == constants.REQUIRED])
-                missing_options = required_options - specified_options
-                if missing_options:
-                    missing_options = ', '.join(missing_options)
-                    raise exception.MissingRequiredConfigOptions(
-                        missing_options=missing_options)
-                return loaded_driver
-
-        # If we get here, no datasource driver match was found.
-        raise exception.InvalidDriver(driver=req)
-
-    # Note(thread-safety): blocking function
-    def create_datasource_service(self, datasource):
-        """Create a new DataService on this node.
-
-        :param datasource: the datasource record (as stored in the DB),
-            from which the service name, driver class path, and
-            constructor arguments are derived.
-        """
-        # get the driver info for the datasource
-        ds_dict = self.make_datasource_dict(datasource)
-        if not ds_dict['enabled']:
-            LOG.info("datasource %s not enabled, skip loading",
-                     ds_dict['name'])
-            return
-
-        driver_info = self.get_driver_info(ds_dict['driver'])
-        # split class_path into module and class name
-        class_path = driver_info['module']
-        pieces = class_path.split(".")
-        module_name = ".".join(pieces[:-1])
-        class_name = pieces[-1]
-
-        if ds_dict['config'] is None:
-            args = {'ds_id': ds_dict['id']}
-        else:
-            args = dict(ds_dict['config'], ds_id=ds_dict['id'])
-        kwargs = {'name': ds_dict['name'], 'args': args}
-        LOG.info("creating service %s with class %s and args %s",
-                 ds_dict['name'], module_name,
-                 strutils.mask_password(kwargs, "****"))
-
-        # import the module
-        try:
-            # Note(thread-safety): blocking call?
-            module = importutils.import_module(module_name)
-            service = getattr(module, class_name)(**kwargs)
-        except Exception:
-            msg = "Error loading instance of module '%s'"
-            LOG.exception(msg, class_path)
-            raise exception.DataServiceError(msg % class_path)
-        return service
-
-
-class DseNodeEndpoints (object):
-    """Collection of RPC endpoints that the DseNode exposes on the bus.
-
-    Must be a separate class since all public methods of a given
-    class are assumed to be valid RPC endpoints.
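-
-    Here those endpoints are handle_publish and handle_publish_sequenced,
-    defined below.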
- """ - - def __init__(self, dsenode): - self.node = dsenode - - # Note(ekcs): non-sequenced publish retained to simplify rollout of dse2 - # to be replaced by handle_publish_sequenced - def handle_publish(self, context, publisher, table, data): - """Function called on the node when a publication is sent. - - Forwards the publication to all of the relevant services. - """ - for s in self.node.table_subscribers(publisher, table): - self.node.service_object(s).receive_data( - publisher=publisher, table=table, data=data, is_snapshot=True) - - def handle_publish_sequenced( - self, context, publisher, table, data, is_snapshot, seqnum): - """Function called on the node when a publication is sent. - - Forwards the publication to all of the relevant services. - """ - for s in self.node.table_subscribers(publisher, table): - self.node.service_object(s).receive_data_sequenced( - publisher=publisher, table=table, data=data, seqnum=seqnum, - is_snapshot=is_snapshot) diff --git a/congress/dse2/test_control_bus.py b/congress/dse2/test_control_bus.py deleted file mode 100644 index 29c06c82..00000000 --- a/congress/dse2/test_control_bus.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) 2016 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-#
-
-import eventlet
-import sys
-
-from oslo_config import cfg
-from oslo_messaging import conffixture
-
-from congress.dse2 import data_service
-from congress.dse2 import dse_node
-from congress.tests import base
-
-
-# For manual testing, support using rabbit driver instead of fake
-USE_RABBIT = False
-if len(sys.argv) > 1:
-    driver_flg = sys.argv[1].lower()
-    if driver_flg == '--rabbit':
-        USE_RABBIT = True
-    elif driver_flg != '--fake':
-        print("Usage: %s [--fake | --rabbit]" % sys.argv[0])
-        sys.exit(1)
-    sys.argv[1:] = sys.argv[2:]
-
-
-class TestControlBus(base.TestCase):
-
-    def setUp(self):
-        super(TestControlBus, self).setUp()
-
-        if USE_RABBIT:
-            self.messaging_config = cfg.CONF
-        else:
-            mc_fixture = conffixture.ConfFixture(cfg.CONF)
-            mc_fixture.conf.transport_url = 'kombu+memory://'
-            self.messaging_config = mc_fixture.conf
-        self.messaging_config.rpc_response_timeout = 1
-
-    def tearDown(self):
-        super(TestControlBus, self).tearDown()
-
-    def test_control_bus_discovery(self):
-        nodes = []
-        services = []
-
-        def _create_node_with_services(num):
-            nid = 'cbd_node%s' % num
-            nodes.append(dse_node.DseNode(self.messaging_config, nid, []))
-            ns = []
-            for s in range(num):
-                # intentionally start a different number of services per node
-                ns.append(data_service.DataService('cbd-%d_svc-%d' % (num, s)))
-                nodes[-1].register_service(ns[-1])
-            services.append(ns)
-            return nodes[-1]
-
-        for i in range(3):
-            n = _create_node_with_services(i)
-            n.start()
-
-        eventlet.sleep(.1)  # Allow for heartbeat propagation
-
-        def _validate_peer_view(node):
-            status = node.dse_status()
-            expected_peers = set([n.node_id for n in nodes
-                                  if n.node_id != node.node_id])
-            peers = set(status['peers'].keys())
-            self.assertEqual(peers, expected_peers,
-                             '%s has incorrect peers list' % node.node_id)
-            for n in nodes:
-                if n.node_id == node.node_id:
-                    continue
-                expected_services = [s.service_id for s in n._services]
-                services = [s['service_id']
-                            for s in status['peers'][n.node_id]['services']]
-                self.assertEqual(set(expected_services), set(services),
-                                 '%s has incorrect service list'
-                                 % node.node_id)
-
-        for n in nodes:
-            _validate_peer_view(n)
-
-        # Late-arriving node
-        n = _create_node_with_services(3)
-        n.start()
-        eventlet.sleep(.1)  # Allow for heartbeat propagation
-        for n in nodes:
-            _validate_peer_view(n)
-
-
-# TODO(pballand): replace with congress unit test framework when convenient
-if __name__ == '__main__':
-    import unittest
-    unittest.main(verbosity=2)
diff --git a/congress/exception.py b/congress/exception.py
deleted file mode 100644
index 90b8d941..00000000
--- a/congress/exception.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -"""Congress base exception handling.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import sys - -from oslo_config import cfg -from oslo_log import log as logging -import six - -from congress.api import error_codes -from congress import utils - - -LOG = logging.getLogger(__name__) - -exc_log_opts = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help='Make exception message format errors fatal'), -] - -CONF = cfg.CONF -CONF.register_opts(exc_log_opts) - - -class CongressException(Exception): - """Base Congress Exception - - To correctly use this class, inherit from it and define - a 'msg_fmt' property. That msg_fmt will get printf'd - with the keyword arguments provided to the constructor. - - """ - msg_fmt = _("An unknown exception occurred.") - # FIXME(thinrichs): this exception is overly complex and should - # not include HTTP codes at all. Proper fix needs to touch - # too many files that others are currently working on. - code = 500 - headers = {} - safe = False - - def __init__(self, message=None, **kwargs): - """FIXME(thinrichs): - - We just want name and data as fields. - :param name will be a name from error_codes, which includes the basic - message. - :param data will contain specifics for this instance of the - exception, e.g. a description error message. - """ - self.data = kwargs.get('data', None) - self.name = kwargs.get('name', None) - - # TODO(thinrichs): remove the rest of this (except the call to super) - self.kwargs = kwargs - if 'code' not in self.kwargs: - try: - self.kwargs['code'] = self.code - except AttributeError: - pass - - if not message: - if self.name is not None: - error_code = error_codes.get_num(self.name) - description = error_codes.get_desc(self.name) - message = "(%s) %s" % (error_code, description) - else: - try: - message = self.msg_fmt % kwargs - - except Exception: - exc_info = sys.exc_info() - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception(_('Exception in string format operation')) - for name, value in kwargs.items(): - LOG.error("%s: %s", name, value) # noqa - - if CONF.fatal_exception_format_errors: - six.reraise(exc_info[0], exc_info[1], exc_info[2]) - else: - # at least get the core message out - message = self.msg_fmt - super(CongressException, self).__init__(message) - - def format_message(self): - # NOTE(mrodden): use the first argument to the python Exception object - # which should be our full CongressException message, (see __init__) - return self.args[0] - - -# FIXME(thinrichs): Get rid of the ones below and instead create exception -# classes to represent the parts of the code that generated the exception, -# e.g. datasources versus policy compiler versus policy runtime. 
-
-
-class Forbidden(CongressException):
-    msg_fmt = _("Not authorized.")
-    code = 403
-
-
-class Conflict(CongressException):
-    msg_fmt = _("Conflict")
-    code = 409
-
-
-class BadRequest(CongressException):
-    msg_fmt = _("Bad request")
-    code = 400
-
-
-class NotFound(CongressException):
-    msg_fmt = _("Resource not found.")
-    code = 404
-
-
-class PolicyNotAuthorized(Forbidden):
-    msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
-
-
-class InvalidParamException(Exception):
-    pass
-
-
-class DataSourceConfigException(Exception):
-    pass
-
-
-class DuplicateTableName(Exception):
-    pass
-
-
-class InvalidTranslationType(Exception):
-    pass
-
-
-class DanglingReference(Conflict):
-    pass
-
-
-class LazyTable(BadRequest):
-    msg_fmt = _("table %(lazy_table)s is a lazy table and is not subscribed.")
-
-
-class InvalidPolicyInput(BadRequest):
-    msg_fmt = _('Input policy item violates schema.')
-
-
-# NOTE(thinrichs): The following represent different kinds of
-# exceptions: the policy compiler and the policy runtime, respectively.
-class PolicyException(CongressException):
-    def __init__(self, msg=None, obj=None, line=None, col=None,
-                 name=None, data=None, **kwargs):
-        CongressException.__init__(self, message=msg, name=name, data=data)
-        self.obj = obj
-        self.location = utils.Location(line=line, col=col, obj=obj)
-
-    def __str__(self):
-        s = str(self.location)
-        if len(s) > 0:
-            s = " at" + s
-        return CongressException.__str__(self) + s
-
-
-class PolicyRuntimeException(CongressException):
-    pass
-
-
-class DatabaseError(CongressException):
-    msg_fmt = _("Database backend experienced an unknown error.")
-
-
-class IncompleteSchemaException(CongressException):
-    pass
-
-
-class DataServiceError(Exception):
-    pass
-
-
-class BadConfig(BadRequest):
-    pass
-
-
-class DatasourceDriverException(CongressException):
-    pass
-
-
-class MissingRequiredConfigOptions(BadConfig):
-    msg_fmt = _("Missing required config options: %(missing_options)s")
-
-
-class InvalidDriver(BadConfig):
-    msg_fmt = _("Invalid driver: %(driver)s")
-
-
-class InvalidDriverOption(BadConfig):
-    msg_fmt = _("Invalid driver options: %(invalid_options)s")
-
-
-class DatasourceNameInUse(Conflict):
-    msg_fmt = _("Datasource already in use with name %(value)s")
-
-
-class InvalidDatasourceName(BadConfig):
-    msg_fmt = _("Datasource name %(value)s is invalid. Cannot be empty or "
-                "start with underscore. Must be valid in policy language")
-
-
-class DatasourceNotFound(NotFound):
-    msg_fmt = _("Datasource not found %(id)s")
-
-
-class DriverNotFound(NotFound):
-    msg_fmt = _("Driver not found %(id)s")
-
-
-class DatasourceCreationError(BadConfig):
-    msg_fmt = _("Datasource could not be created on the DSE: %(value)s")
diff --git a/congress/harness.py b/congress/harness.py
deleted file mode 100644
index 84775bc1..00000000
--- a/congress/harness.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from congress.api import action_model
-from congress.api import application
-from congress.api import base as api_base
-from congress.api import datasource_model
-from congress.api import library_policy_model
-from congress.api import policy_model
-from congress.api import router
-from congress.api import row_model
-from congress.api import rule_model
-from congress.api import schema_model
-from congress.api import status_model
-from congress.api.system import driver_model
-from congress.api import table_model
-from congress.db import datasources as db_datasources
-from congress.dse2 import datasource_manager as ds_manager
-from congress.dse2 import dse_node
-from congress import exception
-from congress.library_service import library_service
-from congress.policy_engines import agnostic
-
-LOG = logging.getLogger(__name__)
-
-
-def create2(node_id=None, bus_id=None, existing_node=None,
-            policy_engine=True, datasources=True, api=True):
-    """Get Congress up.
-
-    Creates a DseNode if one is not provided and adds policy_engine,
-    datasources, api to that node.
-
-    :param node_id: node_id of the DseNode to be created
-    :param bus_id: partition_id of the DseNode to be created
-    :param existing_node: a DseNode (optional; in lieu of previous 2 params)
-    :param policy_engine: controls whether policy_engine is included
-    :param datasources: controls whether datasources are included
-    :param api: controls whether API is included
-    :returns: DseNode
-    """
-    # create DseNode if existing_node not given
-    if existing_node is None:
-        assert (not (node_id is None or bus_id is None)),\
-            'params node_id and bus_id required.'
-        node = dse_node.DseNode(cfg.CONF, node_id, [], partition_id=bus_id)
-    else:
-        assert (node_id is None and bus_id is None),\
-            'params node_id and bus_id must be None when existing_node given.'
-        node = existing_node
-
-    # create services as required
-    services = {}
-
-    if datasources:
-        LOG.info("Registering congress datasource services on node %s",
-                 node.node_id)
-        services['datasources'] = create_datasources(node)
-        services['ds_manager'] = ds_manager.DSManagerService(
-            api_base.DS_MANAGER_SERVICE_ID)
-        node.register_service(services['ds_manager'])
-
-    if policy_engine:
-        LOG.info("Registering congress PolicyEngine service on node %s",
-                 node.node_id)
-        engine = create_policy_engine()
-        services[api_base.ENGINE_SERVICE_ID] = engine
-        node.register_service(engine)
-        initialize_policy_engine(engine)
-
-        # NOTE(ekcs): the library service does not depend on the policy
-        # engine; it is placed on the same nodes as the policy engine for
-        # convenience only
-        LOG.info("Registering congress policy library service on node %s",
-                 node.node_id)
-        library = create_policy_library_service()
-        services[api_base.LIBRARY_SERVICE_ID] = library
-        node.register_service(library)
-
-    if api:
-        LOG.info("Registering congress API service on node %s", node.node_id)
-        services['api'], services['api_service'] = create_api()
-        node.register_service(services['api_service'])
-
-    return services
-
-
-def create_api():
-    """Return service that encapsulates api logic for DSE2."""
-    # ResourceManager inherits from DataService
-    api_resource_mgr = application.ResourceManager()
-    models = create_api_models(api_resource_mgr)
-    router.APIRouterV1(api_resource_mgr, models)
-    return models, api_resource_mgr
-
-
-def create_api_models(bus):
-    """Create all the API models and return them as a dictionary for DSE2."""
-    res = {}
-    res['api-library-policy'] = library_policy_model.LibraryPolicyModel(
-        'api-library-policy', bus=bus)
-    res['api-policy'] = policy_model.PolicyModel('api-policy', bus=bus)
-    res['api-rule'] = rule_model.RuleModel('api-rule', bus=bus)
-    res['api-row'] = row_model.RowModel('api-row', bus=bus)
-    res['api-datasource'] = datasource_model.DatasourceModel(
-        'api-datasource', bus=bus)
-    res['api-schema'] = schema_model.SchemaModel('api-schema', bus=bus)
-    res['api-table'] = table_model.TableModel('api-table', bus=bus)
-    res['api-status'] = status_model.StatusModel('api-status', bus=bus)
-    res['api-action'] = action_model.ActionsModel('api-action', bus=bus)
-    res['api-system'] = driver_model.DatasourceDriverModel(
-        'api-system', bus=bus)
-    return res
-
-
-def create_policy_engine():
-    """Create policy engine and initialize it using the api models."""
-    engine = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID)
-    engine.debug_mode()  # should take this out for production
-    return engine
-
-
-def initialize_policy_engine(engine):
-    """Initialize the policy engine using the API."""
-    # Load policies from the database
-    engine.persistent_load_policies()
-    engine.create_default_policies()
-    engine.persistent_load_rules()
-
-
-def create_policy_library_service():
-    """Create policy library service."""
-    library = library_service.LibraryService(api_base.LIBRARY_SERVICE_ID)
-    # load library policies from file if none are present in the DB
-    if len(library.get_policies(include_rules=False)) == 0:
-        library.load_policies_from_files()
-    return library
-
-
-def create_datasources(bus):
-    """Create and register datasource services."""
-    if cfg.CONF.delete_missing_driver_datasources:
-        # congress server started with --delete-missing-driver-datasources
-        bus.delete_missing_driver_datasources()
-
-    datasources = db_datasources.get_datasources()
-    services = []
-    for ds in datasources:
-        LOG.info("creating configured datasource service %s.", ds.name)
- try: - service = bus.create_datasource_service(ds) - if service: - bus.register_service(service) - services.append(service) - except exception.DriverNotFound: - LOG.exception("Some datasources could not be loaded, start " - "congress server with " - "--delete-missing-driver-datasources option to " - "clean up stale datasources in DB.") - sys.exit(1) - except Exception: - LOG.exception("datasource %s creation failed. %s service may not " - "be running.", ds.name, ds.driver) - return services diff --git a/congress/library_service/__init__.py b/congress/library_service/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/library_service/library_service.py b/congress/library_service/library_service.py deleted file mode 100644 index e4504c9f..00000000 --- a/congress/library_service/library_service.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (c) 2017 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import copy -import json -import jsonschema -import os -import yaml - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_log import log as logging - -from congress.datalog import compile -from congress.db import db_library_policies -from congress.dse2 import data_service -from congress import exception - -LOG = logging.getLogger(__name__) - - -def validate_policy_item(item): - schema_json = ''' - { - "id": "PolicyProperties", - "title": "Policy Properties", - "type": "object", - "required": ["name", "rules"], - "properties": { - "name": { - "title": "Policy unique name", - "type": "string", - "minLength": 1, - "maxLength": 255 - }, - "description": { - "title": "Policy description", - "type": "string" - }, - "kind": { - "title": "Policy kind", - "type": "string", - "enum": ["database", "nonrecursive", "action", "materialized", - "delta", "datasource"] - }, - "abbreviation": { - "title": "Policy name abbreviation", - "type": "string", - "minLength": 1, - "maxLength": 5 - }, - "rules": { - "title": "collection of rules", - "type": "array", - "items": { - "type": "object", - "properties": { - "PolicyRule": { - "title": "Policy rule", - "type": "object", - "required": ["rule"], - "properties": { - "rule": { - "title": "Rule definition following policy grammar", - "type": "string" - }, - "name": { - "title": "User-friendly name", - "type": "string" - }, - "comment": { - "title": "User-friendly comment", - "type": "string" - } - } - } - } - } - } - } - } - ''' - try: - jsonschema.validate(item, json.loads(schema_json)) - except jsonschema.exceptions.ValidationError as ve: - raise exception.InvalidPolicyInput(data=str(ve)) - - -class LibraryService (data_service.DataService): - def __init__(self, name): - data_service.DataService.__init__(self, name) - self.name = name - self.add_rpc_endpoint(DseLibraryServiceEndpoints(self)) - - def create_policy(self, policy_dict): - policy_dict = 
copy.deepcopy(policy_dict)
-        validate_policy_item(policy_dict)
-        policy_name = policy_dict['name']
-
-        # check name is valid
-        if not compile.string_is_servicename(policy_name):
-            raise exception.PolicyException(
-                'name `%s` is not a valid policy name' % policy_name)
-
-        # make defaults
-        if 'kind' not in policy_dict:
-            policy_dict['kind'] = 'nonrecursive'
-        if 'abbreviation' not in policy_dict:
-            policy_dict['abbreviation'] = policy_name[:5]
-        if 'description' not in policy_dict:
-            policy_dict['description'] = ''
-
-        try:
-            # Note(thread-safety): blocking call
-            policy = db_library_policies.add_policy(policy_dict=policy_dict)
-            return policy.to_dict()
-        except db_exc.DBError:
-            LOG.exception('Creating a new library policy failed.')
-            raise
-
-    def get_policies(self, include_rules=True):
-        return [p.to_dict(include_rules)
-                for p in db_library_policies.get_policies()]
-
-    def get_policy(self, id_, include_rules=True):
-        # Note(thread-safety): blocking call
-        policy = db_library_policies.get_policy(id_)
-        return policy.to_dict(include_rules)
-
-    def delete_all_policies(self):
-        # Note(thread-safety): blocking call
-        db_library_policies.delete_policies()
-
-    def delete_policy(self, id_):
-        # Note(thread-safety): blocking call
-        db_object = db_library_policies.get_policy(id_)
-        db_library_policies.delete_policy(id_)
-        return db_object.to_dict(include_rules=True)
-
-    def replace_policy(self, id_, policy_dict):
-        validate_policy_item(policy_dict)
-        policy_name = policy_dict['name']
-
-        # check name is valid
-        if not compile.string_is_servicename(policy_name):
-            raise exception.PolicyException(
-                "Policy name %s is not a valid service name" % policy_name)
-
-        # make defaults
-        if 'kind' not in policy_dict:
-            policy_dict['kind'] = 'nonrecursive'
-        if 'abbreviation' not in policy_dict:
-            policy_dict['abbreviation'] = policy_name[:5]
-        if 'description' not in policy_dict:
-            policy_dict['description'] = ''
-
-        # Note(thread-safety): blocking call
-        policy = db_library_policies.replace_policy(
-            id_, policy_dict=policy_dict)
-        return policy.to_dict()
-
-    def load_policies_from_files(self):
-        def _load_library_policy_file(full_path):
-            with open(full_path, "r") as stream:
-                policies = yaml.load_all(stream)
-                count = 0
-                doc_num_in_file = 0
-                for policy in policies:
-                    try:
-                        doc_num_in_file += 1
-                        self.create_policy(policy)
-                        count += 1
-                    except db_exc.DBDuplicateEntry:
-                        LOG.debug(
-                            'Library policy %s (number %s in file %s) already '
-                            'exists (likely loaded by another Congress '
-                            'instance). Skipping.',
-                            policy.get('name', '[no name]'),
-                            doc_num_in_file, full_path)
-                    except exception.CongressException:
-                        LOG.exception(
-                            'Library policy %s could not be loaded. Skipped. '
' - 'YAML reproduced here %s', - policy.get('name', '[no name]'), - yaml.dump(policy)) - return count - file_count = 0 - policy_count = 0 - for (dirpath, dirnames, filenames) in os.walk( - cfg.CONF.policy_library_path): - for filename in filenames: - count = _load_library_policy_file( - os.path.join(dirpath, filename)) - if count > 0: - file_count += 1 - policy_count += count - LOG.debug( - '%s library policies from %s files successfully loaded', - policy_count, file_count) - - class DseLibraryServiceEndpoints(object): - """RPC endpoints exposed by LibraryService.""" - - def __init__(self, data_service): - self.data_service = data_service - - def create_policy(self, context, policy_dict): - return self.data_service.create_policy(policy_dict) - - def get_policies(self, context, include_rules=True): - return self.data_service.get_policies(include_rules) - - def get_policy(self, context, id_, include_rules=True): - return self.data_service.get_policy(id_, include_rules) - - def delete_all_policies(self, context): - return self.data_service.delete_all_policies() - - def delete_policy(self, context, id_): - return self.data_service.delete_policy(id_) - - def replace_policy(self, context, id_, policy_dict): - return self.data_service.replace_policy(id_, policy_dict) diff --git a/congress/opts.py b/congress/opts.py deleted file mode 100644 index 760ec433..00000000 --- a/congress/opts.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2015 Huawei. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import itertools - -import congress.common.config -import congress.dse2.dse_node -import congress.exception -import congress.utils - - -def list_opts(): - return [ - ('DEFAULT', - itertools.chain( - congress.common.config.core_opts, - congress.utils.utils_opts, - congress.exception.exc_log_opts, - )), - ('dse', congress.common.config.dse_opts) - ] diff --git a/congress/policy_engines/__init__.py b/congress/policy_engines/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/policy_engines/agnostic.py b/congress/policy_engines/agnostic.py deleted file mode 100644 index de406799..00000000 --- a/congress/policy_engines/agnostic.py +++ /dev/null @@ -1,2355 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
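
# Illustrative sketch, not part of the deleted source: a minimal policy
# dict of the shape accepted by validate_policy_item() in
# library_service.py above. The schema below is a trimmed copy of the
# required fields; the policy name and rule are invented for the example.
import jsonschema

schema = {
    "type": "object",
    "required": ["name", "rules"],
    "properties": {
        "name": {"type": "string", "minLength": 1, "maxLength": 255},
        "abbreviation": {"type": "string", "minLength": 1, "maxLength": 5},
        "rules": {"type": "array", "items": {"type": "object"}},
    },
}

policy = {
    "name": "server_audit",
    "abbreviation": "audit",
    "rules": [{"rule": "error(x) :- nova:servers(x)"}],
}

jsonschema.validate(policy, schema)  # raises ValidationError if invalid
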
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import time - -import eventlet -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_messaging import exceptions as messaging_exceptions -from oslo_utils import uuidutils -import six -from six.moves import range - -from congress.datalog import base -from congress.datalog import compile -from congress.datalog import database as db -from congress.datalog import materialized -from congress.datalog import nonrecursive -from congress.datalog import unify -from congress.datalog import utility -from congress.db import api as db_api -from congress.db import db_policy_rules -from congress.dse2 import data_service -from congress import exception -from congress.synchronizer import policy_rule_synchronizer -from congress import utils - -LOG = logging.getLogger(__name__) - - -class ExecutionLogger(object): - def __init__(self): - self.messages = [] - - def debug(self, msg, *args): - self.messages.append(msg % args) - - def info(self, msg, *args): - self.messages.append(msg % args) - - def warn(self, msg, *args): - self.messages.append(msg % args) - - def error(self, msg, *args): - self.messages.append(msg % args) - - def critical(self, msg, *args): - self.messages.append(msg % args) - - def content(self): - return '\n'.join(self.messages) - - def empty(self): - self.messages = [] - - -def list_to_database(atoms): - database = db.Database() - for atom in atoms: - if atom.is_atom(): - database.insert(atom) - return database - - -def string_to_database(string, theories=None): - return list_to_database(compile.parse( - string, theories=theories)) - - -############################################################################## -# Runtime -############################################################################## - -class Trigger(object): - """A chunk of code that should be run when a table's contents changes.""" - - def __init__(self, tablename, policy, callback, modal=None): - self.tablename = tablename - self.policy = policy - self.callback = callback - self.modal = modal - - def __str__(self): - return "Trigger on table=%s; policy=%s; modal=%s with callback %s." % ( - self.tablename, self.policy, self.modal, self.callback) - - -class TriggerRegistry(object): - """A collection of triggers and algorithms to analyze that collection.""" - - def __init__(self, dependency_graph): - # graph containing relationships between tables - self.dependency_graph = dependency_graph - - # set of triggers that are currently registered - self.triggers = set() - - # map from table to triggers relevant to changes for that table - self.index = {} - - def register_table(self, tablename, policy, callback, modal=None): - """Register CALLBACK to run when TABLENAME changes.""" - # TODO(thinrichs): either fix dependency graph to differentiate - # between execute[alice:p] and alice:p or reject rules - # in which both occur - trigger = Trigger(tablename, policy, callback, modal=modal) - self.triggers.add(trigger) - self._add_indexes(trigger) - LOG.info("registered trigger: %s", trigger) - return trigger - - def unregister(self, trigger): - """Unregister trigger ID.""" - self.triggers.remove(trigger) - self._delete_indexes(trigger) - - def update_dependencies(self, dependency_graph_changes=None): - """Inform registry of changes to the dependency graph. 
- - Changes are accounted for in self.dependency_graph, but - by giving the list of changes we can avoid recomputing - all dependencies from scratch. - """ - # TODO(thinrichs): instead of destroying the index and - # recomputing from scratch, look at the changes and - # figure out the delta. - self.index = {} - for trigger in self.triggers: - self._add_indexes(trigger) - - def _add_indexes(self, trigger): - full_table = compile.Tablename.build_service_table( - trigger.policy, trigger.tablename) - deps = self.dependency_graph.dependencies(full_table) - if deps is None: - deps = set([full_table]) - for table in deps: - if table in self.index: - self.index[table].add(trigger) - else: - self.index[table] = set([trigger]) - - def _delete_indexes(self, trigger): - full_table = compile.Tablename.build_service_table( - trigger.policy, trigger.tablename) - deps = self.dependency_graph.dependencies(full_table) - if deps is None: - deps = set([full_table]) - for table in deps: - self.index[table].discard(trigger) - - def relevant_triggers(self, events): - """Return the set of triggers that are relevant to the EVENTS. - - Each EVENT may either be a compile.Event or a tablename. - """ - table_changes = set() - for event in events: - if isinstance(event, compile.Event): - if compile.is_rule(event.formula): - table_changes |= set( - [lit.table.global_tablename(event.target) - for lit in event.formula.heads]) - else: - table_changes.add( - event.formula.table.global_tablename(event.target)) - elif isinstance(event, six.string_types): - table_changes.add(event) - triggers = set() - for table in table_changes: - if table in self.index: - triggers |= self.index[table] - return triggers - - def _index_string(self): - """Build string representation of self.index; useful for debugging.""" - s = '{' - s += ";".join(["%s -> %s" % (key, ",".join(str(x) for x in value)) - for key, value in self.index.items()]) - s += '}' - return s - - @classmethod - def triggers_by_table(cls, triggers): - """Return dictionary from tables to triggers.""" - d = {} - for trigger in triggers: - table = (trigger.tablename, trigger.policy, trigger.modal) - if table not in d: - d[table] = [trigger] - else: - d[table].append(trigger) - return d - - -class Runtime (object): - """Runtime for the Congress policy language. - - Only have one instantiation in practice, but using a - class is natural and useful for testing. 
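
# Illustrative sketch, not part of the deleted source: the callback
# contract used by Trigger/TriggerRegistry above. Callbacks are indexed
# by table name and fired with the old and new table contents only when
# the contents actually changed; all names here are invented.
class MiniTriggerRegistry(object):
    def __init__(self):
        self.index = {}  # table name -> set of callbacks

    def register_table(self, tablename, callback):
        self.index.setdefault(tablename, set()).add(callback)

    def notify(self, tablename, old_data, new_data):
        if old_data != new_data:  # mirrors Runtime's change check
            for callback in self.index.get(tablename, ()):
                callback(tablename, old_data, new_data)

registry = MiniTriggerRegistry()
registry.register_table(
    'alice:p', lambda t, old, new: print(t, 'changed:', old, '->', new))
registry.notify('alice:p', {(1,)}, {(1,), (2,)})  # fires the callback
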
- """ - - DEFAULT_THEORY = 'classification' - ACTION_THEORY = 'action' - - def __init__(self): - # tracer object - self.tracer = base.Tracer() - # record execution - self.logger = ExecutionLogger() - # collection of theories - self.theory = {} - # collection of builtin theories - self.builtin_policy_names = set() - # dependency graph for all theories - self.global_dependency_graph = ( - compile.RuleDependencyGraph()) - # triggers - self.trigger_registry = TriggerRegistry(self.global_dependency_graph) - # execution triggers - self.execution_triggers = {} - # disabled rules - self.disabled_events = [] - # rules with errors (because of schema inconsistencies) - self.error_events = [] - self.synchronizer = None - - ############################################### - # Persistence layer - ############################################### - # Note(thread-safety): blocking function - def persistent_create_policy_with_rules(self, policy_rules_obj): - rules, policy_metadata = self.persistent_insert_rules( - policy_name=policy_rules_obj['name'], - rules=policy_rules_obj['rules'], - create_policy=True, - abbr=policy_rules_obj.get('abbreviation'), - kind=policy_rules_obj.get('kind'), - desc=policy_rules_obj.get('description')) - - # remove the rule IDs - for rule in rules: - del rule['id'] - - policy_metadata['rules'] = rules - return policy_metadata - - # Note(thread-safety): blocking function - def persistent_create_policy(self, name, id_=None, abbr=None, kind=None, - desc=None, db_session=None): - # validation for name - if not compile.string_is_servicename(name): - raise exception.PolicyException( - "Policy name %s is not a valid tablename" % name) - - # Create policy object, but don't add to runtime yet, sync later - if id_ is None: - id_ = str(uuidutils.generate_uuid()) - policy_obj = self.construct_policy_obj( - name=name, abbr=abbr, kind=kind, id_=id_, desc=desc, owner='user') - - # save policy to database - if desc is None: - desc = '' - obj = {'id': policy_obj.id, - 'name': policy_obj.name, - 'owner_id': 'user', - 'description': desc, - 'abbreviation': policy_obj.abbr, - 'kind': policy_obj.kind} - try: - # Note(thread-safety): blocking function - db_policy_rules.add_policy(obj['id'], - obj['name'], - obj['abbreviation'], - obj['description'], - obj['owner_id'], - obj['kind'], - session=db_session) - except KeyError: - raise - except Exception: - policy_name = policy_obj.name - msg = "Error thrown while adding policy %s into DB." 
% policy_name - LOG.exception(msg) - raise exception.PolicyException(msg) - if db_session: - # stay in current transaction, previous write may not be - # readable by synchronizer - self.add_policy_obj_to_runtime(policy_obj) - else: - self.synchronizer.sync_one_policy(obj['name'], - db_session=db_session) - return obj - - # Note(thread-safety): blocking function - def persistent_delete_policy(self, name_or_id): - # Note(thread-safety): blocking call - db_object = db_policy_rules.get_policy(name_or_id) - if db_object['name'] in [self.DEFAULT_THEORY, self.ACTION_THEORY]: - raise KeyError("Cannot delete system-maintained policy %s" % - db_object['name']) - # delete policy from memory and from database - db_policy_rules.delete_policy(db_object['id']) - self.synchronizer.sync_one_policy(db_object['name']) - return db_object.to_dict() - - # Note(thread-safety): blocking function - def persistent_get_policies(self): - return [p.to_dict() for p in db_policy_rules.get_policies()] - - # Note(thread-safety): blocking function - def persistent_get_policy(self, id_): - # Note(thread-safety): blocking call - try: - policy = db_policy_rules.get_policy(id_) - return policy.to_dict() - except KeyError: - raise exception.NotFound( - 'No policy found with name or id %s' % id_) - - # Note(thread-safety): blocking function - def persistent_get_rule(self, id_, policy_name): - """Return data for rule with id_ in policy_name.""" - # Check if policy exists, else raise error - self.assert_policy_exists(policy_name) - # Note(thread-safety): blocking call - rule = db_policy_rules.get_policy_rule(id_, policy_name) - if rule is None: - return - return rule.to_dict() - - # Note(thread-safety): blocking function - def persistent_get_rules(self, policy_name): - """Return data for all rules in policy_name.""" - # Check if policy exists, else raise error - self.assert_policy_exists(policy_name) - # Note(thread-safety): blocking call - rules = db_policy_rules.get_policy_rules(policy_name) - return [rule.to_dict() for rule in rules] - - def persistent_insert_rule(self, policy_name, str_rule, rule_name, - comment): - rule_data = {'rule': str_rule, 'name': rule_name, - 'comment': comment} - return_data, _ = self.persistent_insert_rules(policy_name, [rule_data]) - return (return_data[0]['id'], return_data[0]) - - # Note(thread-safety): blocking function - # acquire lock to avoid periodic sync from undoing insert before persisted - # IMPORTANT: Be very careful to avoid deadlock when - # acquiring locks sequentially. In this case, we will acquire lock A - # then attempt to acquire lock B. 
We have to make sure no thread will hold - # lock B and attempt to acquire lock A, causing a deadlock - @lockutils.synchronized('congress_synchronize_policies') - @lockutils.synchronized('congress_synchronize_rules') - def persistent_insert_rules(self, policy_name, rules, create_policy=False, - id_=None, abbr=None, kind=None, desc=None): - """Insert and persists rule into policy_name.""" - - def uninsert_rules(rules_inserted): - for d in rules_inserted: - self._safe_process_policy_update( - [d['input_rule_str']], policy_name, insert=False) - - success = False # used to rollback DB if not set to success - try: - rules_to_persist = [] - return_data = [] - # get session - db_session = db_api.get_locking_session() - - # lock policy_rules table to prevent conflicting rules - # insertion (say causing unsupported recursion) - # policies and datasources tables locked because - # it's a requirement of MySQL backend to lock all accessed tables - db_api.lock_tables(session=db_session, - tables=['policy_rules', 'policies', - 'datasources']) - - if cfg.CONF.replicated_policy_engine: - # synchronize policy rules to get latest state, locked state - # non-locking version because lock already acquired, - # avoid deadlock - self.synchronizer.synchronize_rules_nonlocking( - db_session=db_session) - - # Note: it's important that this create policy is run after locking - # the policy_rules table, so as to prevent other nodes from - # inserting rules into this policy, which may be removed by an - # undo (delete the policy) later in this method - policy_metadata = None - if create_policy: - policy_metadata = self.persistent_create_policy( - id_=id_, name=policy_name, abbr=abbr, kind=kind, - desc=desc, db_session=db_session) - else: - # Reject rules inserted into non-persisted policies - # (i.e. datasource policies) - - # Note(thread-safety): blocking call - policy_name = db_policy_rules.policy_name( - policy_name, session=db_session) - # call synchronizer to make sure policy is sync'ed in memory - self.synchronizer.sync_one_policy_nonlocking( - policy_name, db_session=db_session) - # Note(thread-safety): blocking call - policies = db_policy_rules.get_policies(session=db_session) - persisted_policies = set([p.name for p in policies]) - if policy_name not in persisted_policies: - if policy_name in self.theory: - LOG.debug( - "insert_persisted_rule error: rule not permitted " - "for policy %s", policy_name) - raise exception.PolicyRuntimeException( - name='rule_not_permitted') - - rules_to_insert = [] - for rule_data in rules: - str_rule = rule_data['rule'] - rule_name = rule_data.get('name') - comment = rule_data.get('comment') - - id_ = uuidutils.generate_uuid() - try: - rule = self.parse(str_rule) - except exception.PolicyException as e: - # TODO(thinrichs): change compiler to provide these - # error_code names directly. 
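
# Illustrative sketch, not part of the deleted source: the lock-ordering
# discipline described in the comment above the decorators. Deadlock is
# avoided because every writer stacks the same two oslo.concurrency
# decorators in the same order, so the policies lock (A) is always
# acquired before the rules lock (B).
from oslo_concurrency import lockutils

@lockutils.synchronized('congress_synchronize_policies')  # lock A first
@lockutils.synchronized('congress_synchronize_rules')     # then lock B
def writer_one():
    pass

@lockutils.synchronized('congress_synchronize_policies')  # same order,
@lockutils.synchronized('congress_synchronize_rules')     # so no deadlock
def writer_two():
    pass
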
- raise exception.PolicyException( - str(e), name='rule_syntax') - - if len(rule) == 1: - rule = rule[0] - else: - msg = ("Received multiple rules: " + - "; ".join(str(x) for x in rule)) - raise exception.PolicyRuntimeException( - msg, name='multiple_rules') - - rule.set_id(id_) - rule.set_name(rule_name) - rule.set_comment(comment or "") - rule.set_original_str(str_rule) - rules_to_insert.append(rule) - changes = self._safe_process_policy_update( - rules_to_insert, policy_name, persistent=True) - - if len(changes) > 0: - # remember the rule for possible undo - rules_inserted = [ - change_event.formula for change_event in changes] - - # remember the rule for insert into DB - rules_to_persist = [{ - 'original_str': change_event.formula.original_str, - 'id': str(change_event.formula.id), - 'comment': change_event.formula.comment, - 'name': change_event.formula.name} - for change_event in changes] - - # prepare return data based on rules inserted - return_data = [{ - 'rule': utils.pretty_rule( - change_event.formula.original_str), - 'id': str(change_event.formula.id), - 'comment': change_event.formula.comment, - 'name': change_event.formula.name} - for change_event in changes] - - # save rule to database if change actually happened. - # Note: change produced may not be equivalent to original rule - # because of column-reference elimination. - if len(rules_to_persist) == 0 and len(rules) > 0: - # change not accepted means it was already there - raise exception.PolicyRuntimeException( - name='rule_already_exists') - try: - for d in rules_to_persist: - # Note(thread-safety): blocking call - db_policy_rules.add_policy_rule( - d['id'], policy_name, d['original_str'], - d['comment'], rule_name=d['name'], session=db_session) - # do not begin to avoid implicitly releasing table - # lock due to starting new transaction - success = True - return return_data, policy_metadata - except Exception as db_exception: - try: - # un-insert all rules from engine unless all db inserts - # succeeded - # Note limitation: if an unexpected DB error is encountered - # the rule insertions into policy engine are undone, but - # may have already had effects on actions and query results - uninsert_rules(rules_inserted) - raise exception.PolicyRuntimeException( - "Error while writing to DB: %s." - % str(db_exception)) - except Exception as change_exception: - raise exception.PolicyRuntimeException( - "Error thrown during recovery from DB error. " - "Inconsistent state. DB error: %s. " - "New error: %s." 
% (str(db_exception), - str(change_exception))) - finally: - # commit/rollback, unlock, and close db_session - if db_session: - if success: - db_api.commit_unlock_tables(session=db_session) - else: - db_api.rollback_unlock_tables(session=db_session) - if create_policy: - # sync the potentially rolled back policy creation - self.synchronizer.sync_one_policy_nonlocking( - policy_name) - db_session.close() - - # Note(thread-safety): blocking function - def persistent_delete_rule(self, id_, policy_name_or_id): - # Note(thread-safety): blocking call - policy_name = db_policy_rules.policy_name(policy_name_or_id) - # Note(thread-safety): blocking call - item = self.persistent_get_rule(id_, policy_name) - if item is None: - raise exception.PolicyRuntimeException( - name='rule_not_exists', - data='ID: %s, policy_name: %s' % (id_, policy_name)) - rule = self.parse1(item['rule']) - self._safe_process_policy_update([rule], policy_name, insert=False) - # Note(thread-safety): blocking call - db_policy_rules.delete_policy_rule(id_) - return item - - def persistent_load_policies(self): - """Load policies from database.""" - return self.synchronizer.synchronize_all_policies() - - # Note(thread-safety): blocking function - def persistent_load_rules(self): - """Load all rules from the database.""" - # Note(thread-safety): blocking call - rules = db_policy_rules.get_policy_rules() - for rule in rules: - parsed_rule = self.parse1(rule.rule) - parsed_rule.set_id(rule.id) - parsed_rule.set_name(rule.name) - parsed_rule.set_comment(rule.comment) - parsed_rule.set_original_str(rule.rule) - self._safe_process_policy_update( - [parsed_rule], - rule.policy_name) - - def _safe_process_policy_update(self, parsed_rules, policy_name, - insert=True, persistent=False): - if policy_name not in self.theory: - raise exception.PolicyRuntimeException( - 'Policy ID %s does not exist' % policy_name, - name='policy_not_exist') - events = [compile.Event( - formula=parsed_rule, insert=insert, target=policy_name) - for parsed_rule in parsed_rules] - (permitted, changes) = self.process_policy_update( - events, persistent=persistent) - if not permitted: - raise exception.PolicyException( - ";".join([str(x) for x in changes]), - name='rule_syntax') - return changes - - def process_policy_update(self, events, persistent=False): - LOG.debug("process_policy_update %s" % events) - # body_only so that we don't subscribe to tables in the head - result = self.update(events, persistent=persistent) - return result - - ########################## - # Non-persistence layer - ########################## - - def construct_policy_obj(self, name, abbr=None, kind=None, id_=None, - desc=None, owner=None): - """Construct policy obj""" - if not isinstance(name, six.string_types): - raise KeyError("Policy name %s must be a string" % name) - if not isinstance(abbr, six.string_types): - abbr = name[0:5] - LOG.debug("Creating policy <%s> with abbr <%s> and kind <%s>", - name, abbr, kind) - if kind is None: - kind = base.NONRECURSIVE_POLICY_TYPE - else: - kind = kind.lower() - if kind == base.NONRECURSIVE_POLICY_TYPE: - PolicyClass = nonrecursive.NonrecursiveRuleTheory - elif kind == base.ACTION_POLICY_TYPE: - PolicyClass = nonrecursive.ActionTheory - elif kind == base.DATABASE_POLICY_TYPE: - PolicyClass = db.Database - elif kind == base.MATERIALIZED_POLICY_TYPE: - PolicyClass = materialized.MaterializedViewTheory - elif kind == base.DATASOURCE_POLICY_TYPE: - PolicyClass = nonrecursive.DatasourcePolicyTheory - else: - raise exception.PolicyException( - "Unknown 
kind of policy: %s" % kind) - policy_obj = PolicyClass(name=name, abbr=abbr, theories=self.theory, - desc=desc, owner=owner) - policy_obj.set_id(id_) - policy_obj.set_tracer(self.tracer) - return policy_obj - - def add_policy_obj_to_runtime(self, policy_obj): - """Add policy obj to runtime""" - name = policy_obj.name - if name in self.theory: - raise KeyError("Policy with name %s already exists" % name) - self.theory[name] = policy_obj - LOG.debug("Added to runtime policy <%s> with abbr <%s> and kind <%s>", - policy_obj.name, policy_obj.abbr, policy_obj.kind) - - def create_policy(self, name, abbr=None, kind=None, id_=None, - desc=None, owner=None): - """Create a new policy and add it to the runtime. - - ABBR is a shortened version of NAME that appears in - traces. KIND is the name of the datastructure used to - represent a policy. - """ - policy_obj = self.construct_policy_obj( - name, abbr, kind, id_, desc, owner) - self.add_policy_obj_to_runtime(policy_obj) - return policy_obj - - def initialize_datasource(self, name, schema): - """Initializes datasource by creating policy and setting schema. """ - try: - self.create_policy(name, kind=base.DATASOURCE_POLICY_TYPE) - except KeyError: - raise exception.DatasourceNameInUse(value=name) - try: - self.set_schema(name, schema) - except Exception: - self.delete_policy(name) - raise exception.DatasourceCreationError(value=name) - - def delete_policy(self, name_or_id, disallow_dangling_refs=False): - """Deletes policy with name NAME or throws KeyError or DanglingRefs.""" - LOG.info("Deleting policy named %s", name_or_id) - name = self._find_policy_name(name_or_id) - if disallow_dangling_refs: - refs = self._references_to_policy(name) - if refs: - refmsg = ";".join("%s: %s" % (policy, rule) - for policy, rule in refs) - raise exception.DanglingReference( - "Cannot delete %s because it would leave dangling " - "references: %s" % (name, refmsg)) - # delete the rules explicitly so cross-theory state is properly - # updated - events = [compile.Event(formula=rule, insert=False, target=name) - for rule in self.theory[name].content()] - permitted, errs = self.update(events) - if not permitted: - # This shouldn't happen - msg = ";".join(str(x) for x in errs) - LOG.exception("%s:: failed to empty theory %s: %s", - self.name, name, msg) - raise exception.PolicyException("Policy %s could not be deleted " - "since rules could not all be " - "deleted: %s" % (name, msg)) - # delete disabled rules - self.disabled_events = [event for event in self.disabled_events - if event.target != name] - # actually delete the theory - del self.theory[name] - - def rename_policy(self, oldname, newname): - """Renames policy OLDNAME to NEWNAME or raises KeyError.""" - if newname in self.theory: - raise KeyError('Cannot rename %s to %s: %s already exists' % - (oldname, newname, newname)) - try: - self.theory[newname] = self.theory[oldname] - del self.theory[oldname] - except KeyError: - raise KeyError('Cannot rename %s to %s: %s does not exist' % - (oldname, newname, oldname)) - - # TODO(thinrichs): make Runtime act like a dictionary so that we - # can iterate over policy names (keys), check if a policy exists, etc. - def assert_policy_exists(self, policy_name): - """Checks if policy exists or not. - - :param policy_name: policy name - :returns: True, if policy exists - :raises: PolicyRuntimeException, if policy doesn't exist. 
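
# Illustrative sketch, not part of the deleted source: the kind-to-class
# dispatch in construct_policy_obj() rewritten as a lookup table. Class
# names are kept as strings so the sketch stands alone; the real code
# maps them to theory classes from congress.datalog.
KIND_TO_CLASS = {
    'nonrecursive': 'NonrecursiveRuleTheory',
    'action': 'ActionTheory',
    'database': 'Database',
    'materialized': 'MaterializedViewTheory',
    'datasource': 'DatasourcePolicyTheory',
}

def resolve_policy_class(kind=None):
    kind = (kind or 'nonrecursive').lower()  # 'nonrecursive' is the default
    try:
        return KIND_TO_CLASS[kind]
    except KeyError:
        raise ValueError("Unknown kind of policy: %s" % kind)
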
- """ - if policy_name not in self.theory: - raise exception.PolicyRuntimeException( - 'Policy ID %s does not exist' % policy_name, - name='policy_not_exist') - return True - - def policy_names(self): - """Returns list of policy names.""" - return list(self.theory.keys()) - - def policy_object(self, name=None, id=None): - """Return policy by given name. Raises KeyError if does not exist.""" - assert name or id - if name: - try: - if not id or str(self.theory[name].id) == str(id): - return self.theory[name] - except KeyError: - raise KeyError("Policy with name %s and id %s does not " - "exist" % (name, str(id))) - elif id: - for n in self.policy_names(): - if str(self.theory[n].id) == str(id): - return self.theory[n] - raise KeyError("Policy with name %s and id %s does not " - "exist" % (name, str(id))) - - def policy_type(self, name): - """Return type of policy NAME. Throws KeyError if does not exist.""" - return self.policy_object(name).kind - - def set_schema(self, name, schema, complete=False): - """Set the schema for module NAME to be SCHEMA.""" - # TODO(thinrichs): handle the case of a schema being UPDATED, - # not just being set for the first time - if name not in self.theory: - raise exception.CongressException( - "Cannot set policy for %s because it has not been created" % - name) - if self.theory[name].schema and len(self.theory[name].schema) > 0: - raise exception.CongressException( - "Schema for %s already set" % name) - self.theory[name].schema = compile.Schema(schema, complete=complete) - enabled, disabled, errs = self._process_limbo_events( - self.disabled_events) - self.disabled_events = disabled - self.error_events.extend(errs) - for event in enabled: - permitted, errors = self._update_obj_datalog([event]) - if not permitted: - self.error_events.append((event, errors)) - - def _create_status_dict(self, target, keys): - result = {} - - for k in keys: - attr = getattr(target, k, None) - if attr is not None: - result[k] = attr - - return result - - def get_status(self, source_id, params): - try: - if source_id in self.policy_names(): - target = self.policy_object(name=source_id) - else: - target = self.policy_object(id=source_id) - - keys = ['name', 'id'] - - if 'rule_id' in params: - target = target.get_rule(str(params['rule_id'])) - keys.extend(['comment', 'original_str']) - - except KeyError: - msg = ("policy with name or id '%s' doesn't exist" % source_id) - LOG.exception(msg) - raise exception.NotFound(msg) - - return self._create_status_dict(target, keys) - - def select(self, query, target=None, trace=False): - """Event handler for arbitrary queries. - - Returns the set of all instantiated QUERY that are true. - """ - if isinstance(query, six.string_types): - return self._select_string(query, self.get_target(target), trace) - elif isinstance(query, tuple): - return self._select_tuple(query, self.get_target(target), trace) - else: - return self._select_obj(query, self.get_target(target), trace) - - def initialize_tables(self, tablenames, facts, target=None): - """Event handler for (re)initializing a collection of tables - - @facts must be an iterable containing compile.Fact objects. 
- """ - target_theory = self.get_target(target) - alltables = set([compile.Tablename.build_service_table( - target_theory.name, x) - for x in tablenames]) - triggers = self.trigger_registry.relevant_triggers(alltables) - LOG.info("relevant triggers (init): %s", - ";".join(str(x) for x in triggers)) - # run queries on relevant triggers *before* applying changes - table_triggers = self.trigger_registry.triggers_by_table(triggers) - table_data_old = self._compute_table_contents(table_triggers) - # actually apply the updates - target_theory.initialize_tables(tablenames, facts) - # rerun the trigger queries to check for changes - table_data_new = self._compute_table_contents(table_triggers) - # run triggers if tables changed - for table, triggers in table_triggers.items(): - if table_data_old[table] != table_data_new[table]: - for trigger in triggers: - trigger.callback(table, - table_data_old[table], - table_data_new[table]) - - def insert(self, formula, target=None): - """Event handler for arbitrary insertion (rules and facts).""" - if isinstance(formula, six.string_types): - return self._insert_string(formula, target) - elif isinstance(formula, tuple): - return self._insert_tuple(formula, target) - else: - return self._insert_obj(formula, target) - - def delete(self, formula, target=None): - """Event handler for arbitrary deletion (rules and facts).""" - if isinstance(formula, six.string_types): - return self._delete_string(formula, target) - elif isinstance(formula, tuple): - return self._delete_tuple(formula, target) - else: - return self._delete_obj(formula, target) - - def update(self, sequence, target=None, persistent=False): - """Event handler for applying an arbitrary sequence of insert/deletes. - - If TARGET is supplied, it overrides the targets in SEQUENCE. - """ - if isinstance(sequence, six.string_types): - return self._update_string(sequence, target, persistent) - else: - return self._update_obj(sequence, target, persistent) - - def policy(self, target=None): - """Event handler for querying policy.""" - target = self.get_target(target) - if target is None: - return "" - return " ".join(str(p) for p in target.policy()) - - def content(self, target=None): - """Event handler for querying content().""" - target = self.get_target(target) - if target is None: - return "" - return " ".join(str(p) for p in target.content()) - - def simulate(self, query, theory, sequence, action_theory, delta=False, - trace=False, as_list=False): - """Event handler for simulation. - - :param query is a string/object to query after - :param theory is the policy to query - :param sequence is a string/iter of updates to state/policy or actions - :param action_theory is the policy that contains action descriptions - :param delta indicates whether to return *changes* to query caused by - sequence - :param trace indicates whether to include a string description of the - implementation. When True causes the return value to be the - tuple (result, trace). - :param as_list controls whether the result is forced to be a list of - answers - Returns a list of instances of query. If query/sequence are strings - the query instance list is a single string (unless as_list is True - in which case the query instance list is a list of strings). If - query/sequence are objects then the query instance list is a list - of objects. - - The computation of a query given an action sequence. That sequence - can include updates to atoms, updates to rules, and action - invocations. 
Returns a collection of Literals (as a string if the - query and sequence are strings or as a Python collection otherwise). - If delta is True, the return is a collection of Literals where - each tablename ends with either + or - to indicate whether - that fact was added or deleted. - Example atom update: q+(1) or q-(1) - Example rule update: p+(x) :- q(x) or p-(x) :- q(x) - Example action invocation: - create_network(17), options:value(17, "name", "net1") :- true - """ - assert self.get_target(theory) is not None, "Theory must be known" - assert self.get_target(action_theory) is not None, ( - "Action theory must be known") - if (isinstance(query, six.string_types) and - isinstance(sequence, six.string_types)): - return self._simulate_string(query, theory, sequence, - action_theory, delta, trace, as_list) - else: - return self._simulate_obj(query, theory, sequence, action_theory, - delta, trace) - - def get_tablename(self, source_id, table_id): - tables = self.get_tablenames(source_id) - # when the policy doesn't have any rule 'tables' is set([]) - # when the policy doesn't exist 'tables' is None - if tables and table_id in tables: - return table_id - - def get_tablenames(self, source_id): - if source_id in self.theory.keys(): - return self.tablenames(theory_name=source_id, include_modal=False) - - def get_row_data(self, table_id, source_id, trace=False): - # source_id is the policy name. But it needs to stay 'source_id' - # since RPC calls invoke by the name of the argument, and we're - # currently assuming the implementations of get_row_data in - # the policy engine, datasources, and datasource manager all - # use the same argument names. - policy_name = source_id - tablename = self.get_tablename(policy_name, table_id) - if not tablename: - raise exception.NotFound("table '%s' doesn't exist" % table_id) - - queries = self.table_contents_queries(tablename, policy_name) - if queries is None: - m = "Known table but unknown arity for '%s' in policy '%s'" % ( - tablename, policy_name) - LOG.error(m) - raise exception.CongressException(m) - - gen_trace = None - query = self.parse1(queries[0]) - # LOG.debug("query: %s", query) - result = self.select(query, target=policy_name, - trace=trace) - if trace: - literals = result[0] - gen_trace = result[1] - else: - literals = result - # should NOT need to convert to set -- see bug 1344466 - literals = frozenset(literals) - # LOG.info("results: %s", '\n'.join(str(x) for x in literals)) - results = [] - for lit in literals: - d = {} - d['data'] = [arg.name for arg in lit.arguments] - results.append(d) - - if trace: - return results, gen_trace - else: - return results - - def tablenames(self, body_only=False, include_builtin=False, - theory_name=None, include_modal=True): - """Return tablenames occurring in some theory.""" - tables = set() - - if theory_name: - th = self.theory.get(theory_name, None) - if th: - tables |= set(th.tablenames(body_only=body_only, - include_builtin=include_builtin, - include_modal=include_modal, - include_facts=True)) - return tables - - for th in self.theory.values(): - tables |= set(th.tablenames(body_only=body_only, - include_builtin=include_builtin, - include_modal=include_modal, - include_facts=True)) - return tables - - def reserved_tablename(self, name): - return name.startswith('___') - - def table_contents_queries(self, tablename, policy, modal=None): - """Return list of queries yielding contents of TABLENAME in POLICY.""" - # TODO(thinrichs): Handle case of multiple arities. Connect to API. 
- arity = self.arity(tablename, policy, modal) - if arity is None: - return - args = ["x" + str(i) for i in range(0, arity)] - atom = tablename + "(" + ",".join(args) + ")" - if modal is None: - return [atom] - else: - return [modal + "[" + atom + "]"] - - def register_trigger(self, tablename, callback, policy=None, modal=None): - """Register CALLBACK to run when table TABLENAME changes.""" - # calling self.get_target_name to check if policy actually exists - # and to resolve None to a policy name - return self.trigger_registry.register_table( - tablename, self.get_target_name(policy), callback, modal=modal) - - def unregister_trigger(self, trigger): - """Unregister CALLBACK for table TABLENAME.""" - return self.trigger_registry.unregister(trigger) - - def arity(self, table, theory, modal=None): - """Return number of columns for TABLE in THEORY. - - TABLE can include the policy name, as policy:table.
- THEORY is the name of the theory we are asking. - MODAL is the value of the modal, if any. - """ - arity = self.get_target(theory).arity(table, modal) - if arity is not None: - return arity - policy, tablename = compile.Tablename.parse_service_table(table) - if policy not in self.theory: - return - return self.theory[policy].arity(tablename, modal) - - def find_subpolicy(self, required_tables, prohibited_tables, - output_tables, target=None): - """Return a subset of rules in @theory. - - @required_tables is the set of tablenames that a rule must depend on. - @prohibited_tables is the set of tablenames that a rule must - NOT depend on. - @output_tables is the set of tablenames that all rules must support. - """ - target = self.get_target(target) - if target is None: - return - subpolicy = compile.find_subpolicy( - target.content(), - required_tables, - prohibited_tables, - output_tables) - return " ".join(str(p) for p in subpolicy) - - ########################################## - # Implementation of Non-persistence layer - ########################################## - # Arguments that are strings are suffixed with _string. - # All other arguments are instances of Theory, Literal, etc. - - ################################### - # Implementation: updates - - # insert: convenience wrapper around Update - def _insert_string(self, policy_string, theory_string): - policy = self.parse(policy_string) - return self._update_obj( - [compile.Event(formula=x, insert=True, target=theory_string) - for x in policy], - theory_string) - - def _insert_tuple(self, iter, theory_string): - return self._insert_obj(compile.Literal.create_from_iter(iter), - theory_string) - - def _insert_obj(self, formula, theory_string): - return self._update_obj([compile.Event(formula=formula, insert=True, - target=theory_string)], - theory_string) - - # delete: convenience wrapper around Update - def _delete_string(self, policy_string, theory_string): - policy = self.parse(policy_string) - return self._update_obj( - [compile.Event(formula=x, insert=False, target=theory_string) - for x in policy], - theory_string) - - def _delete_tuple(self, iter, theory_string): - return self._delete_obj(compile.Literal.create_from_iter(iter), - theory_string) - - def _delete_obj(self, formula, theory_string): - return self._update_obj([compile.Event(formula=formula, insert=False, - target=theory_string)], - theory_string) - - # update - def _update_string(self, events_string, theory_string, persistent=False): - assert False, "Not yet implemented--need parser to read events" - - def _update_obj(self, events, theory_string, persistent=False): - """Apply events. - - Checks if applying EVENTS is permitted and if not - returns a list of errors. If it is permitted, it - applies it and then returns a list of changes. - In both cases, the return is a 2-tuple (if-permitted, list). - Note: All event.target fields are the NAMES of theories, not - theory objects. theory_string is the default theory. 
- """ - errors = [] - # resolve event targets and check that they actually exist - for event in events: - if event.target is None: - event.target = theory_string - try: - event.target = self.get_target_name(event.target) - except exception.PolicyException as e: - errors.append(e) - if len(errors) > 0: - return (False, errors) - # eliminate column refs where possible - enabled, disabled, errs = self._process_limbo_events( - events, persistent) - for err in errs: - errors.extend(err[1]) - if len(errors) > 0: - return (False, errors) - # continue updating and if successful disable the rest - permitted, extra = self._update_obj_datalog(enabled) - if not permitted: - return permitted, extra - self._disable_events(disabled) - return (True, extra) - - def _disable_events(self, events): - """Take collection of insert events and disable them. - - Assume that events.theory is an object. - """ - self.disabled_events.extend(events) - - def _process_limbo_events(self, events, persistent=False): - """Assume that events.theory is an object. - - Return (, , ) - where is a list of (event, err-list). - """ - disabled = [] - enabled = [] - errors = [] - for event in events: - try: - oldformula = event.formula - event.formula = \ - oldformula.eliminate_column_references_and_pad_positional( - self.theory, default_theory=event.target) - # doesn't copy over ID since it creates a new one - event.formula.set_id(oldformula.id) - enabled.append(event) - - errs = compile.check_schema_consistency( - event.formula, self.theory, event.target) - if len(errs) > 0: - errors.append((event, errs)) - continue - except exception.IncompleteSchemaException as e: - if persistent: - # FIXME(ekcs): inconsistent behavior? - # persistent_insert with 'unknown:p(x)' allowed but - # 'unknown:p(colname=x)' disallowed - raise exception.PolicyException(str(e), name='rule_syntax') - else: - disabled.append(event) - except exception.PolicyException as e: - errors.append((event, [e])) - return enabled, disabled, errors - - def _update_obj_datalog(self, events): - """Do the updating. - - Checks if applying EVENTS is permitted and if not - returns a list of errors. If it is permitted, it - applies it and then returns a list of changes. - In both cases, the return is a 2-tuple (if-permitted, list). - Note: All event.target fields are the NAMES of theories, not - theory objects, and all event.formula fields have - had all column references removed. - """ - # TODO(thinrichs): look into whether we can move the bulk of the - # trigger code into Theory, esp. so that MaterializedViewTheory - # can implement it more efficiently. 
- self.table_log(None, "Updating with %s", utility.iterstr(events)) - errors = [] - # eliminate noop events - events = self._actual_events(events) - if not len(events): - return (True, []) - # check that the updates would not cause an error - by_theory = self._group_events_by_target(events) - for th, th_events in by_theory.items(): - th_obj = self.get_target(th) - errors.extend(th_obj.update_would_cause_errors(th_events)) - if len(errors) > 0: - return (False, errors) - # update dependency graph (and undo it if errors) - graph_changes = self.global_dependency_graph.formula_update( - events, include_atoms=False) - if graph_changes: - if self.global_dependency_graph.has_cycle(): - # TODO(thinrichs): include path - errors.append(exception.PolicyException( - "Rules are recursive")) - self.global_dependency_graph.undo_changes(graph_changes) - if len(errors) > 0: - return (False, errors) - # modify execution triggers - self._maintain_triggers() - # figure out relevant triggers - triggers = self.trigger_registry.relevant_triggers(events) - LOG.info("relevant triggers (update): %s", - ";".join(str(x) for x in triggers)) - # signal trigger registry about graph updates - self.trigger_registry.update_dependencies(graph_changes) - - # run queries on relevant triggers *before* applying changes - table_triggers = self.trigger_registry.triggers_by_table(triggers) - table_data_old = self._compute_table_contents(table_triggers) - # actually apply the updates - changes = [] - for th, th_events in by_theory.items(): - changes.extend(self.get_target(th).update(th_events)) - # rerun the trigger queries to check for changes - table_data_new = self._compute_table_contents(table_triggers) - # run triggers if tables changed - for table, triggers in table_triggers.items(): - if table_data_old[table] != table_data_new[table]: - for trigger in triggers: - trigger.callback(table, - table_data_old[table], - table_data_new[table]) - # return non-error and the list of changes - return (True, changes) - - def _maintain_triggers(self): - pass - - def _actual_events(self, events): - actual = [] - for event in events: - th_obj = self.get_target(event.target) - actual.extend(th_obj.actual_events([event])) - return actual - - def _compute_table_contents(self, table_policy_pairs): - data = {} # dict from (table, policy) to set of query results - for table, policy, modal in table_policy_pairs: - th = self.get_target(policy) - queries = self.table_contents_queries(table, policy, modal) or [] - data[(table, policy, modal)] = set() - for query in queries: - ans = set(self._select_obj(self.parse1(query), th, False)) - data[(table, policy, modal)] |= ans - return data - - def _group_events_by_target(self, events): - """Return mapping of targets and events. - - Return a dictionary mapping event.target to the list of events - with that target. Assumes each event.target is a string. - Returns a dictionary from event.target to a list of events. - """ - by_target = {} - for event in events: - if event.target not in by_target: - by_target[event.target] = [event] - else: - by_target[event.target].append(event) - return by_target - - def _reroute_events(self, events): - """Events re-routing. - - Given list of events with different event.target values, - change each event.target so that the events are routed to the - proper place.
- """ - by_target = self._group_events_by_target(events) - for target, target_events in by_target.items(): - newth = self._compute_route(target_events, target) - for event in target_events: - event.target = newth - - def _references_to_policy(self, name): - refs = [] - name = name + ":" - for th_obj in self.theory.values(): - for rule in th_obj.policy(): - if any(table.startswith(name) for table in rule.tablenames()): - refs.append((name, rule)) - return refs - - ########################## - # Implementation: queries - - # select - def _select_string(self, policy_string, theory, trace): - policy = self.parse(policy_string) - assert (len(policy) == 1), ( - "Queries can have only 1 statement: {}".format( - [str(x) for x in policy])) - results = self._select_obj(policy[0], theory, trace) - if trace: - return (compile.formulas_to_string(results[0]), results[1]) - else: - return compile.formulas_to_string(results) - - def _select_tuple(self, tuple, theory, trace): - return self._select_obj(compile.Literal.create_from_iter(tuple), - theory, trace) - - def _select_obj(self, query, theory, trace): - if trace: - old_tracer = self.get_tracer() - tracer = base.StringTracer() # still LOG.debugs trace - tracer.trace('*') # trace everything - self.set_tracer(tracer) - value = set(theory.select(query)) - self.set_tracer(old_tracer) - return (value, tracer.get_value()) - return set(theory.select(query)) - - # simulate - def _simulate_string(self, query, theory, sequence, action_theory, delta, - trace, as_list): - query = self.parse(query) - if len(query) > 1: - raise exception.PolicyException( - "Query %s contained more than 1 rule" % query) - query = query[0] - sequence = self.parse(sequence) - result = self._simulate_obj(query, theory, sequence, action_theory, - delta, trace) - if trace: - actual_result = result[0] - else: - actual_result = result - strresult = [str(x) for x in actual_result] - if not as_list: - strresult = " ".join(strresult) - if trace: - return (strresult, result[1]) - else: - return strresult - - def _simulate_obj(self, query, theory, sequence, action_theory, delta, - trace): - """Simulate objects. - - Both THEORY and ACTION_THEORY are names of theories. - Both QUERY and SEQUENCE are parsed. - """ - assert compile.is_datalog(query), "Query must be formula" - # Each action is represented as a rule with the actual action - # in the head and its supporting data (e.g. 
options) in the body - assert all(compile.is_extended_datalog(x) for x in sequence), ( - "Sequence must be an iterable of Rules") - th_object = self.get_target(theory) - - if trace: - old_tracer = self.get_tracer() - tracer = base.StringTracer() # still LOG.debugs trace - tracer.trace('*') # trace everything - self.set_tracer(tracer) - - # if computing delta, query the current state - if delta: - self.table_log(query.tablename(), - "** Simulate: Querying %s", query) - oldresult = th_object.select(query) - self.table_log(query.tablename(), - "Original result of %s is %s", - query, utility.iterstr(oldresult)) - - # apply SEQUENCE - self.table_log(query.tablename(), "** Simulate: Applying sequence %s", - utility.iterstr(sequence)) - undo = self.project(sequence, theory, action_theory) - - # query the resulting state - self.table_log(query.tablename(), "** Simulate: Querying %s", query) - result = set(th_object.select(query)) - self.table_log(query.tablename(), "Result of %s is %s", query, - utility.iterstr(result)) - # rollback the changes - self.table_log(query.tablename(), "** Simulate: Rolling back") - self.project(undo, theory, action_theory) - - # if computing the delta, do it - if delta: - result = set(result) - oldresult = set(oldresult) - pos = result - oldresult - neg = oldresult - result - pos = [formula.make_update(is_insert=True) for formula in pos] - neg = [formula.make_update(is_insert=False) for formula in neg] - result = pos + neg - if trace: - self.set_tracer(old_tracer) - return (result, tracer.get_value()) - return result - - # Helpers - - def _react_to_changes(self, changes): - """Filters changes and executes actions contained therein.""" - # LOG.debug("react to: %s", iterstr(changes)) - actions = self.get_action_names() - formulas = [change.formula for change in changes - if (isinstance(change, compile.Event) - and change.is_insert() - and change.formula.is_atom() - and change.tablename() in actions)] - # LOG.debug("going to execute: %s", iterstr(formulas)) - self.execute(formulas) - - def _data_listeners(self): - return [self.theory[self.ENFORCEMENT_THEORY]] - - def _compute_route(self, events, theory): - """Compute rerouting. - - When a formula is inserted/deleted (in OPERATION) into a THEORY, - it may need to be rerouted to another theory. This function - computes that rerouting. Returns a Theory object. - """ - self.table_log(None, "Computing route for theory %s and events %s", - theory.name, utility.iterstr(events)) - # Since Enforcement includes Classify and Classify includes Database, - # any operation on data needs to be funneled into Enforcement. - # Enforcement pushes it down to the others and then - # reacts to the results. That is, we really have one big theory - # Enforcement + Classify + Database as far as the data is concerned - # but formulas can be inserted/deleted into each policy individually. - if all([compile.is_atom(event.formula) for event in events]): - if (theory is self.theory[self.CLASSIFY_THEORY] or - theory is self.theory[self.DATABASE]): - return self.theory[self.ENFORCEMENT_THEORY] - return theory - - def project(self, sequence, policy_theory, action_theory): - """Apply the list of updates SEQUENCE. - - Apply the list of updates SEQUENCE, where actions are described - in ACTION_THEORY. Return an update sequence that will undo the - projection. - - SEQUENCE can include atom insert/deletes, rule insert/deletes, - and action invocations. 
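
# Illustrative sketch, not part of the deleted source: the delta
# computation at the end of _simulate_obj(). Facts are plain strings
# here; the real code calls formula.make_update() to add the +/- suffix
# to the tablename.
def delta(old_result, new_result):
    pos = {f.replace('(', '+(', 1) for f in new_result - old_result}
    neg = {f.replace('(', '-(', 1) for f in old_result - new_result}
    return pos | neg

assert delta({'q(1)'}, {'q(1)', 'q(2)'}) == {'q+(2)'}
assert delta({'q(1)'}, set()) == {'q-(1)'}
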
Projecting an action only - simulates that action's invocation using the action's description; - the results are therefore only an approximation of executing - actions directly. Elements of SEQUENCE are just formulas - applied to the given THEORY. They are NOT Event()s. - - SEQUENCE is really a program in a mini-programming - language--enabling results of one action to be passed to another. - Hence, even ignoring actions, this functionality cannot be achieved - by simply inserting/deleting. - """ - actth = self.theory[action_theory] - policyth = self.theory[policy_theory] - # apply changes to the state - newth = nonrecursive.NonrecursiveRuleTheory(abbr="Temp") - newth.tracer.trace('*') - actth.includes.append(newth) - # TODO(thinrichs): turn 'includes' into an object that guarantees - # there are no cycles through inclusion. Otherwise we get - # infinite loops - if actth is not policyth: - actth.includes.append(policyth) - actions = self.get_action_names(action_theory) - self.table_log(None, "Actions: %s", utility.iterstr(actions)) - undos = [] # a list of updates that will undo SEQUENCE - self.table_log(None, "Project: %s", sequence) - last_results = [] - for formula in sequence: - self.table_log(None, "** Updating with %s", formula) - self.table_log(None, "Actions: %s", utility.iterstr(actions)) - self.table_log(None, "Last_results: %s", - utility.iterstr(last_results)) - tablename = formula.tablename() - if tablename not in actions: - if not formula.is_update(): - raise exception.PolicyException( - "Sequence contained non-action, non-update: " + - str(formula)) - updates = [formula] - else: - self.table_log(tablename, "Projecting %s", formula) - # define extension of current Actions theory - if formula.is_atom(): - assert formula.is_ground(), ( - "Projection atomic updates must be ground") - assert not formula.is_negated(), ( - "Projection atomic updates must be positive") - newth.define([formula]) - else: - # instantiate action using prior results - newth.define(last_results) - self.table_log(tablename, "newth (with prior results) %s", - utility.iterstr(newth.content())) - bindings = actth.top_down_evaluation( - formula.variables(), formula.body, find_all=False) - if len(bindings) == 0: - continue - grounds = formula.plug_heads(bindings[0]) - grounds = [act for act in grounds if act.is_ground()] - assert all(not lit.is_negated() for lit in grounds) - newth.define(grounds) - self.table_log(tablename, - "newth contents (after action insertion): %s", - utility.iterstr(newth.content())) - # self.table_log(tablename, "action contents: %s", - # iterstr(actth.content())) - # self.table_log(tablename, "action.includes[1] contents: %s", - # iterstr(actth.includes[1].content())) - # self.table_log(tablename, "newth contents: %s", - # iterstr(newth.content())) - # compute updates caused by action - updates = actth.consequences(compile.is_update) - updates = self.resolve_conflicts(updates) - updates = unify.skolemize(updates) - self.table_log(tablename, "Computed updates: %s", - utility.iterstr(updates)) - # compute results for next time - for update in updates: - newth.insert(update) - last_results = actth.consequences(compile.is_result) - last_results = set([atom for atom in last_results - if atom.is_ground()]) - # apply updates - for update in updates: - undo = self.project_updates(update, policy_theory) - if undo is not None: - undos.append(undo) - undos.reverse() - if actth is not policyth: - actth.includes.remove(policyth) - actth.includes.remove(newth) - return undos - - def 
project_updates(self, delta, theory): - """Project atom/delta rule insertion/deletion. - - Takes an atom/rule DELTA with update head table - (i.e. ending in + or -) and inserts/deletes, respectively, - that atom/rule into THEORY after stripping - the +/-. Returns None if DELTA had no effect on the - current state. - """ - theory = delta.theory_name() or theory - - self.table_log(None, "Applying update %s to %s", delta, theory) - th_obj = self.theory[theory] - insert = delta.tablename().endswith('+') - newdelta = delta.drop_update().drop_theory() - changed = th_obj.update([compile.Event(formula=newdelta, - insert=insert)]) - if changed: - return delta.invert_update() - else: - return None - - def resolve_conflicts(self, atoms): - """If p+(args) and p-(args) are present, removes the p-(args).""" - neg = set() - result = set() - # split atoms into NEG and RESULT - for atom in atoms: - if atom.table.table.endswith('+'): - result.add(atom) - elif atom.table.table.endswith('-'): - neg.add(atom) - else: - result.add(atom) - # add elems from NEG only if their inverted version not in RESULT - for atom in neg: - if atom.invert_update() not in result: # slow: copying ATOM here - result.add(atom) - return result - - def parse(self, string): - return compile.parse(string, theories=self.theory) - - def parse1(self, string): - return compile.parse1(string, theories=self.theory) - - ########################## - # Helper functions - ########################## - - def get_target(self, name): - if name is None: - if len(self.theory) == 1: - name = next(iter(self.theory)) - elif len(self.theory) == 0: - raise exception.PolicyException("No policies exist.") - else: - raise exception.PolicyException( - "Must choose a policy to operate on") - if name not in self.theory: - raise exception.PolicyException("Unknown policy " + str(name)) - return self.theory[name] - - def _find_policy_name(self, name_or_id): - """Given name or ID, return the name of the policy or KeyError.""" - if name_or_id in self.theory: - return name_or_id - for th in self.theory.values(): - if th.id == name_or_id: - return th.name - raise KeyError("Policy %s could not be found" % name_or_id) - - def get_target_name(self, name): - """Resolve NAME to the name of a proper policy (even if it is None). - - Raises PolicyException there is no such policy. - """ - return self.get_target(name).name - - def get_action_names(self, target): - """Return a list of the names of action tables.""" - if target not in self.theory: - return [] - actionth = self.theory[target] - actions = actionth.select(self.parse1('action(x)')) - return [action.arguments[0].name for action in actions] - - def table_log(self, table, msg, *args): - self.tracer.log(table, "RT : %s" % msg, *args) - - def set_tracer(self, tracer): - if isinstance(tracer, base.Tracer): - self.tracer = tracer - for th in self.theory: - self.theory[th].set_tracer(tracer) - else: - self.tracer = tracer[0] - for th, tracr in tracer[1].items(): - if th in self.theory: - self.theory[th].set_tracer(tracr) - - def get_tracer(self): - """Return (Runtime's tracer, dict of tracers for each theory). - - Useful so we can temporarily change tracing. 
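
# Illustrative sketch, not part of the deleted source: the rule
# implemented by resolve_conflicts() above, with strings like "p+(1)"
# standing in for compile.Literal objects.
def resolve_conflicts(atoms):
    inserts = {a for a in atoms if '+(' in a}
    deletes = {a for a in atoms if '-(' in a}
    keep = set(atoms) - deletes
    # keep a delete only when its matching insert is absent
    keep |= {d for d in deletes if d.replace('-(', '+(', 1) not in inserts}
    return keep

assert resolve_conflicts({'p+(1)', 'p-(1)', 'p-(2)'}) == {'p+(1)', 'p-(2)'}
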
- """ - d = {} - for th in self.theory: - d[th] = self.theory[th].get_tracer() - return (self.tracer, d) - - def debug_mode(self): - tracer = base.Tracer() - tracer.trace('*') - self.set_tracer(tracer) - - def production_mode(self): - tracer = base.Tracer() - self.set_tracer(tracer) - - -############################################################################## -# ExperimentalRuntime -############################################################################## - -class ExperimentalRuntime (Runtime): - def explain(self, query, tablenames=None, find_all=False, target=None): - """Event handler for explanations. - - Given a ground query and a collection of tablenames - that we want the explanation in terms of, - return proof(s) that the query is true. If - FIND_ALL is True, returns list; otherwise, returns single proof. - """ - if isinstance(query, six.string_types): - return self.explain_string( - query, tablenames, find_all, self.get_target(target)) - elif isinstance(query, tuple): - return self.explain_tuple( - query, tablenames, find_all, self.get_target(target)) - else: - return self.explain_obj( - query, tablenames, find_all, self.get_target(target)) - - def remediate(self, formula): - """Event handler for remediation.""" - if isinstance(formula, six.string_types): - return self.remediate_string(formula) - elif isinstance(formula, tuple): - return self.remediate_tuple(formula) - else: - return self.remediate_obj(formula) - - def execute(self, action_sequence): - """Event handler for execute: - - Execute a sequence of ground actions in the real world. - """ - if isinstance(action_sequence, six.string_types): - return self.execute_string(action_sequence) - else: - return self.execute_obj(action_sequence) - - def access_control(self, action, support=''): - """Event handler for making access_control request. - - ACTION is an atom describing a proposed action instance. - SUPPORT is any data that should be assumed true when posing - the query. Returns True iff access is granted. 
- """ - # parse - if isinstance(action, six.string_types): - action = self.parse1(action) - assert compile.is_atom(action), "ACTION must be an atom" - if isinstance(support, six.string_types): - support = self.parse(support) - # add support to theory - newth = nonrecursive.NonrecursiveRuleTheory(abbr="Temp") - newth.tracer.trace('*') - for form in support: - newth.insert(form) - acth = self.theory[self.ACCESSCONTROL_THEORY] - acth.includes.append(newth) - # check if action is true in theory - result = len(acth.select(action, find_all=False)) > 0 - # allow new theory to be freed - acth.includes.remove(newth) - return result - - # explain - def explain_string(self, query_string, tablenames, find_all, theory): - policy = self.parse(query_string) - assert len(policy) == 1, "Queries can have only 1 statement" - results = self.explain_obj(policy[0], tablenames, find_all, theory) - return compile.formulas_to_string(results) - - def explain_tuple(self, tuple, tablenames, find_all, theory): - self.explain_obj(compile.Literal.create_from_iter(tuple), - tablenames, find_all, theory) - - def explain_obj(self, query, tablenames, find_all, theory): - return theory.explain(query, tablenames, find_all) - - # remediate - def remediate_string(self, policy_string): - policy = self.parse(policy_string) - assert len(policy) == 1, "Queries can have only 1 statement" - return compile.formulas_to_string(self.remediate_obj(policy[0])) - - def remediate_tuple(self, tuple, theory): - self.remediate_obj(compile.Literal.create_from_iter(tuple)) - - def remediate_obj(self, formula): - """Find a collection of action invocations - - That if executed result in FORMULA becoming false. - """ - actionth = self.theory[self.ACTION_THEORY] - classifyth = self.theory[self.CLASSIFY_THEORY] - # look at FORMULA - if compile.is_atom(formula): - pass # TODO(tim): clean up unused variable - # output = formula - elif compile.is_regular_rule(formula): - pass # TODO(tim): clean up unused variable - # output = formula.head - else: - assert False, "Must be a formula" - # grab a single proof of FORMULA in terms of the base tables - base_tables = classifyth.base_tables() - proofs = classifyth.explain(formula, base_tables, False) - if proofs is None: # FORMULA already false; nothing to be done - return [] - # Extract base table literals that make that proof true. - # For remediation, we assume it suffices to make any of those false. - # (Leaves of proof may not be literals or may not be written in - # terms of base tables, despite us asking for base tables-- - # because of negation.) - leaves = [leaf for leaf in proofs[0].leaves() - if (compile.is_atom(leaf) and - leaf.table in base_tables)] - self.table_log(None, "Leaves: %s", utility.iterstr(leaves)) - # Query action theory for abductions of negated base tables - actions = self.get_action_names() - results = [] - for lit in leaves: - goal = lit.make_positive() - if lit.is_negated(): - goal.table = goal.table + "+" - else: - goal.table = goal.table + "-" - # return is a list of goal :- act1, act2, ... - # This is more informative than query :- act1, act2, ... - for abduction in actionth.abduce(goal, actions, False): - results.append(abduction) - return results - - ########################## - # Execute actions - - def execute_string(self, actions_string): - self.execute_obj(self.parse(actions_string)) - - def execute_obj(self, actions): - """Executes the list of ACTION instances one at a time. - - For now, our execution is just logging. 
- """ - LOG.debug("Executing: %s", utility.iterstr(actions)) - assert all(compile.is_atom(action) and action.is_ground() - for action in actions) - action_names = self.get_action_names() - assert all(action.table in action_names for action in actions) - for action in actions: - if not action.is_ground(): - if self.logger is not None: - self.logger.warn("Unground action to execute: %s", action) - continue - if self.logger is not None: - self.logger.info("%s", action) - -############################################################################## -# Engine that operates on the DSE -############################################################################## - - -class PolicySubData (object): - def __init__(self, trigger): - self.table_trigger = trigger - self.to_add = () - self.to_rem = () - self.dataindex = trigger.policy + ":" + trigger.tablename - - def trigger(self): - return self.table_trigger - - def changes(self): - result = [] - for row in self.to_add: - event = compile.Event(formula=row, insert=True) - result.append(event) - for row in self.to_rem: - event = compile.Event(formula=row, insert=False) - result.append(event) - return result - - -class DseRuntime (Runtime, data_service.DataService): - def __init__(self, name): - Runtime.__init__(self) - data_service.DataService.__init__(self, name) - self.name = name - self.msg = None - self.last_policy_change = None - self.policySubData = {} - self.log_actions_only = cfg.CONF.enable_execute_action - self.add_rpc_endpoint(DseRuntimeEndpoints(self)) - - def set_synchronizer(self): - obj = policy_rule_synchronizer.PolicyRuleSynchronizer(self, self.node) - self.synchronizer = obj - - def start(self): - super(DseRuntime, self).start() - self.set_synchronizer() - if self.synchronizer is not None: - self.synchronizer.start() - - def extend_schema(self, service_name, schema): - newschema = {} - for key, value in schema: - newschema[service_name + ":" + key] = value - super(DseRuntime, self).extend_schema(self, newschema) - - def receive_policy_update(self, msg): - LOG.debug("received policy-update msg %s", - utility.iterstr(msg.body.data)) - # update the policy and subscriptions to data tables. - self.last_policy_change = self.process_policy_update(msg.body.data) - - def process_policy_update(self, events, persistent=False): - LOG.debug("process_policy_update %s" % events) - # body_only so that we don't subscribe to tables in the head - oldtables = self.tablenames(body_only=True) - result = Runtime.process_policy_update(self, events, - persistent=persistent) - newtables = self.tablenames(body_only=True) - self.update_table_subscriptions(oldtables, newtables) - return result - - def initialize_table_subscriptions(self): - """Initialize table subscription. - - Once policies have all been loaded, this function subscribes to - all the necessary tables. See UPDATE_TABLE_SUBSCRIPTIONS as well. - """ - self.update_table_subscriptions(set(), self.tablenames()) - - def update_table_subscriptions(self, oldtables, newtables): - """Update table subscription. - - Change the subscriptions from OLDTABLES to NEWTABLES, ensuring - to load all the appropriate services. 
- """ - add = newtables - oldtables - rem = oldtables - newtables - LOG.debug("Tables:: Old: %s, new: %s, add: %s, rem: %s", - oldtables, newtables, add, rem) - # subscribe to the new tables (loading services as required) - for table in add: - if not self.reserved_tablename(table): - (service, tablename) = compile.Tablename.parse_service_table( - table) - if service is not None: - LOG.debug("Subscribing to new (service, table): (%s, %s)", - service, tablename) - self.subscribe(service, tablename) - - # unsubscribe from the old tables - for table in rem: - (service, tablename) = compile.Tablename.parse_service_table(table) - if service is not None: - LOG.debug("Unsubscribing to new (service, table): (%s, %s)", - service, tablename) - self.unsubscribe(service, tablename) - - # Note(thread-safety): blocking function - def execute_action(self, service_name, action, action_args): - """Event handler for action execution. - - :param service_name: openstack service to perform the action on, - e.g. 'nova', 'neutron' - :param action: action to perform on service, e.g. an API call - :param action_args: positional-args and named-args in format: - {'positional': ['p_arg1', 'p_arg2'], - 'named': {'name1': 'n_arg1', 'name2': 'n_arg2'}}. - """ - if not self.log_actions_only: - LOG.info("action %s is called with args %s on %s, but " - "current configuration doesn't allow Congress to " - "execute any action.", action, action_args, service_name) - return - - # Log the execution - LOG.info("%s:: executing: %s:%s on %s", - self.name, service_name, action, action_args) - if self.logger is not None: - pos_args = '' - if 'positional' in action_args: - pos_args = ", ".join(str(x) for x in action_args['positional']) - named_args = '' - if 'named' in action_args: - named_args = ", ".join( - "%s=%s" % (key, val) - for key, val in action_args['named'].items()) - delimit = '' - if pos_args and named_args: - delimit = ', ' - self.logger.info( - "Executing %s:%s(%s%s%s)", - service_name, action, pos_args, delimit, named_args) - - # execute the action on a service in the DSE - if not self.service_exists(service_name): - raise exception.PolicyException( - "Service %s not found" % service_name) - if not action: - raise exception.PolicyException("Action not found") - LOG.info("Sending request(%s:%s), args = %s", - service_name, action, action_args) - # Note(thread-safety): blocking call - self._rpc(service_name, action, args=action_args) - - def pub_policy_result(self, table, olddata, newdata): - """Callback for policy table triggers.""" - LOG.debug("grabbing policySubData[%s]", table) - policySubData = self.policySubData[table] - policySubData.to_add = newdata - olddata - policySubData.to_rem = olddata - newdata - LOG.debug("Table Data:: Old: %s, new: %s, add: %s, rem: %s", - olddata, newdata, policySubData.to_add, policySubData.to_rem) - - # TODO(dse2): checks needed that all literals are facts - # TODO(dse2): should we support modals and other non-fact literals? - # convert literals to rows for dse2 - newdata = [lit.argument_names() for lit in newdata] - self.publish(policySubData.dataindex, newdata) - - def get_snapshot(self, table_name): - # print("agnostic policy engine get_snapshot(%s); %s" % ( - # table_name, self.policySubData[table])) - (policy, tablename) = compile.Tablename.parse_service_table(table_name) - data = self.get_row_data(tablename, policy, trace=False) - data = [record['data'] for record in data] - return data - - def prepush_processor(self, data, dataindex, type=None): - """Called before push. 
- - Takes as input the DATA that the receiver needs and returns - the payload for the message. If this is a regular publication - message, make the payload just the delta; otherwise, make the - payload the entire table. - """ - # This routine basically ignores DATA and sends a delta of the - # policy table (i.e. dataindex) changes, not the entire state. - LOG.debug("prepush_processor: dataindex <%s> data: %s", dataindex, - data) - # if not a regular publication, just return the original data - if type != 'pub': - LOG.debug("prepush_processor: returned original data") - if type == 'sub' and data is None: - # Always want to send initialization of [] - return [] - return data - # grab deltas to publish to subscribers - (policy, tablename) = compile.Tablename.parse_service_table(dataindex) - result = self.policySubData[(tablename, policy, None)].changes() - if len(result) == 0: - # Policy engine expects an empty update to be an init msg - # So if delta is empty, return None, which signals - # the message should not be sent. - result = None - text = "None" - else: - text = utility.iterstr(result) - LOG.debug("prepush_processor for <%s> returning with %s items", - dataindex, text) - return result - - def _maintain_triggers(self): - # ensure there is a trigger registered to execute actions - curr_tables = set(self.global_dependency_graph.tables_with_modal( - 'execute')) - # add new triggers - for table in curr_tables: - LOG.debug("%s:: checking for missing trigger table %s", - self.name, table) - if table not in self.execution_triggers: - (policy, tablename) = compile.Tablename.parse_service_table( - table) - LOG.debug("creating new trigger for policy=%s, table=%s", - policy, tablename) - # bind policy and tablename as default arguments so each - # trigger keeps its own iteration's values; late binding - # would make every trigger see the last loop iteration - trig = self.trigger_registry.register_table( - tablename, policy, - lambda table, old, new, policy=policy, tablename=tablename: - self._execute_table(policy, tablename, old, new), - modal='execute') - self.execution_triggers[table] = trig - # remove triggers no longer needed - # Using a copy of the execution_triggers keys so we can delete - # entries inside the loop - for table in self.execution_triggers.copy().keys(): - LOG.debug("%s:: checking for stale trigger table %s", - self.name, table) - if table not in curr_tables: - LOG.debug("removing trigger for table %s", table) - try: - self.trigger_registry.unregister( - self.execution_triggers[table]) - del self.execution_triggers[table] - except KeyError: - LOG.exception( - "Tried to unregister non-existent trigger: %s", table) - - # Note(thread-safety): blocking function - def _execute_table(self, theory, table, old, new): - # LOG.info("execute_table(theory=%s, table=%s, old=%s, new=%s", - #          theory, table, ";".join(str(x) for x in old), - #          ";".join(str(x) for x in new)) - service, tablename = compile.Tablename.parse_service_table(table) - service = service or theory - for newlit in new - old: - args = [term.name for term in newlit.arguments] - LOG.info("%s:: on service %s executing %s on %s", - self.name, service, tablename, args) - try: - # Note(thread-safety): blocking call - self.execute_action(service, tablename, {'positional': args}) - except exception.PolicyException as e: - LOG.error(str(e)) - - def stop(self): - if self.synchronizer: - self.synchronizer.stop() - super(DseRuntime, self).stop() - - # eventually we should remove the action theory as a default, - # but we need to update the docs and tutorials - def create_default_policies(self): - # check the DB before creating policy instead of in-mem - policy = db_policy_rules.get_policy_by_name(self.DEFAULT_THEORY) - if policy is None: -
self.persistent_create_policy(name=self.DEFAULT_THEORY, - desc='default policy') - - policy = db_policy_rules.get_policy_by_name(self.ACTION_THEORY) - if policy is None: - self.persistent_create_policy(name=self.ACTION_THEORY, - kind=base.ACTION_POLICY_TYPE, - desc='default action policy') - - # Note(thread-safety): blocking function - def _rpc(self, service_name, action, args): - """Overriding _rpc so it uses dse2.""" - # TODO(ramineni): This is called only during execute_action; kept - # the same function name for compatibility with the old arch - - retry_rpc = cfg.CONF.dse.execute_action_retry - args = {'action': action, 'action_args': args, 'wait': retry_rpc} - - def execute_once(): - return self.rpc(service_name, 'request_execute', args, - timeout=cfg.CONF.dse.long_timeout, retry=0) - - def execute_retry(): - timeout = cfg.CONF.dse.execute_action_retry_timeout - start_time = time.time() - end_time = start_time + timeout - while timeout <= 0 or time.time() < end_time: - try: - return self.rpc( - service_name, 'request_execute', args, - timeout=cfg.CONF.dse.long_timeout, retry=0) - except (messaging_exceptions.MessagingTimeout, - messaging_exceptions.MessageDeliveryFailure): - LOG.warning('DSE failure executing action %s with ' - 'arguments %s. Retrying.', - action, args['action_args']) - LOG.error('Failed to execute action %s with arguments %s', - action, args['action_args']) - - # long timeout for action execution because actions can take a while - if not retry_rpc: - # Note(thread-safety): blocking call - #     Only when thread pool at capacity - eventlet.spawn_n(execute_once) - eventlet.sleep(0) - else: - # Note(thread-safety): blocking call - #     Only when thread pool at capacity - eventlet.spawn_n(execute_retry) - eventlet.sleep(0) - - def service_exists(self, service_name): - return self.is_valid_service(service_name) - - def receive_data(self, publisher, table, data, is_snapshot=False): - """Event handler for when a dataservice publishes data. - - That data can either be the full table (as a list of tuples) - or a delta (a list of Events). - """ - LOG.debug("received data msg for %s:%s", publisher, table) - if not is_snapshot: - to_add = data[0] - to_del = data[1] - result = [] - for row in to_del: - formula = compile.Literal.create_from_table_tuple(table, row) - event = compile.Event(formula=formula, insert=False) - result.append(event) - for row in to_add: - formula = compile.Literal.create_from_table_tuple(table, row) - event = compile.Event(formula=formula, insert=True) - result.append(event) - self.receive_data_update(publisher, table, result) - return - - # if empty data, assume it is an init msg, since noop otherwise - if len(data) == 0: - self.receive_data_full(publisher, table, data) - else: - # grab an item from any iterable - dataelem = next(iter(data)) - if isinstance(dataelem, compile.Event): - self.receive_data_update(publisher, table, data) - else: - self.receive_data_full(publisher, table, data) - - def receive_data_full(self, publisher, table, data): - """Handler for when dataservice publishes full table.""" - LOG.debug("received full data msg for %s:%s. %s", - publisher, table, utility.iterstr(data)) - # Use a generator to avoid instantiating all these Facts at once.
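- # (Illustrative note) the generator below yields one Fact at a time - # as initialize_tables consumes it; the eager equivalent - #     facts = [compile.Fact(table, row) for row in data] - # would materialize every Fact in memory simultaneously.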
- facts = (compile.Fact(table, row) for row in data) - self.initialize_tables([table], facts, target=publisher) - - def receive_data_update(self, publisher, table, data): - """Handler for when dataservice publishes a delta.""" - LOG.debug("received update data msg for %s:%s: %s", - publisher, table, utility.iterstr(data)) - events = data - for event in events: - assert compile.is_atom(event.formula), ( - "receive_data_update received non-atom: " + - str(event.formula)) - # prefix tablename with data source - event.target = publisher - (permitted, changes) = self.update(events) - if not permitted: - raise exception.CongressException( - "Update not permitted." + '\n'.join(str(x) for x in changes)) - else: - LOG.debug("update data msg for %s from %s caused %d " - "changes: %s", table, publisher, len(changes), - utility.iterstr(changes)) - if table in self.theory[publisher].tablenames(): - rows = self.theory[publisher].content([table]) - LOG.debug("current table: %s", utility.iterstr(rows)) - - def on_first_subs(self, tables): - """Handler for policy table subscriptions. - - When a previously unsubscribed table gains a subscriber, register - a trigger for the table and publish table results when there are - updates. - """ - for table in tables: - (policy, tablename) = compile.Tablename.parse_service_table( - table) - # we only care about policy table subscriptions; skip the rest - if policy is None: - continue - - if (tablename, policy, None) not in self.policySubData: - trig = self.trigger_registry.register_table( - tablename, - policy, - self.pub_policy_result) - self.policySubData[ - (tablename, policy, None)] = PolicySubData(trig) - - def on_no_subs(self, tables): - """Remove triggers when tables have no subscribers.""" - for table in tables: - (policy, tablename) = compile.Tablename.parse_service_table(table) - if (tablename, policy, None) in self.policySubData: - # release resource if no one cares about it any more - sub = self.policySubData.pop((tablename, policy, None)) - self.trigger_registry.unregister(sub.trigger()) - return True - - def set_schema(self, name, schema, complete=False): - old_tables = self.tablenames(body_only=True) - super(DseRuntime, self).set_schema(name, schema, complete) - new_tables = self.tablenames(body_only=True) - self.update_table_subscriptions(old_tables, new_tables) - - -class DseRuntimeEndpoints(object): - """RPC endpoints exposed by DseRuntime.""" - - def __init__(self, dse): - self.dse = dse - - # Note(thread-safety): blocking function - def persistent_create_policy(self, context, name=None, id_=None, - abbr=None, kind=None, desc=None): - # Note(thread-safety): blocking call - return self.dse.persistent_create_policy(name, id_, abbr, kind, desc) - - # Note(thread-safety): blocking function - def persistent_create_policy_with_rules(self, context, policy_rules_obj): - # Note(thread-safety): blocking call - return self.dse.persistent_create_policy_with_rules(policy_rules_obj) - - # Note(thread-safety): blocking function - def persistent_delete_policy(self, context, name_or_id): - # Note(thread-safety): blocking call - return self.dse.persistent_delete_policy(name_or_id) - - # Note(thread-safety): blocking function - def persistent_get_policies(self, context): - # Note(thread-safety): blocking call - return self.dse.persistent_get_policies() - - # Note(thread-safety): blocking function - def persistent_get_policy(self, context, id_): - # Note(thread-safety): blocking call - return self.dse.persistent_get_policy(id_) - - # Note(thread-safety): blocking function - def 
persistent_get_rule(self, context, id_, policy_name): - # Note(thread-safety): blocking call - return self.dse.persistent_get_rule(id_, policy_name) - - # Note(thread-safety): blocking function - def persistent_get_rules(self, context, policy_name): - # Note(thread-safety): blocking call - return self.dse.persistent_get_rules(policy_name) - - # Note(thread-safety): blocking function - def persistent_insert_rule(self, context, policy_name, str_rule, rule_name, - comment): - # Note(thread-safety): blocking call - return self.dse.persistent_insert_rule( - policy_name, str_rule, rule_name, comment) - - # Note(thread-safety): blocking function - def persistent_delete_rule(self, context, id_, policy_name_or_id): - # Note(thread-safety): blocking call - return self.dse.persistent_delete_rule(id_, policy_name_or_id) - - # Note(thread-safety): blocking function - def persistent_load_policies(self, context): - # Note(thread-safety): blocking call - return self.dse.persistent_load_policies() - - def simulate(self, context, query, theory, sequence, action_theory, - delta=False, trace=False, as_list=False): - return self.dse.simulate(query, theory, sequence, action_theory, - delta, trace, as_list) - - def get_tablename(self, context, source_id, table_id): - return self.dse.get_tablename(source_id, table_id) - - def get_tablenames(self, context, source_id): - return self.dse.get_tablenames(source_id) - - def get_status(self, context, source_id, params): - return self.dse.get_status(source_id, params) - - def get_row_data(self, context, table_id, source_id, trace=False): - return self.dse.get_row_data(table_id, source_id, trace) - - # Note(thread-safety): blocking function - def execute_action(self, context, service_name, action, action_args): - # Note(thread-safety): blocking call - return self.dse.execute_action(service_name, action, action_args) - - def delete_policy(self, context, name, disallow_dangling_refs=False): - return self.dse.delete_policy(name, disallow_dangling_refs) - - def initialize_datasource(self, context, name, schema): - return self.dse.initialize_datasource(name, schema) - - def synchronize_policies(self, context): - return self.dse.synchronizer.synchronize_all_policies() - - def sync_one_policy(self, context, policy_name): - return self.dse.synchronizer.sync_one_policy(policy_name) diff --git a/congress/policy_engines/base_driver.py b/congress/policy_engines/base_driver.py deleted file mode 100644 index 6d472d0b..00000000 --- a/congress/policy_engines/base_driver.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.dse import deepsix - - -class PolicyEngineDriver(deepsix.deepSix): - """Driver for policy engines, analogous to DataSourceDriver.""" - - def set_policy(self, policy): - """Set the policy of this policy engine. 
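- - Illustrative call shape (the rule shown is an invented example): - - engine.set_policy('error(vm) :- nova:server(vm, name, host), ' - 'forbidden_host(host)') - 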
POLICY is a datalog string.""" - return NotImplementedError - - def supported_language(self): - """Return description of the language supported by this engine. - - A description is a list of permitted tables and a list of forbidden - tables. Eventually we may broaden the description. - """ - return NotImplementedError diff --git a/congress/policy_engines/vm_placement.py b/congress/policy_engines/vm_placement.py deleted file mode 100644 index 814ac334..00000000 --- a/congress/policy_engines/vm_placement.py +++ /dev/null @@ -1,675 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import subprocess -import time - -from oslo_log import log as logging -import pulp - -from congress.datalog import arithmetic_solvers -from congress.datalog import base -from congress.datalog import compile -from congress.datalog import nonrecursive -from congress import exception -from congress.policy_engines import base_driver - -LOG = logging.getLogger(__name__) - - -def d6service(name, keys, inbox, datapath, args): - """This method is called by d6cage to create a dataservice instance.""" - return ComputePlacementEngine(name, keys, inbox, datapath, args) - - -# TODO(thinrichs): Figure out what to move to the base class PolicyEngineDriver -# Could also pull out the Datalog-to-LP conversion, potentially. -class ComputePlacementEngine(base_driver.PolicyEngineDriver): - def __init__(self, name='', keys='', inbox=None, datapath=None, args=None): - super(ComputePlacementEngine, self).__init__( - name, keys, inbox, datapath) - self.policy = nonrecursive.MultiModuleNonrecursiveRuleTheory(name=name) - self.initialized = True - self.guest_host_assignment = {} - self.lplang = arithmetic_solvers.PulpLpLang() - self.vm_migrator = VmMigrator() - - ########################### - # Policy engine interface - - def insert(self, formula): - return self.policy.insert(self.parse1(formula)) - - def delete(self, formula): - return self.policy.delete(self.parse1(formula)) - - def select(self, query): - ans = self.policy.select(self.parse1(query)) - return " ".join(str(x) for x in ans) - - def set_policy(self, policy): - LOG.info("%s:: setting policy to %s", str(self.name), str(policy)) - # empty out current policy - external = [compile.Tablename.build_service_table(service, name) - for service, name in self._current_external_tables()] - self.policy.empty(tablenames=external, invert=True) - - # insert new policy and subscribe to the tablenames referencing a - # datasource driver - for rule in self.parse(policy): - self.policy.insert(rule) - LOG.info("new policy: %s", self.policy.content_string()) - - # initialize table subscriptions - self.initialize_table_subscriptions() - - # enforce policy - self.enforce_policy() - - def initialize_table_subscriptions(self): - """Initialize table subscription. 
- - Once policies have all been loaded, this function subscribes to - all the necessary tables. See UPDATE_TABLE_SUBSCRIPTIONS as well. - """ - tablenames = self.policy.tablenames() - tablenames = [compile.Tablename.parse_service_table(table) - for table in tablenames] - tablenames = [(service, name) for (service, name) in tablenames - if service is not None] - self._set_subscriptions(tablenames) - - def _set_subscriptions(self, tablenames): - """Update subscriptions on DSE to be exactly @tablenames.""" - subscriptions = set(self._current_external_tables()) - tablenames = set(tablenames) - toadd = tablenames - subscriptions - torem = subscriptions - tablenames - for service, tablename in toadd: - if service is not None: - LOG.info("%s:: subscribing to (%s, %s)", - self.name, service, tablename) - self.subscribe(service, tablename, - callback=self.receive_data) - - for service, tablename in torem: - if service is not None: - LOG.info("%s:: unsubscribing from (%s, %s)", - self.name, service, tablename) - self.unsubscribe(service, tablename) - relevant_tables = [compile.Tablename.build_service_table( - service, tablename)] - self.policy.empty(relevant_tables) - - def _current_external_tables(self): - """Return list of tables engine is currently subscribed to.""" - return [(value.key, value.dataindex) - for value in self.subdata.values()] - - ################################################################ - # Receiving data published on the DSE by other services - # For PoC, assuming all data already present and no pubs. - # So we're ignoring this for now. - - def receive_data(self, msg): - """Event handler for when a dataservice publishes data. - - That data can either be the full table (as a list of tuples) - or a delta (a list of Events). - """ - LOG.info("%s:: received data msg %s", self.name, msg) - # if empty data, assume it is an init msg, since noop otherwise - if len(msg.body.data) == 0: - self.receive_data_full(msg) - else: - # grab an item from any iterable - dataelem = next(iter(msg.body.data)) - if isinstance(dataelem, compile.Event): - self.receive_data_update(msg) - else: - self.receive_data_full(msg) - self.enforce_policy() - - def receive_data_full(self, msg): - """Handler for when dataservice publishes full table.""" - LOG.info("%s:: received full data msg for %s: %s", - self.name, msg.header['dataindex'], - ";".join(str(x) for x in msg.body.data)) - tablename = compile.Tablename.build_service_table( - msg.replyTo, msg.header['dataindex']) - - # Use a generator to avoid instantiating all these Facts at once. 
- # Don't print out 'literals' since that will eat the generator - literals = (compile.Fact(tablename, row) for row in msg.body.data) - - LOG.info("%s:: begin initialize_tables %s", self.name, tablename) - self.policy.initialize_tables([tablename], literals) - LOG.info("%s:: end initialize data msg for %s", self.name, tablename) - # self.select() already returns a single space-joined string - select = self.select('p(x)') - LOG.info("%s:: select('p(x)'): %s ENDED", self.name, select) - - def receive_data_update(self, msg): - """Handler for when dataservice publishes a delta.""" - LOG.info("%s:: received update data msg for %s: %s", - self.name, msg.header['dataindex'], - ";".join(str(x) for x in msg.body.data)) - new_events = [] - for event in msg.body.data: - assert compile.is_atom(event.formula), ( - "receive_data_update received non-atom: " + - str(event.formula)) - # prefix tablename with data source - actual_table = compile.Tablename.build_service_table( - msg.replyTo, event.formula.table.table) - values = [term.name for term in event.formula.arguments] - newevent = compile.Event(compile.Fact(actual_table, values), - insert=event.insert) - new_events.append(newevent) - (permitted, changes) = self.policy.update(new_events) - if not permitted: - raise exception.CongressException( - "Update not permitted." + '\n'.join(str(x) for x in changes)) - else: - tablename = msg.header['dataindex'] - service = msg.replyTo - LOG.debug("update data msg for %s from %s caused %d " - "changes: %s", tablename, service, len(changes), - ";".join(str(x) for x in changes)) - - ####################################### - # Policy enforcement - - def enforce_policy(self): - """Enforce policy by migrating VMs to minimize warnings. - - Raises LpProblemUnsolvable if the LP solver cannot solve the - given problem. - - Raises LpConversionFailure if self.policy cannot be converted - into an LP problem. - """ - LOG.info("Enforcing policy") - ans = self.policy.select(self.parse1('warning(x)'), True) - if len(ans) == 0: - return - # grab assignment - g_h_assignment = self.calculate_vm_assignment() - self.guest_host_assignment = dict(g_h_assignment) - # migrate - for guest in g_h_assignment: - g_h_assignment[guest] = [g_h_assignment[guest], 0] - self.vm_migrator.do_migrations(g_h_assignment) - - def calculate_vm_assignment(self): - """Calculate where VMs should be located in order to minimize warnings. - - Returns a dictionary from guest ID to host ID where that guest should - be located. - - Raises LpProblemUnsolvable if the LP solver cannot solve the - given problem. - - Raises LpConversionFailure if self.policy cannot be converted - into an LP problem.
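- - Illustrative return value (invented IDs): {'guest1': 'host2', - 'guest2': 'host1'} means guest1 should be placed on host2 and - guest2 on host1.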
- """ - - g_h_assignment = {} - LOG.info("* Calculating VM assignment for Datalog policy: *") - LOG.info(self.policy.content_string()) - migproblem, value_mapping = self.policy_to_lp_problem() - LOG.info("* Converted to PuLP program: *") - LOG.info("problem: %s", migproblem) - migproblem.solve() - LOG.info("problem status: %s", migproblem.status) - if pulp.LpStatus[migproblem.status] == 'Optimal': - LOG.info("value-mapping: %s", value_mapping) - for var in migproblem.variables(): - LOG.info("var: %s = %s", var.name, var.varValue) - if var.name.startswith('assign'): - g, h = var.name.lstrip('assign').lstrip('_').split('_') - g = value_mapping.get(int(g), g) - h = value_mapping.get(int(h), h) - LOG.info("guest %s, host %s has value %s", - g, h, var.varValue) - if var.varValue == 1.0: - # add correct old host - g_h_assignment[g] = h - - return g_h_assignment - raise LpProblemUnsolvable(str(migproblem)) - - ####################################### - # Toplevel conversion of Datalog to LP - - # mapping Datalog tables to LP decision variables - - def policy_to_lp_problem(self): - """Return an LP problem representing the state of this engine. - - Returns an instance of self.lplang.problem representing the policy - and the current data of this engine. - """ - opt, hard = self.policy_to_lp() - LOG.info("* Converted Datalog policy to DatalogLP *") - LOG.info("optimization:\n%s", opt) - LOG.info("constraints:\n%s", "\n".join(str(x) for x in hard)) - bounds = {} - for exp in hard: - self.set_bounds(exp, bounds) - return self.lplang.problem(opt, hard, bounds) - - def policy_to_lp(self): - """Transform self.policy into a (non-)linear programming problem. - - Returns (, ) where - each are represented using expressions constructed by self.lplang. - """ - # soft constraints. optimization criteria: minimize number of warnings - # LOG.info("* Converting warning(x) to DatalogLP *") - wquery = self.parse1('warning(x)') - warnings, wvars = self.datalog_to_lp(wquery, []) - opt = self.lplang.makeOr(*wvars) - # hard constraints. all must be false - # LOG.info("* Converting error(x) to DatalogLP *") - equery = self.parse1('error(x)') - errors, evars = self.datalog_to_lp(equery, []) - hard = [self.lplang.makeNotEqual(var, 1) for var in evars] - # domain-specific axioms, e.g. sum of guest memory util = host mem util - # LOG.info("* Constructing domain-specific axioms *") - axioms = self.domain_axioms() - return opt, warnings + errors + hard + axioms - - def set_bounds(self, expr, bounds): - """Find upper bounds on all variables occurring in expr. - - :param expr is a LpLang.Expression - :param bounds is a dictionary mapping an Expression's tuple() to a - number. - - Modifies bounds to include values for all variables occurring inside - expr. - """ - # LOG.info("set_bounds(%s)", expr) - variables = self.lplang.variables(expr) - for var in variables: - tup = var.tuple() - if tup not in bounds: - bounds[tup] = 10 - - ########################## - # Domain-specific axioms - - def domain_axioms(self): - """Return a list of all the domain-specific axioms as strings. - - Axioms define relationships between LP decision variables that we - would not expect the user to write. - """ - # TODO(thinrichs): just defining relationship between mem-usage for - # guests and hosts. Add rest of axioms. 
- hosts = self.get_hosts() - guests = self.get_guests() - memusage = self.get_memusage() - - memusage_ax = self._domain_axiom_memusage(hosts, guests, memusage) - assign_ax = self._domain_axiom_assignment(hosts, guests) - return memusage_ax + assign_ax - - def _domain_axiom_assignment(self, hosts, guests): - """Return axioms for assignment variables. - - :param hosts is the list of host IDs - :param guests is the list of guest IDs - - assign[h1,g] + ... + assign[hn, g] = 1 - """ - axioms = [] - for g in guests: - hostvars = [self._construct_assign(h, g) for h in hosts] - axioms.append(self.lplang.makeEqual( - 1, self.lplang.makeArith('plus', *hostvars))) - return axioms - - def _construct_assign(self, host, guest): - return self.lplang.makeBoolVariable('assign', guest, host) - - def _domain_axiom_memusage(self, hosts, guests, memusage): - """Return a list of LP axioms defining guest/host mem-usage. - - :param hosts is the list of host IDs - :param guests is the list of guest IDs - - Axiom: sum of all guest mem-usage for those guests deployed on a host - gives the mem-usage for that host: - - hMemUse[h] = assign[1][h]*gMemUse[1] + ... + assign[G][h]*gMemUse[G]. - - Returns a list of LpLang expressions. - Raises NotEnoughData if it does not have guest memory usage. - """ - axioms = [] - - for h in hosts: - guest_terms = [] - for guest in guests: - if guest not in memusage: - raise NotEnoughData( - "could not find guest mem usage: %s" % guest) - guest_terms.append( - self.lplang.makeArith( - 'times', - self._construct_assign(h, guest), - memusage[guest])) - axioms.append( - self.lplang.makeEqual( - self.lplang.makeIntVariable('hMemUse', h), - self.lplang.makeArith('plus', *guest_terms))) - return axioms - - def get_hosts(self): - query = self.parse1('nova:host(id, zone, memory_capacity)') - host_rows = self.policy.select(query) - return set([lit.arguments[0].name for lit in host_rows]) - - def get_guests(self): - query = self.parse1('nova:server(id, name, host)') - guest_rows = self.policy.select(query) - return set([lit.arguments[0].name for lit in guest_rows]) - - def get_memusage(self): - query = self.parse1('ceilometer:mem_consumption(id, mem)') - rows = self.policy.select(query) - return {lit.arguments[0].name: lit.arguments[1].name - for lit in rows} - - ######################### - # Convert datalog to LP - - unknowns = ['ceilometer:mem_consumption'] - rewrites = ['ceilometer:mem_consumption(x, y) :- ' - 'var("hMemUse", x), output(y)'] - - def datalog_to_lp(self, query, unknown_table_possibilities): - """Convert rules defining QUERY in self.policy into a linear program. - - @unknowns is the list of tablenames that should become - decision variables. @unknown_table_possibilities is the list - of all possible instances of the decision variable tables. - """ - # TODO(thinrichs): figure out if/when negation is handled properly - - # a list of rules, each of which has an instance of QUERY in the head - # and whose bodies are drawn from unknowns. 
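- # (Illustrative) abducing warning(x) in terms of the unknown table - # ceilometer:mem_consumption might yield a rule shaped like - #     warning(h) :- ceilometer:mem_consumption(h, m) - # which _to_lp then converts into equations over LP variables.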
- rules = self.policy.abduce(query, self.unknowns) - # LOG.info("interpolates:\n%s", "\n".join(str(x) for x in rules)) - if len(unknown_table_possibilities): - rules = self.policy.instances(query, unknown_table_possibilities) - # LOG.info("instances:\n%s", "\n".join(str(x) for x in rules)) - equalities, variables = self._to_lp(rules) - # LOG.info("LP rules: \n%s", "\n".join(str(x) for x in equalities)) - # LOG.info("LP variables: %s", ", ".join(str(x) for x in variables)) - return equalities, variables - - def _to_lp(self, rules): - """Compute an LP program equivalent to the given Datalog rules. - - :param rules: a list of Rule instances, all of which are ground - except for variables representing LP variables - """ - # TODO(thinrichs): need type analysis to ensure we differentiate - # hosts from guests within ceilometer:mem_consumption - act = nonrecursive.MultiModuleNonrecursiveRuleTheory() - for var_rewrite_rule in self.rewrites: - changes = act.insert(self.parse1(var_rewrite_rule)) - assert(changes) - LOG.debug("action theory: %s", act.content_string()) - act.set_tracer(self.policy.tracer) - definitions = {} - for rule in rules: - equalities, newrule = self._extract_lp_variable_equalities( - rule, act) - LOG.debug("equalities: %s", equalities) - LOG.debug("newrule: %s", newrule) - LOG.debug("newrule.body: %s", str(newrule.body)) - head = self._lit_to_lp_variable(newrule.head) - LOG.debug("head: %s", str(head)) - LOG.debug("newrule.body: %s", newrule.body) - body = [] - for lit in newrule.body: - LOG.debug("processing %s", lit) - body.append(self._lit_to_lp_arithmetic(lit, equalities)) - LOG.debug("new body: %s", ";".join(str(x) for x in body)) - conjunction = self.lplang.makeAnd(*body) - LOG.debug("conjunct: %s", conjunction) - if head not in definitions: - definitions[head] = set([conjunction]) - else: - definitions[head].add(conjunction) - - equalities = [self.lplang.makeEqual(h, self.lplang.makeOr(*bodies)) - for h, bodies in definitions.items()] - return equalities, definitions.keys() - - def _extract_lp_variable_equalities(self, rule, rewrite_theory): - """Extract values for LP variables and slightly modify rule. - - :param rule: an instance of Rule - :param rewrite_theory: reference to a theory that contains rules - describing how tables correspond to LP variable inputs and - outputs. - - Returns (i) dictionary mapping Datalog variable name (a string) to - the set of LP variables to which it is equal and (ii) a rewriting - of the rule that is the same as the original except some - elements have been removed from the body. - """ - newbody = [] - varnames = {} - for lit in rule.body: - result = self._extract_lp_variable_equality_lit( - lit, rewrite_theory) - if result is None: - newbody.append(lit) - else: - datalogvar, lpvar = result - if datalogvar not in varnames: - varnames[datalogvar] = set([lpvar]) - else: - varnames[datalogvar].add(lpvar) - return varnames, compile.Rule(rule.head, newbody) - - def _extract_lp_variable_equality_lit(self, lit, rewrite_theory): - """Identify datalog variable representing an LP-variable. - - :param lit: an instance of Literal - :param rewrite_theory: reference to a theory that contains rules - describing how tables correspond to LP variable inputs and - outputs. - Returns None, signifying literal does not include any datalog - variable that maps to an LP variable, or (datalogvar, lpvar). 
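- - Illustrative example: with the rewrite rule for - ceilometer:mem_consumption(x, y) given in self.rewrites, the literal - ceilometer:mem_consumption(h, m) abduces to var("hMemUse", h) and - output(m), so the method returns ('m', hMemUse[h]).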
- """ - if lit.is_builtin(): - return - # LOG.info("_extract_lp_var_eq_lit %s", lit) - rewrites = rewrite_theory.abduce(lit, ['var', 'output']) - # LOG.info("lit rewriting: %s", ";".join(str(x) for x in rewrites)) - if not rewrites: - return - assert(len(rewrites) == 1) - varlit = next(lit for lit in rewrites[0].body - if lit.table.table == 'var') - # LOG.info("varlit: %s", varlit) - lpvar = self._varlit_to_lp_variable(varlit) - outlit = next(lit for lit in rewrites[0].body - if lit.table.table == 'output') - outvar = outlit.arguments[0].name - # LOG.info("lpvar: %s; outvar: %s", lpvar, outvar) - return outvar, lpvar - - def _lit_to_lp_arithmetic(self, lit, varnames): - """Translates Datalog literal into an LP arithmetic statement. - - :param lit is a Literal instance and may include Datalog variables - :param varnames is a dictionary from datalog variables to a set of - LP variables - - Returns an LP arithmetic statement. - - Raises LpConversion if one of the Datalog variables appearing in - lit has other than 1 value in varnames. - Raises LpException if the arithmetic operator is not supported. - """ - # TODO(thinrichs) translate to infix and use standard operators - newargs = [self._term_to_lp_term(arg, varnames) - for arg in lit.arguments] - return self.lplang.makeArith(lit.tablename(), *newargs) - - def _lit_to_lp_variable(self, lit): - """Translates ground Datalog literal into an LP variable. - - :param lit is a Literal instance without variables - Returns an LP variable. - Raises LpConversionFailure if lit includes any Datalog variables. - """ - if any(arg.is_variable() for arg in lit.arguments): - raise self.lplang.LpConversionFailure( - "Tried to convert literal %s into LP variable but " - "found a Datalog variable" % lit) - args = [arg.name for arg in lit.arguments] - return self.lplang.makeVariable(lit.table.table, *args, type='bool') - - def _term_to_lp_term(self, term, varnames): - """Translates Datalog term into an LP variable or a constant. - - :param term is an instance of Term - :param varnames is a dictionary from varname to a set of LP variables - - Returns an LP variable, a number, or a string. - - Raises LpConversionFailure if Datalog variable appears without a - corresponding LP variable or if multiple LP variables for a given - Datalog variable. (The latter condition could probably be handled - without raising an error, but this is good for now.) 
- """ - if term.is_variable(): - if term.name not in varnames: - raise self.lplang.LpConversionFailure( - "Residual variable not assigned a value: %s" % term.name) - if len(varnames[term.name]) > 1: - raise self.lplang.LpConversionFailure( - "Variable name assigned to 2 different values: " - "%s assigned %s" % (term.name, varnames[term.name])) - return next(iter(varnames[term.name])) - return term.name - - def _varlit_to_lp_variable(self, lit): - args = [x.name for x in lit.arguments[1:]] - return self.lplang.makeVariable(lit.arguments[0].name, *args) - - ################# - # Miscellaneous - - def debug_mode(self): - tracer = base.Tracer() - tracer.trace('*') - self.policy.set_tracer(tracer) - - def production_mode(self): - tracer = base.Tracer() - self.policy.set_tracer(tracer) - - def parse(self, policy): - return compile.parse(policy, use_modules=False) - - def parse1(self, policy): - return compile.parse1(policy, use_modules=False) - - -class NotEnoughData(exception.CongressException): - pass - - -class LpProblemUnsolvable(exception.CongressException): - pass - - -class VmMigrator(object): - """Code for migrating VMs once we have a LP problem solution.""" - @classmethod - def migrate(cls, guest, host): - try: - call = ["nova", "live-migration", str(guest), str(host)] - LOG.info("migrating: %s", call) - ret = subprocess.check_output(call, stderr=subprocess.STDOUT) - if ret == 0: - return True - except Exception: - pass - - @classmethod - def check_status(cls, guest, host, status): - g = subprocess.check_output(["nova", "list"]) - g = g.replace("-", "").replace("+", "").lstrip("[").rstrip("]") - elems = g.split('\n') - for elem in elems: - e = elem.split("|") - el = [x.strip() for x in e] - try: - if status == el[2]: - return True - except Exception: - pass - - @classmethod - def do_migration(cls, guest, newh, oldh): - if (newh == oldh): - return True - try: - done = cls.migrate(guest, newh) - if done: - for i in range(3): - if cls.check_migrate(guest, newh, "ACTIVE"): - return True - else: - time.sleep(2) - except Exception: - pass - return False - - # status: -1 if migration done - @classmethod - def getnext(cls, mapping, status): - hi = max(status.values()) - if hi > 0: - i = list(status.values()).index(hi) - return list(status.keys())[i] - - @classmethod - def do_migrations(cls, g_h_mapping): - max_attempts = 10 - guest_mig_status = dict.fromkeys(g_h_mapping.keys(), max_attempts) - g = cls.getnext(g_h_mapping, guest_mig_status) - while g: - newh, oldh = g_h_mapping[g] - if cls.do_migration(g, newh, oldh): - guest_mig_status[g] = -1 - else: - guest_mig_status[g] -= 1 - g = cls.getnext(g_h_mapping, guest_mig_status) - return guest_mig_status diff --git a/congress/server/__init__.py b/congress/server/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/server/congress_server.py b/congress/server/congress_server.py deleted file mode 100644 index ec2f1d6e..00000000 --- a/congress/server/congress_server.py +++ /dev/null @@ -1,176 +0,0 @@ -#! /usr/bin/python -# -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -import socket -import sys - -import eventlet -eventlet.monkey_patch() -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import service - -from congress.common import config -from congress.db import api as db_api -# FIXME It has to initialize distributed_architecture flag basing on the -# config file before the python interpreter imports python file which has -# if-statement for deepsix. Since the default value of the flag is False -# in current implementation, so it will import dse.deepsix as deepsix -# even if you set it to True in congress.conf. -# After changing the default to True, remove following one line. -# This appears in main() too. Removing either instance breaks something. -config.init(sys.argv[1:]) -from congress.common import eventlet_server - -from congress import harness - -LOG = logging.getLogger(__name__) - - -class ServerWrapper(object): - """Wraps an eventlet_server with some launching info & capabilities.""" - - def __init__(self, server, workers): - self.server = server - self.workers = workers - - def launch_with(self, launcher): - if hasattr(self.server, 'listen'): - self.server.listen() - if self.workers > 1: - # Use multi-process launcher - launcher.launch_service(self.server, self.workers) - else: - # Use single process launcher - launcher.launch_service(self.server) - - -def serve(*servers): - if max([server[1].workers for server in servers]) > 1: - # TODO(arosen) - need to provide way to communicate with DSE services - launcher = service.ProcessLauncher(cfg.CONF) - else: - launcher = service.ServiceLauncher(cfg.CONF) - - for name, server in servers: - try: - server.launch_with(launcher) - except socket.error: - LOG.exception(_('Failed to start the %s server'), name) - raise - - try: - launcher.wait() - except KeyboardInterrupt: - LOG.info("Congress server stopped by interrupt.") - - -def create_api_server(conf_path, node_id, host, port, workers, policy_engine, - datasources): - congress_api_server = eventlet_server.APIServer( - conf_path, node_id, host=host, port=port, - keepalive=cfg.CONF.tcp_keepalive, - keepidle=cfg.CONF.tcp_keepidle, - policy_engine=policy_engine, - api=True, - datasources=datasources, - bus_id=cfg.CONF.dse.bus_id) - # TODO(thinrichs): there's some sort of magic happening for the api - # server. We call eventlet_server, which on start() calls - # service.congress_app_factory, which uses harness to create the - # API service, which the magic seems to need to do the right thing. - # That's why we're not just calling harness.create2 here; instead, - # it's buried inside the congress_app_factory. 
- return node_id, ServerWrapper(congress_api_server, workers) - - def create_nonapi_server(node_id, policy_engine, datasources, workers): - congress_server = eventlet_server.Server( - node_id, bus_id=cfg.CONF.dse.bus_id) - harness.create2(existing_node=congress_server.node, api=False, - policy_engine=policy_engine, - datasources=datasources) - return node_id, ServerWrapper(congress_server, workers) - - def launch_servers(node_id, api, policy, data): - servers = [] - if api: - LOG.info("Starting congress API server on port %d", cfg.CONF.bind_port) - # API resource runtime encapsulation: - #   event loop -> wsgi server -> webapp -> resource manager - paste_config = config.find_paste_config() - config.set_config_defaults() - servers.append(create_api_server(paste_config, - node_id, - cfg.CONF.bind_host, - cfg.CONF.bind_port, - cfg.CONF.api_workers, - policy_engine=policy, - datasources=data)) - else: - LOG.info("Starting congress server on node %s", node_id) - servers.append(create_nonapi_server(node_id, policy, data, - cfg.CONF.api_workers)) - - return servers - - def main(): - args = sys.argv[1:] - - # TODO(thinrichs): find the right way to do deployment configuration. - # For some reason we need to config.init(args) in 2 places; here and - # at the top of the file.  Remove either one, and things break. - # Note: config.init() will delete the deploy args, so grab them before. - config.init(args) - if not cfg.CONF.config_file: - sys.exit("ERROR: Unable to find configuration file via default " - "search paths (~/.congress/, ~/, /etc/congress/, /etc/) and " - "the '--config-file' option!") - if cfg.CONF.replicated_policy_engine and not ( - db_api.is_mysql() or db_api.is_postgres()): - if db_api.is_sqlite(): - LOG.warning("Deploying replicated policy engine with SQLite " - "backend is not officially supported. Unexpected " - "behavior may occur. Officially supported backends " - "are MySQL and PostgreSQL.") - else: - sys.exit("ERROR: replicated_policy_engine option can be used only " - "with MySQL or PostgreSQL database backends. Please set " - "the connection option in [database] section of " - "congress.conf to use a supported backend.") - config.setup_logging() - - if not (cfg.CONF.api or cfg.CONF.policy_engine or cfg.CONF.datasources): - # No flags provided, start all services - cfg.CONF.api = True - cfg.CONF.policy_engine = True - cfg.CONF.datasources = True - - # Construct requested deployment - servers = launch_servers(cfg.CONF.node_id, cfg.CONF.api, - cfg.CONF.policy_engine, cfg.CONF.datasources) - - serve(*servers) - - if __name__ == '__main__': - main() diff --git a/congress/service.py b/congress/service.py deleted file mode 100644 index 0ca24963..00000000 --- a/congress/service.py +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- - from __future__ import print_function - from __future__ import division - from __future__ import absolute_import - - import functools - import json - import sys - - from oslo_log import log as logging - - from congress.api import application - from congress import harness - - LOG = logging.getLogger(__name__) - - def fail_gracefully(f): - """Logs exceptions and aborts.""" - @functools.wraps(f) - def wrapper(*args, **kw): - try: - return f(*args, **kw) - except Exception: - LOG.exception("Fatal Exception:") - sys.exit(1) - - return wrapper - - @fail_gracefully - def congress_app_factory(global_conf, **local_conf): - # global_conf only accepts an iterable value as its dict value - flags_dict = json.loads(global_conf['flags']) - services = harness.create2( - node_id=global_conf['node_id'], - bus_id=global_conf['bus_id'], - policy_engine=flags_dict['policy_engine'], - api=flags_dict['api'], - datasources=flags_dict['datasources']) - return application.ApiApplication(services['api_service']) diff --git a/congress/synchronizer/__init__.py b/congress/synchronizer/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/synchronizer/datasource_synchronizer.py b/congress/synchronizer/datasource_synchronizer.py deleted file mode 100644 index 101def59..00000000 --- a/congress/synchronizer/datasource_synchronizer.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) 2016 NEC Corp. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
-# - -import eventlet -from futurist import periodics -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging - -from congress.db import datasources - -LOG = logging.getLogger(__name__) - -SYNCHRONIZER_SERVICE_ID = '_datasource_synchronizer' - - -class DatasourceSynchronizer(object): - - def __init__(self, node): - self.name = SYNCHRONIZER_SERVICE_ID - self.sync_thread = None - self.periodic_tasks = None - self.node = node - - def start(self): - callables = [(self.synchronize_all_datasources, None, {}), - (self._check_resub_all, None, {})] - self.periodic_tasks = periodics.PeriodicWorker(callables) - self.sync_thread = eventlet.spawn_n(self.periodic_tasks.start) - LOG.info("started datasource synchronizer on node %s", - self.node.node_id) - - def stop(self): - if self.periodic_tasks: - self.periodic_tasks.stop() - self.periodic_tasks.wait() - self.periodic_tasks = None - if self.sync_thread: - eventlet.greenthread.kill(self.sync_thread) - self.sync_thread = None - - @periodics.periodic(spacing=cfg.CONF.dse.time_to_resub) - def _check_resub_all(self): - LOG.debug("Running periodic resub on node %s", self.node.node_id) - for s in self.node.get_services(True): - s.check_resub_all() - - @lockutils.synchronized('congress_synchronize_datasources') - def sync_datasource(self, ds_name): - if not cfg.CONF.datasources: - LOG.info("sync not supported on non-datasource node") - return - datasource = datasources.get_datasource_by_name(ds_name) - service_obj = self.node.service_object(ds_name) - - if datasource and not service_obj: - # register service with data node - service = self.node.create_datasource_service(datasource) - self.node.register_service(service) - LOG.debug("service %s registered by synchronizer", ds_name) - return - if service_obj and datasource is None: - # unregister, datasource not present in DB - self.node.unregister_service(ds_name) - LOG.debug("service %s unregistered by synchronizer", ds_name) - return - - @lockutils.synchronized('congress_synchronize_datasources') - @periodics.periodic(spacing=cfg.CONF.datasource_sync_period) - def synchronize_all_datasources(self): - LOG.debug("synchronizing datasources on node %s", self.node.node_id) - added = 0 - removed = 0 - datasources = self.node.get_datasources(filter_secret=False) - db_datasources = [] - # Look for datasources in the db, but not in the services. 
- for configured_ds in datasources: - db_datasources.append(configured_ds['id']) - active_ds = self.node.service_object(uuid_=configured_ds['id']) - # If datasource is not enabled, unregister the service - if not configured_ds['enabled']: - if active_ds: - LOG.debug("unregistering %s service, datasource disabled " - "in DB.", active_ds.service_id) - self.node.unregister_service(active_ds.service_id) - removed = removed + 1 - continue - if active_ds is None: - # service is not up, create the service - LOG.debug("registering %s service on node %s", - configured_ds['name'], self.node.node_id) - service = self.node.create_datasource_service(configured_ds) - self.node.register_service(service) - added = added + 1 - - # Unregister the services which are not in DB - active_ds_services = [s for s in self.node.get_services(True) - if getattr(s, 'type', '') == 'datasource_driver'] - db_datasources_set = set(db_datasources) - stale_services = [s for s in active_ds_services - if s.ds_id not in db_datasources_set] - for s in stale_services: - LOG.debug("unregistering %s service, datasource not found in DB ", - s.service_id) - self.node.unregister_service(uuid_=s.ds_id) - removed = removed + 1 - - LOG.info("synchronized datasources, added %d removed %d on node %s", - added, removed, self.node.node_id) - - # This might be required once we support update datasource config - # if not self._config_eq(configured_ds, active_ds): - # LOG.debug('configured and active disagree: %s %s', - # strutils.mask_password(active_ds), - # strutils.mask_password(configured_ds)) - - # LOG.info('Reloading datasource: %s', - # strutils.mask_password(configured_ds)) - # self.delete_datasource(configured_ds['name'], - # update_db=False) - # self.add_datasource(configured_ds, update_db=False) - - # def _config_eq(self, db_config, active_config): - # return (db_config['name'] == active_config.service_id and - # db_config['config'] == active_config.service_info['args']) diff --git a/congress/synchronizer/policy_rule_synchronizer.py b/congress/synchronizer/policy_rule_synchronizer.py deleted file mode 100644 index a50029d2..00000000 --- a/congress/synchronizer/policy_rule_synchronizer.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright (c) 2016 NEC Corp. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
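Both synchronizers (DatasourceSynchronizer above and PolicyRuleSynchronizer below) share the same futurist/eventlet lifecycle: methods decorated with @periodics.periodic(spacing=...) are collected into a PeriodicWorker, and the worker loop runs on a green thread until stop()/wait()/kill. A minimal self-contained sketch of that lifecycle; the task and interval here are illustrative (Congress takes its spacings from config options):

import eventlet
eventlet.monkey_patch()

from futurist import periodics


@periodics.periodic(spacing=2)  # assumed 2-second interval
def heartbeat():
    print('periodic tick')


# callables are (callable, args, kwargs) triples, as in start() above
worker = periodics.PeriodicWorker([(heartbeat, None, {})])
thread = eventlet.spawn_n(worker.start)

eventlet.sleep(5)   # let a few ticks fire
worker.stop()       # ask the worker loop to exit...
worker.wait()       # ...and wait for in-flight tasks, as in stop() above
eventlet.greenthread.kill(thread)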
-# - -import eventlet -from futurist import periodics -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging - -from congress.datalog import base -from congress.datalog import compile -from congress.db import datasources -from congress.db import db_policy_rules - -LOG = logging.getLogger(__name__) - -SYNCHRONIZER_SERVICE_ID = '_policy_rule_synchronizer' - - -class PolicyRuleSynchronizer(object): - - def __init__(self, service_obj, node): - self.name = SYNCHRONIZER_SERVICE_ID - self.engine = service_obj - self.sync_thread = None - self.periodic_tasks = None - self.node = node - - def start(self): - callables = [(self.synchronize_all_policies, None, {}), - (self.synchronize_rules, None, {})] - self.periodic_tasks = periodics.PeriodicWorker(callables) - self.sync_thread = eventlet.spawn_n(self.periodic_tasks.start) - LOG.info("started policy-rule synchronizer on node %s", - self.node.node_id) - - def stop(self): - if self.periodic_tasks: - self.periodic_tasks.stop() - self.periodic_tasks.wait() - self.periodic_tasks = None - if self.sync_thread is not None: - eventlet.greenthread.kill(self.sync_thread) - self.sync_thread = None - - def _register_datasource_with_pe(self, ds_name): - """Create a datasource policy in the PE for a newly created datasource.""" - if not self.node.is_valid_service(ds_name): - # datasource service not up, nothing to register - return - # Get the datasource schema to sync the schema with PE - schema = self.node.invoke_service_rpc(ds_name, 'get_datasource_schema', - {'source_id': ds_name}) - self.engine.initialize_datasource(ds_name, schema) - LOG.debug("registered datasource '%s' with PE on node %s", ds_name, - self.node.node_id) - - def _sync_datasource_policies(self): - added = 0 - removed = 0 - db_datasources = [ds['name'] for ds in self.node.get_datasources()] - ds_policies = [p['name'] for p in - self._get_engine_policies(datasource=True)] - - for ds in db_datasources: - # check if ds is registered with PE - if ds not in ds_policies: - self._register_datasource_with_pe(ds) - added = added + 1 - - # get the policies registered with PE, but not in the database - remove_policies = list(set(ds_policies) - set(db_datasources)) - for p in remove_policies: - self.engine.delete_policy(p) - removed = removed + 1 - - LOG.debug("datasource policies synchronized, added %d removed %d ", - added, removed) - - def _get_engine_policies(self, datasource=False): - all_policies = [self.engine.policy_object(n) for n in - self.engine.policy_names()] - dpolicies = [p for p in all_policies - if p.kind == base.DATASOURCE_POLICY_TYPE] - epolicies = list(set(all_policies) - set(dpolicies)) - policies = dpolicies if datasource else epolicies - active_policies = [] - for policy in policies: - active_policies.append({'id': policy.id, - 'name': policy.name, - 'abbreviation': policy.abbr, - 'description': policy.desc, - 'owner_id': policy.owner, - 'kind': policy.kind}) - - return active_policies - - @lockutils.synchronized('congress_synchronize_policies') - def sync_one_policy(self, name, datasource=True, db_session=None): - return self.sync_one_policy_nonlocking( - name, datasource=datasource, db_session=db_session) - - def sync_one_policy_nonlocking( - self, name, datasource=True, db_session=None): - """Synchronize a single policy with the DB.
- - :param name: policy name to be synchronized - :param datasource: True, to sync a datasource policy - - """ - LOG.info("sync %s policy with DB", name) - - if datasource: - policy_object = datasources.get_datasource_by_name( - name, session=db_session) - if policy_object is not None: - if name not in self.engine.policy_names(): - self._register_datasource_with_pe(name) - return - - policy_object = db_policy_rules.get_policy_by_name( - name, session=db_session) - if policy_object is None: - if name in self.engine.policy_names(): - self.engine.delete_policy(name) - LOG.info("policy %s deleted by synchronizer", name) - return - - p = policy_object.to_dict() - if name not in self.engine.policy_names(): - self.engine.create_policy( - p['name'], id_=p['id'], abbr=p['abbreviation'], - kind=p['kind'], desc=p['description'], - owner=p['owner_id']) - LOG.debug("policy %s added by synchronizer", name) - - elif p['id'] != self.engine.policy_object(name).id: - # if same name but not identical attributes - # replace by new policy obj according to DB - self.engine.delete_policy(name) - self.engine.create_policy( - p['name'], id_=p['id'], abbr=p['abbreviation'], - kind=p['kind'], desc=p['description'], - owner=p['owner_id']) - LOG.debug("synchronizer, policy replaced %s", name) - - @periodics.periodic(spacing=cfg.CONF.datasource_sync_period) - @lockutils.synchronized('congress_synchronize_policies') - def synchronize_all_policies(self): - """Synchronize in-memory policies with the DB.""" - added = 0 - removed = 0 - try: - db_policies = [p.to_dict() for p in db_policy_rules.get_policies()] - active_policies = self._get_engine_policies() - # Delete engine policies which are not in DB - for p in active_policies: - if p not in db_policies: - LOG.debug("removing policy %s", str(p)) - self.engine.delete_policy(p['id']) - removed = removed + 1 - # Add policies to PE, which are in DB - for p in db_policies: - if p not in active_policies: - LOG.debug("adding policy %s", str(p)) - self.engine.create_policy(p['name'], id_=p['id'], - abbr=p['abbreviation'], - kind=p['kind'], - desc=p['description'], - owner=p['owner_id']) - added = added + 1 - LOG.info("engine policies synchronized, added %d removed %d ", - added, removed) - # synchronize datasource policies - self._sync_datasource_policies() - LOG.info("completed synchronization of policies") - except Exception: - LOG.exception("Exception occurred in policy synchronizer periodic " - "task on node %s", self.node.node_id) - return - - @periodics.periodic(spacing=cfg.CONF.datasource_sync_period) - @lockutils.synchronized('congress_synchronize_rules') - def synchronize_rules(self, db_session=None): - self.synchronize_rules_nonlocking(db_session=db_session) - - def synchronize_rules_nonlocking(self, db_session=None): - LOG.debug("Synchronizing rules on node %s", self.node.node_id) - try: - # Read rules from DB.
- configured_rules = [] - configured_facts = [] - for r in db_policy_rules.get_policy_rules(session=db_session): - if ':-' in r.rule: # if rule has body - configured_rules.append({'rule': r.rule, - 'id': r.id, - 'comment': r.comment, - 'name': r.name, - 'policy_name': r.policy_name}) - else: # head-only rule, i.e., a fact - configured_facts.append( - {'rule': self.engine.parse1(r.rule).pretty_str(), - # note: parse to remove effects of extraneous formatting - 'policy_name': r.policy_name}) - - # Read rules from engine - policies = {n: self.engine.policy_object(n) for n in - self.engine.policy_names()} - active_policy_rules = [] - active_policy_facts = [] - for policy_name, policy in policies.items(): - if policy.kind != base.DATASOURCE_POLICY_TYPE: - for active_rule in policy.content(): - # FIXME: This assumes r.original_str is None iff - # r is a head-only rule (fact). This works in - # non-recursive policies but not in recursive ones - if active_rule.original_str is None: - active_policy_facts.append( - {'rule': str(active_rule.head), - 'policy_name': policy_name}) - else: - active_policy_rules.append( - {'rule': active_rule.original_str, - 'id': active_rule.id, - 'comment': active_rule.comment, - 'name': active_rule.name, - 'policy_name': policy_name}) - - # ALEX: the Rule object does not have fields like the rule-string - # or id or comment. We can add those fields to the Rule object, - # as long as we don't add them to the Fact because there are many - # fact instances. If a user tries to create a lot of Rules, they - # are probably doing something wrong and should use a datasource - # driver instead. - - changes = [] - - # add configured rules - for r in configured_rules: - if r not in active_policy_rules: - LOG.debug("adding rule %s", str(r)) - parsed_rule = self.engine.parse1(r['rule']) - parsed_rule.set_id(r['id']) - parsed_rule.set_name(r['name']) - parsed_rule.set_comment(r['comment']) - parsed_rule.set_original_str(r['rule']) - - event = compile.Event(formula=parsed_rule, - insert=True, - target=r['policy_name']) - changes.append(event) - - # add configured facts - for r in configured_facts: - if r not in active_policy_facts: - LOG.debug("adding rule %s", str(r)) - parsed_rule = self.engine.parse1(r['rule']) - event = compile.Event(formula=parsed_rule, - insert=True, - target=r['policy_name']) - changes.append(event) - - # remove active rules not configured - for r in active_policy_rules: - if r not in configured_rules: - LOG.debug("removing rule %s", str(r)) - parsed_rule = self.engine.parse1(r['rule']) - parsed_rule.set_id(r['id']) - parsed_rule.set_name(r['name']) - parsed_rule.set_comment(r['comment']) - parsed_rule.set_original_str(r['rule']) - - event = compile.Event(formula=parsed_rule, - insert=False, - target=r['policy_name']) - changes.append(event) - - # remove active facts not configured - for r in active_policy_facts: - if r not in configured_facts: - LOG.debug("removing rule %s", str(r)) - parsed_rule = self.engine.parse1(r['rule']) - event = compile.Event(formula=parsed_rule, - insert=False, - target=r['policy_name']) - changes.append(event) - - permitted, changes = self.engine.process_policy_update(changes) - LOG.info("synchronize_rules, permitted %d, made %d changes on " - "node %s", permitted, len(changes), self.node.node_id) - except Exception: - LOG.exception("synchronizing rules failed") diff --git a/congress/tests/__init__.py b/congress/tests/__init__.py deleted file mode 100644 index 02bfe4da..00000000 --- a/congress/tests/__init__.py +++ /dev/null @@ -1,3
+0,0 @@ -# NOTE(boden): patch upfront to ensure all imports get patched modules -import eventlet -eventlet.monkey_patch() diff --git a/congress/tests/api/__init__.py b/congress/tests/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/tests/api/base.py b/congress/tests/api/base.py deleted file mode 100644 index 6fd889b3..00000000 --- a/congress/tests/api/base.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2016 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from futurist import periodics -import mock -from oslo_config import cfg - -from congress.api import base as api_base -from congress.common import config -from congress import harness -from congress.tests import fake_datasource -from congress.tests import helper - - -def setup_config(with_fake_datasource=True, node_id='testnode', - same_partition_as_node=None, api=True, policy=True, - datasources=True): - """Set up a DseNode and Congress services for testing. - - :param with_fake_datasource: if True, register a FakeDataSource service - :param node_id: node id of the DseNode created for the test - :param same_partition_as_node: optional node whose partition to join - :param api: if True, create the API services - :param policy: if True, create the policy engine service - :param datasources: if True, enable datasource services - """ - config.set_config_defaults() - # Load the fake driver. - cfg.CONF.set_override( - 'drivers', - ['congress.tests.fake_datasource.FakeDataSource']) - - if same_partition_as_node is None: - node = helper.make_dsenode_new_partition(node_id) - else: - node = helper.make_dsenode_same_partition( - same_partition_as_node, node_id) - - if datasources: - cfg.CONF.set_override('datasources', True) - - with mock.patch.object(periodics, 'PeriodicWorker', autospec=True): - services = harness.create2( - existing_node=node, policy_engine=policy, api=api, - datasources=datasources) - - data = None - if with_fake_datasource: - data = fake_datasource.FakeDataSource('data') - # FIXME(ekcs): this is a hack to prevent the synchronizer from - # attempting to delete this DSD because it's not in DB - data.type = 'no_sync_datasource_driver' - node.register_service(data) - - engine_service = None - library_service = None - api_service = None - ds_manager = None - if policy: - engine_service = services[api_base.ENGINE_SERVICE_ID] - library_service = services[api_base.LIBRARY_SERVICE_ID] - if api: - api_service = services['api'] - if datasources: - ds_manager = services['ds_manager'] - - return {'node': node, 'engine': engine_service, 'library': library_service, - 'data': data, 'api': api_service, 'ds_manager': ds_manager} diff --git a/congress/tests/api/test_action_model.py b/congress/tests/api/test_action_model.py deleted file mode 100644 index 74b7850e..00000000 --- a/congress/tests/api/test_action_model.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2015 Intel, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.api import webservice -from congress.tests.api import base as api_base -from congress.tests import base - - -class TestActionModel(base.SqlTestCase): - def setUp(self): - super(TestActionModel, self).setUp() - services = api_base.setup_config() - self.action_model = services['api']['api-action'] - self.datasource = services['data'] - - def test_get_datasource_actions(self): - context = {'ds_id': self.datasource.service_id} - actions = self.action_model.get_items({}, context=context) - expected_ret = {'results': [{'name': 'fake_act', - 'args': [{'name': 'server_id', - 'description': 'server to act'}], - 'description': 'fake action'}]} - self.assertEqual(expected_ret, actions) - - def test_get_invalid_datasource_action(self): - context = {'ds_id': 'invalid_id'} - self.assertRaises(webservice.DataModelException, - self.action_model.get_items, {}, context=context) diff --git a/congress/tests/api/test_api_utils.py b/congress/tests/api/test_api_utils.py deleted file mode 100644 index 5dd89d43..00000000 --- a/congress/tests/api/test_api_utils.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) 2015 NTT, OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
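TestActionModel above exercises the common Congress API data-model contract: get_items(params, context=None) returns a {'results': [...]} envelope, and an unknown datasource id surfaces as a DataModelException. A stand-in sketch of that contract (the class and names here are illustrative, not the real model):

class ActionsModelSketch(object):
    def __init__(self, actions_by_ds):
        self.actions_by_ds = actions_by_ds  # ds_id -> list of action dicts

    def get_items(self, params, context=None):
        ds_id = (context or {}).get('ds_id')
        if ds_id not in self.actions_by_ds:
            # the real model raises webservice.DataModelException here
            raise LookupError('unknown datasource: %s' % ds_id)
        return {'results': self.actions_by_ds[ds_id]}


model = ActionsModelSketch({'data': [{'name': 'fake_act'}]})
assert model.get_items({}, context={'ds_id': 'data'}) == {
    'results': [{'name': 'fake_act'}]}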
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.api import api_utils -from congress.api import webservice -from congress.tests import base - - -class TestAPIUtils(base.SqlTestCase): - - def setUp(self): - super(TestAPIUtils, self).setUp() - - def test_create_table_dict(self): - table_name = 'fake_table' - schema = {'fake_table': ({'name': 'id', 'desc': None}, - {'name': 'name', 'desc': None})} - expected = {'table_id': table_name, - 'columns': [{'name': 'id', 'description': None}, - {'name': 'name', 'description': None}]} - result = api_utils.create_table_dict(table_name, schema) - self.assertEqual(expected, result) - - def test_get_id_from_context_ds_id(self): - context = {'ds_id': 'datasource id'} - expected = ('datasource id', 'datasource id') - result = api_utils.get_id_from_context(context) - self.assertEqual(expected, result) - - def test_get_id_from_context_policy_id(self): - context = {'policy_id': 'policy id'} - expected = ('__engine', 'policy id') - result = api_utils.get_id_from_context(context) - self.assertEqual(expected, result) - - def test_get_id_from_context_with_invalid_context(self): - context = {'invalid_id': 'invalid id'} - self.assertRaises(webservice.DataModelException, - api_utils.get_id_from_context, context) diff --git a/congress/tests/api/test_application.py b/congress/tests/api/test_application.py deleted file mode 100644 index bf10fafa..00000000 --- a/congress/tests/api/test_application.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
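Read together, the three get_id_from_context tests above pin down its contract: it returns a (service, id) pair, with datasource contexts mapping to themselves, policy contexts mapping to the engine service, and anything else rejected. A sketch inferred purely from those expectations, not from the actual api_utils source:

ENGINE_SERVICE = '__engine'  # value taken from the expected tuple above


def get_id_from_context_sketch(context):
    if 'ds_id' in context:
        return context['ds_id'], context['ds_id']
    if 'policy_id' in context:
        return ENGINE_SERVICE, context['policy_id']
    # the real helper raises webservice.DataModelException
    raise ValueError('invalid context: %r' % context)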
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -import mock -from oslo_config import cfg -import webob - -from congress.api import application -from congress.api import webservice -from congress.tests import base - - -class TestApiApplication(base.TestCase): - - def setUp(self): - super(TestApiApplication, self).setUp() - cfg.CONF.set_override('auth_strategy', 'noauth') - - def _check_data_model_exc_response(self, method, exc, response): - self.assertEqual(response.status_code, exc.http_status_code, - 'Correct %s HTTP error status' % method) - body = json.loads(response.body.decode('utf-8')) - self.assertEqual(body['error']['error_code'], exc.error_code, - 'Correct %s error code in response body' % method) - self.assertEqual( - body['error']['message'], exc.description, - 'Correct %s description in response body' % method) - self.assertEqual(body['error']['error_data'], exc.data, - 'Correct %s error data in response body' % method) - - def test_data_model_exception(self): - exc = webservice.DataModelException(1, "error1", [1, {'a': 'b'}], 409) - model = webservice.SimpleDataModel("test") - for method in [m for m in dir(model) if "_item" in m]: - setattr(model, method, mock.Mock(side_effect=exc)) - - resource_mgr = application.ResourceManager() - app = application.ApiApplication(resource_mgr) - - collection_handler = webservice.CollectionHandler(r'/c', model) - resource_mgr.register_handler(collection_handler) - for method in ['GET', 'POST']: - request = webob.Request.blank('/c', body='{}'.encode('utf-8'), - method=method) - response = app(request) - self._check_data_model_exc_response(method, exc, response) - - element_handler = webservice.ElementHandler(r'/e', model) - resource_mgr.register_handler(element_handler) - for method in ['GET', 'PUT', 'PATCH', 'DELETE']: - request = webob.Request.blank('/e', body='{}'.encode('utf-8'), - method=method) - response = app(request) - self._check_data_model_exc_response(method, exc, response) - - def _check_base_exc_response(self, method, response, expected_status): - self.assertEqual(response.status_code, expected_status, - 'Correct %s HTTP error status' % method) - body = json.loads(response.body.decode('utf-8')) - self.assertEqual(body['error']['error_code'], expected_status, - 'Correct %s error code in response body' % method) - if expected_status == 500: - description = "Internal server error" - elif expected_status == 404: - description = "The resouce could not be found." 
- else: - self.fail("Unsupported expected_status value.") - - self.assertEqual( - body['error']['message'], description, - 'Correct %s description in response body' % method) - - def test_exception(self): - model = webservice.SimpleDataModel("test") - for method in [m for m in dir(model) if "_item" in m]: - setattr(model, method, mock.Mock(side_effect=Exception())) - - resource_mgr = application.ResourceManager() - app = application.ApiApplication(resource_mgr) - - collection_handler = webservice.CollectionHandler(r'/c', model) - resource_mgr.register_handler(collection_handler) - for method in ['GET', 'POST']: - request = webob.Request.blank('/c', body='{}'.encode('utf-8'), - method=method) - response = app(request) - self._check_base_exc_response(method, response, 500) - - element_handler = webservice.ElementHandler(r'/e', model) - resource_mgr.register_handler(element_handler) - for method in ['GET', 'PUT', 'PATCH', 'DELETE']: - request = webob.Request.blank('/e', body='{}'.encode('utf-8'), - method=method) - response = app(request) - self._check_base_exc_response(method, response, 500) - - # Tests that making a request to an invalid URL returns 404. - request = webob.Request.blank('/invalid', - body='{}'.encode('utf-8'), - method=method) - response = app(request) - self._check_base_exc_response(method, response, 404) diff --git a/congress/tests/api/test_datasource_model.py b/congress/tests/api/test_datasource_model.py deleted file mode 100644 index 4fa15884..00000000 --- a/congress/tests/api/test_datasource_model.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License.
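TestApiApplication above drives the WSGI app directly through webob rather than a live server: build a request with Request.blank, invoke the app, then assert on the JSON error envelope. A self-contained sketch of that testing pattern against a stand-in app (not Congress's ApiApplication):

import json

import webob


def tiny_app(environ, start_response):
    # always answer with the same JSON error envelope the tests inspect
    body = json.dumps({'error': {'error_code': 500,
                                 'message': 'Internal server error'}})
    start_response('500 Internal Server Error',
                   [('Content-Type', 'application/json')])
    return [body.encode('utf-8')]


request = webob.Request.blank('/c', body=b'{}', method='POST')
response = request.get_response(tiny_app)
assert response.status_code == 500
assert json.loads(response.body.decode('utf-8'))['error']['error_code'] == 500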
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock -from oslo_config import cfg -from six.moves import reduce - -from congress.api import webservice -from congress.datasources import nova_driver -from congress import exception -from congress.tests.api import base as api_base -from congress.tests import base -from congress.tests import helper - - -class TestDatasourceModel(base.SqlTestCase): - def setUp(self): - super(TestDatasourceModel, self).setUp() - services = api_base.setup_config(with_fake_datasource=False) - self.datasource_model = services['api']['api-datasource'] - self.data = services['data'] - self.node = services['node'] - self.engine = services['engine'] - self.ds_manager = services['ds_manager'] - self.datasource = self._get_datasource_request() - self.ds_manager.add_datasource(self.datasource) - - def tearDown(self): - super(TestDatasourceModel, self).tearDown() - self.node.stop() - - def _get_datasource_request(self): - # leave ID out--generated during creation - return {'name': 'datasource1', - 'driver': 'fake_datasource', - 'description': 'hello world!', - 'enabled': True, - 'type': None, - 'config': {'auth_url': 'foo', - 'username': 'armax', - 'password': '', - 'tenant_name': 'armax'}} - - def test_get_items(self): - dinfo = self.datasource_model.get_items(None)['results'] - self.assertEqual(1, len(dinfo)) - datasource2 = self._get_datasource_request() - datasource2['name'] = 'datasource2' - self.ds_manager.add_datasource(datasource2) - dinfo = self.datasource_model.get_items(None)['results'] - self.assertEqual(2, len(dinfo)) - del dinfo[0]['id'] - self.assertEqual(self.datasource, dinfo[0]) - - def test_add_item(self): - datasource3 = self._get_datasource_request() - datasource3['name'] = 'datasource_test_3' - self.datasource_model.add_item(datasource3, {}) - ds_obj = self.node.service_object('datasource_test_3') - obj = self.engine.policy_object('datasource_test_3') - self.assertIsNotNone(obj.schema) - self.assertEqual('datasource_test_3', obj.name) - self.assertIsNotNone(ds_obj) - - def test_add_item_duplicate(self): - self.assertRaises(webservice.DataModelException, - self.datasource_model.add_item, - self.datasource, {}) - - def test_add_item_invalid_tablename(self): - datasource = self._get_datasource_request() - datasource['name'] = "invalid-table-name" - self.assertRaises(webservice.DataModelException, - self.datasource_model.add_item, - datasource, {}) - - def test_delete_item(self): - datasource = self._get_datasource_request() - datasource['name'] = 'test_datasource' - d_id, dinfo = self.datasource_model.add_item(datasource, {}) - self.assertTrue(self.engine.assert_policy_exists('test_datasource')) - context = {'ds_id': d_id} - self.datasource_model.delete_item(None, {}, context=context) - ds_obj = self.node.service_object('test_datasource') - self.assertIsNone(ds_obj) - self.assertRaises(exception.PolicyRuntimeException, - self.engine.assert_policy_exists, 'test_datasource') - self.assertRaises(exception.DatasourceNotFound, - self.node.get_datasource, d_id) - - def test_delete_item_invalid_datasource(self): - context = {'ds_id': 'fake'} - self.assertRaises(webservice.DataModelException, - self.datasource_model.delete_item, - None, {}, context=context) - - def test_datasource_api_model_execute(self): - def _execute_api(client, action, action_args): - positional_args = action_args.get('positional', []) - named_args = action_args.get('named', {}) - method = reduce(getattr, 
action.split('.'), client) - method(*positional_args, **named_args) - - class NovaClient(object): - def __init__(self, testkey): - self.testkey = testkey - - def _get_testkey(self): - return self.testkey - - def disconnect(self, arg1, arg2, arg3): - self.testkey = "arg1=%s arg2=%s arg3=%s" % (arg1, arg2, arg3) - - def disconnect_all(self): - self.testkey = "action_has_no_args" - - nova_client = NovaClient("testing") - args = helper.datasource_openstack_args() - nova = nova_driver.NovaDriver('nova', args=args) - nova.nova_client = nova_client - nova.update_from_datasource = mock.MagicMock() - nova._execute_api = _execute_api - self.node.register_service(nova) - - execute_action = self.datasource_model.execute_action - - # Positive test: valid body args, ds_id - context = {'ds_id': 'nova'} - body = {'name': 'disconnect', - 'args': {'positional': ['value1', 'value2'], - 'named': {'arg3': 'value3'}}} - request = helper.FakeRequest(body) - result = execute_action({}, context, request) - self.assertEqual(result, {}) - expected_result = "arg1=value1 arg2=value2 arg3=value3" - f = nova.nova_client._get_testkey - helper.retry_check_function_return_value(f, expected_result) - - # Positive test: no body args - context = {'ds_id': 'nova'} - body = {'name': 'disconnect_all'} - request = helper.FakeRequest(body) - result = execute_action({}, context, request) - self.assertEqual(result, {}) - expected_result = "action_has_no_args" - f = nova.nova_client._get_testkey - helper.retry_check_function_return_value(f, expected_result) - - # Negative test: invalid ds_id - context = {'ds_id': 'unknown_ds'} - self.assertRaises(webservice.DataModelException, execute_action, - {}, context, request) - - # Negative test: no ds_id - context = {} - self.assertRaises(webservice.DataModelException, execute_action, - {}, context, request) - - # Negative test: empty body - context = {'ds_id': 'nova'} - bad_request = helper.FakeRequest({}) - self.assertRaises(webservice.DataModelException, execute_action, - {}, context, bad_request) - - # Negative test: no body name/action - context = {'ds_id': 'nova'} - body = {'args': {'positional': ['value1', 'value2'], - 'named': {'arg3': 'value3'}}} - bad_request = helper.FakeRequest(body) - self.assertRaises(webservice.DataModelException, execute_action, - {}, context, bad_request) - - # Positive test with retry: no body args - cfg.CONF.dse.execute_action_retry = True - context = {'ds_id': 'nova'} - body = {'name': 'disconnect_all'} - request = helper.FakeRequest(body) - result = execute_action({}, context, request) - self.assertEqual(result, {}) - expected_result = "action_has_no_args" - f = nova.nova_client._get_testkey - helper.retry_check_function_return_value(f, expected_result) diff --git a/congress/tests/api/test_driver_model.py b/congress/tests/api/test_driver_model.py deleted file mode 100644 index d2036057..00000000 --- a/congress/tests/api/test_driver_model.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.api import webservice -from congress.tests.api import base as api_base -from congress.tests import base - - -class TestDriverModel(base.SqlTestCase): - def setUp(self): - super(TestDriverModel, self).setUp() - services = api_base.setup_config() - self.node = services['node'] - self.ds_manager = services['ds_manager'] - - self.ds_manager.add_datasource(self._get_datasource_request()) - self.driver_model = services['api']['api-system'] - - def _get_datasource_request(self): - req = {'driver': 'fake_datasource', - 'name': 'fake_datasource'} - req['config'] = {'auth_url': 'foo', - 'username': 'foo', - 'password': 'password', - 'tenant_name': 'foo'} - return req - - def tearDown(self): - super(TestDriverModel, self).tearDown() - - def test_drivers_list(self): - context = {} - expected_ret = {"results": [ - { - "description": "This is a fake driver used for testing", - "id": "fake_datasource" - } - ]} - - ret = self.driver_model.get_items({}, context) - self.assertEqual(expected_ret, ret) - - def test_driver_details(self): - context = { - "driver_id": "fake_datasource" - } - expected_ret = { - "config": { - "auth_url": "required", - "endpoint": "(optional)", - "password": "required", - "poll_time": "(optional)", - "region": "(optional)", - "project_name": "(optional)", - "tenant_name": "required", - "username": "required" - }, - "description": "This is a fake driver used for testing", - "id": "fake_datasource", - "module": "congress.tests.fake_datasource.FakeDataSource", - "secret": ["password"], - "tables": [{'columns': [ - {'description': None, 'name': 'id'}, - {'description': None, 'name': 'name'}], - 'table_id': 'fake_table'} - ] - } - - ret = self.driver_model.get_item('fake_datasource', {}, context) - self.assertEqual(expected_ret, ret) - - def test_invalid_driver_details(self): - context = { - "driver_id": "invalid-id" - } - self.assertRaises(webservice.DataModelException, - self.driver_model.get_item, - 'invalid-id', {}, context) diff --git a/congress/tests/api/test_error_codes.py b/congress/tests/api/test_error_codes.py deleted file mode 100644 index ecc2f8a7..00000000 --- a/congress/tests/api/test_error_codes.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
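The expected driver payload above advertises which config fields are sensitive ("secret": ["password"]), which is what lets the API layer mask those values before returning datasource configs. A small illustrative sketch of such masking; this is an assumed usage, not Congress code (OpenStack's oslo_utils.strutils.mask_password serves similar jobs):

def mask_secret_fields(config, secret_fields, placeholder='<hidden>'):
    # replace values of secret fields, leave everything else untouched
    return {key: (placeholder if key in secret_fields else value)
            for key, value in config.items()}


masked = mask_secret_fields({'username': 'foo', 'password': 'password'},
                            ['password'])
assert masked == {'username': 'foo', 'password': '<hidden>'}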
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.api import error_codes -from congress.tests import base - - -class TestErrorCodes(base.TestCase): - - def setUp(self): - super(TestErrorCodes, self).setUp() - self.original_errors = error_codes.errors - - def tearDown(self): - super(TestErrorCodes, self).tearDown() - error_codes.errors = self.original_errors - - def test_get_error_code(self): - name = 'fake-error' - error_codes.errors = { - "fake-error": (0000, 'This is a fake error code.', 400) - } - expected_num = 0000 - expected_desc = 'This is a fake error code.' - expected_http = 400 - expected_ret = (expected_num, expected_desc) - - ret = error_codes.get(name) - self.assertEqual(expected_ret, ret) - - num = error_codes.get_num(name) - self.assertEqual(expected_num, num) - - desc = error_codes.get_desc(name) - self.assertEqual(expected_desc, desc) - - http = error_codes.get_http(name) - self.assertEqual(expected_http, http) - - def test_get_unknown_error_code(self): - name = 'fake_error_code' - error_codes.errors = { - error_codes.UNKNOWN: (0000, 'Unknown error', 400), - 'fake-error': (1000, 'Fake error', 404) - } - expected_num = 0000 - expected_desc = 'Unknown error' - expected_http = 400 - expected_ret = (expected_num, expected_desc) - - ret = error_codes.get(name) - self.assertEqual(expected_ret, ret) - - num = error_codes.get_num(name) - self.assertEqual(expected_num, num) - - desc = error_codes.get_desc(name) - self.assertEqual(expected_desc, desc) - - http = error_codes.get_http(name) - self.assertEqual(expected_http, http) diff --git a/congress/tests/api/test_library_policy_model.py b/congress/tests/api/test_library_policy_model.py deleted file mode 100644 index d5d62296..00000000 --- a/congress/tests/api/test_library_policy_model.py +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright (c) 2017 VMware Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License.
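TestErrorCodes above pins the error_codes contract: each symbolic name maps to a (number, description, http_status) triple, get() returns the (number, description) pair, and unknown names fall back to a designated UNKNOWN entry. A minimal registry sketch with made-up numbers (an illustration, not the Congress module):

UNKNOWN = 'unknown'

errors = {
    UNKNOWN: (1000, 'Unknown error', 400),
    'fake-error': (1001, 'Fake error', 404),
}


def get(name):
    num, desc, _http = errors.get(name, errors[UNKNOWN])
    return num, desc


def get_http(name):
    return errors.get(name, errors[UNKNOWN])[2]


assert get('no-such-name') == (1000, 'Unknown error')
assert get_http('fake-error') == 404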
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import copy - -from congress.api import webservice -from congress.db import db_library_policies -from congress.tests.api import base as api_base -from congress.tests import base - - -class TestLibraryPolicyModel(base.SqlTestCase): - def setUp(self): - super(TestLibraryPolicyModel, self).setUp() - - services = api_base.setup_config() - self.library_policy_model = services['api']['api-library-policy'] - self.node = services['node'] - self.engine = services['engine'] - - # clear the library policies loaded on startup - db_library_policies.delete_policies() - - self._add_test_policy() - - def _add_test_policy(self): - test_policy = { - "name": "test_policy", - "description": "test policy description", - "kind": "nonrecursive", - "abbreviation": "abbr", - "rules": [{"rule": "p(x) :- q(x)", "comment": "test comment", - "name": "test name"}, - {"rule": "p(x) :- q2(x)", "comment": "test comment2", - "name": "test name2"}] - } - test_policy_id, obj = self.library_policy_model.add_item( - test_policy, {}) - test_policy["id"] = test_policy_id - - test_policy2 = { - "name": "test_policy2", - "description": "test policy2 description", - "kind": "nonrecursive", - "abbreviation": "abbr2", - "rules": [] - } - test_policy_id, obj = self.library_policy_model.add_item( - test_policy2, {}) - test_policy2["id"] = test_policy_id - - self.policy = test_policy - self.policy2 = test_policy2 - - self.policy_metadata = copy.deepcopy(test_policy) - self.policy2_metadata = copy.deepcopy(test_policy2) - del self.policy_metadata['rules'] - del self.policy2_metadata['rules'] - - def test_get_items(self): - ret = self.library_policy_model.get_items({}) - self.assertTrue(all(p in ret['results'] - for p in [self.policy, - self.policy2])) - - def test_get_item(self): - expected_ret = self.policy - ret = self.library_policy_model.get_item(self.policy["id"], {}) - self.assertEqual(expected_ret, ret) - - def test_get_invalid_item(self): - self.assertRaises(KeyError, - self.library_policy_model.get_item, - 'invalid-id', {}) - - def test_add_item(self): - test = { - "name": "test", - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "abbr", - "rules": [] - } - expected_ret = copy.deepcopy(test) - del expected_ret['rules'] - - policy_id, policy_obj = self.library_policy_model.add_item(test, {}) - test['id'] = policy_id - self.assertEqual(test, policy_obj) - - def test_add_item_duplicate_name(self): - test = { - "name": "test_policy", - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "abbr", - "rules": [] - } - # a duplicate policy name is rejected - self.assertRaises(KeyError, - self.library_policy_model.add_item, test, {}) - ret = self.library_policy_model.get_items({}) - self.assertEqual(len(ret['results']), 2) - - def test_add_item_with_id(self): - test = { - "name": "test", - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "abbr", - "rules": [] - } - - self.assertRaises(webservice.DataModelException, - self.library_policy_model.add_item, test, {}, 'id') - - def test_add_item_without_name(self): - test = { - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "abbr" - } - - self.assertRaises(webservice.DataModelException, - self.library_policy_model.add_item, test, {}) - - def test_add_item_with_long_abbreviation(self): - test = { - "name": "test", - "description": "test description", - "kind": "nonrecursive",
- "abbreviation": "123456", - "rules": [] - } - self.assertRaises(webservice.DataModelException, - self.library_policy_model.add_item, test, {}) - - def test_update_item_without_name(self): - test = { - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "abbr" - } - - self.assertRaises(webservice.DataModelException, - self.library_policy_model.update_item, - self.policy['id'], test, {}) - - def test_update_item_with_long_abbreviation(self): - test = { - "name": "test", - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "123456", - "rules": [] - } - self.assertRaises(webservice.DataModelException, - self.library_policy_model.update_item, - self.policy['id'], test, {}) - - def test_delete_item(self): - # delete non-existent policy - self.assertRaises(KeyError, self.library_policy_model.delete_item, - 'no_such_policy', {}) - - # delete existing policy - expected_ret = self.policy - policy_id = self.policy['id'] - - ret = self.library_policy_model.delete_item(policy_id, {}) - self.assertEqual(expected_ret, ret) - self.assertRaises(KeyError, - self.library_policy_model.get_item, - self.policy['id'], {}) - - def test_policy_api_model_error(self): - """Test the policy api model.""" - - # policy without name - self.assertRaises(webservice.DataModelException, - self.library_policy_model.add_item, - {'rules': []}, {}) - - self.assertRaises(webservice.DataModelException, - self.library_policy_model.update_item, - self.policy['id'], {'rules': []}, {}) - - # policy with bad name - self.assertRaises(webservice.DataModelException, - self.library_policy_model.add_item, - {'name': '7*7', 'rules': []}, {}) - self.assertRaises(webservice.DataModelException, - self.library_policy_model.update_item, - self.policy['id'], {'name': '7*7', 'rules': []}, {}) - - self.assertRaises(webservice.DataModelException, - self.library_policy_model.add_item, - {'name': 'p(x) :- q(x)'}, {}) - self.assertRaises(webservice.DataModelException, - self.library_policy_model.update_item, - self.policy['id'], {'name': 'p(x) :- q(x)'}, {}) - - # policy with invalid 'kind' - self.assertRaises(webservice.DataModelException, - self.library_policy_model.add_item, - {'kind': 'nonexistent', 'name': 'alice', - 'rules': []}, {}) - self.assertRaises(webservice.DataModelException, - self.library_policy_model.update_item, - self.policy['id'], - {'kind': 'nonexistent', 'name': 'alice', - 'rules': []}, {}) - - def test_update_item(self): - replacement_policy = { - "name": "new_name", - "description": "new test policy2 description", - "kind": "nonrecursive", - "abbreviation": "newab", - "rules": [{"rule": "r(x) :- c(x)", "comment": "test comment", - "name": "test name"}] - } - - # update non-existent item - self.assertRaises(KeyError, - self.library_policy_model.update_item, 'no_such_id', - replacement_policy, {}, {}) - - # update existing item - self.library_policy_model.update_item( - self.policy2['id'], replacement_policy, {}, {}) - - replacement_policy_w_id = copy.deepcopy(replacement_policy) - replacement_policy_w_id['id'] = self.policy2['id'] - - ret = self.library_policy_model.get_items({}) - self.assertEqual(len(ret['results']), 2) - self.assertTrue(all(p in ret['results'] - for p in [self.policy, - replacement_policy_w_id])) diff --git a/congress/tests/api/test_policy_model.py b/congress/tests/api/test_policy_model.py deleted file mode 100644 index 35047cf3..00000000 --- a/congress/tests/api/test_policy_model.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright (c) 2015 OpenStack 
Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import copy -import mock -from oslo_utils import uuidutils - -from congress.api import error_codes -from congress.api import webservice -from congress.datalog import compile -from congress.tests.api import base as api_base -from congress.tests import base -from congress.tests import helper - - -class TestPolicyModel(base.SqlTestCase): - def setUp(self): - super(TestPolicyModel, self).setUp() - - services = api_base.setup_config() - self.policy_model = services['api']['api-policy'] - self.rule_api = services['api']['api-rule'] - self.node = services['node'] - self.engine = services['engine'] - self.initial_policies = set(self.engine.policy_names()) - self._add_test_policy() - - def _add_test_policy(self): - test_policy = { - "name": "test_policy", - "description": "test policy description", - "kind": "nonrecursive", - "abbreviation": "abbr" - } - test_policy_id, obj = self.policy_model.add_item(test_policy, {}) - test_policy["id"] = test_policy_id - test_policy["owner_id"] = obj["owner_id"] - - test_policy2 = { - "name": "test_policy2", - "description": "test policy2 description", - "kind": "nonrecursive", - "abbreviation": "abbr2" - } - test_policy_id, obj = self.policy_model.add_item(test_policy2, {}) - test_policy2["id"] = test_policy_id - test_policy2["owner_id"] = obj["owner_id"] - - self.policy = test_policy - self.policy2 = test_policy2 - - action_policy = self.policy_model.get_item('action', {}) - self.action_policy = action_policy - - def test_in_mem_and_db_policies(self): - ret = self.policy_model.get_items({}) - db = [p['name'] for p in ret['results']] - mem = self.engine.policy_names() - new_memory = set(mem) - self.initial_policies - new_db = set(db) - self.initial_policies - self.assertEqual(new_memory, new_db) - - def test_get_items(self): - ret = self.policy_model.get_items({}) - self.assertTrue(all(p in ret['results'] - for p in [self.policy, self.policy2])) - - def test_get_item(self): - expected_ret = self.policy - ret = self.policy_model.get_item(self.policy["id"], {}) - self.assertEqual(expected_ret, ret) - - def test_get_invalid_item(self): - self.assertRaisesRegex(webservice.DataModelException, - '^Not Found', - self.policy_model.get_item, - 'invalid-id', {}) - - @mock.patch('oslo_utils.uuidutils.generate_uuid') - def test_add_item(self, patched_gen_uuid): - test = { - "name": "test", - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "abbr" - } - patched_gen_uuid.return_value = 'uuid' - uuidutils.generate_uuid = mock.Mock() - uuidutils.generate_uuid.return_value = 'uuid' - expected_ret1 = 'uuid' - expected_ret2 = { - 'id': 'uuid', - 'name': test['name'], - 'owner_id': 'user', - 'description': test['description'], - 'abbreviation': test['abbreviation'], - 'kind': test['kind'] - } - - policy_id, policy_obj = self.policy_model.add_item(test, {}) - 
self.assertEqual(expected_ret1, policy_id) - self.assertEqual(expected_ret2, policy_obj) - - def test_add_item_with_rules(self): - res = self.policy_model.get_items({})['results'] - self.assertEqual(len(res), 4) # two built-in policies plus two from setUp - - def adjust_for_comparison(rules): - # compile rule string into rule object - # replace dict with tuple for sorting - # 'id' field implicitly dropped if present - rules = [(compile.parse1(rule['rule']), rule['name'], - rule['comment']) for rule in rules] - - # sort lists for comparison - return sorted(rules) - - test_policy = { - "name": "test_rules_policy", - "description": "test policy description", - "kind": "nonrecursive", - "abbreviation": "abbr", - "rules": [{"rule": "p(x) :- q(x)", "comment": "test comment", - "name": "test name"}, - {"rule": "p(x) :- q2(x)", "comment": "test comment2", - "name": "test name2"}] - } - - add_policy_id, add_policy_obj = self.policy_model.add_item( - test_policy, {}) - - test_policy['id'] = add_policy_id - - # adjust for comparison - test_policy['owner_id'] = 'user' - test_policy['rules'] = adjust_for_comparison(test_policy['rules']) - - add_policy_obj['rules'] = adjust_for_comparison( - add_policy_obj['rules']) - - self.assertEqual(add_policy_obj, test_policy) - - res = self.policy_model.get_items({})['results'] - del test_policy['rules'] - self.assertIn(test_policy, res) - - res = self.policy_model.get_items({})['results'] - self.assertEqual(len(res), 5) - - # failure by duplicate policy name - duplicate_name_policy = copy.deepcopy(test_policy) - duplicate_name_policy['description'] = 'diff description' - duplicate_name_policy['abbreviation'] = 'diff' - duplicate_name_policy['rules'] = [] - - self.assertRaises( - KeyError, self.policy_model.add_item, duplicate_name_policy, {}) - - res = self.policy_model.get_items({})['results'] - self.assertEqual(len(res), 5) - - def test_add_item_with_bad_rules(self): - res = self.policy_model.get_items({})['results'] - self.assertEqual(len(res), 4) # two built-in policies plus two from setUp - - test_policy = { - "name": "test_rules_policy", - "description": "test policy description", - "kind": "nonrecursive", - "abbreviation": "abbr", - "rules": [{"rule": "p(x) :- q(x)", "comment": "test comment", - "name": "test name"}, - {"rule": "p(x) ====:- q2(x)", "comment": "test comment2", - "name": "test name2"}] - } - self.assertRaises(webservice.DataModelException, - self.policy_model.add_item, test_policy, {}) - - res = self.policy_model.get_items({})['results'] - self.assertEqual(len(res), 4) # unchanged - - def test_add_item_with_id(self): - test = { - "name": "test", - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "abbr" - } - - self.assertRaises(webservice.DataModelException, - self.policy_model.add_item, test, {}, 'id') - - def test_add_item_without_name(self): - test = { - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "abbr" - } - - self.assertRaises(webservice.DataModelException, - self.policy_model.add_item, test, {}) - - def test_add_item_with_long_abbreviation(self): - test = { - "name": "test", - "description": "test description", - "kind": "nonrecursive", - "abbreviation": "123456" - } - try: - self.policy_model.add_item(test, {}) - self.fail("DataModelException should have been raised.") - except webservice.DataModelException as e: - error_key = 'policy_abbreviation_error' - self.assertEqual(error_codes.get_num(error_key), e.error_code) - self.assertEqual(error_codes.get_desc(error_key), e.description) -
self.assertEqual(error_codes.get_http(error_key), - e.http_status_code) - - def test_delete_item(self): - expected_ret = self.policy - policy_id = self.policy['id'] - - ret = self.policy_model.delete_item(policy_id, {}) - self.assertEqual(expected_ret, ret) - self.assertRaisesRegex(webservice.DataModelException, - '^Not Found', - self.policy_model.get_item, - self.policy['id'], {}) - - # check that deleting the policy also deletes the rules - self.assertRaises(webservice.DataModelException, - self.rule_api.get_items, - {}, {'policy_id': policy_id}) - - def test_simulate_action(self): - context = { - 'policy_id': self.action_policy['name'] - } - action_rule1 = { - 'rule': 'action("q")', - } - action_rule2 = { - 'rule': 'p+(x):- q(x)' - } - self.rule_api.add_item(action_rule1, {}, context=context) - self.rule_api.add_item(action_rule2, {}, context=context) - - request_body = { - 'query': 'p(x)', - 'action_policy': self.action_policy['name'], - 'sequence': 'q(1)' - } - request = helper.FakeRequest(request_body) - expected_ret = { - 'result': [ - "p(1)" - ] - } - - ret = self.policy_model.simulate_action({}, context, request) - self.assertEqual(expected_ret, ret) - - def test_simulate_with_delta(self): - context = { - 'policy_id': self.action_policy['name'] - } - action_rule1 = { - 'rule': 'action("q")', - } - action_rule2 = { - 'rule': 'p+(x):- q(x)' - } - self.rule_api.add_item(action_rule1, {}, context=context) - self.rule_api.add_item(action_rule2, {}, context=context) - - request_body = { - 'query': 'p(x)', - 'action_policy': self.action_policy['name'], - 'sequence': 'q(1)' - } - request = helper.FakeRequest(request_body) - params = { - 'delta': 'true' - } - expected_ret = { - 'result': [ - "p+(1)" - ] - } - - ret = self.policy_model.simulate_action(params, context, request) - self.assertEqual(expected_ret, ret) - - def test_simulate_with_trace(self): - context = { - 'policy_id': self.action_policy['name'] - } - action_rule1 = { - 'rule': 'action("q")', - } - action_rule2 = { - 'rule': 'p+(x):- q(x)' - } - self.rule_api.add_item(action_rule1, {}, context=context) - self.rule_api.add_item(action_rule2, {}, context=context) - - request_body = { - 'query': 'p(x)', - 'action_policy': self.action_policy['name'], - 'sequence': 'q(1)' - } - request = helper.FakeRequest(request_body) - params = { - 'trace': 'true' - } - expected_ret = { - 'result': [ - "p(1)" - ], - 'trace': "trace strings" - } - - ret = self.policy_model.simulate_action(params, context, request) - # check response's keys equal expected_ret's key - self.assertTrue(all(key in expected_ret.keys() for key in ret.keys())) - self.assertEqual(expected_ret['result'], ret['result']) - self.assertGreater(len(ret['trace']), 10) - - def test_simulate_with_delta_and_trace(self): - context = { - 'policy_id': self.action_policy['name'] - } - action_rule1 = { - 'rule': 'action("q")', - } - action_rule2 = { - 'rule': 'p+(x):- q(x)' - } - self.rule_api.add_item(action_rule1, {}, context=context) - self.rule_api.add_item(action_rule2, {}, context=context) - - request_body = { - 'query': 'p(x)', - 'action_policy': self.action_policy['name'], - 'sequence': 'q(1)' - } - request = helper.FakeRequest(request_body) - params = { - 'trace': 'true', - 'delta': 'true' - } - expected_ret = { - 'result': [ - "p+(1)" - ], - 'trace': "trace strings" - } - - ret = self.policy_model.simulate_action(params, context, request) - # check response's keys equal expected_ret's key - self.assertTrue(all(key in expected_ret.keys() for key in ret.keys())) - 
self.assertEqual(expected_ret['result'], ret['result']) - self.assertGreater(len(ret['trace']), 10) - - def test_simulate_invalid_policy(self): - context = { - 'policy_id': 'invalid-policy' - } - request_body = { - 'query': 'p(x)', - 'action_policy': self.action_policy['name'], - 'sequence': 'q(1)' - } - request = helper.FakeRequest(request_body) - - self.assertRaises(webservice.DataModelException, - self.policy_model.simulate_action, - {}, context, request) - - def test_simulate_invalid_sequence(self): - context = { - 'policy_id': self.action_policy['name'] - } - action_rule = { - 'rule': 'w(x):-z(x)', - } - self.rule_api.add_item(action_rule, {}, context=context) - - request_body = { - 'query': 'w(x)', - 'action_policy': self.action_policy['name'], - 'sequence': 'z(1)' - } - request = helper.FakeRequest(request_body) - - self.assertRaises(webservice.DataModelException, - self.policy_model.simulate_action, - {}, context, request) - - def test_simulate_policy_errors(self): - def check_err(params, context, request, emsg): - try: - self.policy_model.simulate_action(params, context, request) - self.fail("DataModelException should have been raised.") - except webservice.DataModelException as e: - self.assertIn(emsg, str(e)) - - context = { - 'policy_id': self.action_policy['name'] - } - - # Missing query - body = {'action_policy': self.action_policy['name'], - 'sequence': 'q(1)'} - check_err({}, context, helper.FakeRequest(body), - 'Simulate requires parameters') - - # Invalid query - body = {'query': 'p(x', - 'action_policy': self.action_policy['name'], - 'sequence': 'q(1)'} - check_err({}, context, helper.FakeRequest(body), - 'Parse failure') - - # Multiple queries - body = {'query': 'p(x) q(x)', - 'action_policy': self.action_policy['name'], - 'sequence': 'q(1)'} - check_err({}, context, helper.FakeRequest(body), - 'more than 1 rule') - - # Missing action_policy - body = {'query': 'p(x)', - 'sequence': 'q(1)'} - check_err({}, context, helper.FakeRequest(body), - 'Simulate requires parameters') - - # Missing sequence - body = {'query': 'p(x)', - 'action_policy': self.action_policy['name']} - check_err({}, context, helper.FakeRequest(body), - 'Simulate requires parameters') - - # Syntactically invalid sequence - body = {'query': 'p(x)', - 'action_policy': self.action_policy['name'], - 'sequence': 'q(1'} - check_err({}, context, helper.FakeRequest(body), - 'Parse failure') - - # Semantically invalid sequence - body = {'query': 'p(x)', - 'action_policy': self.action_policy['name'], - 'sequence': 'r(1)'} # r is not an action - check_err({}, context, helper.FakeRequest(body), - 'non-action, non-update') - - def test_policy_api_model_error(self): - """Test the policy api model.""" - - # add policy without name - self.assertRaises(webservice.DataModelException, - self.policy_model.add_item, {}, {}) - - # add policy with bad ID - self.assertRaises(webservice.DataModelException, - self.policy_model.add_item, {'name': '7*7'}, {}) - self.assertRaises(webservice.DataModelException, - self.policy_model.add_item, - {'name': 'p(x) :- q(x)'}, {}) - - # add policy with invalid 'kind' - self.assertRaises(webservice.DataModelException, - self.policy_model.add_item, - {'kind': 'nonexistent', 'name': 'alice'}, {}) - - # add existing policy - self.policy_model.add_item({'name': 'Test1'}, {}) - self.assertRaises(KeyError, self.policy_model.add_item, - {'name': 'Test1'}, {}) - - # delete non-existent policy - self.assertRaises(KeyError, self.policy_model.delete_item, - 'noexist', {}) - - # delete system-maintained policy - policies = 
self.policy_model.get_items({})['results'] - class_policy = [p for p in policies if p['name'] == 'classification'] - class_policy = class_policy[0] - self.assertRaises(KeyError, self.policy_model.delete_item, - class_policy['id'], {}) diff --git a/congress/tests/api/test_row_model.py b/congress/tests/api/test_row_model.py deleted file mode 100644 index 2edc0d3b..00000000 --- a/congress/tests/api/test_row_model.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.api import webservice -from congress.tests.api import base as api_base -from congress.tests import base - - -class TestRowModel(base.SqlTestCase): - - def setUp(self): - super(TestRowModel, self).setUp() - services = api_base.setup_config() - self.policy_model = services['api']['api-policy'] - self.rule_model = services['api']['api-rule'] - self.row_model = services['api']['api-row'] - self.node = services['node'] - self.data = services['data'] - - def test_get_items_datasource_row(self): - # adjust datasource to have required value - row = ('data1', 'data2') - self.data.state['fake_table'] = set([row]) - - # check result - context = {'ds_id': self.data.service_id, - 'table_id': 'fake_table'} - data = [{'data': row}] - expected_ret = {'results': data} - ret = self.row_model.get_items({}, context) - self.assertEqual(expected_ret, ret) - - def test_get_items_invalid_ds_name(self): - context = {'ds_id': 'invalid_ds', - 'table_id': 'fake_table'} - self.assertRaises(webservice.DataModelException, - self.row_model.get_items, {}, context) - - def test_get_items_invalid_ds_table_name(self): - context = {'ds_id': self.data.service_id, - 'table_id': 'invalid_table'} - self.assertRaises(webservice.DataModelException, - self.row_model.get_items, {}, context) - - def test_get_items_policy_row(self): - # create policy - policyname = 'test_policy' - self.policy_model.add_item({"name": policyname}, {}) - - # insert rules - context = {'policy_id': policyname, - 'table_id': 'p'} - self.rule_model.add_item({'rule': 'p("x"):- true'}, {}, - context=context) - - # check results - row = ('x',) - data = [{'data': row}] - ret = self.row_model.get_items({}, context) - self.assertEqual({'results': data}, ret) - - # Enable trace and check - ret = self.row_model.get_items({'trace': 'true'}, context=context) - s = frozenset([tuple(x['data']) for x in ret['results']]) - t = frozenset([('x',)]) - self.assertEqual(s, t, "Rows with tracing") - self.assertIn('trace', ret, "Rows should have trace") - self.assertEqual(len(ret['trace'].split('\n')), 9) - - def test_get_items_invalid_policy_name(self): - context = {'policy_id': 'invalid_policy', - 'table_id': 'p'} - - self.assertRaises(webservice.DataModelException, - self.row_model.get_items, {}, context) - - def test_get_items_invalid_policy_table_name(self): - # create policy - policyname = 'test_policy' - 
self.policy_model.add_item({"name": policyname}, {}) - - context = {'policy_id': policyname, - 'table_id': 'invalid_table'} - - self.assertRaises(webservice.DataModelException, - self.row_model.get_items, {}, context) - - def test_update_items(self): - context = {'ds_id': self.data.service_id, - 'table_id': 'fake_table'} - objs = [ - {"id": 'id-1', "name": 'name-1'}, - {"id": 'id-2', "name": 'name-2'} - ] - expected_state = (('id-1', 'name-1'), ('id-2', 'name-2')) - - self.row_model.update_items(objs, {}, context=context) - table_row = self.data.state['fake_table'] - - self.assertEqual(len(expected_state), len(table_row)) - for row in expected_state: - self.assertIn(row, table_row) - - def test_update_items_invalid_table(self): - context = {'ds_id': self.data.service_id, - 'table_id': 'invalid_table'} - objs = [ - {"id": 'id-1', "name": 'name-1'}, - {"id": 'id-2', "name": 'name-2'} - ] - self.assertRaises(webservice.DataModelException, - self.row_model.update_items, objs, {}, context) diff --git a/congress/tests/api/test_rule_model.py b/congress/tests/api/test_rule_model.py deleted file mode 100644 index 6de8ea8c..00000000 --- a/congress/tests/api/test_rule_model.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock - -from congress.api import rule_model -from congress.api import webservice -from congress.tests.api import base as api_base -from congress.tests import base - - -class TestRuleModel(base.SqlTestCase): - def setUp(self): - super(TestRuleModel, self).setUp() - - services = api_base.setup_config() - self.policy_model = services['api']['api-policy'] - self.rule_model = services['api']['api-rule'] - self.node = services['node'] - - self.action_policy = self.policy_model.get_item('action', {}) - self.context = {'policy_id': self.action_policy["name"]} - self._add_test_rule() - - def _add_test_rule(self): - test_rule1 = { - "rule": "p(x) :- q(x)", - "name": "test-rule1", - "comment": "test-comment" - } - test_rule2 = { - "rule": 'p(x) :- q(x), not r(x)', - "name": "test-rule2", - "comment": "test-comment-2" - } - test_rule_id, obj = self.rule_model.add_item(test_rule1, {}, - context=self.context) - test_rule1["id"] = test_rule_id - self.rule1 = test_rule1 - - test_rule_id, obj = self.rule_model.add_item(test_rule2, {}, - context=self.context) - test_rule2["id"] = test_rule_id - self.rule2 = test_rule2 - - @mock.patch.object(rule_model.RuleModel, 'policy_name') - def test_add_rule_with_invalid_policy(self, policy_name_mock): - test_rule = {'rule': 'p()', 'name': 'test'} - policy_name_mock.return_value = 'invalid' - self.assertRaises(webservice.DataModelException, - self.rule_model.add_item, - test_rule, {}) - - # TODO(dse2): Fix this test; it must create a 'beta' service on the dse - # so that when it subscribes, the snapshot can be returned. 
- # Or fix the subscribe() implementation so that we can subscribe before - # the service has been created. - # def test_add_rule_with_colrefs(self): - # engine = self.engine - # engine.create_policy('beta', kind=datalogbase.DATASOURCE_POLICY_TYPE) - # engine.set_schema( - # 'beta', compile.Schema({'q': ("name", "status", "year")})) - # # insert/retrieve rule with column references - # # just testing that no errors are thrown--correctness elsewhere - # # Assuming that api-models are pass-throughs to functionality - # (id1, _) = self.rule_model.add_item( - # {'rule': 'p(x) :- beta:q(name=x)'}, - # {}, context=self.context) - # self.rule_model.get_item(id1, {}, context=self.context) - - # def test_add_rule_with_bad_colrefs(self): - # engine = self.engine - # engine.create_policy('beta') # not datasource policy - # # insert/retrieve rule with column references - # # just testing that no errors are thrown--correctness elsewhere - # # Assuming that api-models are pass-throughs to functionality - # self.assertRaises( - # webservice.DataModelException, - # self.rule_model.add_item, - # {'rule': 'p(x) :- beta:q(name=x)'}, - # {}, context=self.context) - - def test_add_rule_with_cross_policy_table(self): - test_rule = { - "rule": "p(x) :- classification:q(x)", - "name": "test-rule-cross", - "comment": "test-comment" - } - test_rule_id, obj = self.rule_model.add_item(test_rule, {}, - context=self.context) - test_rule['id'] = test_rule_id - ret = self.rule_model.get_item(test_rule_id, {}, - context=self.context) - self.assertEqual(test_rule, ret) - - def test_get_items(self): - ret = self.rule_model.get_items({}, context=self.context) - self.assertTrue(all(p in ret['results'] - for p in [self.rule1, self.rule2])) - - def test_get_item(self): - expected_ret = self.rule1 - ret = self.rule_model.get_item(self.rule1["id"], {}, - context=self.context) - self.assertEqual(expected_ret, ret) - - def test_get_invalid_item(self): - expected_ret = None - ret = self.rule_model.get_item('invalid-id', {}, context=self.context) - self.assertEqual(expected_ret, ret) - - def test_delete_item(self): - expected_ret = self.rule1 - - ret = self.rule_model.delete_item(self.rule1['id'], {}, - context=self.context) - self.assertEqual(expected_ret, ret) - - expected_ret = None - ret = self.rule_model.get_item(self.rule1['id'], {}, - context=self.context) - self.assertEqual(expected_ret, ret) - - def test_rule_api_model_errors(self): - """Test syntax errors. - - Test that syntax errors thrown by the policy runtime - are returned properly to the user so they can see the - error messages. 
- """ - # lexer error - with self.assertRaisesRegex( - webservice.DataModelException, - "Lex failure"): - self.rule_model.add_item({'rule': 'p#'}, {}, - context=self.context) - - # parser error - with self.assertRaisesRegex( - webservice.DataModelException, - "Parse failure"): - self.rule_model.add_item({'rule': 'p('}, {}, - context=self.context) - - # single-rule error: safety in the head - with self.assertRaisesRegex( - webservice.DataModelException, - "Variable x found in head but not in body"): - # TODO(ramineni):check for action - self.context = {'policy_id': 'classification'} - self.rule_model.add_item({'rule': 'p(x,y) :- q(y)'}, {}, - context=self.context) - - # multi-rule error: recursion through negation - self.rule_model.add_item({'rule': 'p(x) :- q(x), not r(x)'}, {}, - context=self.context) - with self.assertRaisesRegex( - webservice.DataModelException, - "Rules are recursive"): - self.rule_model.add_item({'rule': 'r(x) :- q(x), not p(x)'}, {}, - context=self.context) - - self.rule_model.add_item({'rule': 'p1(x) :- q1(x)'}, {}, - context=self.context) - # duplicate rules - with self.assertRaisesRegex( - webservice.DataModelException, - "Rule already exists"): - self.rule_model.add_item({'rule': 'p1(x) :- q1(x)'}, {}, - context=self.context) diff --git a/congress/tests/api/test_schema_model.py b/congress/tests/api/test_schema_model.py deleted file mode 100644 index 4fecf959..00000000 --- a/congress/tests/api/test_schema_model.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.api import api_utils -from congress.api import webservice -from congress.tests.api import base as api_base -from congress.tests import base - - -class TestSchemaModel(base.TestCase): - def setUp(self): - super(TestSchemaModel, self).setUp() - services = api_base.setup_config() - self.schema_model = services['api']['api-schema'] - self.data = services['data'] - - def test_get_item_all_table(self): - context = {'ds_id': self.data.service_id} - schema = self.data.get_schema() - fake_tables = {'tables': - [api_utils.create_table_dict( - table_, schema) for table_ in schema]} - tables = self.schema_model.get_item(None, {}, context=context) - self.assertEqual(fake_tables, tables) - - def test_get_item_table(self): - context = {'ds_id': self.data.service_id, 'table_id': 'fake_table'} - fake_schema = self.data.get_schema() - fake_table = api_utils.create_table_dict( - "fake_table", fake_schema) - table = self.schema_model.get_item(None, {}, context=context) - self.assertEqual(fake_table, table) - - def test_get_invalid_datasource_table(self): - context = {'ds_id': self.data.service_id, 'table_id': 'invalid_table'} - try: - self.schema_model.get_item(None, {}, context=context) - except webservice.DataModelException as e: - self.assertEqual(404, e.error_code) - else: - self.fail("DataModelException should have been raised") - - def test_get_invalid_datasource(self): - context = {'ds_id': 'invalid'} - try: - self.schema_model.get_item(None, {}, context=context) - except webservice.DataModelException as e: - self.assertEqual(404, e.error_code) - else: - self.fail("DataModelException should have been raised") diff --git a/congress/tests/api/test_status_model.py b/congress/tests/api/test_status_model.py deleted file mode 100644 index f912ebb3..00000000 --- a/congress/tests/api/test_status_model.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2016 NTT -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
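# Illustrative helper, not original test code: the key sets that the status
# tests below assert, gathered in one place. Only the keys are fixed by the
# assertions; the values vary per run.
def _expected_status_keys(kind):
    return {
        'datasource': {'last_updated', 'subscriptions', 'last_error',
                       'subscribers', 'initialized', 'number_of_updates'},
        'policy': {'name', 'id'},
        'rule': {'name', 'id', 'comment', 'original_str'},
    }[kind]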
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_utils import uuidutils - -from congress.api import webservice -from congress.tests.api import base as api_base -from congress.tests import base - - -class TestStatusModel(base.SqlTestCase): - def setUp(self): - super(TestStatusModel, self).setUp() - services = api_base.setup_config() - self.policy_model = services['api']['api-policy'] - self.rule_model = services['api']['api-rule'] - self.status_model = services['api']['api-status'] - self.node = services['node'] - self.datasource = services['data'] - - def test_get_datasource_status(self): - context = {'ds_id': self.datasource.service_id} - status = self.status_model.get_item(None, {}, context=context) - expected_status_keys = ['last_updated', 'subscriptions', - 'last_error', 'subscribers', - 'initialized', 'number_of_updates'] - self.assertEqual(set(expected_status_keys), set(status.keys())) - - def test_get_invalid_datasource_status(self): - context = {'ds_id': 'invalid_id'} - self.assertRaises(webservice.DataModelException, - self.status_model.get_item, None, {}, - context=context) - - def test_policy_id_status(self): - result = self.policy_model.add_item({'name': 'test_policy'}, {}) - - context = {'policy_id': result[0]} - status = self.status_model.get_item(None, {}, context=context) - expected_status = {'name': 'test_policy', - 'id': result[0]} - self.assertEqual(expected_status, status) - - # test with policy_name - context = {'policy_id': result[1]['name']} - status = self.status_model.get_item(None, {}, context=context) - self.assertEqual(expected_status, status) - - def test_invalid_policy_id_status(self): - invalid_id = uuidutils.generate_uuid() - context = {'policy_id': invalid_id} - self.assertRaises(webservice.DataModelException, - self.status_model.get_item, None, {}, - context=context) - - def test_rule_status_policy_id(self): - result = self.policy_model.add_item({'name': 'test_policy'}, {}) - policy_id = result[0] - policy_name = result[1]['name'] - - result = self.rule_model.add_item({'name': 'test_rule', - 'rule': 'p(x) :- q(x)'}, {}, - context={'policy_id': 'test_policy'}) - - context = {'policy_id': policy_id, 'rule_id': result[0]} - status = self.status_model.get_item(None, {}, context=context) - expected_status = {'name': 'test_rule', - 'id': result[0], - 'comment': '', - 'original_str': 'p(x) :- q(x)'} - self.assertEqual(expected_status, status) - - # test with policy_name - context = {'policy_id': policy_name, 'rule_id': result[0]} - status = self.status_model.get_item(None, {}, context=context) - expected_status = {'name': 'test_rule', - 'id': result[0], - 'comment': '', - 'original_str': 'p(x) :- q(x)'} - self.assertEqual(expected_status, status) - - def test_rule_status_invalid_rule_policy_id(self): - result = self.policy_model.add_item({'name': 'test_policy'}, {}) - policy_id = result[0] - invalid_rule = uuidutils.generate_uuid() - - context = {'policy_id': policy_id, 'rule_id': invalid_rule} - self.assertRaises(webservice.DataModelException, - self.status_model.get_item, None, {}, - context=context) - - def test_rule_status_invalid_policy_id(self): - invalid_policy = uuidutils.generate_uuid() - invalid_rule = uuidutils.generate_uuid() - - context = {'policy_id': invalid_policy, 'rule_id': invalid_rule} - self.assertRaises(webservice.DataModelException, - self.status_model.get_item, None, {}, - context=context) diff --git a/congress/tests/api/test_table_model.py 
b/congress/tests/api/test_table_model.py deleted file mode 100644 index f4e7045d..00000000 --- a/congress/tests/api/test_table_model.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.api import webservice -from congress.tests.api import base as api_base -from congress.tests import base - - -class TestTableModel(base.SqlTestCase): - def setUp(self): - super(TestTableModel, self).setUp() - services = api_base.setup_config() - self.policy_model = services['api']['api-policy'] - self.table_model = services['api']['api-table'] - self.api_rule = services['api']['api-rule'] - self.node = services['node'] - self.engine = services['engine'] - self.data = services['data'] - # create test policy - self._create_test_policy() - - def tearDown(self): - self.node.stop() - super(TestTableModel, self).tearDown() - - def _create_test_policy(self): - # create policy - self.policy_model.add_item({"name": 'test_policy'}, {}) - - def test_get_datasource_table_with_id(self): - context = {'ds_id': self.data.service_id, - 'table_id': 'fake_table'} - expected_ret = {'id': 'fake_table'} - ret = self.table_model.get_item('fake_table', {}, context) - self.assertEqual(expected_ret, ret) - - def test_get_datasource_table_with_name(self): - context = {'ds_id': self.data.service_id, - 'table_id': 'fake_table'} - expected_ret = {'id': 'fake_table'} - ret = self.table_model.get_item('fake_table', {}, context) - self.assertEqual(expected_ret, ret) - - def test_get_invalid_datasource(self): - context = {'ds_id': 'invalid-id', - 'table_id': 'fake_table'} - self.assertRaises(webservice.DataModelException, - self.table_model.get_item, 'fake_table', - {}, context) - - def test_get_invalid_datasource_table(self): - context = {'ds_id': self.data.service_id, - 'table_id': 'invalid-table'} - expected_ret = None - ret = self.table_model.get_item('invalid-table', {}, context) - self.assertEqual(expected_ret, ret) - - def test_get_policy_table(self): - context = {'policy_id': 'test_policy', - 'table_id': 'p'} - expected_ret = {'id': 'p'} - - self.api_rule.add_item({'rule': 'p(x) :- q(x)'}, {}, context=context) - self.api_rule.add_item({'rule': 'q(x) :- r(x)'}, {}, context=context) - - ret = self.table_model.get_item('p', {}, context) - self.assertEqual(expected_ret, ret) - - def test_get_invalid_policy(self): - context = {'policy_id': 'test_policy', - 'table_id': 'fake_table'} - invalid_context = {'policy_id': 'invalid-policy', - 'table_id': 'fake_table'} - expected_ret = None - - self.api_rule.add_item({'rule': 'p(x) :- q(x)'}, {}, context=context) - self.api_rule.add_item({'rule': 'q(x) :- r(x)'}, {}, context=context) - - ret = self.table_model.get_item('test_policy', - {}, invalid_context) - self.assertEqual(expected_ret, ret) - - def test_get_invalid_policy_table(self): - context = {'policy_id': 'test_policy', - 'table_id': 
'fake_table'} - invalid_context = {'policy_id': 'test_policy', - 'table_id': 'invalid_name'} - expected_ret = None - - self.api_rule.add_item({'rule': 'p(x) :- q(x)'}, {}, context=context) - self.api_rule.add_item({'rule': 'q(x) :- r(x)'}, {}, context=context) - - ret = self.table_model.get_item('test_policy', {}, - invalid_context) - self.assertEqual(expected_ret, ret) - - def test_get_items_datasource_table(self): - context = {'ds_id': self.data.service_id} - expected_ret = {'results': [{'id': 'fake_table'}]} - - ret = self.table_model.get_items({}, context) - self.assertEqual(expected_ret, ret) - - def test_get_items_invalid_datasource(self): - context = {'ds_id': 'invalid-id', - 'table_id': 'fake_table'} - - self.assertRaises(webservice.DataModelException, - self.table_model.get_items, {}, context) - - def _get_id_list_from_return(self, result): - return [r['id'] for r in result['results']] - - def test_get_items_policy_table(self): - context = {'policy_id': 'test_policy'} - expected_ret = {'results': [{'id': x} for x in ['q', 'p', 'r']]} - - self.api_rule.add_item({'rule': 'p(x) :- q(x)'}, {}, context=context) - self.api_rule.add_item({'rule': 'q(x) :- r(x)'}, {}, context=context) - - ret = self.table_model.get_items({}, context) - self.assertEqual(set(self._get_id_list_from_return(expected_ret)), - set(self._get_id_list_from_return(ret))) - - def test_get_items_invalid_policy(self): - context = {'policy_id': 'test_policy'} - invalid_context = {'policy_id': 'invalid_policy'} - expected_ret = None - - self.api_rule.add_item({'rule': 'p(x) :- q(x)'}, {}, context=context) - self.api_rule.add_item({'rule': 'q(x) :- r(x)'}, {}, context=context) - - ret = self.table_model.get_items({}, invalid_context) - self.assertEqual(expected_ret, ret) diff --git a/congress/tests/api/test_versions.py b/congress/tests/api/test_versions.py deleted file mode 100644 index e8895ae9..00000000 --- a/congress/tests/api/test_versions.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) 2015 Huawei, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
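# A minimal sketch using the same calls as the tests below (fake_wsgi is the
# test helper they import): version discovery is plain JSON over the WSGI
# app, so a request to '/' lists the available API versions.
import json
import webob
from congress.tests import fake_wsgi

def _list_versions():
    req = webob.Request.blank('/')
    req.accept = "application/json"
    res = req.get_response(fake_wsgi.wsgi_app())
    # e.g. {"versions": [{"id": "v1", ...}]}
    return json.loads(res.body.decode('utf-8'))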
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -from six.moves import http_client -import webob - -from congress.tests import base -from congress.tests import fake_wsgi - - -class TestVersions(base.TestCase): - - def setUp(self): - super(TestVersions, self).setUp() - - def test_versions_list(self): - req = webob.Request.blank('/') - req.accept = "application/json" - res = req.get_response(fake_wsgi.wsgi_app()) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual("application/json", res.content_type) - versions = json.loads(res.body.decode('utf-8')) - expected = { - "versions": [{ - "status": "CURRENT", - "updated": "2013-08-12T17:42:13Z", - "id": "v1", - "links": [{ - "href": "http://localhost/v1/", - "rel": "self" - }] - }] - } - self.assertEqual(expected, versions) - - def test_versions_choices(self): - req = webob.Request.blank('/fake') - req.accept = "application/json" - res = req.get_response(fake_wsgi.wsgi_app()) - self.assertEqual(http_client.MULTIPLE_CHOICES, res.status_int) - self.assertEqual("application/json", res.content_type) - versions = json.loads(res.body.decode('utf-8')) - expected = { - "choices": [{ - "status": "CURRENT", - "updated": "2013-08-12T17:42:13Z", - "id": "v1", - "links": [{ - "href": "http://localhost/v1/fake", - "rel": "self" - }] - }] - } - self.assertEqual(expected, versions) - - def test_version_v1_show(self): - req = webob.Request.blank('/v1') - req.accept = "application/json" - res = req.get_response(fake_wsgi.wsgi_app()) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual("application/json", res.content_type) - versions = json.loads(res.body.decode('utf-8')) - expected = { - "version": { - "status": "CURRENT", - "updated": "2013-08-12T17:42:13Z", - "id": "v1", - "links": [{ - "href": "http://localhost/v1/", - "rel": "self" - }, { - "rel": "describedby", - "type": "text/html", - "href": "http://congress.readthedocs.org/", - }] - } - } - self.assertEqual(expected, versions) - - def test_version_v1_multiple_path(self): - req = webob.Request.blank('/v1') - req.accept = "application/json" - res = req.get_response(fake_wsgi.wsgi_app()) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual("application/json", res.content_type) - - req_ = webob.Request.blank('/v1/') - req_.accept = "application/json" - res_ = req_.get_response(fake_wsgi.wsgi_app()) - self.assertEqual(http_client.OK, res_.status_int) - self.assertEqual("application/json", res_.content_type) - - self.assertEqual(json.loads(res.body.decode('utf-8')), - json.loads(res_.body.decode('utf-8'))) - - def test_version_v1_not_found(self): - req = webob.Request.blank('/v1/fake') - req.accept = "application/json" - res = req.get_response(fake_wsgi.wsgi_app()) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual("application/json", res.content_type) diff --git a/congress/tests/api/test_webservice.py b/congress/tests/api/test_webservice.py deleted file mode 100644 index a9a4e42a..00000000 --- a/congress/tests/api/test_webservice.py +++ /dev/null @@ -1,333 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -try: - # For Python 3 - import http.client as httplib -except ImportError: - import httplib -import json - -import mock -import webob - -from oslo_utils import uuidutils - -from congress.api import webservice -from congress.tests import base - - -class TestSimpleDataModel(base.TestCase): - # if random ID matches, go to Vegas or file a uuid library bug - UNADDED_ID = uuidutils.generate_uuid() - CONTEXTS = [None, {'a': 'ctxt1'}, {'b': 'ctxt2', 'c': 'ctxt3'}] - - def test_get_items(self): - """Test API DataModel get_items functionality.""" - model = webservice.SimpleDataModel("test") - for context in self.CONTEXTS: - ret = model.get_items({}, context=context) - self.assertEqual( - ret.keys(), {'results': None}.keys(), - "get_items returns dict with single 'results' key") - self.assertEqual( - tuple(ret['results']), tuple(), - "get_items of empty model returns empty results list") - items = [{"i1": "%s/foo" % context}, {"i2": "%s/bar" % context}] - for item in items: - model.add_item(item, {}, context=context) - ret2 = model.get_items({}, context=context) - self.assertEqual(sorted(list(ret2['results']), - key=(lambda x: str(type(x)) + repr(x))), - sorted(items, - key=(lambda x: str(type(x)) + repr(x))), - "get_items() returns all items added to model") - - def test_add_item(self): - """Test API DataModel add_item functionality.""" - model = webservice.SimpleDataModel("test") - assigned_ids = set() - for context in self.CONTEXTS: - items = ["%s/foo" % context, "%s/bar" % context] - ret = model.add_item(items[0], {}, context=context) - self.assertIsInstance(ret, tuple, "add_item returns a tuple") - self.assertEqual(len(ret), 2, - "add_item returns tuple of length 2") - self.assertNotIn(ret[0], assigned_ids, - "add_item assigned unique ID") - assigned_ids.add(ret[0]) - self.assertEqual(ret[1], items[0], "add_item returned added item") - - ret = model.add_item(items[1], {}, 'myid', context=context) - self.assertEqual(ret[0], 'myid', - "add_item returned provided ID") - self.assertEqual(ret[1], items[1], "add_item returned added item") - - def test_get_item(self): - """Test API DataModel get_item functionality.""" - model = webservice.SimpleDataModel("test") - for context in self.CONTEXTS: - items = ["%s/foo" % context, "%s/bar" % context] - id_ = model.add_item(items[0], {}, context=context)[0] - ret = model.get_item(id_, {}, context=context) - self.assertEqual(ret, items[0], - "get_item(assigned_id) returns proper item") - - id_ = 'myid' - ret = model.get_item(id_, {}, context=context) - self.assertIsNone(ret, - "get_item(unadded_provided_id) returns None") - model.add_item(items[1], {}, id_, context=context) - ret = model.get_item(id_, {}, context=context) - self.assertEqual(ret, items[1], - "get_item(provided_id) returned added item") - - ret = model.get_item(self.UNADDED_ID, {}, context=context) - self.assertIsNone(ret, "get_item(unadded_id) returns None") - - def test_update_item(self): - """Test API DataModel update_item functionality.""" - model = 
webservice.SimpleDataModel("test") - for context in self.CONTEXTS: - items = ["%s/foo%d" % (context, i) for i in [0, 1, 2]] - id_, item = model.add_item(items[0], {}, context=context) - self.assertNotEqual(item, items[1], "item not yet updated") - ret = model.update_item(id_, items[1], {}, context=context) - self.assertEqual(ret, items[1], - "update_item returned updated item") - ret = model.get_item(id_, {}, context=context) - self.assertEqual(ret, items[1], - "get_item(updated_item_id) returns updated item") - - self.assertNotEqual(item, items[2], "item not yet reupdated") - ret = model.update_item(id_, items[2], {}, context=context) - self.assertEqual(ret, items[2], - "update_item returned reupdated item") - ret = model.get_item(id_, {}, context=context) - self.assertEqual( - ret, items[2], - "get_item(reupdated_item_id) returns reupdated item") - - self.assertRaises(KeyError, model.update_item, - self.UNADDED_ID, 'blah', {}, context) - - def test_delete_item(self): - """Test API DataModel delete_item functionality.""" - model = webservice.SimpleDataModel("test") - - for context in self.CONTEXTS: - item_ids = [] - items = ["%s/foo%d" % (context, i) for i in [0, 1, 2]] - for i in range(len(items)): - id_, item = model.add_item(items[i], {}, context=context) - item_ids.append(id_) - - for i in range(len(items)): - ret = model.delete_item(item_ids[i], {}, context=context) - self.assertEqual(ret, items[i], - "delete_item returned deleted item") - self.assertRaises(KeyError, model.delete_item, item_ids[i], - {}, context) - self.assertEqual( - len(model.get_items({}, context=context)['results']), 0, - "all items deleted") - - self.assertRaises(KeyError, model.delete_item, self.UNADDED_ID, - {}, context) - - -class TestAbstractHandler(base.TestCase): - def test_parse_json_body(self): - abstract_handler = webservice.AbstractApiHandler(r'/') - request = mock.MagicMock() - data = {"some": ["simple", "json"]} - serialized_data = json.dumps({"some": ["simple", "json"]}) - invalid_json = 'this is not valid JSON' - - # correctly assume application/json when no content-type header - request = webob.Request.blank('/') - self.assertEqual(request.content_type, '') - request.body = serialized_data.encode('utf-8') - ret = abstract_handler._parse_json_body(request) - self.assertEqual(ret, data) - - # correctly validate valid content-type headers - for ct in ['application/json', - 'Application/jSoN', - 'application/json; charset=utf-8', - 'apPLICAtion/JSOn; charset=UtF-8', - 'apPLICAtion/JSOn; CHARset=utf-8; IGnored=c', - 'application/json; ignored_param=a; ignored2=b']: - request = webob.Request.blank('/', content_type=ct) - request.body = serialized_data.encode('utf-8') - try: - ret = abstract_handler._parse_json_body(request) - except Exception: - self.fail("accepts content type '%s'" % ct) - self.assertEqual(ret, data, "Accepts content type '%s'" % ct) - - # correctly fail on invalid content-type headers - request = webob.Request.blank('/', content_type='text/json') - request.body = serialized_data.encode('utf-8') - self.assertRaises(webservice.DataModelException, - abstract_handler._parse_json_body, request) - - # enforce unspecified or utf-8 charset - # valid charset checked above, just need to check invalid - request = webob.Request.blank( - '/', content_type='application/json; charset=utf-16') - request.body = serialized_data.encode('utf-8') - self.assertRaises(webservice.DataModelException, - abstract_handler._parse_json_body, request) - - # raise DataModelException on non-JSON body - request = 
webob.Request.blank( - '/', content_type='application/json; charset=utf-8') - request.body = invalid_json.encode('utf-8') - self.assertRaises(webservice.DataModelException, - abstract_handler._parse_json_body, request) - - -class TestElementHandler(base.TestCase): - def test_read(self): - # TODO(pballand): write tests - pass - - def test_action(self): - element_handler = webservice.ElementHandler(r'/', '') - element_handler.model = webservice.SimpleDataModel("test") - request = mock.MagicMock() - request.path = "/" - - response = element_handler.action(request) - self.assertEqual(400, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertEqual(json.loads( - response.body.decode('utf-8'))['error']['message'], - "Missing required action parameter.") - - request.params = mock.MagicMock() - request.params.getall.return_value = ['do_test'] - request.params["action"] = "do_test" - request.path = "/" - response = element_handler.action(request) - self.assertEqual(501, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertEqual(json.loads( - response.body.decode('utf-8'))['error']['message'], - "Method not supported") - - # test action impl returning python primitives - simple_data = [1, 2] - element_handler.model.do_test_action = lambda *a, **kwa: simple_data - response = element_handler.action(request) - self.assertEqual(200, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertEqual(json.loads(response.body.decode('utf-8')), - simple_data) - - # test action impl returning custom webob response - custom_data = webob.Response(body="test".encode('utf-8'), status=599, - content_type="custom/test") - element_handler.model.do_test_action = lambda *a, **kwa: custom_data - response = element_handler.action(request) - self.assertEqual(599, response.status_code) - self.assertEqual('custom/test', response.content_type) - self.assertEqual(response.body.decode('utf-8'), "test") - - def test_replace(self): - # TODO(pballand): write tests - pass - - def test_update(self): - # TODO(pballand): write tests - pass - - def test_delete(self): - # TODO(pballand): write tests - pass - - -class TestCollectionHandler(base.TestCase): - def test_get_action_type(self): - collection_handler = webservice.CollectionHandler(r'/', '') - self.assertEqual('get', - collection_handler._get_action_type("GET")) - self.assertEqual('create', - collection_handler._get_action_type("POST")) - self.assertEqual('delete', - collection_handler._get_action_type("DELETE")) - self.assertEqual('update', - collection_handler._get_action_type("PATCH")) - self.assertEqual('update', - collection_handler._get_action_type("PUT")) - self.assertRaises(TypeError, collection_handler._get_action_type, - 'Wah!') - - def test_create_member(self): - collection_handler = webservice.CollectionHandler(r'/', '') - collection_handler.model = webservice.SimpleDataModel("test") - request = webob.Request.blank('/') - request.content_type = 'application/json' - request.body = '{"key": "value"}'.encode('utf-8') - response = collection_handler.create_member(request, id_='123') - self.assertEqual('application/json', response.content_type) - self.assertEqual( - str(int(httplib.CREATED)) + " Created", response.status) - self.assertEqual("%s/%s" % (request.path, '123'), response.location) - actual_response = json.loads(response.body.decode('utf-8')) - actual_id = actual_response.get("id") - actual_value = actual_response.get("key") - 
self.assertEqual('123', actual_id) - self.assertEqual('value', actual_value) - - def test_list_members(self): - collection_handler = webservice.CollectionHandler(r'/', '') - collection_handler.model = webservice.SimpleDataModel("test") - request = mock.MagicMock() - request.body = '{"key": "value"}' - request.params = mock.MagicMock() - request.path = "/" - response = collection_handler.list_members(request) - items = collection_handler.model.get_items( - request.params, - context=collection_handler._get_context(request)) - - expected_body = ("%s\n" % json.dumps(items, indent=2)).encode('utf-8') - self.assertEqual('application/json', response.content_type) - - self.assertEqual(expected_body, response.body) - self.assertEqual('application/json', response.content_type) - self.assertEqual(str(int(httplib.OK)) + " OK", response.status) - - def test_update_members(self): - collection_handler = webservice.CollectionHandler(r'/', '') - collection_handler.model = webservice.SimpleDataModel('test') - request = webob.Request.blank('/') - request.content_type = 'application/json' - request.body = '{"key1": "value1", "key2": "value2"}'.encode('utf-8') - response = collection_handler.update_members(request) - - self.assertEqual('application/json', response.content_type) - self.assertEqual(str(int(httplib.OK)) + " OK", response.status) - expected_items = { - "key1": "value1", - "key2": "value2", - } - self.assertEqual(expected_items, collection_handler.model.items) diff --git a/congress/tests/base.py b/congress/tests/base.py deleted file mode 100644 index e3fa6651..00000000 --- a/congress/tests/base.py +++ /dev/null @@ -1,128 +0,0 @@ - -# Copyright 2010-2011 OpenStack Foundation -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
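# Usage sketch (a hypothetical consumer module, not part of this file): test
# classes derive from the bases defined below; SqlTestCase creates the model
# tables once per process and clears every table after each test, so each
# test starts from an empty database.
#
#     from congress.tests import base
#
#     class MyModelTest(base.SqlTestCase):
#         def test_round_trip(self):
#             self.assertEqual(1, 1)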
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -import contextlib -import os - -import fixtures -import mock -from mox3 import mox -from oslo_config import cfg -import six -import testtools - -from congress.common import config -from congress.db import api as db_api -# Import all data models -from congress.db.migration.models import head # noqa -from congress.db import model_base -from congress.tests import helper -from congress.tests import policy_fixture - -_TRUE_VALUES = ('true', '1', 'yes') - - -if six.PY3: - @contextlib.contextmanager - def nested(*contexts): - with contextlib.ExitStack() as stack: - yield [stack.enter_context(c) for c in contexts] -else: - nested = contextlib.nested - - -class TestCase(testtools.TestCase): - - """Test case base class for all unit tests.""" - - def setUp(self): - """Run before each test method to initialize test environment.""" - - super(TestCase, self).setUp() - - self.mox = mox.Mox() - self.setup_config() - self.addCleanup(cfg.CONF.reset) - config.setup_logging() - - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. - test_timeout = 0 - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - - self.useFixture(fixtures.NestedTempfile()) - self.useFixture(fixtures.TempHomeDir()) - self.addCleanup(mock.patch.stopall) - - if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) - - self.log_fixture = self.useFixture(fixtures.FakeLogger()) - self.policy = self.useFixture(policy_fixture.PolicyFixture()) - - def setup_config(self): - """Tests that need a non-default config can override this method.""" - config.init([], default_config_files=[]) - - def tearDown(self): - super(TestCase, self).tearDown() - self.mox.UnsetStubs() - self.mox = None - - -class SqlTestCase(TestCase): - - # flag to indicate that the models have been loaded - _TABLES_ESTABLISHED = False - - def setUp(self): - super(SqlTestCase, self).setUp() - # Register all data models - engine = db_api.get_engine() - if not SqlTestCase._TABLES_ESTABLISHED: - model_base.BASE.metadata.create_all(engine) - SqlTestCase._TABLES_ESTABLISHED = True - - def clear_tables(): - with engine.begin() as conn: - for table in reversed( - model_base.BASE.metadata.sorted_tables): - conn.execute(table.delete()) - - self.addCleanup(clear_tables) - - def setup_config(self): - """Tests that need a non-default config can override this method.""" - args = ['--config-file', helper.etcdir('congress.conf.test')] - config.init(args) - - -class Benchmark(SqlTestCase): - def setUp(self): - if os.getenv("TEST_BENCHMARK") != "true": - self.skipTest("Skipping slow benchmark tests") - super(Benchmark, self).setUp() diff --git a/congress/tests/common/__init__.py b/congress/tests/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/tests/common/test_policy.py b/congress/tests/common/test_policy.py deleted file mode 100644 index 02797c1f..00000000 --- a/congress/tests/common/test_policy.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright 2011 Piston Cloud Computing, Inc. 
-# All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Test of Policy Engine For Congress.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import os.path - -import mock -from oslo_config import cfg -from oslo_policy import policy as oslo_policy - -from congress.common import config -from congress.common import policy -from congress import context -from congress import exception -from congress.tests import base -from congress.tests import policy_fixture -from congress import utils - - -CONF = cfg.CONF - - -class PolicyFileTestCase(base.TestCase): - def setUp(self): - super(PolicyFileTestCase, self).setUp() - config.setup_logging() - self.context = context.RequestContext('fake', 'fake') - self.target = {} - - def test_modified_policy_reloads(self): - with utils.tempdir() as tmpdir: - tmpfilename = os.path.join(tmpdir, 'policy') - - CONF.set_override('policy_file', tmpfilename, 'oslo_policy') - - # NOTE(uni): context construction invokes policy check to determine - # is_admin or not. As a side-effect, policy reset is needed here - # to flush existing policy cache. - policy.reset() - - action = "example:test" - with open(tmpfilename, "w") as policyfile: - policyfile.write('{"example:test": ""}') - policy.enforce(self.context, action, self.target) - with open(tmpfilename, "w") as policyfile: - policyfile.write('{"example:test": "!"}') - policy._ENFORCER.load_rules(True) - self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, action, self.target) - - -class PolicyTestCase(base.TestCase): - def setUp(self): - super(PolicyTestCase, self).setUp() - rules = oslo_policy.Rules.from_dict({ - "true": '@', - "example:allowed": '@', - "example:denied": "!", - "example:get_http": "http://www.example.com", - "example:my_file": "role:compute_admin or " - "project_id:%(project_id)s", - "example:early_and_fail": "! 
and @", - "example:early_or_success": "@ or !", - "example:lowercase_admin": "role:admin or role:sysadmin", - "example:uppercase_admin": "role:ADMIN or role:sysadmin", - }) - policy.reset() - policy.init() - policy.set_rules(rules) - self.context = context.RequestContext('fake', 'fake', roles=['member']) - self.target = {} - - def test_enforce_nonexistent_action_throws(self): - action = "example:noexist" - self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, action, self.target) - - def test_enforce_bad_action_throws(self): - action = "example:denied" - self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, action, self.target) - - def test_enforce_bad_action_noraise(self): - action = "example:denied" - result = policy.enforce(self.context, action, self.target, False) - self.assertFalse(result) - - def test_enforce_good_action(self): - action = "example:allowed" - result = policy.enforce(self.context, action, self.target) - self.assertTrue(result) - - @mock.patch.object(oslo_policy._checks.HttpCheck, '__call__', - return_value=True) - def test_enforce_http_true(self, mock_httpcheck): - action = "example:get_http" - target = {} - result = policy.enforce(self.context, action, target) - self.assertTrue(result) - - @mock.patch.object(oslo_policy._checks.HttpCheck, '__call__', - return_value=False) - def test_enforce_http_false(self, mock_httpcheck): - action = "example:get_http" - target = {} - self.assertRaises(exception.PolicyNotAuthorized, - policy.enforce, self.context, - action, target) - - def test_templatized_enforcement(self): - target_mine = {'project_id': 'fake'} - target_not_mine = {'project_id': 'another'} - action = "example:my_file" - policy.enforce(self.context, action, target_mine) - self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, action, target_not_mine) - - def test_early_AND_enforcement(self): - action = "example:early_and_fail" - self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, action, self.target) - - def test_early_OR_enforcement(self): - action = "example:early_or_success" - policy.enforce(self.context, action, self.target) - - def test_ignore_case_role_check(self): - lowercase_action = "example:lowercase_admin" - uppercase_action = "example:uppercase_admin" - # NOTE(dprince) we mix case in the Admin role here to ensure - # case is ignored - admin_context = context.RequestContext('admin', - 'fake', - roles=['AdMiN']) - policy.enforce(admin_context, lowercase_action, self.target) - policy.enforce(admin_context, uppercase_action, self.target) - - -class DefaultPolicyTestCase(base.TestCase): - - def setUp(self): - super(DefaultPolicyTestCase, self).setUp() - - self.rules = oslo_policy.Rules.from_dict({ - "default": '', - "example:exist": "!", - }) - - self._set_rules('default') - - self.context = context.RequestContext('fake', 'fake') - - def _set_rules(self, default_rule): - policy.reset() - policy.init(rules=self.rules, default_rule=default_rule, - use_conf=False) - - def test_policy_called(self): - self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, "example:exist", {}) - - def test_not_found_policy_calls_default(self): - policy.enforce(self.context, "example:noexist", {}) - - def test_default_not_found(self): - self._set_rules("default_noexist") - self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, "example:noexist", {}) - - -class IsAdminCheckTestCase(base.TestCase): - def setUp(self): - 
super(IsAdminCheckTestCase, self).setUp() - policy.init() - - def test_init_true(self): - check = policy.IsAdminCheck('is_admin', 'True') - - self.assertEqual(check.kind, 'is_admin') - self.assertEqual(check.match, 'True') - self.assertTrue(check.expected) - - def test_init_false(self): - check = policy.IsAdminCheck('is_admin', 'nottrue') - - self.assertEqual(check.kind, 'is_admin') - self.assertEqual(check.match, 'False') - self.assertFalse(check.expected) - - def test_call_true(self): - check = policy.IsAdminCheck('is_admin', 'True') - - self.assertTrue(check('target', dict(is_admin=True), policy._ENFORCER)) - self.assertFalse(check('target', dict(is_admin=False), - policy._ENFORCER)) - - def test_call_false(self): - check = policy.IsAdminCheck('is_admin', 'False') - - self.assertFalse(check('target', dict(is_admin=True), - policy._ENFORCER)) - self.assertTrue(check('target', dict(is_admin=False), - policy._ENFORCER)) - - -class AdminRolePolicyTestCase(base.TestCase): - def setUp(self): - super(AdminRolePolicyTestCase, self).setUp() - config.setup_logging() - self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture()) - self.context = context.RequestContext('fake', 'fake', roles=['member']) - self.actions = policy.get_rules().keys() - self.target = {} - - def test_enforce_admin_actions_with_nonadmin_context_throws(self): - """test for non-admin context - - Check if non-admin context passed to admin actions throws - Policy not authorized exception - """ - for action in self.actions: - self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, action, self.target) diff --git a/congress/tests/datalog/__init__.py b/congress/tests/datalog/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/tests/datalog/test_builtin.py b/congress/tests/datalog/test_builtin.py deleted file mode 100644 index c7b3b948..00000000 --- a/congress/tests/datalog/test_builtin.py +++ /dev/null @@ -1,1562 +0,0 @@ -#! /usr/bin/python -# -# Copyright (c) 2014 IBM, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
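# Condensed registry sketch, mirroring the add_map/builtin/code calls that
# TestBuiltins makes below ('div' matches the append_builtin fixture; the
# lambda stands in for the string form used there):
from congress.datalog import builtin as _builtin

_cbcmap = _builtin.CongressBuiltinCategoryMap(_builtin._builtin_map)
_cbcmap.add_map({'arithmetic': [{'func': 'div(x,y)', 'num_inputs': 2,
                                 'code': lambda x, y: x / y}]})
assert _cbcmap.builtin('div').code(6, 3) == 2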
-# -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -from oslo_log import log as logging - -from congress.datalog import base as datalog_base -from congress.datalog import builtin -from congress.datalog import compile -from congress import exception -from congress.policy_engines import agnostic -from congress.tests import base -from congress.tests import helper - -LOG = logging.getLogger(__name__) - -addmap = { - 'comparison': [ - {'func': 'f(x,y)', 'num_inputs': 2, - 'code': lambda x, y: x if x > y else y}], - 'newcategory': [ - {'func': 'g(x,y)', 'num_inputs': 2, 'code': lambda x, y: x + y}]} - - -append_builtin = {'arithmetic': [{'func': 'div(x,y)', - 'num_inputs': 2, - 'code': 'lambda x,y: x / y'}]} - - -class TestBuiltins(base.TestCase): - def setUp(self): - super(TestBuiltins, self).setUp() - self.cbcmap = builtin.CongressBuiltinCategoryMap( - builtin._builtin_map) - self.predl = self.cbcmap.builtin('lt') - - def test_add_and_delete_map(self): - cbcmap_before = self.cbcmap - self.cbcmap.add_map(append_builtin) - self.cbcmap.delete_map(append_builtin) - self.assertTrue(self.cbcmap.mapequal(cbcmap_before)) - - def test_add_map_only(self): - self.cbcmap.add_map(append_builtin) - predl = self.cbcmap.builtin('div') - self.assertIsNotNone(predl) - self.cbcmap.add_map(addmap) - predl = self.cbcmap.builtin('max') - self.assertIsNotNone(predl) - - def test_add_and_delete_builtin(self): - cbcmap_before = self.cbcmap - self.cbcmap.add_map(append_builtin) - self.cbcmap.delete_builtin('arithmetic', 'div', 2) - self.assertTrue(self.cbcmap.mapequal(cbcmap_before)) - - def test_string_pred_string(self): - predstring = str(self.predl) - self.assertNotEqual(predstring, 'ltc(x,y') - - def test_add_and_delete_to_category(self): - cbcmap_before = self.cbcmap - arglist = ['x', 'y', 'z'] - pred = builtin.CongressBuiltinPred('testfunc', arglist, 1, - lambda x: not x) - self.cbcmap.insert_to_category('arithmetic', pred) - self.cbcmap.delete_from_category('arithmetic', pred) - self.assertTrue(self.cbcmap.mapequal(cbcmap_before)) - - def test_all_checks(self): - predtotest = self.cbcmap.builtin('lt') - self.assertTrue(self.cbcmap.builtin_is_registered(predtotest)) - - def test_eval_builtin(self): - predl = self.cbcmap.builtin('plus') - result = predl.code(1, 2) - self.assertEqual(result, 3) - predl = self.cbcmap.builtin('gt') - result = predl.code(1, 2) - self.assertFalse(result) - - -# NOTE(thinrichs): this test will be removed once we remove bare builtins -class TestReorder(base.TestCase): - def check(self, input_string, correct_string, msg): - rule = compile.parse1(input_string) - actual = compile.reorder_for_safety(rule) - correct = compile.parse1(correct_string) - if correct != actual: - emsg = "Correct: " + str(correct) - emsg += "; Actual: " + str(actual) - self.fail(msg + " :: " + emsg) - - def check_err(self, input_string, unsafe_lit_strings, msg): - rule = compile.parse1(input_string) - try: - compile.reorder_for_safety(rule) - self.fail("Failed to raise exception for " + input_string) - except exception.PolicyException as e: - errmsg = str(e) - # parse then print to string so string rep same in err msg - unsafe_lits = [str(compile.parse1(x)) for x in unsafe_lit_strings] - missing_lits = [m for m in unsafe_lits - if m + " (vars" not in errmsg] - if len(missing_lits) > 0: - self.fail( - "Unsafe literals {} not reported in error: {}".format( - ";".join(missing_lits), errmsg)) - - def test_reorder_builtins(self): - self.check("p(x, z) :- 
q(x, y), plus(x, y, z)", - "p(x, z) :- q(x, y), plus(x, y, z)", - "No reorder") - - self.check("p(x, z) :- plus(x, y, z), q(x, y)", - "p(x, z) :- q(x, y), plus(x, y, z)", - "Basic reorder") - - self.check("p(x, z) :- q(x, y), r(w), plus(x, y, z), plus(z, w, y)", - "p(x, z) :- q(x, y), r(w), plus(x, y, z), plus(z, w, y)", - "Chaining: no reorder") - - self.check("p(x, z) :- q(x, y), plus(x, y, z), plus(z, w, y), r(w)", - "p(x, z) :- q(x, y), plus(x, y, z), r(w), plus(z, w, y)", - "Chaining: reorder") - - self.check("p(x) :- lt(t, v), plus(z, w, t), plus(z, u, v), " - " plus(x, y, z), q(y), r(x), s(u), t(w) ", - "p(x) :- q(y), r(x), plus(x, y, z), s(u), plus(z, u, v), " - " t(w), plus(z, w, t), lt(t, v)", - "Partial-order chaining") - - def test_unsafe_builtins(self): - # an output - self.check_err("p(x) :- q(x), plus(x, y, z)", - ["plus(x,y,z)"], - "Basic Unsafe input") - - self.check_err("p(x) :- q(x), r(z), plus(x, y, z)", - ["plus(x,y,z)"], - "Basic Unsafe input 2") - - self.check_err("p(x, z) :- plus(x, y, z), plus(z, y, x), " - " plus(x, z, y)", - ["plus(x, y, z)", "plus(z, y, x)", "plus(x, z, y)"], - "Unsafe with cycle") - - # no outputs - self.check_err("p(x) :- q(x), lt(x, y)", - ["lt(x,y)"], - "Basic Unsafe input, no outputs") - - self.check_err("p(x) :- q(y), lt(x, y)", - ["lt(x,y)"], - "Basic Unsafe input, no outputs 2") - - self.check_err("p(x, z) :- lt(x, y), lt(y, x)", - ["lt(x,y)", "lt(y, x)"], - "Unsafe with cycle, no outputs") - - # chaining - self.check_err("p(x) :- q(x, y), plus(x, y, z), plus(z, 3, w), " - " plus(w, t, u)", - ["plus(w, t, u)"], - "Unsafe chaining") - - self.check_err("p(x) :- q(x, y), plus(x, y, z), plus(z, 3, w), " - " lt(w, t)", - ["lt(w, t)"], - "Unsafe chaining 2") - - def test_reorder_negation(self): - self.check("p(x) :- q(x), not u(x), r(y), not s(x, y)", - "p(x) :- q(x), not u(x), r(y), not s(x, y)", - "No reordering") - - self.check("p(x) :- not q(x), r(x)", - "p(x) :- r(x), not q(x)", - "Basic") - - self.check("p(x) :- r(x), not q(x, y), s(y)", - "p(x) :- r(x), s(y), not q(x,y)", - "Partially safe") - - self.check("p(x) :- not q(x, y), not r(x), not r(x, z), " - " t(x, y), u(x), s(z)", - "p(x) :- t(x,y), not q(x,y), not r(x), u(x), s(z), " - " not r(x, z)", - "Complex") - - def test_unsafe_negation(self): - self.check_err("p(x) :- not q(x)", - ["q(x)"], - "Basic") - - self.check_err("p(x) :- not q(x), not r(x)", - ["q(x)", "r(x)"], - "Cycle") - - self.check_err("p(x) :- not q(x, y), r(y)", - ["q(x, y)"], - "Partially safe") - - def test_reorder_builtins_negation(self): - self.check("p(x) :- not q(z), plus(x, y, z), s(x), s(y)", - "p(x) :- s(x), s(y), plus(x, y, z), not q(z)", - "Basic") - - self.check("p(x) :- not q(z, w), plus(x, y, z), lt(z, w), " - " plus(x, 3, w), s(x, y)", - "p(x) :- s(x,y), plus(x, y, z), plus(x, 3, w), " - " not q(z, w), lt(z, w)", - "Partial order") - - def test_unsafe_builtins_negation(self): - self.check_err("p(x) :- plus(x, y, z), not q(x, y)", - ['plus(x,y,z)', 'q(x,y)'], - 'Unsafe cycle') - - self.check_err("p(x) :- plus(x, y, z), plus(z, w, t), not q(z, t)," - " s(x), t(y)", - ['plus(z, w, t)', 'q(z, t)'], - 'Unsafety propagates') - - -NREC_THEORY = 'non-recursive theory test' -MAT_THEORY = 'materialized view theory test' - - -# NOTE(thinrichs): this test will be removed once we remove bare builtins -class TestTheories(base.TestCase): - def prep_runtime(self, code=None, msg=None, target=None): - # compile source - if msg is not None: - LOG.debug(msg) - if code is None: - code = "" - if target is None: - target = 
NREC_THEORY - run = agnostic.Runtime() - run.create_policy(NREC_THEORY, abbr="NRT", - kind=datalog_base.NONRECURSIVE_POLICY_TYPE) - run.create_policy(MAT_THEORY, abbr="MAT", - kind=datalog_base.MATERIALIZED_POLICY_TYPE) - run.debug_mode() - run.insert(code, target=target) - return run - - def check_equal(self, actual_string, correct_string, msg): - self.assertTrue(helper.datalog_equal( - actual_string, correct_string, msg)) - - def test_materialized_builtins(self): - self.test_builtins(MAT_THEORY) - - def test_builtins(self, th=NREC_THEORY): - """Test the mechanism that implements builtins.""" - run = self.prep_runtime() - run.insert('p(x) :- q(x,y), plus(x,y,z), r(z)' - 'q(1,2)' - 'q(2,3)' - 'r(3)' - 'r(5)', target=th) - self.check_equal(run.select('p(x)', target=th), "p(1) p(2)", "Plus") - run.delete('r(5)', target=th) - self.check_equal(run.select('p(x)', target=th), "p(1)", "Plus") - - run = self.prep_runtime() - run.insert('p(x) :- q(x,y), minus(x,y,z), r(z)' - 'q(2,1)' - 'q(3,1)' - 'r(1)' - 'r(4)', target=th) - self.check_equal(run.select('p(x)', target=th), "p(2)", "Minus") - run.delete('r(4)', target=th) - run.insert('r(2)', target=th) - self.check_equal(run.select('p(x)', target=th), "p(2) p(3)", "Minus") - - run = self.prep_runtime() - run.insert('p(x, z) :- q(x,y), plus(x,y,z)' - 'q(1,2)' - 'q(2,3)', target=th) - self.check_equal(run.select('p(x, y)', target=th), - "p(1, 3) p(2, 5)", "Plus") - - run = self.prep_runtime() - run.insert('m(x) :- j(x,y), lt(x,y)' - 'j(1,2)' - 'j(3,2)', target=th) - self.check_equal(run.select('m(x)', target=th), 'm(1)', "LT") - - run = self.prep_runtime() - run.insert('m(x) :- j(x,y), lt(x,y), r(y)' - 'j(1,2)' - 'j(2,3)' - 'j(3,2)' - 'r(2)', target=th) - self.check_equal(run.select('m(x)', target=th), 'm(1)', "LT 2") - - run = self.prep_runtime() - run.insert('p(x,z) :- q(x), plus(x,1,z)' - 'q(3)' - 'q(5)', target=th) - self.check_equal(run.select('p(x,z)', target=th), - 'p(3, 4) p(5,6)', "Bound input") - - run = self.prep_runtime() - run.insert('p(x) :- q(x), plus(x,1,5)' - 'q(4)' - 'q(5)', target=th) - self.check_equal(run.select('p(x)', target=th), - 'p(4)', "Bound output") - - run = self.prep_runtime() - run.insert('p(x, z) :- plus(x,y,z), q(x), r(y)' - 'q(4)' - 'r(5)', target=th) - self.check_equal(run.select('p(x, y)', target=th), - 'p(4, 9)', - "Reordering") - - run = self.prep_runtime() - run.insert('p(x, z) :- plus(x,y,z), q(x), q(y)' - 'q(4)' - 'q(5)', target=th) - self.check_equal(run.select('p(x, y)', target=th), - 'p(4, 9) p(4, 8) p(5, 9) p(5, 10)', - "Reordering with self joins") - - def test_materialized_builtins_content(self): - self.test_builtins_content(MAT_THEORY) - - def test_builtins_content(self, th=NREC_THEORY): - """Test the content of the builtins, not the mechanism.""" - def check_true(code, msg): - run = self.prep_runtime('') - run.insert(code, target=th) - self.check_equal( - run.select('p(x)', target=th), - 'p(1)', - msg) - - def check_false(code, msg): - th = NREC_THEORY - run = self.prep_runtime('') - run.insert(code, target=th) - self.check_equal( - run.select('p(x)', target=th), - '', - msg) - - # - # Numbers - # - - # int - code = 'p(1) :- int(2,2)' - check_true(code, "int") - - code = 'p(1) :- int(2.3, 2)' - check_true(code, "int") - - code = 'p(1) :- int(2, 3.3)' - check_false(code, "int") - - # float - code = 'p(1) :- float(2,2.0)' - check_true(code, "float") - - code = 'p(1) :- float(2.3,2.3)' - check_true(code, "float") - - code = 'p(1) :- float(2,3.3)' - check_false(code, "int") - - # plus - code = 'p(1) :- 
plus(2,3,5)' - check_true(code, "plus") - - code = 'p(1) :- plus(2,3,1)' - check_false(code, "plus") - - # minus - code = 'p(1) :- minus(5, 3, 2)' - check_true(code, "minus") - - code = 'p(1) :- minus(5, 3, 6)' - check_false(code, "minus") - - # minus negative: negative numbers should not be supported - # code = 'p(1) :- minus(3, 5, x)' - # check_false(code, "minus") - - # times - code = 'p(1) :- mul(3, 5, 15)' - check_true(code, "multiply") - - code = 'p(1) :- mul(2, 5, 1)' - check_false(code, "multiply") - - # divides - code = 'p(1) :- div(10, 2, 5)' - check_true(code, "divides") - - code = 'p(1) :- div(10, 4, 2)' - check_true(code, "integer divides") - - code = 'p(1) :- div(10, 4.0, 2.5)' - check_true(code, "float divides") - - code = 'p(1) :- div(10.0, 3, 3.3)' - check_false(code, "divides") - - # - # Comparison - # - - # less than - code = 'p(1) :- lt(1, 3)' - check_true(code, "lessthan") - - code = 'p(1) :- lt(5, 2)' - check_false(code, "lessthan") - - # less than equal - code = 'p(1) :- lteq(1, 3)' - check_true(code, "lessthaneq") - - code = 'p(1) :- lteq(3, 3)' - check_true(code, "lessthaneq") - - code = 'p(1) :- lteq(4, 3)' - check_false(code, "lessthaneq") - - # greater than - code = 'p(1) :- gt(9, 5)' - check_true(code, "greaterthan") - - code = 'p(1) :- gt(5, 9)' - check_false(code, "greaterthan") - - # greater than equal - code = 'p(1) :- gteq(10, 5)' - check_true(code, "greaterthaneq") - - code = 'p(1) :- gteq(10, 10)' - check_true(code, "greaterthaneq") - - code = 'p(1) :- gteq(5, 20)' - check_false(code, "greaterthaneq") - - # equal - code = 'p(1) :- equal(5, 5)' - check_true(code, "equal") - - code = 'p(1) :- equal(5, 7)' - check_false(code, "equal") - - # max - code = 'p(1) :- max(3, 4, 4)' - check_true(code, "max") - - code = 'p(1) :- max(3, 7, 3)' - check_false(code, "max") - - # - # Strings - # - - # len - code = 'p(1) :- len("abcde", 5)' - check_true(code, "Len") - - code = 'p(1) :- len("abcde", 7)' - check_false(code, "Len") - - # concat - code = 'p(1) :- concat("abc", "def", "abcdef")' - check_true(code, "concat") - - code = 'p(1) :- concat("abc", "def", "zxy")' - check_false(code, "concat") - - # - # Datetime - # We should make some of these more robust but can't do - # that with the safety restrictions in place at the time - # of writing. 
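For context, the datetime builtins exercised below parse their string arguments leniently, which is why mixed formats such as "Jan 1, 2014 10:00:00" and "2014-01-01 10:00:00" denote the same instant. A minimal sketch of the comparison semantics these tests assume, using python-dateutil (an assumption for illustration; the real implementations live in congress/datalog/builtin.py):

    from dateutil import parser

    def datetime_lt(x, y):
        # hypothetical stand-in for the 'datetime_lt' builtin: parse both
        # operands leniently, then compare the resulting datetime objects
        return parser.parse(x) < parser.parse(y)

    # e.g. datetime_lt("Jan 1, 2014 10:00:00", "2014-01-02 10:00:00") -> True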
- # - - # lessthan - code = ('p(1) :- datetime_lt(' - '"Jan 1, 2014 10:00:00", "2014-01-02 10:00:00")') - check_true(code, "True datetime_lt") - - code = ('p(1) :- datetime_lt(' - '"2014-01-03 10:00:00", "Jan 2, 2014 10:00:00")') - check_false(code, "False datetime_lt") - - # lessthanequal - code = ('p(1) :- datetime_lteq(' - '"Jan 1, 2014 10:00:00", "2014-01-02 10:00:00")') - check_true(code, "True datetime_lteq") - - code = ('p(1) :- datetime_lteq(' - '"Jan 1, 2014 10:00:00", "2014-01-01 10:00:00")') - check_true(code, "True datetime_lteq") - - code = ('p(1) :- datetime_lteq(' - '"2014-01-02 10:00:00", "Jan 1, 2014 10:00:00")') - check_false(code, "False datetime_lteq") - - # greaterthan - code = ('p(1) :- datetime_gt(' - '"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")') - check_true(code, "True datetime_gt") - - code = ('p(1) :- datetime_gt(' - '"2014-01-03 10:00:00", "Feb 2, 2014 10:00:00")') - check_false(code, "False datetime_gt") - - # greaterthanequal - code = ('p(1) :- datetime_gteq(' - '"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")') - check_true(code, "True datetime_gteq") - - code = ('p(1) :- datetime_gteq(' - '"Jan 5, 2014 10:00:00", "2014-01-05 10:00:00")') - check_true(code, "True datetime_gteq") - - code = ('p(1) :- datetime_gteq(' - '"2014-01-02 10:00:00", "Mar 1, 2014 10:00:00")') - check_false(code, "False datetime_gteq") - - # equal - code = ('p(1) :- datetime_equal(' - '"Jan 5, 2014 10:00:00", "2014-01-05 10:00:00")') - check_true(code, "True datetime_equal") - - code = ('p(1) :- datetime_equal(' - '"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")') - check_false(code, "False datetime_equal") - - # plus - code = ('p(1) :- datetime_plus(' - '"Jan 5, 2014 10:00:00", 3600, "2014-01-05 11:00:00")') - check_true(code, "True datetime_plus") - - code = ('p(1) :- datetime_plus(' - '"Jan 5, 2014 10:00:00", "1:00:00", "2014-01-05 11:00:00")') - check_true(code, "True datetime_plus") - - code = ('p(1) :- datetime_plus(' - '"Jan 5, 2014 10:00:00", 3600, "2014-01-05 12:00:00")') - check_false(code, "False datetime_plus") - - # minus - code = ('p(1) :- datetime_minus(' - '"Jan 5, 2014 10:00:00", "25:00:00", "2014-01-04 09:00:00")') - check_true(code, "True datetime_minus") - - code = ('p(1) :- datetime_minus(' - '"Jan 5, 2014 10:00:00", 3600, "2014-01-05 09:00:00")') - check_true(code, "True datetime_minus") - - code = ('p(1) :- datetime_minus(' - '"Jan 5, 2014 10:00:00", "9:00:00", "Jan 4, 2014 10:00:00")') - check_false(code, "False datetime_minus") - - # to_seconds - code = ('p(1) :- datetime_to_seconds(' - '"Jan 1, 1900 1:00:00", 3600)') - check_true(code, "True datetime_to_seconds") - - code = ('p(1) :- datetime_to_seconds(' - '"Jan 1, 1900 1:00:00", 3601)') - check_false(code, "False datetime_to_seconds") - - # extract_time - code = ('p(1) :- extract_time(' - '"Jan 1, 1900 1:00:00", "01:00:00")') - check_true(code, "True extract_time") - - code = ('p(1) :- extract_time(' - '"Jan 1, 1900 1:00:00", "02:00:00")') - check_false(code, "False extract_time") - - # extract_date - code = ('p(1) :- extract_date(' - '"Jan 1, 1900 1:00:00", "1900-01-01")') - check_true(code, "True extract_date") - - code = ('p(1) :- extract_date(' - '"Jan 1, 1900 1:00:00", "2000-01-01")') - check_false(code, "False extract_date") - - # pack_datetime - code = ('p(1) :- pack_datetime(2000, 1, 1, 10, 5, 6, ' - '"2000-1-1 10:5:6")') - check_true(code, "True pack_datetime") - - code = ('p(1) :- pack_datetime(2000, 1, 1, 10, 5, 6, ' - '"2000-1-1 10:5:20")') - check_false(code, "False pack_datetime") - - # 
pack_date - code = ('p(1) :- pack_date(2000, 1, 1, ' - '"2000-1-1")') - check_true(code, "True pack_date") - - code = ('p(1) :- pack_date(2000, 1, 1, ' - '"2000-1-2")') - check_false(code, "False pack_date") - - # pack_time - code = ('p(1) :- pack_time(5, 6, 7, ' - '"5:6:7")') - check_true(code, "True pack_time") - - code = ('p(1) :- pack_time(5, 6, 7, ' - '"10:6:7")') - check_false(code, "False pack_time") - - # unpack_datetime - code = ('p(1) :- unpack_datetime("2000-1-1 10:5:6", ' - '2000, 1, 1, 10, 5, 6)') - check_true(code, "True unpack_datetime") - - code = ('p(1) :- unpack_datetime("2000-1-1 10:5:6", ' - '2000, 1, 1, 12, 5, 6)') - check_false(code, "False unpack_datetime") - - # unpack_date - code = ('p(1) :- unpack_date("2000-1-1 10:5:6", ' - '2000, 1, 1)') - check_true(code, "True unpack_date") - - code = ('p(1) :- unpack_date("2000-1-1 10:5:6", ' - '2000, 1, 5)') - check_false(code, "False unpack_date") - - # unpack_time - code = ('p(1) :- unpack_time("2000-1-1 10:5:6", ' - '10, 5, 6)') - check_true(code, "True unpack_time") - - code = ('p(1) :- unpack_time("2000-1-1 10:5:6", ' - '12, 5, 6)') - check_false(code, "False unpack_time") - - # unpack_time - code = 'p(1) :- now(x)' - check_true(code, "True unpack_time") - - # - # Network Address IPv4 - # - - # ip equal - code = ('p(1) :- ips_equal("192.0.2.1", "192.0.2.1")') - check_true(code, "True ip_equal") - - code = ('p(1) :- ips_equal("192.0.2.1", "192.0.2.2")') - check_false(code, "False ip_equal") - - # ip less than - code = ('p(1) :- ips_lt("192.0.2.1", "192.0.2.2")') - check_true(code, "True ip_lt") - - code = ('p(1) :- ips_lt("192.0.2.1", "192.0.2.1")') - check_false(code, "False ip_lt") - - code = ('p(1) :- ips_lt("192.0.2.2", "192.0.2.1")') - check_false(code, "False ip_lt") - - # ip less than equal - code = ('p(1) :- ips_lteq("192.0.2.1", "192.0.2.1")') - check_true(code, "True ip_lteq") - - code = ('p(1) :- ips_lteq("192.0.2.1", "192.0.2.2")') - check_true(code, "True ip_lteq") - - code = ('p(1) :- ips_lteq("192.0.2.2", "192.0.2.1")') - check_false(code, "False ip_lteq") - - # ip greater than - code = ('p(1) :- ips_gt("192.0.2.2", "192.0.2.1")') - check_true(code, "True ip_gt") - - code = ('p(1) :- ips_gt("192.0.2.1", "192.0.2.1")') - check_false(code, "False ip_gt") - - code = ('p(1) :- ips_gt("192.0.2.1", "192.0.2.2")') - check_false(code, "False ip_gt") - - # ip greater than equal - code = ('p(1) :- ips_gteq("192.0.2.2", "192.0.2.1")') - check_true(code, "True ip_gteq") - - code = ('p(1) :- ips_gteq("192.0.2.2", "192.0.2.2")') - check_true(code, "True ip_gteq") - - code = ('p(1) :- ips_gteq("192.0.2.1", "192.0.2.2")') - check_false(code, "False ip_gteq") - - # networks equal - code = ('p(1) :- networks_equal("192.0.2.0/24", "192.0.2.112/24")') - check_true(code, "True networks_equal") - - code = ('p(1) :- networks_equal("192.0.2.0/24", "192.0.3.0/24")') - check_false(code, "False networks_equal") - - # networks overlap - code = ('p(1) :- networks_overlap("192.0.2.0/23", "192.0.2.0/24")') - check_true(code, "True networks_overlap") - - code = ('p(1) :- networks_overlap("192.0.2.0/24", "192.0.3.0/24")') - check_false(code, "False networks_overlap") - - # ip in network - code = ('p(1) :- ip_in_network("192.168.0.1", "192.168.0.0/24")') - check_true(code, "True ip_in_network") - - code = ('p(1) :- ip_in_network("192.168.10.1", "192.168.0.0/24")') - check_false(code, "False ip_in_network") - - # - # Network Address IPv6 - # - - # ip equal - code = ('p(1) :- ips_equal("::ffff:192.0.2.1", "::ffff:192.0.2.1")') - 
check_true(code, "True ip_equal v6") - - code = ('p(1) :- ips_equal("::ffff:192.0.2.1", "::ffff:192.0.2.2")') - check_false(code, "False ip_equal v6") - - # ip less than - code = ('p(1) :- ips_lt("::ffff:192.0.2.1", "::ffff:192.0.2.2")') - check_true(code, "True ip_lt v6") - - code = ('p(1) :- ips_lt("::ffff:192.0.2.1", "::ffff:192.0.2.1")') - check_false(code, "False ip_lt v6") - - code = ('p(1) :- ips_lt("::ffff:192.0.2.2", "::ffff:192.0.2.1")') - check_false(code, "False ip_lt v6") - - # ip less than equal - code = ('p(1) :- ips_lteq("::ffff:192.0.2.1", "::ffff:192.0.2.1")') - check_true(code, "True ip_lteq v6") - - code = ('p(1) :- ips_lteq("::ffff:192.0.2.1", "::ffff:192.0.2.2")') - check_true(code, "True ip_lteq v6") - - code = ('p(1) :- ips_lteq("::ffff:192.0.2.2", "::ffff:192.0.2.1")') - check_false(code, "False ip_lteq v6") - - # ip greater than - code = ('p(1) :- ips_gt("::ffff:192.0.2.2", "::ffff:192.0.2.1")') - check_true(code, "True ip_gt v6") - - code = ('p(1) :- ips_gt("::ffff:192.0.2.1", "::ffff:192.0.2.1")') - check_false(code, "False ip_gt v6") - - code = ('p(1) :- ips_gt("::ffff:192.0.2.1", "::ffff:192.0.2.2")') - check_false(code, "False ip_gt v6") - - # ip greater than equal - code = ('p(1) :- ips_gteq("::ffff:192.0.2.2", "::ffff:192.0.2.1")') - check_true(code, "True ip_gteq v6") - - code = ('p(1) :- ips_gteq("::ffff:192.0.2.2", "::ffff:192.0.2.2")') - check_true(code, "True ip_gteq v6") - - code = ('p(1) :- ips_gteq("::ffff:192.0.2.1", "::ffff:192.0.2.2")') - check_false(code, "False ip_gteq v6") - - # networks equal - code = ('p(1) :- networks_equal("fe80::ffff:192.0.2.0/24",' - ' "fe80::ffff:192.0.2.112/24")') - check_true(code, "True networks_equal v6") - - code = ('p(1) :- networks_equal("fe80::ffff:192.0.2.0/24",' - ' "ae80::ffff:192.0.2.0/24")') - check_false(code, "False networks_equal v6") - - # networks overlap - code = ('p(1) :- networks_overlap("fe80::ffff:192.0.2.0/23",' - ' "fe80::ffff:192.0.2.0/24")') - check_true(code, "True networks_overlap v6") - - code = ('p(1) :- networks_overlap("fe80::ffff:192.0.2.0/24",' - ' "ae80::ffff:192.0.3.0/24")') - check_false(code, "False networks_overlap v6") - - # ip in network - code = ('p(1) :- ip_in_network("fe80::ffff:192.168.0.1",' - ' "fe80::ffff:192.168.0.0/24")') - check_true(code, "True ip_in_network v6") - - code = ('p(1) :- ip_in_network("fe80::ffff:192.168.10.1",' - ' "ae80::ffff:192.168.10.1/24")') - check_false(code, "False ip_in_network v6") - - -class TestNamedspacedReorder(base.TestCase): - def check(self, input_string, correct_string, msg): - rule = compile.parse1(input_string) - actual = compile.reorder_for_safety(rule) - correct = compile.parse1(correct_string) - if correct != actual: - emsg = "Correct: " + str(correct) - emsg += "; Actual: " + str(actual) - self.fail(msg + " :: " + emsg) - - def check_err(self, input_string, unsafe_lit_strings, msg): - rule = compile.parse1(input_string) - try: - compile.reorder_for_safety(rule) - self.fail("Failed to raise exception for " + input_string) - except exception.PolicyException as e: - errmsg = str(e) - # parse then print to string so string rep same in err msg - unsafe_lits = [str(compile.parse1(x)) for x in unsafe_lit_strings] - missing_lits = [m for m in unsafe_lits - if m + " (vars" not in errmsg] - if len(missing_lits) > 0: - self.fail( - "Unsafe literals {} not reported in error: {}".format( - ";".join(missing_lits), errmsg)) - - def test_reorder_builtins(self): - self.check("p(x, z) :- q(x, y), builtin:plus(x, y, z)", - "p(x, z) :- q(x, y), 
builtin:plus(x, y, z)", - "No reorder") - - self.check("p(x, z) :- builtin:plus(x, y, z), q(x, y)", - "p(x, z) :- q(x, y), builtin:plus(x, y, z)", - "Basic reorder") - - self.check("p(x, z) :- q(x, y), r(w), builtin:plus(x, y, z), " - " builtin:plus(z, w, y)", - "p(x, z) :- q(x, y), r(w), builtin:plus(x, y, z), " - " builtin:plus(z, w, y)", - "Chaining: no reorder") - - self.check("p(x, z) :- q(x, y), builtin:plus(x, y, z), " - " builtin:plus(z, w, y), r(w)", - "p(x, z) :- q(x, y), builtin:plus(x, y, z), r(w), " - " builtin:plus(z, w, y)", - "Chaining: reorder") - - self.check("p(x) :- builtin:lt(t, v), builtin:plus(z, w, t), " - " builtin:plus(z, u, v), " - " builtin:plus(x, y, z), q(y), r(x), s(u), t(w) ", - "p(x) :- q(y), r(x), builtin:plus(x, y, z), s(u), " - " builtin:plus(z, u, v), " - " t(w), builtin:plus(z, w, t), builtin:lt(t, v)", - "Partial-order chaining") - - def test_unsafe_builtins(self): - # an output - self.check_err("p(x) :- q(x), builtin:plus(x, y, z)", - ["builtin:plus(x,y,z)"], - "Basic Unsafe input") - - self.check_err("p(x) :- q(x), r(z), builtin:plus(x, y, z)", - ["builtin:plus(x,y,z)"], - "Basic Unsafe input 2") - - self.check_err("p(x, z) :- builtin:plus(x, y, z), " - " builtin:plus(z, y, x), builtin:plus(x, z, y)", - ["builtin:plus(x, y, z)", "builtin:plus(z, y, x)", - "builtin:plus(x, z, y)"], - "Unsafe with cycle") - - # no outputs - self.check_err("p(x) :- q(x), builtin:lt(x, y)", - ["builtin:lt(x,y)"], - "Basic Unsafe input, no outputs") - - self.check_err("p(x) :- q(y), builtin:lt(x, y)", - ["builtin:lt(x,y)"], - "Basic Unsafe input, no outputs 2") - - self.check_err("p(x, z) :- builtin:lt(x, y), builtin:lt(y, x)", - ["builtin:lt(x,y)", "builtin:lt(y, x)"], - "Unsafe with cycle, no outputs") - - # chaining - self.check_err("p(x) :- q(x, y), builtin:plus(x, y, z), " - " builtin:plus(z, 3, w), builtin:plus(w, t, u)", - ["builtin:plus(w, t, u)"], - "Unsafe chaining") - - self.check_err("p(x) :- q(x, y), builtin:plus(x, y, z), " - " builtin:plus(z, 3, w), builtin:lt(w, t)", - ["builtin:lt(w, t)"], - "Unsafe chaining 2") - - def test_reorder_negation(self): - self.check("p(x) :- q(x), not u(x), r(y), not s(x, y)", - "p(x) :- q(x), not u(x), r(y), not s(x, y)", - "No reordering") - - self.check("p(x) :- not q(x), r(x)", - "p(x) :- r(x), not q(x)", - "Basic") - - self.check("p(x) :- r(x), not q(x, y), s(y)", - "p(x) :- r(x), s(y), not q(x,y)", - "Partially safe") - - self.check("p(x) :- not q(x, y), not r(x), not r(x, z), " - " t(x, y), u(x), s(z)", - "p(x) :- t(x,y), not q(x,y), not r(x), u(x), s(z), " - " not r(x, z)", - "Complex") - - def test_unsafe_negation(self): - self.check_err("p(x) :- not q(x)", - ["q(x)"], - "Basic") - - self.check_err("p(x) :- not q(x), not r(x)", - ["q(x)", "r(x)"], - "Cycle") - - self.check_err("p(x) :- not q(x, y), r(y)", - ["q(x, y)"], - "Partially safe") - - def test_reorder_builtins_negation(self): - self.check("p(x) :- not q(z), builtin:plus(x, y, z), s(x), s(y)", - "p(x) :- s(x), s(y), builtin:plus(x, y, z), not q(z)", - "Basic") - - self.check("p(x) :- not q(z, w), builtin:plus(x, y, z), " - " builtin:lt(z, w), builtin:plus(x, 3, w), s(x, y)", - "p(x) :- s(x,y), builtin:plus(x, y, z), " - " builtin:plus(x, 3, w), not q(z, w), builtin:lt(z, w)", - "Partial order") - - def test_unsafe_builtins_negation(self): - self.check_err("p(x) :- builtin:plus(x, y, z), not q(x, y)", - ['builtin:plus(x,y,z)', 'q(x,y)'], - 'Unsafe cycle') - - self.check_err("p(x) :- builtin:plus(x, y, z), builtin:plus(z, w, t)," - " not q(z, t), s(x), t(y)", - 
['builtin:plus(z, w, t)', 'q(z, t)'], - 'Unsafety propagates') - - -class TestNamespacedTheories(base.TestCase): - def prep_runtime(self, code=None, msg=None, target=None): - # compile source - if msg is not None: - LOG.debug(msg) - if code is None: - code = "" - if target is None: - target = NREC_THEORY - run = agnostic.Runtime() - run.create_policy(NREC_THEORY, abbr="NRT", - kind=datalog_base.NONRECURSIVE_POLICY_TYPE) - run.create_policy(MAT_THEORY, abbr="MAT", - kind=datalog_base.MATERIALIZED_POLICY_TYPE) - run.debug_mode() - run.insert(code, target=target) - return run - - def check_equal(self, actual_string, correct_string, msg): - self.assertTrue(helper.datalog_equal( - actual_string, correct_string, msg)) - - def test_materialized_builtins(self): - self.test_builtins(MAT_THEORY) - - def test_builtins(self, th=NREC_THEORY): - """Test the mechanism that implements builtins.""" - run = self.prep_runtime() - run.insert('p(x) :- q(x,y), builtin:plus(x,y,z), r(z)' - 'q(1,2)' - 'q(2,3)' - 'r(3)' - 'r(5)', target=th) - self.check_equal(run.select('p(x)', target=th), "p(1) p(2)", "Plus") - run.delete('r(5)', target=th) - self.check_equal(run.select('p(x)', target=th), "p(1)", "Plus") - - run = self.prep_runtime() - run.insert('p(x) :- q(x,y), builtin:minus(x,y,z), r(z)' - 'q(2,1)' - 'q(3,1)' - 'r(1)' - 'r(4)', target=th) - self.check_equal(run.select('p(x)', target=th), "p(2)", "Minus") - run.delete('r(4)', target=th) - run.insert('r(2)', target=th) - self.check_equal(run.select('p(x)', target=th), "p(2) p(3)", "Minus") - - run = self.prep_runtime() - run.insert('p(x, z) :- q(x,y), builtin:plus(x,y,z)' - 'q(1,2)' - 'q(2,3)', target=th) - self.check_equal(run.select('p(x, y)', target=th), - "p(1, 3) p(2, 5)", "Plus") - - run = self.prep_runtime() - run.insert('m(x) :- j(x,y), builtin:lt(x,y)' - 'j(1,2)' - 'j(3,2)', target=th) - self.check_equal(run.select('m(x)', target=th), 'm(1)', "LT") - - run = self.prep_runtime() - run.insert('m(x) :- j(x,y), builtin:lt(x,y), r(y)' - 'j(1,2)' - 'j(2,3)' - 'j(3,2)' - 'r(2)', target=th) - self.check_equal(run.select('m(x)', target=th), 'm(1)', "LT 2") - - run = self.prep_runtime() - run.insert('p(x,z) :- q(x), builtin:plus(x,1,z)' - 'q(3)' - 'q(5)', target=th) - self.check_equal(run.select('p(x,z)', target=th), - 'p(3, 4) p(5,6)', "Bound input") - - run = self.prep_runtime() - run.insert('p(x) :- q(x), builtin:plus(x,1,5)' - 'q(4)' - 'q(5)', target=th) - self.check_equal(run.select('p(x)', target=th), - 'p(4)', "Bound output") - - run = self.prep_runtime() - run.insert('p(x, z) :- builtin:plus(x,y,z), q(x), r(y)' - 'q(4)' - 'r(5)', target=th) - self.check_equal(run.select('p(x, y)', target=th), - 'p(4, 9)', - "Reordering") - - run = self.prep_runtime() - run.insert('p(x, z) :- builtin:plus(x,y,z), q(x), q(y)' - 'q(4)' - 'q(5)', target=th) - self.check_equal(run.select('p(x, y)', target=th), - 'p(4, 9) p(4, 8) p(5, 9) p(5, 10)', - "Reordering with self joins") - - def test_materialized_builtins_content(self): - self.test_builtins_content(MAT_THEORY) - - def test_builtins_content(self, th=NREC_THEORY): - """Test the content of the builtins, not the mechanism.""" - def check_true(code, msg): - run = self.prep_runtime('') - run.insert(code, target=th) - self.check_equal( - run.select('p(x)', target=th), - 'p(1)', - msg) - - def check_false(code, msg): - th = NREC_THEORY - run = self.prep_runtime('') - run.insert(code, target=th) - self.check_equal( - run.select('p(x)', target=th), - '', - msg) - - # - # Numbers - # - - # int - code = 'p(1) :- builtin:int(2,2)' 
- check_true(code, "int") - - code = 'p(1) :- builtin:int(2.3, 2)' - check_true(code, "int") - - code = 'p(1) :- builtin:int(2, 3.3)' - check_false(code, "int") - - # float - code = 'p(1) :- builtin:float(2,2.0)' - check_true(code, "float") - - code = 'p(1) :- builtin:float(2.3,2.3)' - check_true(code, "float") - - code = 'p(1) :- builtin:float(2,3.3)' - check_false(code, "int") - - # plus - code = 'p(1) :- builtin:plus(2,3,5)' - check_true(code, "plus") - - code = 'p(1) :- builtin:plus(2,3,1)' - check_false(code, "plus") - - # minus - code = 'p(1) :- builtin:minus(5, 3, 2)' - check_true(code, "minus") - - code = 'p(1) :- builtin:minus(5, 3, 6)' - check_false(code, "minus") - - # minus negative: negative numbers should not be supported - # code = 'p(1) :- minus(3, 5, x)' - # check_false(code, "minus") - - # times - code = 'p(1) :- builtin:mul(3, 5, 15)' - check_true(code, "multiply") - - code = 'p(1) :- builtin:mul(2, 5, 1)' - check_false(code, "multiply") - - # divides - code = 'p(1) :- builtin:div(10, 2, 5)' - check_true(code, "divides") - - code = 'p(1) :- builtin:div(10, 4, 2)' - check_true(code, "integer divides") - - code = 'p(1) :- builtin:div(10, 4.0, 2.5)' - check_true(code, "float divides") - - code = 'p(1) :- builtin:div(10.0, 3, 3.3)' - check_false(code, "divides") - - # - # Comparison - # - - # less than - code = 'p(1) :- builtin:lt(1, 3)' - check_true(code, "lessthan") - - code = 'p(1) :- builtin:lt(5, 2)' - check_false(code, "lessthan") - - # less than equal - code = 'p(1) :- builtin:lteq(1, 3)' - check_true(code, "lessthaneq") - - code = 'p(1) :- builtin:lteq(3, 3)' - check_true(code, "lessthaneq") - - code = 'p(1) :- builtin:lteq(4, 3)' - check_false(code, "lessthaneq") - - # greater than - code = 'p(1) :- builtin:gt(9, 5)' - check_true(code, "greaterthan") - - code = 'p(1) :- builtin:gt(5, 9)' - check_false(code, "greaterthan") - - # greater than equal - code = 'p(1) :- builtin:gteq(10, 5)' - check_true(code, "greaterthaneq") - - code = 'p(1) :- builtin:gteq(10, 10)' - check_true(code, "greaterthaneq") - - code = 'p(1) :- builtin:gteq(5, 20)' - check_false(code, "greaterthaneq") - - # equal - code = 'p(1) :- builtin:equal(5, 5)' - check_true(code, "equal") - - code = 'p(1) :- builtin:equal(5, 7)' - check_false(code, "equal") - - # max - code = 'p(1) :- builtin:max(3, 4, 4)' - check_true(code, "max") - - code = 'p(1) :- builtin:max(3, 7, 3)' - check_false(code, "max") - - # - # Strings - # - - # len - code = 'p(1) :- builtin:len("abcde", 5)' - check_true(code, "Len") - - code = 'p(1) :- builtin:len("abcde", 7)' - check_false(code, "Len") - - # concat - code = 'p(1) :- builtin:concat("abc", "def", "abcdef")' - check_true(code, "concat") - - code = 'p(1) :- builtin:concat("abc", "def", "zxy")' - check_false(code, "concat") - - # - # Datetime - # We should make some of these more robust but can't do - # that with the safety restrictions in place at the time - # of writing. 
- # - - # lessthan - code = ('p(1) :- builtin:datetime_lt(' - '"Jan 1, 2014 10:00:00", "2014-01-02 10:00:00")') - check_true(code, "True datetime_lt") - - code = ('p(1) :- builtin:datetime_lt(' - '"2014-01-03 10:00:00", "Jan 2, 2014 10:00:00")') - check_false(code, "False datetime_lt") - - # lessthanequal - code = ('p(1) :- builtin:datetime_lteq(' - '"Jan 1, 2014 10:00:00", "2014-01-02 10:00:00")') - check_true(code, "True datetime_lteq") - - code = ('p(1) :- builtin:datetime_lteq(' - '"Jan 1, 2014 10:00:00", "2014-01-01 10:00:00")') - check_true(code, "True datetime_lteq") - - code = ('p(1) :- builtin:datetime_lteq(' - '"2014-01-02 10:00:00", "Jan 1, 2014 10:00:00")') - check_false(code, "False datetime_lteq") - - # greaterthan - code = ('p(1) :- builtin:datetime_gt(' - '"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")') - check_true(code, "True datetime_gt") - - code = ('p(1) :- builtin:datetime_gt(' - '"2014-01-03 10:00:00", "Feb 2, 2014 10:00:00")') - check_false(code, "False datetime_gt") - - # greaterthanequal - code = ('p(1) :- builtin:datetime_gteq(' - '"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")') - check_true(code, "True datetime_gteq") - - code = ('p(1) :- builtin:datetime_gteq(' - '"Jan 5, 2014 10:00:00", "2014-01-05 10:00:00")') - check_true(code, "True datetime_gteq") - - code = ('p(1) :- builtin:datetime_gteq(' - '"2014-01-02 10:00:00", "Mar 1, 2014 10:00:00")') - check_false(code, "False datetime_gteq") - - # equal - code = ('p(1) :- builtin:datetime_equal(' - '"Jan 5, 2014 10:00:00", "2014-01-05 10:00:00")') - check_true(code, "True datetime_equal") - - code = ('p(1) :- builtin:datetime_equal(' - '"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")') - check_false(code, "False datetime_equal") - - # plus - code = ('p(1) :- builtin:datetime_plus(' - '"Jan 5, 2014 10:00:00", 3600, "2014-01-05 11:00:00")') - check_true(code, "True datetime_plus") - - code = ('p(1) :- builtin:datetime_plus(' - '"Jan 5, 2014 10:00:00", "1:00:00", "2014-01-05 11:00:00")') - check_true(code, "True datetime_plus") - - code = ('p(1) :- builtin:datetime_plus(' - '"Jan 5, 2014 10:00:00", 3600, "2014-01-05 12:00:00")') - check_false(code, "False datetime_plus") - - # minus - code = ('p(1) :- builtin:datetime_minus(' - '"Jan 5, 2014 10:00:00", "25:00:00", "2014-01-04 09:00:00")') - check_true(code, "True datetime_minus") - - code = ('p(1) :- builtin:datetime_minus(' - '"Jan 5, 2014 10:00:00", 3600, "2014-01-05 09:00:00")') - check_true(code, "True datetime_minus") - - code = ('p(1) :- builtin:datetime_minus(' - '"Jan 5, 2014 10:00:00", "9:00:00", "Jan 4, 2014 10:00:00")') - check_false(code, "False datetime_minus") - - # to_seconds - code = ('p(1) :- builtin:datetime_to_seconds(' - '"Jan 1, 1900 1:00:00", 3600)') - check_true(code, "True datetime_to_seconds") - - code = ('p(1) :- builtin:datetime_to_seconds(' - '"Jan 1, 1900 1:00:00", 3601)') - check_false(code, "False datetime_to_seconds") - - # extract_time - code = ('p(1) :- builtin:extract_time(' - '"Jan 1, 1900 1:00:00", "01:00:00")') - check_true(code, "True extract_time") - - code = ('p(1) :- builtin:extract_time(' - '"Jan 1, 1900 1:00:00", "02:00:00")') - check_false(code, "False extract_time") - - # extract_date - code = ('p(1) :- builtin:extract_date(' - '"Jan 1, 1900 1:00:00", "1900-01-01")') - check_true(code, "True extract_date") - - code = ('p(1) :- builtin:extract_date(' - '"Jan 1, 1900 1:00:00", "2000-01-01")') - check_false(code, "False extract_date") - - # pack_datetime - code = ('p(1) :- builtin:pack_datetime(2000, 1, 1, 10, 5, 6, ' - 
'"2000-1-1 10:5:6")') - check_true(code, "True pack_datetime") - - code = ('p(1) :- builtin:pack_datetime(2000, 1, 1, 10, 5, 6, ' - '"2000-1-1 10:5:20")') - check_false(code, "False pack_datetime") - - # pack_date - code = ('p(1) :- builtin:pack_date(2000, 1, 1, ' - '"2000-1-1")') - check_true(code, "True pack_date") - - code = ('p(1) :- builtin:pack_date(2000, 1, 1, ' - '"2000-1-2")') - check_false(code, "False pack_date") - - # pack_time - code = ('p(1) :- builtin:pack_time(5, 6, 7, ' - '"5:6:7")') - check_true(code, "True pack_time") - - code = ('p(1) :- builtin:pack_time(5, 6, 7, ' - '"10:6:7")') - check_false(code, "False pack_time") - - # unpack_datetime - code = ('p(1) :- builtin:unpack_datetime("2000-1-1 10:5:6", ' - '2000, 1, 1, 10, 5, 6)') - check_true(code, "True unpack_datetime") - - code = ('p(1) :- builtin:unpack_datetime("2000-1-1 10:5:6", ' - '2000, 1, 1, 12, 5, 6)') - check_false(code, "False unpack_datetime") - - # unpack_date - code = ('p(1) :- builtin:unpack_date("2000-1-1 10:5:6", ' - '2000, 1, 1)') - check_true(code, "True unpack_date") - - code = ('p(1) :- builtin:unpack_date("2000-1-1 10:5:6", ' - '2000, 1, 5)') - check_false(code, "False unpack_date") - - # unpack_time - code = ('p(1) :- builtin:unpack_time("2000-1-1 10:5:6", ' - '10, 5, 6)') - check_true(code, "True unpack_time") - - code = ('p(1) :- builtin:unpack_time("2000-1-1 10:5:6", ' - '12, 5, 6)') - check_false(code, "False unpack_time") - - # unpack_time - code = 'p(1) :- builtin:now(x)' - check_true(code, "True unpack_time") - - # - # Network Address IPv4 - # - - # ip equal - code = ('p(1) :- builtin:ips_equal("192.0.2.1", "192.0.2.1")') - check_true(code, "True ip_equal") - - code = ('p(1) :- builtin:ips_equal("192.0.2.1", "192.0.2.2")') - check_false(code, "False ip_equal") - - # ip less than - code = ('p(1) :- builtin:ips_lt("192.0.2.1", "192.0.2.2")') - check_true(code, "True ip_lt") - - code = ('p(1) :- builtin:ips_lt("192.0.2.1", "192.0.2.1")') - check_false(code, "False ip_lt") - - code = ('p(1) :- builtin:ips_lt("192.0.2.2", "192.0.2.1")') - check_false(code, "False ip_lt") - - # ip less than equal - code = ('p(1) :- builtin:ips_lteq("192.0.2.1", "192.0.2.1")') - check_true(code, "True ip_lteq") - - code = ('p(1) :- builtin:ips_lteq("192.0.2.1", "192.0.2.2")') - check_true(code, "True ip_lteq") - - code = ('p(1) :- builtin:ips_lteq("192.0.2.2", "192.0.2.1")') - check_false(code, "False ip_lteq") - - # ip greater than - code = ('p(1) :- builtin:ips_gt("192.0.2.2", "192.0.2.1")') - check_true(code, "True ip_gt") - - code = ('p(1) :- builtin:ips_gt("192.0.2.1", "192.0.2.1")') - check_false(code, "False ip_gt") - - code = ('p(1) :- builtin:ips_gt("192.0.2.1", "192.0.2.2")') - check_false(code, "False ip_gt") - - # ip greater than equal - code = ('p(1) :- builtin:ips_gteq("192.0.2.2", "192.0.2.1")') - check_true(code, "True ip_gteq") - - code = ('p(1) :- builtin:ips_gteq("192.0.2.2", "192.0.2.2")') - check_true(code, "True ip_gteq") - - code = ('p(1) :- builtin:ips_gteq("192.0.2.1", "192.0.2.2")') - check_false(code, "False ip_gteq") - - # networks equal - code = ('p(1) :- builtin:networks_equal("192.0.2.0/24", ' - '"192.0.2.112/24")') - check_true(code, "True networks_equal") - - code = ('p(1) :- builtin:networks_equal("192.0.2.0/24", ' - '"192.0.3.0/24")') - check_false(code, "False networks_equal") - - # networks overlap - code = ('p(1) :- builtin:networks_overlap("192.0.2.0/23", ' - '"192.0.2.0/24")') - check_true(code, "True networks_overlap") - - code = ('p(1) :- 
builtin:networks_overlap("192.0.2.0/24", ' - '"192.0.3.0/24")') - check_false(code, "False networks_overlap") - - # ip in network - code = ('p(1) :- builtin:ip_in_network("192.168.0.1", ' - '"192.168.0.0/24")') - check_true(code, "True ip_in_network") - - code = ('p(1) :- builtin:ip_in_network("192.168.10.1", ' - '"192.168.0.0/24")') - check_false(code, "False ip_in_network") - - # - # Network Address IPv6 - # - - # ip equal - code = ('p(1) :- builtin:ips_equal("::ffff:192.0.2.1", ' - ' "::ffff:192.0.2.1")') - check_true(code, "True ip_equal v6") - - code = ('p(1) :- builtin:ips_equal("::ffff:192.0.2.1", ' - ' "::ffff:192.0.2.2")') - check_false(code, "False ip_equal v6") - - # ip less than - code = ('p(1) :- builtin:ips_lt("::ffff:192.0.2.1", ' - ' "::ffff:192.0.2.2")') - check_true(code, "True ip_lt v6") - - code = ('p(1) :- builtin:ips_lt("::ffff:192.0.2.1", ' - ' "::ffff:192.0.2.1")') - check_false(code, "False ip_lt v6") - - code = ('p(1) :- builtin:ips_lt("::ffff:192.0.2.2", ' - ' "::ffff:192.0.2.1")') - check_false(code, "False ip_lt v6") - - # ip less than equal - code = ('p(1) :- builtin:ips_lteq("::ffff:192.0.2.1", ' - ' "::ffff:192.0.2.1")') - check_true(code, "True ip_lteq v6") - - code = ('p(1) :- builtin:ips_lteq("::ffff:192.0.2.1", ' - ' "::ffff:192.0.2.2")') - check_true(code, "True ip_lteq v6") - - code = ('p(1) :- builtin:ips_lteq("::ffff:192.0.2.2", ' - ' "::ffff:192.0.2.1")') - check_false(code, "False ip_lteq v6") - - # ip greater than - code = ('p(1) :- builtin:ips_gt("::ffff:192.0.2.2", ' - ' "::ffff:192.0.2.1")') - check_true(code, "True ip_gt v6") - - code = ('p(1) :- builtin:ips_gt("::ffff:192.0.2.1", ' - ' "::ffff:192.0.2.1")') - check_false(code, "False ip_gt v6") - - code = ('p(1) :- builtin:ips_gt("::ffff:192.0.2.1", ' - ' "::ffff:192.0.2.2")') - check_false(code, "False ip_gt v6") - - # ip greater than equal - code = ('p(1) :- builtin:ips_gteq("::ffff:192.0.2.2", ' - ' "::ffff:192.0.2.1")') - check_true(code, "True ip_gteq v6") - - code = ('p(1) :- builtin:ips_gteq("::ffff:192.0.2.2", ' - ' "::ffff:192.0.2.2")') - check_true(code, "True ip_gteq v6") - - code = ('p(1) :- builtin:ips_gteq("::ffff:192.0.2.1", ' - ' "::ffff:192.0.2.2")') - check_false(code, "False ip_gteq v6") - - # networks equal - code = ('p(1) :- builtin:networks_equal("fe80::ffff:192.0.2.0/24",' - ' "fe80::ffff:192.0.2.112/24")') - check_true(code, "True networks_equal v6") - - code = ('p(1) :- builtin:networks_equal("fe80::ffff:192.0.2.0/24",' - ' "ae80::ffff:192.0.2.0/24")') - check_false(code, "False networks_equal v6") - - # networks overlap - code = ('p(1) :- builtin:networks_overlap("fe80::ffff:192.0.2.0/23",' - ' "fe80::ffff:192.0.2.0/24")') - check_true(code, "True networks_overlap v6") - - code = ('p(1) :- builtin:networks_overlap("fe80::ffff:192.0.2.0/24",' - ' "ae80::ffff:192.0.3.0/24")') - check_false(code, "False networks_overlap v6") - - # ip in network - code = ('p(1) :- builtin:ip_in_network("fe80::ffff:192.168.0.1",' - ' "fe80::ffff:192.168.0.0/24")') - check_true(code, "True ip_in_network v6") - - code = ('p(1) :- builtin:ip_in_network("fe80::ffff:192.168.10.1",' - ' "ae80::ffff:192.168.10.1/24")') - check_false(code, "False ip_in_network v6") diff --git a/congress/tests/datalog/test_compiler.py b/congress/tests/datalog/test_compiler.py deleted file mode 100644 index 34b5d7ba..00000000 --- a/congress/tests/datalog/test_compiler.py +++ /dev/null @@ -1,974 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import copy - -from congress.datalog import analysis -from congress.datalog import base as datalogbase -from congress.datalog import compile -from congress.datalog import utility -from congress import exception -from congress.policy_engines import agnostic -from congress.tests import base -from congress.tests import helper - - -class TestParser(base.TestCase): - - def test_tablename(self): - """Test correct parsing of tablenames.""" - p = compile.parse1('p(1)') - self.assertEqual(p.table.table, 'p') - self.assertIsNone(p.table.modal) - self.assertIsNone(p.table.service) - - p = compile.parse1('nova:p(1)') - self.assertEqual(p.table.table, 'p') - self.assertIsNone(p.table.modal) - self.assertEqual(p.table.service, 'nova') - - p = compile.parse1('execute[nova:p(1)]') - self.assertEqual(p.table.table, 'p') - self.assertEqual(p.table.modal, 'execute') - self.assertEqual(p.table.service, 'nova') - - def test_rule_hash(self): - """Test whether rules are properly hashed.""" - s = set() - p = compile.parse1('p(x) :- q(x), s(x,y)') - s.add(p) - q = compile.parse1('p(x) :- q(x), s(x,y)') - s.discard(q) - self.assertEqual(s, set()) - - def test_event_equality(self): - r1 = compile.parse1('p(x) :- q(x)') - r2 = compile.parse1('p(x) :- q(x)') - e1 = compile.Event(formula=r1, target='alice', insert=True) - e2 = compile.Event(formula=r2, target='alice', insert=True) - self.assertEqual(e1, e2) - - def test_event_facts(self): - # insert - event = compile.parse('insert[p(1) :- true]') - self.assertEqual(len(event), 1) - event = event[0] - fact = compile.parse1('p(1) :- true') - self.assertEqual(event.formula, fact) - self.assertTrue(event.insert) - self.assertIsNone(event.target) - - # delete - event = compile.parse('delete[p(1) :- true]') - self.assertEqual(len(event), 1) - event = event[0] - fact = compile.parse1('p(1) :- true') - self.assertEqual(event.formula, fact) - self.assertFalse(event.insert) - self.assertIsNone(event.target) - - # insert with policy - event = compile.parse('insert[p(1) :- true; "policy"]') - self.assertEqual(len(event), 1) - event = event[0] - fact = compile.parse1('p(1) :- true') - self.assertEqual(event.formula, fact) - self.assertTrue(event.insert) - self.assertEqual(event.target, "policy") - - def test_event_rules(self): - """Test modal operators.""" - # a rule we use a few times - pqrule = compile.parse1('p(x) :- q(x)') - - # rule-level modal (with insert) - event = compile.parse('insert[p(x) :- q(x)]') - self.assertEqual(len(event), 1) - event = event[0] - self.assertEqual(event.formula, pqrule) - self.assertTrue(event.insert) - self.assertIsNone(event.target) - - # rule-level modal with delete - event = compile.parse('delete[p(x) :- q(x)]') - self.assertEqual(len(event), 1) - event = event[0] - self.assertEqual(event.formula, pqrule) - self.assertFalse(event.insert) - self.assertIsNone(event.target) - - # 
embedded modals - event = compile.parse('insert[execute[p(x)] :- q(x)]') - self.assertEqual(len(event), 1) - event = event[0] - rule = compile.parse1('execute[p(x)] :- q(x)') - self.assertEqual(event.formula, rule) - self.assertTrue(event.insert) - self.assertIsNone(event.target) - - # rule-level modal with policy name - event = compile.parse('insert[p(x) :- q(x); "policy"]') - self.assertEqual(len(event), 1) - event = event[0] - self.assertEqual(event.formula, pqrule) - self.assertTrue(event.insert) - self.assertEqual(event.target, "policy") - - def test_modal_execute(self): - # modal rule - rule = compile.parse('execute[p(x)] :- q(x)') - self.assertEqual(len(rule), 1) - rule = rule[0] - self.assertEqual(rule.head.table.modal, 'execute') - - # modal rule with namespace - rule = compile.parse('execute[nova:disconnectNetwork(x)] :- q(x)') - self.assertEqual(len(rule), 1) - rule = rule[0] - self.assertEqual(rule.head.table.modal, 'execute') - - # modal query - rule = compile.parse('execute[p(x)]') - self.assertEqual(len(rule), 1) - rule = rule[0] - self.assertEqual(rule.table.modal, 'execute') - - def test_update_rules(self): - rule = compile.parse1('insert[p(x)] :- q(x)') - self.assertEqual(rule.head.table.modal, 'insert') - - rule = compile.parse1('insert[p(x)] :- execute[q(x)]') - self.assertEqual(rule.head.table.modal, 'insert') - - def test_modal_failures(self): - self.assertRaises(exception.PolicyException, compile.parse1, - 'insert[p(x) :- q(x)') - self.assertRaises(exception.PolicyException, compile.parse1, - 'insert[insert[p(x)] :- q(x)') - self.assertRaises(exception.PolicyException, compile.parse1, - 'nonexistent[insert[p(x)] :- q(x)]') - self.assertRaises(exception.PolicyException, compile.parse1, - 'insert[nonexistent[p(x)] :- q(x)]') - - -class TestColumnReferences(base.TestCase): - - class SchemaWrapper (object): - """Placeholder so we don't use the actual policy-engine for tests.""" - def __init__(self, schema): - self.schema = schema - self.kind = datalogbase.DATASOURCE_POLICY_TYPE - - def test_column_references_lowlevel(self): - """Test column-references with low-level checks.""" - # do the first one the painful way, to ensure the parser - # is doing something reasonable. 
- run = agnostic.Runtime() - code = ("p(x) :- nova:q(id=x)") - actual = run.parse(code) - self.assertEqual(len(actual), 1) - rule = actual[0] - self.assertEqual(len(rule.heads), 1) - self.assertEqual(rule.head.table.table, "p") - self.assertEqual(len(rule.head.arguments), 1) - self.assertEqual(rule.head.arguments[0].name, 'x') - self.assertEqual(len(rule.body), 1) - lit = rule.body[0] - self.assertFalse(lit.is_negated()) - self.assertEqual(lit.table.table, "q") - self.assertIsNone(lit.table.modal) - self.assertEqual(lit.table.service, 'nova') - self.assertEqual(len(lit.arguments), 0) - self.assertEqual(len(lit.named_arguments), 1) - self.assertIn('id', lit.named_arguments) - self.assertEqual(lit.named_arguments['id'].name, 'x') - - def test_hash(self): - x = set() - x.add(compile.parse1('p(x, y) :- nova:q(x, id=y)')) - x.add(compile.parse1('p(x, y) :- nova:q(x, id=y)')) - self.assertEqual(len(x), 1) - self.assertIn(compile.parse1('p(x, y) :- nova:q(x, id=y)'), x) - x.discard(compile.parse1('p(x, y) :- nova:q(x, id=y)')) - self.assertEqual(len(x), 0) - - def test_lessthan(self): - x = compile.parse1('nova:q(x)') - y = compile.parse1('nova:q(x, id=y)') - self.assertLess(x, y) - - x = compile.parse1('nova:q(x)') - y = compile.parse1('nova:q(x, id=y)') - self.assertGreaterEqual(y, x) - - x = compile.parse1('nova:q(x, id=w)') - y = compile.parse1('nova:q(x, id=y)') - self.assertLess(x, y) - - x = compile.parse1('nova:q(id=x)') - y = compile.parse1('nova:q(id=y)') - self.assertLess(x, y) - - x = compile.parse1('nova:q(id=x)') - y = compile.parse1('nova:q(id=y, status=z)') - self.assertLess(x, y) - - x = compile.parse1('p(x, y) :- nova:q(x, id=y)') - y = compile.parse1('p(x, y) :- nova:q(x, id=y, status=z)') - self.assertLess(x, y) - - def test_column_references_parse_errors(self): - """Test invalid column references occurring in a single atom.""" - def check_err(code, errmsg, msg): - try: - compile.parse(code) - self.fail("Error should have been thrown but was not: " + msg) - except exception.PolicyException as e: - emsg = "Err message '{}' should include '{}'".format( - str(e), errmsg) - self.assertIn(errmsg, str(e), msg + ": " + emsg) - - check_err( - 'p(x) :- nova:q(id=x, status=x, id=y)', - 'two values for column name id', - 'Multiple values for column name') - - check_err( - 'p(x) :- nova:q(4=y, id=w, 4=z)', - 'two values for column number 4', - 'Multiple values for column number') - - check_err( - 'p(x) :- nova:q(x, 1=z, y)', - 'positional parameter after a reference parameter', - 'Positional parameter after reference parameter') - - check_err( - 'p(x) :- nova:q(x, y, 1=z)', - '1 is already provided by position arguments', - 'Conflict between name and position') - - def test_positional_args_padding_atom(self): - """Test positional args padding on a single atom.""" - def check_err(rule, errmsg, msg): - rule = compile.parse1(rule) - try: - rule.eliminate_column_references_and_pad_positional(theories) - self.fail("Failed to throw error {}".format(errmsg)) - except (exception.PolicyException, - exception.IncompleteSchemaException) as e: - emsg = "Err messages '{}' should include '{}'".format( - str(e), errmsg) - self.assertIn(errmsg, str(e), msg + ": " + emsg) - - def check(code, correct, msg, no_theory=False): - actual = compile.parse1( - code).eliminate_column_references_and_pad_positional( - {} if no_theory else theories) - eq = helper.datalog_same(str(actual), correct) - self.assertTrue(eq, msg) - - run = agnostic.Runtime() - run.create_policy('nova') - schema = compile.Schema({'q': ('id', 
'name', 'status')}) - theories = {'nova': self.SchemaWrapper(schema)} - - # Too few positional args - code = ("p(x) :- nova:q(w, y)") - correct = "p(x) :- nova:q(w, y, x3)" - check(code, correct, 'Too few positional args') - - code = ("p(x) :- nova:q(w)") - correct = "p(x) :- nova:q(w, y, x3)" - check(code, correct, 'Too few positional args') - - code = ("p(x) :- nova:q()") - correct = "p(x) :- nova:q(w, y, x3)" - check(code, correct, 'Too few (no) positional args') - - # No schema provided, no change - code = ("p(x) :- nova:q(w, y)") - correct = "p(x) :- nova:q(w, y)" - check(code, correct, 'No schema provided', True) - - code = ("p(x) :- nova:q(w, x, y, z)") - correct = "p(x) :- nova:q(w, x, y, z)" - check(code, correct, 'No schema provided', True) - - def test_positional_args_padding_multiple_atoms(self): - """Test positional args padding on a single atom.""" - def check(code, correct, msg, no_theory=False): - actual = compile.parse1( - code).eliminate_column_references_and_pad_positional( - {} if no_theory else theories) - eq = helper.datalog_same(str(actual), correct) - self.assertTrue(eq, msg) - - run = agnostic.Runtime() - run.create_policy('nova') - schema = compile.Schema({'q': ('id', 'name', 'status'), - 'r': ('id', 'age', 'weight')}) - theories = {'nova': self.SchemaWrapper(schema)} - - # Multiple atoms, no shared variable - code = ("p(x) :- nova:q(x, y), nova:r(w)") - correct = "p(x) :- nova:q(x, y, z0), nova:r(w, y0, y1)" - check(code, correct, 'Multiple atoms') - - # Multiple atoms, some shared variable - code = ("p(x) :- nova:q(x, y), nova:r(x)") - correct = "p(x) :- nova:q(x, y, z0), nova:r(x, y0, y1)" - check(code, correct, 'Multiple atoms') - - # Multiple atoms, same table - code = ("p(x) :- nova:q(x, y), nova:q(x)") - correct = "p(x) :- nova:q(x, y, z0), nova:q(x, w0, w1)" - check(code, correct, 'Multiple atoms, same table') - - def test_column_references_validation_errors(self): - """Test invalid column references occurring in a single atom.""" - schema = compile.Schema({'q': ('id', 'name', 'status'), - 'r': ('id', 'age', 'weight')}, - complete=True) - theories = {'nova': self.SchemaWrapper(schema)} - - def check_err(rule, errmsg, msg): - rule = compile.parse1(rule) - try: - rule.eliminate_column_references_and_pad_positional(theories) - self.fail("Failed to throw error {}".format(errmsg)) - except (exception.PolicyException, - exception.IncompleteSchemaException) as e: - emsg = "Err messages '{}' should include '{}'".format( - str(e), errmsg) - self.assertIn(errmsg, str(e), msg + ": " + emsg) - - check_err( - 'p(x) :- nova:missing(id=x)', - 'uses unknown table missing', - 'Unknown table') - - check_err( - 'p(x) :- nova:q(id=x, birthday=y)', - 'column name birthday does not exist', - 'Unknown column name') - - check_err( - 'p(x) :- nova:q(4=y)', - 'column index 4 is too large', - 'Large column number') - - check_err( - 'p(x) :- nova:q(id=x, 0=y)', - 'index 0 references column id, which is also referenced by name', - 'Conflict between name and number references') - - check_err( - 'p(x) :- nova:q(x, y, id=z)', - 'already provided by position', - 'Conflict between name and position') - - theories = {} - check_err( - 'p(x) :- nova:missing(id=x)', - 'schema is unknown', - 'Missing schema') - - def test_column_references_atom(self): - """Test column references occurring in a single atom in a rule.""" - def check(code, correct, msg): - actual = compile.parse1( - code).eliminate_column_references_and_pad_positional(theories) - eq = helper.datalog_same(str(actual), correct) - 
self.assertTrue(eq, msg) - - run = agnostic.Runtime() - run.create_policy('nova') - schema = compile.Schema({'q': ('id', 'name', 'status')}) - theories = {'nova': self.SchemaWrapper(schema)} - - # Multiple column names - code = ("p(x) :- nova:q(id=x, status=y)") - correct = "p(x) :- nova:q(x, w, y)" - check(code, correct, 'Multiple column names') - - # Multiple column numbers - code = ("p(x) :- nova:q(0=x, 1=y, 2=z)") - correct = "p(x) :- nova:q(x, y, z)" - check(code, correct, 'Multiple column numbers') - - # Mix column names and numbers - code = ("p(x) :- nova:q(id=x, 2=y)") - correct = "p(x) :- nova:q(x, w, y)" - check(code, correct, 'Mix names and numbers') - - # Object constants - code = ("p(x) :- nova:q(id=3, 2=2)") - correct = "p(x) :- nova:q(3, w, 2)" - check(code, correct, 'Object constants') - - # Out of order - code = ("p(x, y) :- nova:q(status=y, id=x)") - correct = "p(x, y) :- nova:q(x, z, y)" - check(code, correct, 'Out of order') - - # Out of order with numbers - code = ("p(x, y) :- nova:q(1=y, 0=x)") - correct = "p(x, y) :- nova:q(x, y, z)" - check(code, correct, 'Out of order with numbers') - - # Positional plus named - code = ("p(x, y) :- nova:q(x, status=y)") - correct = "p(x, y) :- nova:q(x, z, y)" - check(code, correct, 'Positional plus named') - - # Positional plus named 2 - code = ("p(x, y, z) :- nova:q(x, y, 2=z)") - correct = "p(x, y, z) :- nova:q(x, y, z)" - check(code, correct, 'Positional plus named 2') - - # Pure positional (different since we are providing schema) - code = ("p(x, y, z) :- nova:q(x, y, z)") - correct = "p(x, y, z) :- nova:q(x, y, z)" - check(code, correct, 'Pure positional') - - # Pure positional (without schema) - code = ("p(x) :- nova:q(x, y, z)") - run.delete_policy('nova') - correct = "p(x) :- nova:q(x, y, z)" - check(code, correct, 'Pure positional without schema') - - # Too few pure positional EKCS - - def test_column_references_multiple_atoms(self): - """Test column references occurring in multiple atoms in a rule.""" - def check(code, correct, msg): - actual = compile.parse1( - code).eliminate_column_references_and_pad_positional(theories) - eq = helper.datalog_same(str(actual), correct) - self.assertTrue(eq, msg) - - run = agnostic.Runtime() - run.create_policy('nova') - schema = compile.Schema({'q': ('id', 'name', 'status'), - 'r': ('id', 'age', 'weight')}) - theories = {'nova': self.SchemaWrapper(schema)} - - # Multiple atoms - code = ("p(x) :- nova:q(id=x, 2=y), nova:r(id=x)") - correct = "p(x) :- nova:q(x, x0, y), nova:r(x, y0, y1)" - check(code, correct, 'Multiple atoms') - - # Multiple atoms sharing column name but different variables - code = ("p(x) :- nova:q(id=x), nova:r(id=y)") - correct = "p(x) :- nova:q(x, x0, x1), nova:r(y, y0, y1)" - check(code, correct, 'Multiple atoms shared column name') - - # Multiple atoms, same table - code = ("p(x) :- nova:q(id=x, 2=y), nova:q(id=x)") - correct = "p(x) :- nova:q(x, x0, y), nova:q(x, y0, y1)" - check(code, correct, 'Multiple atoms, same table') - - def test_eliminate_column_references_body_order(self): - """Test eliminate_column_references preserves order insensitivity.""" - run = agnostic.Runtime() - run.create_policy('nova') - schema = compile.Schema({'q': ('id', 'name', 'status'), - 'r': ('id', 'age', 'weight')}) - theories = {'nova': self.SchemaWrapper(schema)} - - rule1 = compile.parse1( - "p(x) :- nova:q(id=x, 2=y), nova:r(id=x)" - ).eliminate_column_references_and_pad_positional(theories) - rule2 = compile.parse1( - "p(x) :- nova:r(id=x), nova:q(id=x, 2=y)" - 
).eliminate_column_references_and_pad_positional(theories) - self.assertEqual(rule1, rule2, 'eliminate_column_references failed to ' - 'preserve order insensitivity') - - -class TestCompiler(base.TestCase): - - def test_type_checkers(self): - """Test the type checkers, e.g. is_atom, is_rule.""" - atom = compile.Literal("p", []) - atom2 = compile.Literal("q", []) - atom3 = compile.Literal("r", []) - lit = compile.Literal("r", [], negated=True) - regular_rule = compile.Rule(atom, [atom2, atom3]) - regular_rule2 = compile.Rule(atom, [lit, atom2]) - multi_rule = compile.Rule([atom, atom2], [atom3]) - fake_rule = compile.Rule([atom, 1], [atom2]) - fake_rule2 = compile.Rule(atom, [atom2, 1]) - - # is_atom - self.assertTrue(compile.is_atom(atom)) - self.assertTrue(compile.is_atom(atom2)) - self.assertTrue(compile.is_atom(atom3)) - self.assertFalse(compile.is_atom(lit)) - self.assertFalse(compile.is_atom(regular_rule)) - self.assertFalse(compile.is_atom(regular_rule2)) - self.assertFalse(compile.is_atom(multi_rule)) - self.assertFalse(compile.is_atom(fake_rule)) - self.assertFalse(compile.is_atom(fake_rule2)) - self.assertFalse(compile.is_atom("a string")) - - # is_literal - self.assertTrue(compile.is_literal(atom)) - self.assertTrue(compile.is_literal(atom2)) - self.assertTrue(compile.is_literal(atom3)) - self.assertTrue(compile.is_literal(lit)) - self.assertFalse(compile.is_literal(regular_rule)) - self.assertFalse(compile.is_literal(regular_rule2)) - self.assertFalse(compile.is_literal(multi_rule)) - self.assertFalse(compile.is_literal(fake_rule)) - self.assertFalse(compile.is_literal(fake_rule2)) - self.assertFalse(compile.is_literal("a string")) - - # is_regular_rule - self.assertFalse(compile.is_regular_rule(atom)) - self.assertFalse(compile.is_regular_rule(atom2)) - self.assertFalse(compile.is_regular_rule(atom3)) - self.assertFalse(compile.is_regular_rule(lit)) - self.assertTrue(compile.is_regular_rule(regular_rule)) - self.assertTrue(compile.is_regular_rule(regular_rule2)) - self.assertFalse(compile.is_regular_rule(multi_rule)) - self.assertFalse(compile.is_regular_rule(fake_rule)) - self.assertFalse(compile.is_regular_rule(fake_rule2)) - self.assertFalse(compile.is_regular_rule("a string")) - - # is_multi_rule - self.assertFalse(compile.is_multi_rule(atom)) - self.assertFalse(compile.is_multi_rule(atom2)) - self.assertFalse(compile.is_multi_rule(atom3)) - self.assertFalse(compile.is_multi_rule(lit)) - self.assertFalse(compile.is_multi_rule(regular_rule)) - self.assertFalse(compile.is_multi_rule(regular_rule2)) - self.assertTrue(compile.is_multi_rule(multi_rule)) - self.assertFalse(compile.is_multi_rule(fake_rule)) - self.assertFalse(compile.is_multi_rule(fake_rule2)) - self.assertFalse(compile.is_multi_rule("a string")) - - # is_rule - self.assertFalse(compile.is_rule(atom)) - self.assertFalse(compile.is_rule(atom2)) - self.assertFalse(compile.is_rule(atom3)) - self.assertFalse(compile.is_rule(lit)) - self.assertTrue(compile.is_rule(regular_rule)) - self.assertTrue(compile.is_rule(regular_rule2)) - self.assertTrue(compile.is_rule(multi_rule)) - self.assertFalse(compile.is_rule(fake_rule)) - self.assertFalse(compile.is_rule(fake_rule2)) - self.assertFalse(compile.is_rule("a string")) - - # is_datalog - self.assertTrue(compile.is_datalog(atom)) - self.assertTrue(compile.is_datalog(atom2)) - self.assertTrue(compile.is_datalog(atom3)) - self.assertFalse(compile.is_datalog(lit)) - self.assertTrue(compile.is_datalog(regular_rule)) - self.assertTrue(compile.is_datalog(regular_rule2)) - 
self.assertFalse(compile.is_datalog(multi_rule)) - self.assertFalse(compile.is_datalog(fake_rule)) - self.assertFalse(compile.is_datalog(fake_rule2)) - self.assertFalse(compile.is_datalog("a string")) - - # is_extended_datalog - self.assertTrue(compile.is_extended_datalog(atom)) - self.assertTrue(compile.is_extended_datalog(atom2)) - self.assertTrue(compile.is_extended_datalog(atom3)) - self.assertFalse(compile.is_extended_datalog(lit)) - self.assertTrue(compile.is_extended_datalog(regular_rule)) - self.assertTrue(compile.is_extended_datalog(regular_rule2)) - self.assertTrue(compile.is_extended_datalog(multi_rule)) - self.assertFalse(compile.is_extended_datalog(fake_rule)) - self.assertFalse(compile.is_extended_datalog(fake_rule2)) - self.assertFalse(compile.is_extended_datalog("a string")) - - def test_rule_validation(self): - """Test that rules are properly validated.""" - # unsafe var in head - rule = compile.parse1('p(x) :- q(y)') - errs = compile.rule_errors(rule) - self.assertEqual(len(errs), 1) - - # multiple unsafe vars in head - rule = compile.parse1('p(x,y,z) :- q(w)') - errs = compile.rule_errors(rule) - self.assertEqual(len(set([str(x) for x in errs])), 3) - - # unsafe var in negative literal: - rule = compile.parse1('p(x) :- q(x), not r(y)') - errs = compile.rule_errors(rule) - self.assertEqual(len(set([str(x) for x in errs])), 1) - - # unsafe var in negative literal: ensure head doesn't make safe - rule = compile.parse1('p(x) :- not q(x)') - errs = compile.rule_errors(rule) - self.assertEqual(len(set([str(x) for x in errs])), 1) - - # unsafe var in negative literal: - # ensure partial safety not total safety - rule = compile.parse1('p(x) :- q(x), not r(x,y)') - errs = compile.rule_errors(rule) - self.assertEqual(len(set([str(x) for x in errs])), 1) - - # unsafe var in negative literal: ensure double negs don't make safe - rule = compile.parse1('p(x) :- q(x), not r(x,y), not s(x, y)') - errs = compile.rule_errors(rule) - self.assertEqual(len(set([str(x) for x in errs])), 1) - - # multiple heads with modal - rule = compile.parse1('execute[p(x)], r(x) :- q(x)') - errs = compile.rule_errors(rule) - self.assertEqual(len(set([str(x) for x in errs])), 1) - - # modal in body - rule = compile.parse1('p(x) :- execute[q(x)]') - errs = compile.rule_errors(rule) - self.assertEqual(len(set([str(x) for x in errs])), 1) - - # keywords - rule = compile.parse1('equal(x) :- q(x)') - errs = compile.rule_errors(rule) - self.assertEqual(len(set([str(x) for x in errs])), 1) - - def test_module_schemas(self): - """Test that rules are properly checked against module schemas.""" - - run = agnostic.Runtime() - run.create_policy('mod1') - run.create_policy('mod2') - run.set_schema('mod1', compile.Schema({'p': (1, 2, 3), 'q': (1,)}), - complete=True) - run.set_schema('mod2', compile.Schema({'p': (1,), 'q': (1, 2)}), - complete=True) - - def check_err(code_string, theory, emsg, msg, f=compile.rule_errors): - rule = compile.parse1(code_string) - errs = f(rule, run.theory, theory) - self.assertTrue(any(emsg in str(err) for err in errs), - msg + ":: Failed to find error message '" + emsg + - "' in: " + ";".join(str(e) for e in errs)) - - # no errors - rule = compile.parse1('p(x) :- q(x), mod1:p(x, y, z), mod2:q(x, y), ' - 'mod1:q(t), mod2:p(t)') - errs = compile.rule_errors(rule, run.theory) - self.assertEqual(len(errs), 0, "Should not have found any errors") - - # unknown table within module - check_err('p(x) :- q(x), mod1:r(x), r(x)', - 'mod3', - 'unknown table', - 'Unknown table for rule') - - # wrong
number of arguments - check_err('p(x) :- q(x), mod1:p(x,y,z,w), r(x)', - 'mod3', - 'only 3 arguments are permitted', - 'Wrong number of arguments for rule') - - # same tests for an atom - - # no errors - atom = compile.parse1('p(1, 2, 2)') - errs = compile.fact_errors(atom, run.theory, 'mod1') - self.assertEqual(len(errs), 0, "Should not have found any errors") - - # unknown table within module - check_err('r(1)', - 'mod1', - 'unknown table', - 'Unknown table for atom', - f=compile.fact_errors) - - # wrong number of arguments - check_err('p(1, 2, 3, 4)', - 'mod1', - 'only 3 arguments are permitted', - 'Wrong number of arguments for atom', - f=compile.fact_errors) - - # schema update - schema = compile.Schema() - rule1 = compile.parse1('p(x) :- q(x, y)') - change1 = schema.update(rule1.head, True) - rule2 = compile.parse1('p(x) :- r(x, y)') - change2 = schema.update(rule2.head, True) - self.assertEqual(schema.count['p'], 2) - schema.revert(change2) - self.assertEqual(schema.count['p'], 1) - schema.revert(change1) - self.assertNotIn('p', schema.count) - - schema.update(rule1.head, True) - schema.update(rule2.head, True) - change1 = schema.update(rule1.head, False) - change2 = schema.update(rule2.head, False) - self.assertNotIn('p', schema.count) - schema.revert(change2) - self.assertEqual(schema.count['p'], 1) - schema.revert(change1) - self.assertEqual(schema.count['p'], 2) - - def test_rule_recursion(self): - rules = compile.parse('p(x) :- q(x), r(x) q(x) :- r(x) r(x) :- t(x)') - self.assertFalse(compile.is_recursive(rules)) - - rules = compile.parse('p(x) :- p(x)') - self.assertTrue(compile.is_recursive(rules)) - - rules = compile.parse('p(x) :- q(x) q(x) :- r(x) r(x) :- p(x)') - self.assertTrue(compile.is_recursive(rules)) - - rules = compile.parse('p(x) :- q(x) q(x) :- not p(x)') - self.assertTrue(compile.is_recursive(rules)) - - rules = compile.parse('p(x) :- q(x), s(x) q(x) :- t(x) s(x) :- p(x)') - self.assertTrue(compile.is_recursive(rules)) - - def test_rule_stratification(self): - rules = compile.parse('p(x) :- not q(x)') - self.assertTrue(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- p(x)') - self.assertTrue(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- q(x) q(x) :- p(x)') - self.assertTrue(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- q(x) q(x) :- not r(x)') - self.assertTrue(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- not q(x) q(x) :- not r(x)') - self.assertTrue(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- not q(x) ' - 'q(x) :- not r(x) ' - 'r(x) :- not s(x)') - self.assertTrue(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- q(x), r(x) ' - 'q(x) :- not t(x) ' - 'r(x) :- not s(x)') - self.assertTrue(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- not p(x)') - self.assertFalse(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- q(x) q(x) :- not p(x)') - self.assertFalse(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- q(x),r(x) r(x) :- not p(x)') - self.assertFalse(compile.is_stratified(rules)) - - rules = compile.parse('p(x) :- q(x), r(x) ' - 'q(x) :- not t(x) ' - 'r(x) :- not s(x) ' - 't(x) :- p(x)') - self.assertFalse(compile.is_stratified(rules)) - - -class TestDependencyGraph(base.TestCase): - - def test_nodes_edges(self): - g = compile.RuleDependencyGraph() - - # first insertion - g.formula_insert(compile.parse1('p(x), q(x) :- r(x), s(x)')) - self.assertTrue(g.node_in('p')) - self.assertTrue(g.node_in('q')) - 
self.assertTrue(g.node_in('r')) - self.assertTrue(g.node_in('s')) - self.assertTrue(g.edge_in('p', 'r', False)) - self.assertTrue(g.edge_in('p', 's', False)) - self.assertTrue(g.edge_in('q', 'r', False)) - self.assertTrue(g.edge_in('q', 's', False)) - self.assertFalse(g.has_cycle()) - - # another insertion - g.formula_insert(compile.parse1('r(x) :- t(x)')) - self.assertTrue(g.node_in('p')) - self.assertTrue(g.node_in('q')) - self.assertTrue(g.node_in('r')) - self.assertTrue(g.node_in('s')) - self.assertTrue(g.edge_in('p', 'r', False)) - self.assertTrue(g.edge_in('p', 's', False)) - self.assertTrue(g.edge_in('q', 'r', False)) - self.assertTrue(g.edge_in('q', 's', False)) - self.assertTrue(g.node_in('t')) - self.assertTrue(g.edge_in('r', 't', False)) - self.assertFalse(g.has_cycle()) - - # 3rd insertion, creating a cycle - g.formula_insert(compile.parse1('t(x) :- p(x)')) - self.assertTrue(g.edge_in('t', 'p', False)) - self.assertTrue(g.has_cycle()) - - # deletion - g.formula_delete(compile.parse1('p(x), q(x) :- r(x), s(x)')) - self.assertTrue(g.node_in('p')) - self.assertTrue(g.node_in('r')) - self.assertTrue(g.node_in('t')) - self.assertTrue(g.edge_in('r', 't', False)) - self.assertTrue(g.edge_in('t', 'p', False)) - self.assertFalse(g.has_cycle()) - - # double-insertion - g.formula_insert(compile.parse1('p(x) :- q(x), r(x)')) - g.formula_insert(compile.parse1('p(1) :- r(1)')) - self.assertTrue(g.has_cycle()) - - # deletion -- checking for bag semantics - g.formula_delete(compile.parse1('p(1) :- r(1)')) - self.assertTrue(g.has_cycle()) - g.formula_delete(compile.parse1('p(x) :- q(x), r(x)')) - self.assertFalse(g.has_cycle()) - - # update - g.formula_update([ - compile.Event(compile.parse1('a(x) :- b(x)')), - compile.Event(compile.parse1('b(x) :- c(x)')), - compile.Event(compile.parse1('c(x) :- a(x)'))]) - self.assertTrue(g.has_cycle()) - g.formula_update([ - compile.Event(compile.parse1('c(x) :- a(x)'), insert=False)]) - self.assertFalse(g.has_cycle()) - - # cycle enumeration - g = compile.RuleDependencyGraph() - g.formula_insert(compile.parse1('p(x) :- q(x), r(x)')) - g.formula_insert(compile.parse1('q(x) :- t(x), not s(x)')) - g.formula_insert(compile.parse1('t(x) :- t(x), p(x), q(x)')) - self.assertTrue(g.has_cycle()) - self.assertEqual(len(g.cycles()), 3) - expected_cycle_set = set([ - utility.Cycle(['p', 'q', 't', 'p']), - utility.Cycle(['q', 't', 'q']), - utility.Cycle(['t', 't']) - ]) - actual_cycle_set = set([ - utility.Cycle(g.cycles()[0]), - utility.Cycle(g.cycles()[1]), - utility.Cycle(g.cycles()[2]) - ]) - self.assertEqual(expected_cycle_set, actual_cycle_set) - - def test_dependencies(self): - g = compile.RuleDependencyGraph() - g.formula_insert(compile.parse1('p(x) :- q(x), r(x)')) - g.formula_insert(compile.parse1('q(x) :- t(x), not s(x)')) - self.assertEqual(g.dependencies('p'), set(['p', 'q', 'r', 't', 's'])) - self.assertEqual(g.dependencies('q'), set(['q', 't', 's'])) - self.assertEqual(g.dependencies('r'), set(['r'])) - self.assertEqual(g.dependencies('t'), set(['t'])) - self.assertEqual(g.dependencies('s'), set(['s'])) - - # cyclic case - g = compile.RuleDependencyGraph() - g.formula_insert(compile.parse1('p(x) :- q(x), r(x)')) - g.formula_insert(compile.parse1('q(x) :- t(x), not s(x)')) - g.formula_insert(compile.parse1('t(x) :- t(x), p(x), q(x)')) - self.assertEqual(g.dependencies('p'), set(['p', 'q', 'r', 't', 's'])) - self.assertEqual(g.dependencies('q'), set(['p', 'q', 'r', 't', 's'])) - self.assertEqual(g.dependencies('r'), set(['r'])) - 
self.assertEqual(g.dependencies('t'), set(['p', 'q', 'r', 't', 's'])) - self.assertEqual(g.dependencies('s'), set(['s'])) - - g = compile.RuleDependencyGraph(head_to_body=False) - g.formula_insert(compile.parse1('p(x) :- q(x), r(x)')) - g.formula_insert(compile.parse1('q(x) :- t(x), not s(x)')) - self.assertEqual(g.dependencies('p'), set(['p'])) - self.assertEqual(g.dependencies('q'), set(['q', 'p'])) - self.assertEqual(g.dependencies('r'), set(['r', 'p'])) - self.assertEqual(g.dependencies('t'), set(['t', 'q', 'p'])) - self.assertEqual(g.dependencies('s'), set(['s', 'q', 'p'])) - - def test_modal_index(self): - m = analysis.ModalIndex() - m.add('execute', 'p') - self.assertEqual(set(m.tables('execute')), set(['p'])) - m.add('execute', 'q') - self.assertEqual(set(m.tables('execute')), set(['p', 'q'])) - m.remove('execute', 'q') - self.assertEqual(set(m.tables('execute')), set(['p'])) - m.add('execute', 'q') - m.add('execute', 'q') - m.remove('execute', 'q') - self.assertEqual(set(m.tables('execute')), set(['p', 'q'])) - m.remove('execute', 'q') - self.assertEqual(set(m.tables('execute')), set(['p'])) - m.add('foo', 'p') - self.assertEqual(set(m.tables('foo')), set(['p'])) - self.assertEqual(set(m.tables('bar')), set()) - self.assertEqual(set(m.tables('execute')), set(['p'])) - - def test_modal_index_composition(self): - m = analysis.ModalIndex() - m.add('execute', 'p') - m.add('execute', 'q') - m.add('execute', 'r') - m.add('foo', 'r') - m.add('foo', 's') - - n = analysis.ModalIndex() - n.add('execute', 'p') - n.add('execute', 'alpha') - n.add('foo', 'r') - n.add('bar', 'beta') - - n_plus_m = analysis.ModalIndex() - n_plus_m.add('execute', 'p') - n_plus_m.add('execute', 'p') - n_plus_m.add('execute', 'q') - n_plus_m.add('execute', 'r') - n_plus_m.add('execute', 'alpha') - n_plus_m.add('foo', 'r') - n_plus_m.add('foo', 's') - n_plus_m.add('foo', 'r') - n_plus_m.add('bar', 'beta') - - m_copy = copy.copy(m) - m_copy += n - self.assertEqual(m_copy, n_plus_m) - - m_minus_n = analysis.ModalIndex() - m_minus_n.add('execute', 'q') - m_minus_n.add('execute', 'r') - m_minus_n.add('foo', 's') - - m_copy = copy.copy(m) - m_copy -= n - self.assertEqual(m_copy, m_minus_n) - - def test_modals(self): - g = compile.RuleDependencyGraph() - g.formula_insert(compile.parse1('p(x) :- q(x)')) - g.formula_insert(compile.parse1('q(x) :- r(x)')) - g.formula_insert(compile.parse1('execute[p(x)] :- q(x)')) - chgs = g.formula_insert(compile.parse1('execute[r(x)] :- q(x)')) - g.formula_insert(compile.parse1('insert[s(x)] :- q(x)')) - self.assertEqual(set(g.tables_with_modal('execute')), set(['p', 'r'])) - g.undo_changes(chgs) - self.assertEqual(set(g.tables_with_modal('execute')), set(['p'])) - chgs = g.formula_delete(compile.parse1('execute[p(x)] :- q(x)')) - self.assertEqual(set(g.tables_with_modal('execute')), set()) - g.undo_changes(chgs) - self.assertEqual(set(g.tables_with_modal('execute')), set(['p'])) diff --git a/congress/tests/datalog/test_factset.py b/congress/tests/datalog/test_factset.py deleted file mode 100644 index 8a7e4f86..00000000 --- a/congress/tests/datalog/test_factset.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.datalog import factset -from congress.tests import base - - -class TestFactSet(base.TestCase): - def setUp(self): - super(TestFactSet, self).setUp() - self.factset = factset.FactSet() - - def test_empty(self): - self.assertNotIn((1, 2, 3), self.factset) - self.assertEqual(0, len(self.factset)) - - def test_add_one(self): - f = (1, 2, 'a') - self.factset.add(f) - self.assertEqual(1, len(self.factset)) - self.assertEqual(set([f]), self.factset.find(((0, 1), (1, 2), - (2, 'a')))) - - def test_add_few(self): - f1 = (1, 200, 'a') - f2 = (2, 200, 'a') - f3 = (3, 200, 'c') - self.factset.add(f1) - self.factset.add(f2) - self.factset.add(f3) - - self.assertEqual(3, len(self.factset)) - self.assertEqual(set([f1, f2, f3]), self.factset.find(((1, 200),))) - self.assertEqual(set([f1, f2]), self.factset.find(((2, 'a'),))) - self.assertEqual(set([f1]), self.factset.find(((0, 1), (1, 200), - (2, 'a'),))) - self.assertEqual(set(), self.factset.find(((0, 8),))) - - def test_remove(self): - f1 = (1, 200, 'a') - f2 = (2, 200, 'a') - f3 = (3, 200, 'c') - self.factset.add(f1) - self.factset.add(f2) - self.factset.add(f3) - self.assertEqual(3, len(self.factset)) - - self.assertTrue(self.factset.remove(f1)) - self.assertEqual(2, len(self.factset)) - self.assertEqual(set([f2, f3]), self.factset.find(((1, 200),))) - - self.assertTrue(self.factset.remove(f3)) - self.assertEqual(1, len(self.factset)) - self.assertEqual(set([f2]), self.factset.find(((1, 200),))) - - self.assertFalse(self.factset.remove(f3)) - - self.assertTrue(self.factset.remove(f2)) - self.assertEqual(0, len(self.factset)) - self.assertEqual(set(), self.factset.find(((1, 200),))) - - def test_create_index(self): - f1 = (1, 200, 'a') - f2 = (2, 200, 'a') - f3 = (3, 200, 'c') - self.factset.add(f1) - self.factset.add(f2) - self.factset.add(f3) - - self.factset.create_index((1,)) - self.assertEqual(set([f1, f2, f3]), self.factset.find(((1, 200),))) - self.assertEqual(set([f1, f2]), self.factset.find(((2, 'a'),))) - self.assertEqual(set([f1, f2]), self.factset.find(((1, 200), - (2, 'a')))) - self.assertEqual(set([f1]), self.factset.find(((0, 1), (1, 200), - (2, 'a'),))) - self.assertEqual(set(), self.factset.find(((0, 8),))) - - self.factset.create_index((1, 2)) - self.assertEqual(set([f1, f2, f3]), self.factset.find(((1, 200),))) - self.assertEqual(set([f1, f2]), self.factset.find(((2, 'a'),))) - self.assertEqual(set([f1, f2]), self.factset.find(((1, 200), - (2, 'a')))) - self.assertEqual(set([f1]), self.factset.find(((0, 1), (1, 200), - (2, 'a'),))) - self.assertEqual(set(), self.factset.find(((0, 8),))) - - def test_remove_index(self): - f1 = (1, 200, 'a') - f2 = (2, 200, 'a') - f3 = (3, 200, 'c') - self.factset.add(f1) - self.factset.add(f2) - self.factset.add(f3) - - self.factset.create_index((1,)) - self.factset.create_index((1, 2)) - self.factset.remove_index((1,)) - self.factset.remove_index((1, 2)) - - self.assertEqual(set([f1, f2, f3]), self.factset.find(((1, 200),))) - self.assertEqual(set([f1, f2]), 
self.factset.find(((2, 'a'),))) - self.assertEqual(set([f1, f2]), self.factset.find(((1, 200), - (2, 'a')))) - self.assertEqual(set([f1]), self.factset.find(((0, 1), (1, 200), - (2, 'a'),))) - self.assertEqual(set(), self.factset.find(((0, 8),))) - - def test_indexed_find(self): - f1 = (1, 200, 'a') - f2 = (2, 200, 'a') - f3 = (3, 200, 'c') - self.factset.add(f1) - self.factset.add(f2) - self.factset.add(f3) - - # Count iterations without index. - iterations = [] # measure how many iterations find() uses. - self.assertEqual(set([f1]), self.factset.find(((0, 1),), iterations)) - self.assertEqual(3, iterations[0]) - - # Count iterations with index match. - self.factset.create_index((0,)) - iterations = [] - self.assertEqual(set([f1]), self.factset.find(((0, 1),), iterations)) - self.assertEqual(1, iterations[0]) - - # Count iterations when there is a matching index, but not match for - # this particular key. - iterations = [] - self.assertEqual(set(), self.factset.find(((0, 100),), iterations)) - self.assertEqual(1, iterations[0]) - - # Count iterations after deleting index. - self.factset.remove_index((0,)) - iterations = [] - self.assertEqual(set([f1]), self.factset.find(((0, 1),), iterations)) - self.assertEqual(3, iterations[0]) diff --git a/congress/tests/datalog/test_materialized.py b/congress/tests/datalog/test_materialized.py deleted file mode 100644 index 2e699635..00000000 --- a/congress/tests/datalog/test_materialized.py +++ /dev/null @@ -1,537 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
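The materialized theory exercised by the next file keeps derived tables up to date as base tables change. As a rough illustration of that propagation (a naive bottom-up fixpoint over single-variable rules; Congress itself propagates inserts and deletes incrementally rather than recomputing, so this is a sketch of the semantics, not the implementation):

    def fixpoint(facts, rules):
        """facts: set of (table, value) pairs.

        rules: list of (head, body_tables) pairs encoding
        head(x) :- b1(x), ..., bn(x) with a non-empty body.
        """
        derived = set(facts)
        changed = True
        while changed:
            changed = False
            for head, body in rules:
                # values present in every body table
                values = set.intersection(
                    *[{v for (t, v) in derived if t == b} for b in body])
                new = {(head, v) for v in values} - derived
                if new:
                    derived |= new
                    changed = True
        return derived

    # fixpoint({('p', 1), ('r', 1)}, [('q', ['p', 'r'])])
    # => {('p', 1), ('r', 1), ('q', 1)}, matching the first propagation
    # test below; deleting r(1) and re-running the fixpoint from the
    # remaining base facts drops q(1) again.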
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_log import log as logging - -from congress.datalog import base as datalog_base -from congress.datalog import compile -from congress.policy_engines import agnostic -from congress.tests import base -from congress.tests import helper - -LOG = logging.getLogger(__name__) - -MAT_THEORY = 'classification test theory' -DB_THEORY = 'database test theory' - - -class TestRuntime(base.TestCase): - """Tests for Runtime that are not specific to any theory.""" - - def prep_runtime(self, code=None, msg=None, target=None): - # compile source - if msg is not None: - LOG.debug(msg) - if code is None: - code = "" - if target is None: - target = MAT_THEORY - run = agnostic.Runtime() - run.create_policy(MAT_THEORY, - kind=datalog_base.MATERIALIZED_POLICY_TYPE) - run.create_policy(DB_THEORY, - kind=datalog_base.DATABASE_POLICY_TYPE) - # ensure inserts without target go to MAT_THEORY - run.DEFAULT_THEORY = MAT_THEORY - run.debug_mode() - run.insert(code, target=target) - return run - - def check_equal(self, actual_string, correct_string, msg): - self.assertTrue(helper.datalog_equal( - actual_string, correct_string, msg)) - - def check_db(self, runtime, correct_string, msg): - """Check that runtime.theory[DB_THEORY] is equal to CORRECT_STRING.""" - self.check_equal(runtime.theory[DB_THEORY].content_string(), - correct_string, msg) - - def check_class(self, runtime, correct_string, msg, tablenames=None): - """Test MAT_THEORY. - - Check that runtime RUN.theory[MAT_THEORY] is - equal to CORRECT_STRING. - """ - actual = runtime.theory[MAT_THEORY].content(tablenames=tablenames) - actual_string = " ".join(str(x) for x in actual) - self.check_equal(actual_string, correct_string, msg) - - def showdb(self, run): - LOG.debug("Resulting DB: %s", - run.theory[MAT_THEORY].database | run.theory[DB_THEORY]) - - def test_database(self): - """Test Database with insert/delete.""" - run = self.prep_runtime('') - - self.check_db(run, "", "Empty database on init") - - # set semantics, not bag semantics - run.insert('r(1)', DB_THEORY) - self.check_db(run, "r(1)", "Basic insert") - run.insert('r(1)', DB_THEORY) - self.check_db(run, "r(1)", "Duplicate insert") - - run.delete('r(1)', DB_THEORY) - self.check_db(run, "", "Delete") - run.delete('r(1)', DB_THEORY) - self.check_db(run, "", "Delete from empty table") - - def test_error_checking(self): - """Test error-checking on insertion of rules.""" - code = ("p(x) :- q(x)") - run = self.prep_runtime(code) - result = run.get_target(MAT_THEORY).policy() - self.assertEqual(1, len(result)) - self.assertIn(compile.parse1("p(x) :- q(x)"), result) - - # safety 1 - code = ("p(x) :- not q(x)") - run = self.prep_runtime("", "** Safety 1 **") - permitted, changes = run.insert(code, MAT_THEORY) - self.assertFalse(permitted) - - # safety 2 - code = ("p(x) :- q(y)") - run = self.prep_runtime("", "** Safety 2 **") - permitted, changes = run.insert(code, MAT_THEORY) - self.assertFalse(permitted) - - # TODO(thinrichs): weaken cross-policy recursion restriction - # so that we can include recursion within a single theory. 
- # recursion into classification theory - # code = ("p(x) :- p(x)") - # run = self.prep_runtime("", "** Classification Recursion **") - # permitted, changes = run.insert(code, MAT_THEORY) - # self.assertTrue(permitted) - - # stratification into classification theory - code = ("p(x) :- q(x), not p(x)") - run = self.prep_runtime("", "** Classification Stratification **") - permitted, changes = run.insert(code, MAT_THEORY) - self.assertFalse(permitted) - - def test_basic(self): - """Materialized Theory: test rule propagation.""" - code = ("q(x) :- p(x), r(x)") - run = self.prep_runtime( - code, "**** Materialized Theory: Basic propagation tests ****") - run.insert('r(1)', MAT_THEORY) - run.insert('p(1)', MAT_THEORY) - self.check_class(run, "r(1) p(1) q(1)", - "Insert into base table with 1 propagation") - - run.delete('r(1)', MAT_THEORY) - self.check_class(run, "p(1)", - "Delete from base table with 1 propagation") - - # multiple rules - code = ("q(x) :- p(x), r(x)" - "q(x) :- s(x)") - run.insert('p(1)', MAT_THEORY) - run.insert('r(1)', MAT_THEORY) - self.check_class(run, "p(1) r(1) q(1)", "Insert: multiple rules") - run.insert('s(1)', MAT_THEORY) - self.check_class(run, "p(1) r(1) s(1) q(1)", - "Insert: duplicate conclusions") - - def test_short_body(self): - code = ("q(x) :- p(x)") - run = self.prep_runtime( - code, "**** Materialized Theory: Body length 1 tests ****") - - run.insert('p(1)', MAT_THEORY) - self.check_class(run, "p(1) q(1)", "Insert with body of size 1") - self.showdb(run) - run.delete('p(1)', MAT_THEORY) - self.showdb(run) - self.check_class(run, "", "Delete with body of size 1") - - def test_existentials(self): - code = ("q(x) :- p(x), r(y)") - run = self.prep_runtime( - code, - "**** Materialized Theory: Unary tables with existential ****") - run.insert('p(1)', MAT_THEORY) - run.insert('r(2)', MAT_THEORY) - run.insert('r(3)', MAT_THEORY) - self.showdb(run) - self.check_class(run, "p(1) r(2) r(3) q(1)", - "Insert with unary table and existential") - run.delete('r(2)', MAT_THEORY) - self.showdb(run) - self.check_class(run, "p(1) r(3) q(1)", - "Delete 1 with unary table and existential") - run.delete('r(3)', MAT_THEORY) - self.check_class(run, "p(1)", - "Delete all with unary table and existential") - - def test_nonmonadic(self): - run = self.prep_runtime( - "q(x) :- p(x,y)", - "**** Materialized Theory: Multiple-arity table tests ****") - - run.insert('p(1,2)', MAT_THEORY) - self.check_class(run, "p(1, 2) q(1)", - "Insert: existential variable in body of size 1") - run.delete('p(1,2)', MAT_THEORY) - self.check_class(run, "", - "Delete: existential variable in body of size 1") - - code = ("q(x) :- p(x,y), r(y,x)") - run = self.prep_runtime(code) - run.insert('p(1,2)', MAT_THEORY) - run.insert('r(2,1)', MAT_THEORY) - self.check_class(run, "p(1, 2) r(2, 1) q(1)", - "Insert: join in body of size 2") - run.delete('p(1,2)', MAT_THEORY) - self.check_class(run, "r(2, 1)", - "Delete: join in body of size 2") - run.insert('p(1,2)', MAT_THEORY) - run.insert('p(1,3)', MAT_THEORY) - run.insert('r(3,1)', MAT_THEORY) - self.check_class( - run, "r(2, 1) r(3,1) p(1, 2) p(1, 3) q(1)", - "Insert: multiple existential bindings for same head") - - run.delete('p(1,2)', MAT_THEORY) - self.check_class( - run, "r(2, 1) r(3,1) p(1, 3) q(1)", - "Delete: multiple existential bindings for same head") - - def test_larger_join(self): - code = ("q(x,v) :- p(x,y), r(y,z), s(z,w), t(w,v)") - run = self.prep_runtime(code) - run.insert('p(1, 10)', MAT_THEORY) - run.insert('p(1, 20)', MAT_THEORY) - 
run.insert('p(10, 100)', MAT_THEORY) - run.insert('p(20, 200)', MAT_THEORY) - run.insert('p(100, 1000)', MAT_THEORY) - run.insert('p(200, 2000)', MAT_THEORY) - run.insert('p(1000, 10000)', MAT_THEORY) - run.insert('p(2000, 20000)', MAT_THEORY) - run.insert('r(10, 100)', MAT_THEORY) - run.insert('r(20, 200)', MAT_THEORY) - run.insert('s(100, 1000)', MAT_THEORY) - run.insert('s(200, 2000)', MAT_THEORY) - run.insert('t(1000, 10000)', MAT_THEORY) - run.insert('t(2000, 20000)', MAT_THEORY) - code = ("p(1,10) p(1,20) p(10, 100) p(20, 200) p(100, 1000) " - "p(200, 2000) p(1000, 10000) p(2000, 20000) " - "r(10,100) r(20,200) s(100,1000) s(200,2000) " - "t(1000, 10000) t(2000,20000) " - "q(1,10000) q(1,20000)") - self.check_class(run, code, "Insert: larger join") - run.delete('t(1000, 10000)', MAT_THEORY) - code = ("p(1,10) p(1,20) p(10, 100) p(20, 200) p(100, 1000) " - "p(200, 2000) p(1000, 10000) p(2000, 20000) r(10,100) " - "r(20,200) s(100,1000) s(200,2000) t(2000,20000) " - "q(1,20000)") - self.check_class(run, code, "Delete: larger join") - - def test_self_join(self): - code = ("q(x,y) :- p(x,z), p(z,y)") - run = self.prep_runtime(code) - run.insert('p(1,2)', MAT_THEORY) - run.insert('p(1,3)', MAT_THEORY) - run.insert('p(2, 4)', MAT_THEORY) - run.insert('p(2, 5)', MAT_THEORY) - self.check_class( - run, 'p(1,2) p(1,3) p(2,4) p(2,5) q(1,4) q(1,5)', - "Insert: self-join", tablenames=['p', 'q']) - run.delete('p(2, 4)', MAT_THEORY) - self.check_class(run, 'p(1,2) p(1,3) p(2,5) q(1,5)', '', - tablenames=['p', 'q']) - - code = ("q(x,z) :- p(x,y), p(y,z)") - run = self.prep_runtime(code) - run.insert('p(1, 1)', MAT_THEORY) - self.check_class( - run, 'p(1,1) q(1,1)', "Insert: self-join on same data", - tablenames=['p', 'q']) - - code = ("q(x,w) :- p(x,y), p(y,z), p(z,w)") - run = self.prep_runtime(code) - run.insert('p(1, 1)', MAT_THEORY) - run.insert('p(1, 2)', MAT_THEORY) - run.insert('p(2, 2)', MAT_THEORY) - run.insert('p(2, 3)', MAT_THEORY) - run.insert('p(2, 4)', MAT_THEORY) - run.insert('p(2, 5)', MAT_THEORY) - run.insert('p(3, 3)', MAT_THEORY) - run.insert('p(3, 4)', MAT_THEORY) - run.insert('p(3, 5)', MAT_THEORY) - run.insert('p(3, 6)', MAT_THEORY) - run.insert('p(3, 7)', MAT_THEORY) - code = ('p(1,1) p(1,2) p(2,2) p(2,3) p(2,4) p(2,5)' - 'p(3,3) p(3,4) p(3,5) p(3,6) p(3,7)' - 'q(1,1) q(1,2) q(2,2) q(2,3) q(2,4) q(2,5)' - 'q(3,3) q(3,4) q(3,5) q(3,6) q(3,7)' - 'q(1,3) q(1,4) q(1,5) q(1,6) q(1,7)' - 'q(2,6) q(2,7)') - self.check_class(run, code, "Insert: larger self join", - tablenames=['p', 'q']) - run.delete('p(1, 1)', MAT_THEORY) - run.delete('p(2, 2)', MAT_THEORY) - code = (' p(1,2) p(2,3) p(2,4) p(2,5)' - 'p(3,3) p(3,4) p(3,5) p(3,6) p(3,7)' - ' q(2,3) q(2,4) q(2,5)' - 'q(3,3) q(3,4) q(3,5) q(3,6) q(3,7)' - 'q(1,3) q(1,4) q(1,5) q(1,6) q(1,7)' - 'q(2,6) q(2,7)') - self.check_class(run, code, "Delete: larger self join", - tablenames=['p', 'q']) - - def test_insert_order(self): - """Test insert. - - Test that the order in which we change rules - and data is irrelevant. 
- """ - # was actual bug: insert data first, then - # insert rule with self-join - code = ('q(1)' - 'p(x) :- q(x)') - run = self.prep_runtime(code) - self.check_class(run, 'p(1) q(1)', "Basic insert order") - - code = ('s(1)' - 'q(1,1)' - 'p(x,y) :- q(x,y), not r(x,y)' - 'r(x,y) :- s(x), s(y)') - run = self.prep_runtime(code) - self.check_class(run, 's(1) q(1,1) r(1,1)', "Self-join Insert order", - tablenames=['s', 'q', 'r']) - - code = ('q(1)' - 'p(x) :- q(x) ' - 'r(x) :- p(x) ') - run = self.prep_runtime(code) - self.check_class(run, 'q(1) p(1) r(1)', "Multiple rule insert") - run.delete('p(x) :- q(x)', MAT_THEORY) - self.check_class(run, 'q(1)', "Deletion of rule") - - def test_value_types(self): - """Test the different value types.""" - # string - code = ("q(x) :- p(x), r(x)") - run = self.prep_runtime( - code, "**** Materialized Theory: String data type ****") - - run.insert('r("apple")', MAT_THEORY) - self.check_class(run, 'r("apple")', - "String insert with no propagations") - run.insert('r("apple")', MAT_THEORY) - self.check_class(run, 'r("apple")', - "Duplicate string insert with no propagations") - - run.delete('r("apple")', MAT_THEORY) - self.check_class(run, "", "Delete with no propagations") - run.delete('r("apple")', MAT_THEORY) - self.check_class(run, "", "Delete from empty table") - - run.insert('r("apple")', MAT_THEORY) - run.insert('p("apple")', MAT_THEORY) - self.check_class(run, 'r("apple") p("apple") q("apple")', - "String insert with 1 propagation") - - run.delete('r("apple")', MAT_THEORY) - self.check_class(run, 'p("apple")', "String delete with 1 propagation") - - # float - code = ("q(x) :- p(x), r(x)") - run = self.prep_runtime( - code, "**** Materialized Theory: Float data type ****") - - run.insert('r(1.2)', MAT_THEORY) - self.check_class(run, 'r(1.2)', "String insert with no propagations") - run.insert('r(1.2)', MAT_THEORY) - self.check_class(run, 'r(1.2)', - "Duplicate string insert with no propagations") - - run.delete('r(1.2)', MAT_THEORY) - self.check_class(run, "", "Delete with no propagations") - run.delete('r(1.2)', MAT_THEORY) - self.check_class(run, "", "Delete from empty table") - - run.insert('r(1.2)', MAT_THEORY) - run.insert('p(1.2)', MAT_THEORY) - self.check_class(run, 'r(1.2) p(1.2) q(1.2)', - "String self.insert with 1 propagation") - - run.delete('r(1.2)', MAT_THEORY) - self.check_class(run, 'p(1.2)', "String delete with 1 propagation") - - def test_negation(self): - """Test Materialized Theory negation.""" - # Unary, single join - code = ("q(x) :- p(x), not r(x)") - run = self.prep_runtime(code, - "**** Materialized Theory: Negation ****") - - run.insert('p(2)', MAT_THEORY) - self.check_class(run, 'p(2) q(2)', - "Insert into positive literal with propagation") - run.delete('p(2)', MAT_THEORY) - self.check_class(run, '', - "Delete from positive literal with propagation") - - run.insert('r(2)', MAT_THEORY) - self.check_class(run, 'r(2)', - "Insert into negative literal without propagation") - run.delete('r(2)', MAT_THEORY) - self.check_class(run, '', - "Delete from negative literal without propagation") - - run.insert('p(2)', MAT_THEORY) - run.insert('r(2)', MAT_THEORY) - self.check_class(run, 'p(2) r(2)', - "Insert into negative literal with propagation") - - run.delete('r(2)', MAT_THEORY) - self.check_class(run, 'q(2) p(2)', - "Delete from negative literal with propagation") - - # Unary, multiple joins - code = ("s(x) :- p(x), not r(x), q(y), not t(y)") - run = self.prep_runtime(code, "Unary, multiple join") - run.insert('p(1)', MAT_THEORY) - 
run.insert('q(2)', MAT_THEORY) - self.check_class(run, 'p(1) q(2) s(1)', - 'Insert with two negative literals') - - run.insert('r(3)', MAT_THEORY) - self.check_class(run, 'p(1) q(2) s(1) r(3)', - 'Ineffectual insert with 2 negative literals') - run.insert('r(1)', MAT_THEORY) - self.check_class( - run, 'p(1) q(2) r(3) r(1)', - 'Insert into existentially quantified negative literal ' - 'with propagation. ') - run.insert('t(2)', MAT_THEORY) - self.check_class( - run, 'p(1) q(2) r(3) r(1) t(2)', - 'Insert into negative literal producing extra blocker for proof.') - run.delete('t(2)', MAT_THEORY) - self.check_class(run, 'p(1) q(2) r(3) r(1)', - 'Delete first blocker from proof') - run.delete('r(1)', MAT_THEORY) - self.check_class(run, 'p(1) q(2) r(3) s(1)', - 'Delete second blocker from proof') - - # Non-unary - code = ("p(x, v) :- q(x,z), r(z, w), not s(x, w), u(w,v)") - run = self.prep_runtime(code, "Non-unary") - run.insert('q(1, 2)', MAT_THEORY) - run.insert('r(2, 3)', MAT_THEORY) - run.insert('r(2, 4)', MAT_THEORY) - run.insert('u(3, 5)', MAT_THEORY) - run.insert('u(4, 6)', MAT_THEORY) - self.check_class( - run, 'q(1,2) r(2,3) r(2,4) u(3,5) u(4,6) p(1,5) p(1,6)', - 'Insert with non-unary negative literal') - - run.insert('s(1, 3)', MAT_THEORY) - self.check_class( - run, 'q(1,2) r(2,3) r(2,4) u(3,5) u(4,6) s(1,3) p(1,6)', - 'Insert into non-unary negative with propagation') - - run.insert('s(1, 4)', MAT_THEORY) - self.check_class( - run, 'q(1,2) r(2,3) r(2,4) u(3,5) u(4,6) s(1,3) s(1,4)', - 'Insert into non-unary with different propagation') - - def test_select(self): - """Materialized Theory: test the SELECT event handler.""" - code = ("p(x, y) :- q(x), r(y)") - run = self.prep_runtime(code, "**** Materialized Theory: Select ****") - run.insert('q(1)', MAT_THEORY) - # self.assertEqual('q(1)', run.select('q(x)')) - run.insert('q(2)', MAT_THEORY) - run.insert('r(1)', MAT_THEORY) - run.insert('r(2)', MAT_THEORY) - self.check_class( - run, 'q(1) q(2) r(1) r(2) p(1,1) p(1,2) p(2,1) p(2,2)', - 'Prepare for select') - self.check_equal( - run.select('p(x,y)', MAT_THEORY), - 'p(1,1) p(1,2) p(2,1) p(2,2)', - 'Select: bound no args') - self.check_equal( - run.select('p(1,y)', MAT_THEORY), - 'p(1,1) p(1,2)', - 'Select: bound 1st arg') - self.check_equal( - run.select('p(x,2)', MAT_THEORY), - 'p(1,2) p(2,2)', - 'Select: bound 2nd arg') - self.check_equal( - run.select('p(1,2)', MAT_THEORY), - 'p(1,2)', - 'Select: bound 1st and 2nd arg') - self.check_equal( - run.select('query :- q(x), r(y)', MAT_THEORY), - 'query :- q(1), r(1)' - 'query :- q(1), r(2)' - 'query :- q(2), r(1)' - 'query :- q(2), r(2)', - 'Select: conjunctive query') - - def test_modify_rules(self): - """Test rules modification. - - Test the functionality for adding and deleting - rules *after* data has already been entered. 
- """ - run = self.prep_runtime("", "Rule modification") - run.insert("q(1) r(1) q(2) r(2)", MAT_THEORY) - self.showdb(run) - self.check_class(run, 'q(1) r(1) q(2) r(2)', "Installation") - run.insert("p(x) :- q(x), r(x)", MAT_THEORY) - self.check_class( - run, - 'q(1) r(1) q(2) r(2) p(1) p(2)', 'Rule insert after data insert') - run.delete("q(1)", MAT_THEORY) - self.check_class( - run, - 'r(1) q(2) r(2) p(2)', 'Delete after Rule insert with propagation') - run.insert("q(1)", MAT_THEORY) - run.delete("p(x) :- q(x), r(x)", MAT_THEORY) - self.check_class(run, 'q(1) r(1) q(2) r(2)', "Delete rule") - - def test_recursion(self): - """Materialized Theory: test recursion.""" - self.skipTest('Recursion not currently allowed') - # TODO(thinrichs): weaken cross-policy recursion restriction - # so that we can include recursion within a single theory. - run = self.prep_runtime('q(x,y) :- p(x,y)' - 'q(x,y) :- p(x,z), q(z,y)') - run.insert('p(1,2)', MAT_THEORY) - run.insert('p(2,3)', MAT_THEORY) - run.insert('p(3,4)', MAT_THEORY) - run.insert('p(4,5)', MAT_THEORY) - self.check_class( - run, 'p(1,2) p(2,3) p(3,4) p(4,5)' - 'q(1,2) q(2,3) q(1,3) q(3,4) q(2,4) q(1,4) q(4,5) q(3,5) ' - 'q(1,5) q(2,5)', 'Insert into recursive rules') - run.delete('p(1,2)', MAT_THEORY) - self.check_class( - run, 'p(2,3) p(3,4) p(4,5)' - 'q(2,3) q(3,4) q(2,4) q(4,5) q(3,5) q(2,5)', - 'Delete from recursive rules') diff --git a/congress/tests/datalog/test_nonrecur.py b/congress/tests/datalog/test_nonrecur.py deleted file mode 100644 index 2a3b9b51..00000000 --- a/congress/tests/datalog/test_nonrecur.py +++ /dev/null @@ -1,748 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_log import log as logging - -from congress.datalog import base as datalog_base -from congress.datalog import compile -from congress.datalog import nonrecursive -from congress.policy_engines import agnostic -from congress.tests import base -from congress.tests import helper - -LOG = logging.getLogger(__name__) - -NREC_THEORY = 'non-recursive theory' -DB_THEORY = 'database' - - -class TestRuntime(base.TestCase): - def prep_runtime(self, code=None, msg=None, target=None): - # compile source - if msg is not None: - LOG.debug(msg) - if code is None: - code = "" - if target is None: - target = NREC_THEORY - run = agnostic.Runtime() - run.create_policy(NREC_THEORY, - kind=datalog_base.NONRECURSIVE_POLICY_TYPE) - run.create_policy(DB_THEORY, - kind=datalog_base.DATABASE_POLICY_TYPE) - run.debug_mode() - run.insert(code, target=target) - return run - - def check_equal(self, actual_string, correct_string, msg): - self.assertTrue(helper.datalog_equal( - actual_string, correct_string, msg)) - - def test_indexing(self): - th = NREC_THEORY - run = self.prep_runtime('') - for i in range(10): - run.insert('r(%d)' % i, th) - - run.insert('s(5)', th) - run.insert('p(x) :- r(x), s(x)', th) - ans = 'p(5)' - self.check_equal(run.select('p(5)', th), ans, 'Indexing') - - def test_insert(self): - """Test ability to insert/delete sentences.""" - th = NREC_THEORY - - # insert single atom - run = self.prep_runtime('') - run.insert('p(1)', th) - self.check_equal(run.content(th), 'p(1)', 'Atomic insertion') - - # insert collection of atoms - run = self.prep_runtime('') - run.insert('p(1)', th) - run.insert('p(2)', th) - run.insert('r(3,4)', th) - run.insert('q(1,2,3)', th) - run.insert('q(4,5,6)', th) - ans = 'p(1) p(2) r(3,4) q(1,2,3) q(4,5,6)' - self.check_equal(run.content(th), ans, 'Multiple atomic insertions') - - # insert collection of rules - run = self.prep_runtime('') - run.insert('p(x) :- q(x), r(x)', th) - run.insert('p(x) :- r(x), s(x,y)', th) - run.insert('s(x,y) :- t(x,v), m(v,y)', th) - ans = ('p(x) :- q(x), r(x) ' - 'p(x) :- r(x), s(x,y) ' - 's(x,y) :- t(x,v), m(v,y) ') - self.check_equal(run.content(th), ans, 'Rules') - - # insert rules and data - run.insert('r(1)', th) - run.insert('r(2)', th) - run.insert('m(2,3)', th) - run.insert('p(x) :- q(x), r(x)', th) - run.insert('p(x) :- r(x), s(x,y)', th) - run.insert('s(x,y) :- t(x,v), m(v,y)', th) - ans = ('r(1) r(2) m(2,3) ' - 'p(x) :- q(x), r(x) ' - 'p(x) :- r(x), s(x,y) ' - 's(x,y) :- t(x,v), m(v,y)') - self.check_equal(run.content(th), ans, 'Rules') - - # insert modal rule - run = self.prep_runtime('') - run.insert('execute[p(x)] :- q(x), r(x)', th) - run.insert('execute[p(x)] :- r(x), s(x, y)', th) - run.insert('s(x,y) :- t(x, v), m(v, y)', th) - ans = ('execute[p(x)] :- q(x), r(x) ' - 'execute[p(x)] :- r(x), s(x, y) ' - 's(x,y) :- t(x,v), m(v, y) ') - self.check_equal(run.content(th), ans, 'Rules') - - # insert values for modal rule - run.insert('r(1)', th) - run.insert('r(2)', th) - run.insert('m(2,3)', th) - run.insert('execute[p(x)] :- q(x), r(x)', th) - run.insert('execute[p(x)] :- r(x), s(x,y)', th) - run.insert('s(x,y) :- t(x,v), m(v,y)', th) - ans = ('r(1) r(2) m(2,3) ' - 'execute[p(x)] :- q(x), r(x) ' - 'execute[p(x)] :- r(x), s(x,y) ' - 's(x,y) :- t(x,v), m(v,y)') - self.check_equal(run.content(th), ans, 'Rules') - - # recursion - run = self.prep_runtime("", "** Non-recursive Recursion **") - permitted, changes = 
run.insert("p(x) :- p(x)", th) - self.assertFalse(permitted) - self.assertEqual(run.content(th), '') - - # non-stratified - run = self.prep_runtime("", "** Stratification **") - permitted, changes = run.insert("p(x) :- q(x), not p(x)", th) - self.assertFalse(permitted) - self.assertEqual(run.content(th), '') - - # confliction: rule-rule - run = self.prep_runtime("") - run.insert("q(x) :- p(x,y)", th) - permitted, changes = run.insert("q(x,y) :- p(x,y)", th) - self.assertEqual(len(changes), 1) - self.assertFalse(permitted) - - # confliction: rule-fact - run = self.prep_runtime("") - run.insert("q(x) :- p(x,y)", th) - permitted, changes = run.insert("q(1,3)", th) - self.assertEqual(len(changes), 1) - self.assertFalse(permitted) - - # confliction: fact-rule - run = self.prep_runtime("") - run.insert("q(1,3)", th) - permitted, changes = run.insert("q(x) :- p(x,y)", th) - self.assertEqual(len(changes), 1) - self.assertFalse(permitted) - - # confliction: fact-rule - run = self.prep_runtime("") - run.insert("q(1,3)", th) - permitted, changes = run.insert("q(1)", th) - self.assertEqual(len(changes), 1) - self.assertFalse(permitted) - - # confliction: body-confliction - run = self.prep_runtime("") - run.insert("q(1,3)", th) - permitted, changes = run.insert("p(x,y) :- q(x,y,z)", th) - self.assertEqual(len(changes), 1) - self.assertFalse(permitted) - - # confliction: body-confliction1 - run = self.prep_runtime("") - run.insert("p(x,y) :- q(x,y)", th) - permitted, changes = run.insert("q(y) :- r(y)", th) - self.assertEqual(len(changes), 1) - self.assertFalse(permitted) - - # confliction: body-confliction2 - run = self.prep_runtime("") - run.insert("p(x) :- q(x)", th) - permitted, changes = run.insert("r(y) :- q(x,y)", th) - self.assertEqual(len(changes), 1) - self.assertFalse(permitted) - - def test_delete(self): - """Test ability to delete policy statements.""" - th = NREC_THEORY - - # Multiple atoms - run = self.prep_runtime('', 'Data deletion') - run.insert('p(1)', th) - run.insert('p(2)', th) - run.insert('r(3,4)', th) - run.insert('q(1,2,3)', th) - run.insert('q(4,5,6)', th) - run.delete('q(1,2,3)', th) - run.delete('p(2)', th) - ans = ('p(1) r(3,4) q(4,5,6)') - self.check_equal(run.content(th), ans, 'Multiple atomic deletions') - - # Rules and data - run = self.prep_runtime('', 'Rule/data deletion') - run.insert('r(1)', th) - run.insert('r(2)', th) - run.insert('m(2,3)', th) - run.insert('p(x) :- q(x), r(x)', th) - run.insert('p(x) :- r(x), s(x,y)', th) - run.insert('s(x,y) :- t(x,v), m(v,y)', th) - run.delete('r(1)', th) - run.delete('p(x) :- r(x), s(x,y)', th) - - ans = ('r(2) m(2,3) ' - 'p(x) :- q(x), r(x) ' - 's(x,y) :- t(x,v), m(v,y)') - self.check_equal(run.content(th), ans, 'Rule/data deletions') - run.insert('r(1)', th) - run.insert('p(y) :- q(y), r(z)', th) - ans = ('r(1) r(2) m(2,3) ' - 'p(x) :- q(x), r(x) ' - 'p(y) :- q(y), r(z) ' - 's(x,y) :- t(x,v), m(v,y)') - self.check_equal(run.content(th), ans, - 'Rule/data inserts after deletes') - - # modal rule deletion - run = self.prep_runtime('', 'Rule/Modal data deletion') - run.insert('r(1)', th) - run.insert('r(2)', th) - run.insert('m(2,3)', th) - run.insert('execute[p(x)] :- q(x), r(x)', th) - run.insert('p(x) :- r(x), s(x,y)', th) - run.insert('s(x,y) :- t(x,v), m(v,y)', th) - run.delete('r(1)', th) - run.delete('execute[p(x)] :- q(x), r(x)', th) - ans = ('r(2) m(2,3) ' - 'p(x) :- r(x), s(x, y) ' - 's(x,y) :- t(x,v), m(v,y)') - - self.check_equal(run.content(th), ans, 'Rule/data deletions') - # non-existent - run = 
self.prep_runtime('', 'Nonexistent deletion') - permitted, changes = run.delete('p(1)', th) - self.assertEqual(len(changes), 0) - - def test_select(self): - """Test query functionality, i.e. top-down evaluation.""" - th = NREC_THEORY - run = self.prep_runtime('') - run.insert('p(1)', target=th) - self.check_equal(run.select('p(1)', target=th), "p(1)", - "Simple lookup") - self.check_equal(run.select('p(2)', target=th), "", - "Failed lookup") - run = self.prep_runtime('p(1)', target=th) - self.check_equal(run.select('p(x)', target=th), "p(1)", - "Variablized lookup") - - run = self.prep_runtime('p(x) :- q(x)' - 'q(x) :- r(x)' - 'r(1)', target=th) - self.check_equal(run.select('p(1)', target=th), "p(1)", - "Monadic rules") - self.check_equal(run.select('p(2)', target=th), "", - "False monadic rules") - self.check_equal(run.select('p(x)', target=th), "p(1)", - "Variablized query with monadic rules") - - run = self.prep_runtime('p(x) :- q(x)' - 'q(x) :- r(x)' - 'q(x) :- s(x)' - 'r(1)' - 's(2)', target=th) - self.check_equal(run.select('p(1)', target=th), "p(1)", - "Monadic, disjunctive rules") - self.check_equal(run.select('p(x)', target=th), "p(1) p(2)", - "Variablized, monadic, disjunctive rules") - self.check_equal(run.select('p(3)', target=th), "", - "False Monadic, disjunctive rules") - - run = self.prep_runtime('p(x) :- q(x), r(x)' - 'q(1)' - 'r(1)' - 'r(2)' - 'q(2)' - 'q(3)', target=th) - self.check_equal(run.select('p(1)', target=th), "p(1)", - "Monadic multiple literals in body") - self.check_equal(run.select('p(x)', target=th), "p(1) p(2)", - "Monadic multiple literals in body variablized") - self.check_equal(run.select('p(3)', target=th), "", - "False monadic multiple literals in body") - - run = self.prep_runtime('p(x) :- q(x), r(x)' - 'q(1)' - 'r(2)', target=th) - self.check_equal(run.select('p(x)', target=th), "", - "False variablized monadic multiple literals in body") - - # Modal operator in rule - run = self.prep_runtime('execute[p(x)] :- q(x), r(x)' - 'q(1)' - 'r(1)', target=th) - self.check_equal(run.select('execute[p(x)]', - target=th), "execute[p(1)]", - "Modal operator in Rule head") - - run = self.prep_runtime('p(x,y) :- q(x,z), r(z, y)' - 'q(1,1)' - 'q(1,2)' - 'r(1,3)' - 'r(1,4)' - 'r(2,5)', target=th) - self.check_equal(run.select('p(1,3)', target=th), "p(1,3)", - "Binary, existential rules 1") - self.check_equal(run.select('p(x,y)', target=th), - "p(1,3) p(1,4) p(1,5)", - "Binary, existential rules 2") - self.check_equal(run.select('p(1,1)', target=th), "", - "False binary, existential rules") - self.check_equal(run.select('p(x,x)', target=th), "", - "False binary, variablized, existential rules") - - run = self.prep_runtime('p(x) :- q(x), r(x)' - 'q(y) :- t(y), s(x)' - 's(1)' - 'r(2)' - 't(2)', target=th) - self.check_equal(run.select('p(2)', target=th), "p(2)", - "Distinct variable namespaces across rules") - self.check_equal(run.select('p(x)', target=th), "p(2)", - "Distinct variable namespaces across rules") - - run = self.prep_runtime('p(x,y) :- q(x,z), r(z,y)' - 'q(x,y) :- s(x,z), t(z,y)' - 's(x,y) :- u(x,z), v(z,y)' - 'u(0,2)' - 'u(1,2)' - 'v(2,3)' - 't(3,4)' - 'r(4,5)' - 'r(4,6)', target=th) - self.check_equal(run.select('p(1,5)', target=th), "p(1,5)", - "Tower of existential variables") - self.check_equal(run.select('p(x,y)', target=th), - "p(0,5) p(1,5) p(1,6) p(0,6)", - "Tower of existential variables") - self.check_equal(run.select('p(0,y)', target=th), - "p(0,5) p(0,6)", - "Tower of existential variables") - - run = self.prep_runtime('p(x) :- q(x), r(z)' 
- 'r(z) :- s(z), q(x)' - 's(1)' - 'q(x) :- t(x)' - 't(1)', target=th) - self.check_equal(run.select('p(x)', target=th), 'p(1)', - "Two layers of existential variables") - - # variables - run = self.prep_runtime('p(x) :- q(x0,x)' - 'q(1,2)') - self.check_equal(run.select('p(x)', target=th), 'p(2)', - "Using x0 in rule") - - def test_empty(self): - # full empty - th = nonrecursive.NonrecursiveRuleTheory() - th.insert(compile.parse1('p(x) :- q(x)')) - th.insert(compile.parse1('p(1)')) - th.insert(compile.parse1('q(2)')) - th.empty() - self.assertEqual(len(th.content()), 0) - - # empty with tablenames - th = nonrecursive.NonrecursiveRuleTheory() - th.insert(compile.parse1('p(x) :- q(x)')) - th.insert(compile.parse1('p(1)')) - th.insert(compile.parse1('q(2)')) - th.empty(['p']) - e = helper.datalog_equal(th.content_string(), 'q(2)') - self.assertTrue(e) - - # empty with invert - th = nonrecursive.NonrecursiveRuleTheory() - th.insert(compile.parse1('p(x) :- q(x)')) - th.insert(compile.parse1('p(1)')) - th.insert(compile.parse1('q(2)')) - th.empty(['p'], invert=True) - correct = ('p(x) :- q(x) p(1)') - e = helper.datalog_equal(th.content_string(), correct) - self.assertTrue(e) - - def test_trace(self): - """Test tracing during query.""" - # with single theory - run = self.prep_runtime('') - run.insert('p(x) :- q(x)', target=NREC_THEORY) - run.insert('q(1)', target=NREC_THEORY) - (ans, trace) = run.select('p(x)', target=NREC_THEORY, trace=True) - self.check_equal(ans, 'p(1) ', "Simple lookup") - LOG.debug(trace) - lines = trace.split('\n') - self.assertEqual(len(lines), 12) - - # with included theory - run = self.prep_runtime('') - run.theory[NREC_THEORY].includes.append(run.theory[DB_THEORY]) - - run.insert('p(x) :- q(x)', target=NREC_THEORY) - run.insert('q(1)', target=DB_THEORY) - (ans, trace) = run.select('p(x)', target=NREC_THEORY, trace=True) - self.check_equal(ans, 'p(1) ', "Tracing check") - LOG.debug(trace) - lines = trace.split('\n') - self.assertEqual(len(lines), 16) - - def test_abduction(self): - """Test abduction (computation of policy fragments).""" - def check(query, code, tablenames, correct, msg, find_all=True): - # We're interacting directly with the runtime's underlying - # theory b/c we haven't yet decided whether Abduce should - # be a top-level API call. 
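# What abduce computes, judging from the checks below: it evaluates
# QUERY top-down but stops descending at the tables listed in
# TABLENAMES, returning the rules that would make the query true.
# For example, given
#     p(x) :- q(x), r(x)     q(1)     q(2)
# abducing p(x) with tablenames=['r'] yields the policy fragment
#     p(1) :- r(1)     p(2) :- r(2)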
- run = self.prep_runtime() - run.insert(code, target=NREC_THEORY) - query = helper.str2form(query) - actual = run.theory[NREC_THEORY].abduce( - query, tablenames=tablenames, find_all=find_all) - e = helper.datalog_same(helper.pol2str(actual), correct, msg) - self.assertTrue(e) - - code = ('p(x) :- q(x), r(x)' - 'q(1)' - 'q(2)') - check('p(x)', code, ['r'], - 'p(1) :- r(1) p(2) :- r(2)', "Basic monadic") - - code = ('p(x) :- q(x), r(x)' - 'r(1)' - 'r(2)') - check('p(x)', code, ['q'], - 'p(1) :- q(1) p(2) :- q(2)', "Late, monadic binding") - - code = ('p(x) :- q(x)') - check('p(x)', code, ['q'], - 'p(x) :- q(x)', "No binding") - - code = ('p(x) :- q(x), r(x)' - 'q(x) :- s(x)' - 'r(1)' - 'r(2)') - check('p(x)', code, ['s'], - 'p(1) :- s(1) p(2) :- s(2)', "Intermediate table") - - code = ('p(x) :- q(x), r(x)' - 'q(x) :- s(x)' - 'q(x) :- t(x)' - 'r(1)' - 'r(2)') - check('p(x)', code, ['s', 't'], - 'p(1) :- s(1) p(2) :- s(2) p(1) :- t(1) p(2) :- t(2)', - "Intermediate, disjunctive table") - - code = ('p(x) :- q(x), r(x)' - 'q(x) :- s(x)' - 'q(x) :- t(x)' - 'r(1)' - 'r(2)') - check('p(x)', code, ['s'], - 'p(1) :- s(1) p(2) :- s(2)', - "Intermediate, disjunctive table, but only some saveable") - - code = ('p(x) :- q(x), u(x), r(x)' - 'q(x) :- s(x)' - 'q(x) :- t(x)' - 'u(1)' - 'u(2)') - check('p(x)', code, ['s', 't', 'r'], - 'p(1) :- s(1), r(1) p(2) :- s(2), r(2)' - 'p(1) :- t(1), r(1) p(2) :- t(2), r(2)', - "Multiple support literals") - - code = ('p(x) :- q(x,y), s(x), r(y, z)' - 'r(2,3)' - 'r(2,4)' - 's(1)' - 's(2)') - check('p(x)', code, ['q'], - 'p(1) :- q(1,2) p(2) :- q(2,2)', - "Existential variables that become ground") - - code = ('p(x) :- q(x,y), r(y, z)' - 'r(2,3)' - 'r(2,4)') - check('p(x)', code, ['q'], - 'p(x) :- q(x,2) p(x) :- q(x,2)', - "Existential variables that do not become ground") - - code = ('p+(x) :- q(x), r(z)' - 'r(z) :- s(z), q(x)' - 's(1)') - check('p+(x)', code, ['q'], - 'p+(x) :- q(x), q(x1)', - "Existential variables with name collision") - - def test_modals(self): - """Test that the modal operators work properly.""" - run = agnostic.Runtime() - run.debug_mode() - run.create_policy("test") - run.insert('execute[p(x)] :- q(x)', 'test') - run.insert('q(1)', 'test') - self.assertTrue(helper.datalog_equal( - run.select('execute[p(x)]', 'test'), - 'execute[p(1)]')) - - def test_modal_with_theory(self): - """Test that the modal operators work properly with a theory.""" - run = agnostic.Runtime() - run.debug_mode() - run.create_policy("test") - run.insert('execute[nova:p(x)] :- q(x)', 'test') - run.insert('q(1)', 'test') - self.assertTrue(helper.datalog_equal( - run.select('execute[nova:p(x)]', 'test'), - 'execute[nova:p(1)]')) - - def test_policy_tablenames_filter_modal(self): - execute_rule = 'execute[nova:servers.pause(x)] :- nova:servers(x)' - run = self.prep_runtime(execute_rule) - execute_policy = run.get_target(NREC_THEORY) - tables = execute_policy.tablenames() - self.assertEqual({'nova:servers.pause', 'nova:servers'}, set(tables)) - tables = execute_policy.tablenames(include_modal=False) - self.assertEqual({'nova:servers'}, set(tables)) - tables = execute_policy.tablenames(include_modal=True) - self.assertEqual({'nova:servers.pause', 'nova:servers'}, set(tables)) - - def test_consequences(self): - """Test computation of all atoms true in a theory.""" - def check(code, correct, msg): - # We're interacting directly with the runtime's underlying - # theory b/c we haven't decided whether consequences should - # be a top-level API call. 
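# consequences() enumerates every atom derivable from the theory --
# the facts plus everything the rules conclude from them, i.e. a
# bottom-up fixpoint.  For
#     p1(x) :- q(x)     q(1)     q(2)
# that set is q(1) q(2) p1(1) p1(2), as the first check below verifies.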
- run = self.prep_runtime() - run.insert(code, target=NREC_THEORY) - actual = run.theory[NREC_THEORY].consequences() - e = helper.datalog_same(helper.pol2str(actual), correct, msg) - self.assertTrue(e) - - code = ('p1(x) :- q(x)' - 'q(1)' - 'q(2)') - check(code, 'p1(1) p1(2) q(1) q(2)', 'Monadic') - - code = ('p1(x) :- q(x)' - 'p2(x) :- r(x)' - 'q(1)' - 'q(2)') - check(code, 'p1(1) p1(2) q(1) q(2)', 'Monadic with empty tables') - - -class TestSelectNegation(base.TestCase): - """Tests for negation within a select() routine.""" - def check(self, run, query_string, correct_string, msg): - actual_string = run.select(query_string) - self.assertTrue(helper.datalog_equal( - actual_string, correct_string, msg)) - - def test_monadic(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x), not r(x)' - 'q(1)' - 'q(2)' - 'r(2)') - - self.check(run, 'p(1)', 'p(1)', "Monadic negation") - self.check(run, 'p(2)', '', "False monadic negation") - self.check(run, 'p(x)', 'p(1)', - "Variablized monadic negation") - - def test_binary(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x,y), r(z), not s(y,z)' - 'q(1,1)' - 'q(2,2)' - 'r(4)' - 'r(5)' - 's(1,4)' - 's(1,5)' - 's(2,5)') - self.check(run, 'p(2)', 'p(2)', - "Binary negation with existentials") - self.check(run, 'p(1)', '', - "False Binary negation with existentials") - self.check(run, 'p(x)', 'p(2)', - "False Binary negation with existentials") - - def test_depth(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x,y), s(y,z)' - 's(y,z) :- r(y,w), t(z), not u(w,z)' - 'q(1,1)' - 'q(2,2)' - 'r(1,4)' - 't(7)' - 'r(1,5)' - 't(8)' - 'u(5,8)') - self.check(run, 'p(1)', 'p(1)', - "Embedded negation with existentials") - self.check(run, 'p(2)', '', - "False embedded negation with existentials") - self.check(run, 'p(x)', 'p(1)', - "False embedded negation with existentials") - - def test_mid_rule(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x), not s(x), r(x)' - 'q(1) q(2) q(3) q(4) q(5) q(6)' - 's(1) s(3) s(5)' - 'r(2) r(6)') - self.check(run, 'p(x)', 'p(2) p(6)', - "Multiple answers with monadic negation in middle of rule") - - -class TestArity(base.TestCase): - def test_regular_parsing(self): - th = nonrecursive.NonrecursiveRuleTheory() - th.insert(compile.parse1('p(x) :- q(x, y)')) - th.insert(compile.parse1('execute[r(x)] :- t(x, y)')) - th.insert(compile.parse1('execute[nova:s(x, y)] :- u(x, y)')) - th.insert(compile.parse1('execute[nova:noargs()] :- true')) - self.assertEqual(th.arity('p'), 1) - self.assertIsNone(th.arity('q')) - self.assertIsNone(th.arity('r')) - self.assertIsNone(th.arity('nova:s')) - self.assertEqual(th.arity('r', modal='execute'), 1) - self.assertEqual(th.arity('nova:s', modal='execute'), 2) - self.assertEqual(th.arity('nova:noargs', modal='execute'), 0) - - def test_no_split_parsing(self): - th = nonrecursive.NonrecursiveRuleTheory() - th.insert(compile.parse1('nova:v(x, y) :- u(x, y)', - use_modules=False)) - - self.assertEqual(th.arity('nova:v'), 2) - self.assertIsNone(th.arity('nova:v', modal='insert')) - th.insert(compile.parse1('insert[neutron:v(x, y, z)] :- u(x, y)', - use_modules=False)) - self.assertEqual(th.arity('nova:v'), 2) - self.assertEqual(th.arity('neutron:v', modal='insert'), 3) - - def test_schema(self): - th = nonrecursive.NonrecursiveRuleTheory(name='alice') - th.schema = compile.Schema({'p': ('id', 'status', 'name')}) - self.assertEqual(th.arity('p'), 3) - 
self.assertEqual(th.arity('alice:p'), 3) - - -class TestInstances(base.TestCase): - """Tests for Runtime's delegation functionality.""" - def check(self, rule, data, correct, possibilities=None): - rule = compile.parse1(rule, use_modules=False) - data = compile.parse(data, use_modules=False) - possibilities = possibilities or '' - possibilities = compile.parse(possibilities, use_modules=False) - possibilities = [compile.Rule(x, []) for x in possibilities] - poss = {} - for rule_lit in possibilities: - if rule_lit.head.tablename() not in poss: - poss[rule_lit.head.tablename()] = set([rule_lit]) - else: - poss[rule_lit.head.tablename()].add(rule_lit) - - th = nonrecursive.MultiModuleNonrecursiveRuleTheory() - th.debug_mode() - for lit in data: - th.insert(lit) - result = th.instances(rule, poss) - actual = " ".join(str(x) for x in result) - e = helper.datalog_equal(actual, correct) - self.assertTrue(e) - - def test_basic(self): - rule = 'p(x) :- r(x)' - data = 'r(1) r(2)' - correct = ('p(1) :- r(1) ' - 'p(2) :- r(2)') - self.check(rule, data, correct) - - def test_multiple_literals(self): - rule = 'p(x) :- r(x), s(x)' - data = 'r(1) r(2) r(3) s(2) s(3)' - correct = ('p(2) :- r(2), s(2) ' - 'p(3) :- r(3), s(3)') - self.check(rule, data, correct) - - def test_grounded(self): - rule = 'p(x) :- t(5), r(x), s(x)' - data = 'r(1) r(2) r(3) s(2) s(3)' - correct = ('p(2) :- t(5), r(2), s(2) ' - 'p(3) :- t(5), r(3), s(3)') - self.check(rule, data, correct) - - def test_builtins(self): - rule = 'p(x, z) :- r(x), s(y), plus(x, y, z)' - data = 'r(1) s(2) s(3)' - correct = ('p(1, z) :- r(1), s(2), plus(1, 2, z) ' - 'p(1, z) :- r(1), s(3), plus(1, 3, z)') - self.check(rule, data, correct) - - def test_builtins_reordered(self): - rule = 'p(x, z) :- r(x), plus(x, y, z), s(y)' - data = 'r(1) s(2) s(3)' - correct = ('p(1, z) :- r(1), plus(1, 2, z), s(2) ' - 'p(1, z) :- r(1), plus(1, 3, z), s(3)') - self.check(rule, data, correct) - - def test_modules(self): - # Nonstandard here in that for instances, we are assuming all the - # data that we need is in the current policy, even if it references - # a different policy explicitly. - rule = 'p(x) :- nova:r(x)' - data = 'nova:r(1) nova:r(2)' - correct = ('p(1) :- nova:r(1) ' - 'p(2) :- nova:r(2)') - self.check(rule, data, correct) - - def test_possibilities(self): - rule = 'p(x) :- q(x)' - data = 'q(1) q(5)' - poss = 'q(2) q(3)' - correct = ('p(2) :- q(2) ' - 'p(3) :- q(3) ') - self.check(rule, data, correct, poss) diff --git a/congress/tests/datalog/test_ordered_set.py b/congress/tests/datalog/test_ordered_set.py deleted file mode 100644 index b44a74d1..00000000 --- a/congress/tests/datalog/test_ordered_set.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
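For quick reference, a minimal sketch of the OrderedSet behavior that the
tests below pin down; it assumes only the API these tests exercise:

    from congress.datalog import utility

    os = utility.OrderedSet(["foo", "bar", "foo"])  # duplicates collapse
    assert list(os) == ["foo", "bar"]               # insertion order kept
    assert os.add("baz")                            # truthy: item was new
    assert not os.add("baz")                        # falsy: already present
    assert os.discard("bar")                        # truthy: item was removed
    assert os.pop() == "baz"                        # pop() takes the last item
    assert os.pop(last=False) == "foo"              # last=False takes the first
    assert utility.OrderedSet(["a"]) != ["a"]       # equality is type-aware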
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from congress.datalog import utility
-from congress.tests import base
-
-
-class TestOrderedSet(base.TestCase):
-    def test_creation_simple(self):
-        """Test basic OrderedSet instantiation."""
-        contents = ["foo", "bar", "baz"]
-        os = utility.OrderedSet(contents)
-        self.assertEqual(list(os), contents)
-        self.assertEqual(len(os), len(contents))
-        self.assertEqual(set(os), set(contents))
-
-    def test_creation_with_duplicates(self):
-        """Test that OrderedSet instantiation removes duplicates."""
-        contents = ["foo", "bar", "foo", "baz"]
-        os = utility.OrderedSet(contents)
-        self.assertNotEqual(list(os), contents)
-        self.assertEqual(len(os), len(contents) - 1)
-        self.assertEqual(set(os), set(contents))
-
-    def test_contains(self):
-        """Test that basic OrderedSet.__contains__ functionality works."""
-        contents = ["foo", "bar", "baz"]
-        missing = "qux"
-        os = utility.OrderedSet(contents)
-        self.assertTrue(all(x in os for x in contents))
-        self.assertNotIn(missing, os)
-
-        discarded = contents[1]
-        os.discard(discarded)
-        self.assertTrue(all(x in os for x in contents if x != discarded))
-        self.assertNotIn(discarded, os)
-
-    def test_add_known_item(self):
-        """Test that OrderedSet.add(known) returns False."""
-        contents = ["foo", "bar", "baz"]
-        known = contents[1]
-        os = utility.OrderedSet(contents)
-        self.assertFalse(os.add(known))
-        self.assertEqual(list(os), contents)
-
-    def test_add_unknown_item(self):
-        """Test that OrderedSet.add(unknown) returns True."""
-        contents = ["foo", "bar", "baz"]
-        unknown = "qux"
-        os = utility.OrderedSet(contents)
-        self.assertTrue(os.add(unknown))
-        self.assertEqual(list(os), contents + [unknown])
-
-    def test_discard_known_item(self):
-        """Test that OrderedSet.discard(known) returns True."""
-        contents = ["foo", "bar", "baz"]
-        known = contents[1]
-        new_contents = [x for x in contents if x != known]
-        os = utility.OrderedSet(contents)
-        self.assertTrue(os.discard(known))
-        self.assertEqual(list(os), new_contents)
-
-    def test_discard_unknown_item(self):
-        """Test that OrderedSet.discard(unknown) returns False."""
-        contents = ["foo", "bar", "baz"]
-        unknown = "qux"
-        os = utility.OrderedSet(contents)
-        self.assertFalse(os.discard(unknown))
-        self.assertEqual(list(os), contents)
-
-    def test_pop_last_item(self):
-        """Test that OrderedSet.pop() returns the final item."""
-        contents = ["foo", "bar", "baz"]
-        os = utility.OrderedSet(contents)
-        self.assertEqual(os.pop(), contents[-1])
-        self.assertEqual(list(os), contents[:-1])
-
-    def test_pop_not_first_item(self):
-        """Test that OrderedSet.pop(last=False) returns the first item."""
-        contents = ["foo", "bar", "baz"]
-        os = utility.OrderedSet(contents)
-        self.assertEqual(os.pop(last=False), contents[0])
-        self.assertEqual(list(os), contents[1:])
-
-    def test_reversed_reverses_order(self):
-        """Test that reversed(OrderedSet()) reverses correctly."""
-        contents = ["foo", "bar", "baz"]
-        os = utility.OrderedSet(contents)
-        self.assertEqual(list(reversed(os)), list(reversed(contents)))
-
-    def test_equals_other_ordered_set(self):
-        """Test that OrderedSet equality accounts for order."""
-        contents = ["foo", "bar", "baz"]
-        os = utility.OrderedSet(contents)
-        self.assertNotEqual(os, utility.OrderedSet(reversed(os)))
-        self.assertEqual(os, utility.OrderedSet(contents))
-
-    def test_equals_other_iterable(self):
-        """Test that OrderedSet-to-other-iterable equality returns False."""
-        contents = ["foo",
"bar", "baz"] - os = utility.OrderedSet(contents) - self.assertNotEqual(os, set(os)) - self.assertNotEqual(os, frozenset(os)) - self.assertNotEqual(os, list(os)) - self.assertNotEqual(os, tuple(os)) - self.assertNotEqual(os, {x: 0 for x in os}) diff --git a/congress/tests/datalog/test_ruleset.py b/congress/tests/datalog/test_ruleset.py deleted file mode 100644 index ac960907..00000000 --- a/congress/tests/datalog/test_ruleset.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.datalog import compile -from congress.datalog import ruleset -from congress.tests import base - - -class TestRuleSet(base.TestCase): - def setUp(self): - super(TestRuleSet, self).setUp() - self.ruleset = ruleset.RuleSet() - - def test_empty_ruleset(self): - self.assertNotIn('p', self.ruleset) - self.assertEqual([], self.ruleset.keys()) - - def test_clear_ruleset(self): - rule1 = compile.parse1('p(x,y) :- q(x), r(y)') - self.ruleset.add_rule('p', rule1) - self.ruleset.clear() - - self.assertNotIn('p', self.ruleset) - self.assertEqual([], self.ruleset.keys()) - - def test_add_rule(self): - rule1 = compile.parse1('p(x,y) :- q(x), r(y)') - self.assertTrue(self.ruleset.add_rule('p', rule1)) - self.assertIn('p', self.ruleset) - self.assertEqual([rule1], self.ruleset.get_rules('p')) - self.assertEqual(['p'], self.ruleset.keys()) - - def test_add_existing_rule(self): - rule1 = compile.parse1('p(x,y) :- q(x), r(y)') - self.assertTrue(self.ruleset.add_rule('p', rule1)) - self.assertIn('p', self.ruleset) - self.assertEqual([rule1], self.ruleset.get_rules('p')) - self.assertEqual(['p'], self.ruleset.keys()) - - self.assertFalse(self.ruleset.add_rule('p', rule1)) - self.assertIn('p', self.ruleset) - self.assertEqual([rule1], self.ruleset.get_rules('p')) - self.assertEqual(['p'], self.ruleset.keys()) - - def test_add_rules_with_same_head(self): - rule1 = compile.parse1('p(x,y) :- q(x), r(y)') - rule2 = compile.parse1('p(x,y) :- s(x), t(y)') - - self.assertTrue(self.ruleset.add_rule('p', rule1)) - self.assertIn('p', self.ruleset) - self.assertEqual([rule1], self.ruleset.get_rules('p')) - self.assertEqual(['p'], self.ruleset.keys()) - - self.assertTrue(self.ruleset.add_rule('p', rule2)) - self.assertIn('p', self.ruleset) - self.assertIn(rule1, self.ruleset.get_rules('p')) - self.assertIn(rule2, self.ruleset.get_rules('p')) - self.assertEqual(['p'], self.ruleset.keys()) - - def test_add_rules_with_different_head(self): - rule1 = compile.parse1('p1(x,y) :- q(x), r(y)') - rule2 = compile.parse1('p2(x,y) :- s(x), t(y)') - - self.assertTrue(self.ruleset.add_rule('p1', rule1)) - self.assertTrue(self.ruleset.add_rule('p2', rule2)) - - self.assertIn('p1', self.ruleset) - self.assertEqual([rule1], self.ruleset.get_rules('p1')) - self.assertIn('p1', self.ruleset.keys()) - - self.assertIn('p2', self.ruleset) - 
self.assertEqual([rule2], self.ruleset.get_rules('p2')) - self.assertIn('p2', self.ruleset.keys()) - - def test_add_fact(self): - fact1 = compile.Fact('p', (1, 2, 3)) - equivalent_rule = compile.Rule(compile.parse1('p(1,2,3)'), ()) - - self.assertTrue(self.ruleset.add_rule('p', fact1)) - self.assertIn('p', self.ruleset) - self.assertEqual([equivalent_rule], self.ruleset.get_rules('p')) - self.assertEqual(['p'], self.ruleset.keys()) - - def test_add_equivalent_rule(self): - # equivalent_rule could be a fact because it has no body, and is - # ground. - equivalent_rule = compile.Rule(compile.parse1('p(1,2,3)'), ()) - - self.assertTrue(self.ruleset.add_rule('p', equivalent_rule)) - self.assertIn('p', self.ruleset) - self.assertEqual([equivalent_rule], self.ruleset.get_rules('p')) - self.assertEqual(['p'], self.ruleset.keys()) - - def test_discard_rule(self): - rule1 = compile.parse1('p(x,y) :- q(x), r(y)') - self.assertTrue(self.ruleset.add_rule('p', rule1)) - self.assertIn('p', self.ruleset) - self.assertEqual([rule1], self.ruleset.get_rules('p')) - - self.assertTrue(self.ruleset.discard_rule('p', rule1)) - self.assertNotIn('p', self.ruleset) - self.assertEqual([], self.ruleset.keys()) - - def test_discard_nonexistent_rule(self): - rule1 = compile.parse1('p(x,y) :- q(x), r(y)') - self.assertFalse(self.ruleset.discard_rule('p', rule1)) - self.assertNotIn('p', self.ruleset) - self.assertEqual([], self.ruleset.keys()) - - def test_discard_rules_with_same_head(self): - rule1 = compile.parse1('p(x,y) :- q(x), r(y)') - rule2 = compile.parse1('p(x,y) :- s(x), t(y)') - self.assertTrue(self.ruleset.add_rule('p', rule1)) - self.assertTrue(self.ruleset.add_rule('p', rule2)) - self.assertIn('p', self.ruleset) - self.assertIn(rule1, self.ruleset.get_rules('p')) - self.assertIn(rule2, self.ruleset.get_rules('p')) - - self.assertTrue(self.ruleset.discard_rule('p', rule1)) - self.assertTrue(self.ruleset.discard_rule('p', rule2)) - self.assertNotIn('p', self.ruleset) - self.assertEqual([], self.ruleset.keys()) - - def test_discard_rules_with_different_head(self): - rule1 = compile.parse1('p1(x,y) :- q(x), r(y)') - rule2 = compile.parse1('p2(x,y) :- s(x), t(y)') - self.assertTrue(self.ruleset.add_rule('p1', rule1)) - self.assertTrue(self.ruleset.add_rule('p2', rule2)) - self.assertIn('p1', self.ruleset) - self.assertIn('p2', self.ruleset) - self.assertIn(rule1, self.ruleset.get_rules('p1')) - self.assertIn(rule2, self.ruleset.get_rules('p2')) - - self.assertTrue(self.ruleset.discard_rule('p1', rule1)) - self.assertTrue(self.ruleset.discard_rule('p2', rule2)) - self.assertNotIn('p1', self.ruleset) - self.assertNotIn('p2', self.ruleset) - self.assertEqual([], self.ruleset.keys()) - - def test_discard_fact(self): - fact = compile.Fact('p', (1, 2, 3)) - equivalent_rule = compile.Rule(compile.parse1('p(1,2,3)'), ()) - - self.assertTrue(self.ruleset.add_rule('p', fact)) - self.assertIn('p', self.ruleset) - self.assertEqual([equivalent_rule], self.ruleset.get_rules('p')) - - self.assertTrue(self.ruleset.discard_rule('p', fact)) - self.assertNotIn('p', self.ruleset) - self.assertEqual([], self.ruleset.keys()) - - def test_discard_equivalent_rule(self): - fact = compile.Fact('p', (1, 2, 3)) - equivalent_rule = compile.Rule(compile.parse1('p(1,2,3)'), ()) - - self.assertTrue(self.ruleset.add_rule('p', fact)) - self.assertIn('p', self.ruleset) - self.assertEqual([equivalent_rule], self.ruleset.get_rules('p')) - - self.assertTrue(self.ruleset.discard_rule('p', equivalent_rule)) - self.assertNotIn('p', self.ruleset) - 
self.assertEqual([], self.ruleset.keys()) - - def test_contains(self): - fact = compile.Fact('p', (1, 2, 3)) - rule = compile.parse1('p(x) :- q(x)') - self.ruleset.add_rule('p', fact) - self.ruleset.add_rule('p', rule) - - # positive tests - equivalent_fact1 = compile.Fact('p', (1, 2, 3)) - equivalent_fact2 = compile.parse1('p(1,2,3)') - equivalent_fact3 = compile.Rule(compile.parse1('p(1,2,3)'), ()) - equivalent_rule = compile.parse1('p(x) :- q(x)') - self.assertTrue(self.ruleset.contains('p', equivalent_fact1)) - self.assertTrue(self.ruleset.contains('p', equivalent_fact2)) - self.assertTrue(self.ruleset.contains('p', equivalent_fact3)) - self.assertTrue(self.ruleset.contains('p', equivalent_rule)) - - # negative tests - nonequiv_fact = compile.parse1('p(4, 5, 6)') - nonequiv_rule = compile.parse1('p(x) :- r(x)') - self.assertFalse(self.ruleset.contains('p', nonequiv_fact)) - self.assertFalse(self.ruleset.contains('p', nonequiv_rule)) diff --git a/congress/tests/datalog/test_unify.py b/congress/tests/datalog/test_unify.py deleted file mode 100644 index 6f847ef3..00000000 --- a/congress/tests/datalog/test_unify.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_log import log as logging - -from congress.datalog import compile -from congress.datalog import topdown -from congress.datalog import unify -from congress.tests import base - -LOG = logging.getLogger(__name__) - - -NREC_THEORY = 'non-recursive theory' -DB_THEORY = 'database' -MAT_THEORY = 'materialized' - - -class TestUnify(base.TestCase): - def open(self, msg): - LOG.debug("** Started: %s **", msg) - - def close(self, msg): - LOG.debug("** Finished: %s **", msg) - - def create_unify(self, atom_string1, atom_string2, msg, change_num, - unifier1=None, unifier2=None, recursive_str=False): - """Create unification and check basic results.""" - def str_uni(u): - if recursive_str: - return u.recur_str() - else: - return str(u) - - def print_unifiers(changes=None): - LOG.debug("unifier1: %s", str_uni(unifier1)) - LOG.debug("unifier2: %s", str_uni(unifier2)) - if changes is not None: - LOG.debug("changes: %s", - ";".join([str(x) for x in changes])) - - if msg is not None: - self.open(msg) - if unifier1 is None: - # LOG.debug("Generating new unifier1") - unifier1 = topdown.TopDownTheory.new_bi_unifier() - if unifier2 is None: - # LOG.debug("Generating new unifier2") - unifier2 = topdown.TopDownTheory.new_bi_unifier() - p1 = compile.parse(atom_string1)[0] - p2 = compile.parse(atom_string2)[0] - changes = unify.bi_unify_atoms(p1, unifier1, p2, unifier2) - self.assertIsNotNone(changes) - print_unifiers(changes) - p1p = p1.plug(unifier1) - p2p = p2.plug(unifier2) - print_unifiers(changes) - if not p1p == p2p: - LOG.debug("Failure: bi-unify(%s, %s) produced %s and %s", - p1, p2, str_uni(unifier1), str_uni(unifier2)) - 
LOG.debug("plug(%s, %s) = %s", p1, str_uni(unifier1), p1p) - LOG.debug("plug(%s, %s) = %s", p2, str_uni(unifier2), p2p) - self.fail() - if change_num is not None and len(changes) != change_num: - LOG.debug("Failure: bi-unify(%s, %s) produced %s and %s", - p1, p2, str_uni(unifier1), str_uni(unifier2)) - LOG.debug("plug(%s, %s) = %s", p1, str_uni(unifier1), p1p) - LOG.debug("plug(%s, %s) = %s", p2, str_uni(unifier2), p2p) - LOG.debug("Expected %s changes; computed %s changes", - change_num, len(changes)) - self.fail() - LOG.debug("unifier1: %s", str_uni(unifier1)) - LOG.debug("unifier2: %s", str_uni(unifier2)) - if msg is not None: - self.open(msg) - return (p1, unifier1, p2, unifier2, changes) - - def check_unify(self, atom_string1, atom_string2, msg, change_num, - unifier1=None, unifier2=None, recursive_str=False): - self.open(msg) - (p1, unifier1, p2, unifier2, changes) = self.create_unify( - atom_string1, atom_string2, msg, change_num, - unifier1=unifier1, unifier2=unifier2, recursive_str=recursive_str) - unify.undo_all(changes) - self.assertEqual(p1, p1.plug(unifier1)) - self.assertEqual(p2, p2.plug(unifier2)) - self.close(msg) - - def check_unify_fail(self, atom_string1, atom_string2, msg): - """Check that the bi-unification fails.""" - self.open(msg) - unifier1 = topdown.TopDownTheory.new_bi_unifier() - unifier2 = topdown.TopDownTheory.new_bi_unifier() - p1 = compile.parse(atom_string1)[0] - p2 = compile.parse(atom_string2)[0] - changes = unify.bi_unify_atoms(p1, unifier1, p2, unifier2) - if changes is not None: - LOG.debug("Failure failure: bi-unify(%s, %s) produced %s and %s", - p1, p2, unifier1, unifier2) - LOG.debug("plug(%s, %s) = %s", p1, unifier1, p1.plug(unifier1)) - LOG.debug("plug(%s, %s) = %s", p2, unifier2, p2.plug(unifier2)) - self.fail() - self.close(msg) - - def test_instance(self): - """Test whether the INSTANCE computation is correct.""" - def assertIsNotNone(x): - self.assertTrue(x is not None) - - def assertIsNone(x): - self.assertTrue(x is None) - - assertIsNotNone(unify.instance(str2form('p(1)'), str2form('p(y)'))) - assertIsNotNone(unify.instance(str2form('p(1,2)'), str2form('p(x,y)'))) - assertIsNotNone(unify.instance(str2form('p(1,x)'), str2form('p(x,y)'))) - assertIsNotNone(unify.instance(str2form('p(1,x,1)'), - str2form('p(x,y,x)'))) - assertIsNotNone(unify.instance(str2form('p(1,x,1)'), - str2form('p(x,y,z)'))) - assertIsNone(unify.instance(str2form('p(1,2)'), str2form('p(x,x)'))) - - def test_same(self): - """Test whether the SAME computation is correct.""" - def assertIsNotNone(x): - self.assertTrue(x is not None) - - def assertIsNone(x): - self.assertTrue(x is None) - - assertIsNotNone(unify.same(str2form('p(x)'), str2form('p(y)'))) - assertIsNotNone(unify.same(str2form('p(x)'), str2form('p(x)'))) - assertIsNotNone(unify.same(str2form('p(x,y)'), str2form('p(x,y)'))) - assertIsNotNone(unify.same(str2form('p(x,y)'), str2form('p(y,x)'))) - assertIsNone(unify.same(str2form('p(x,x)'), str2form('p(x,y)'))) - assertIsNone(unify.same(str2form('p(x,y)'), str2form('p(x,x)'))) - assertIsNotNone(unify.same(str2form('p(x,x)'), str2form('p(y,y)'))) - assertIsNotNone(unify.same(str2form('p(x,y,x)'), str2form('p(y,x,y)'))) - assertIsNone(unify.same(str2form('p(x,y,z)'), str2form('p(x,y,1)'))) - - def test_bi_unify(self): - """Test the bi-unification routine and its supporting routines.""" - def var(x): - return compile.Term.create_from_python(x, force_var=True) - - def obj(x): - return compile.Term.create_from_python(x) - - def new_uni(): - return 
topdown.TopDownTheory.new_bi_unifier() - - # apply, add - u1 = new_uni() - u1.add(var('x'), obj(1), None) - self.assertEqual(u1.apply(var('x')), obj(1)) - - u1 = new_uni() - u2 = new_uni() - u1.add(var('y'), var('x'), u2) - self.assertEqual(u1.apply(var('y')), var('x')) - u2.add(var('x'), obj(2), None) - self.assertEqual(u1.apply(var('y')), obj(2)) - - # delete - u1.delete(var('y')) - self.assertEqual(u1.apply(var('y')), var('y')) - - u1 = new_uni() - u2 = new_uni() - u1.add(var('y'), var('x'), u2) - u2.add(var('x'), obj(2), None) - u2.delete(var('x')) - self.assertEqual(u1.apply(var('y')), var('x')) - u1.delete(var('y')) - self.assertEqual(u1.apply(var('y')), var('y')) - - # bi_unify - self.check_unify("p(x)", "p(1)", - "Matching", 1) - self.check_unify("p(x,y)", "p(1,2)", - "Binary Matching", 2) - self.check_unify("p(1,2)", "p(x,y)", - "Binary Matching Reversed", 2) - self.check_unify("p(1,1)", "p(x,y)", - "Binary Matching Many-to-1", 2) - self.check_unify_fail("p(1,2)", "p(x,x)", - "Binary Matching Failure") - self.check_unify("p(1,x)", "p(1,y)", - "Simple Unification", 1) - self.check_unify("p(1,x)", "p(y,1)", - "Separate Namespace Unification", 2) - self.check_unify("p(1,x)", "p(x,2)", - "Namespace Collision Unification", 2) - self.check_unify("p(x,y,z)", "p(t,u,v)", - "Variable-only Unification", 3) - self.check_unify("p(x,y,y)", "p(t,u,t)", - "Repeated Variable Unification", 3) - self.check_unify_fail("p(x,y,y,x,y)", "p(t,u,t,1,2)", - "Repeated Variable Unification Failure") - self.check_unify( - "p(x,y,y)", "p(x,y,x)", - "Repeated Variable Unification Namespace Collision", 3) - self.check_unify_fail( - "p(x,y,y,x,y)", "p(x,y,x,1,2)", - "Repeated Variable Unification Namespace Collision Failure") - - # test sequence of changes - (p1, u1, p2, u2, changes) = self.create_unify( - "p(x)", "p(x)", "Step 1", 1) # 1 since the two xs are different - self.create_unify( - "p(x)", "p(1)", "Step 2", 1, unifier1=u1, recursive_str=True) - self.create_unify( - "p(x)", "p(1)", "Step 3", 0, unifier1=u1, recursive_str=True) - - -class TestMatch(base.TestCase): - - def check(self, atom1, atom2): - atom1 = compile.parse1(atom1) - atom2 = compile.parse1(atom2) - unifier = unify.BiUnifier() - changes = unify.match_atoms(atom1, unifier, atom2) - self.assertIsNotNone(changes) - self.assertEqual(atom1.plug(unifier), atom2) - - def cherr(self, atom1, atom2): - atom1 = compile.parse1(atom1) - atom2 = compile.parse1(atom2) - unifier = unify.BiUnifier() - self.assertIsNone(unify.match_atoms(atom1, unifier, atom2)) - - def test_atoms(self): - self.check('p(x, y)', 'p(1, 2)') - self.check('p(x, x)', 'p(1, 1)') - self.cherr('p(x, x)', 'p(1, 2)') - self.check('p(x, y, z)', 'p(1, 2, 1)') - self.check('p(x, y, x, z)', 'p(1, 2, 1, 3)') - self.cherr('p(x, y, x, y)', 'p(1, 2, 1, 3)') - self.cherr('p(x, y, x, y)', 'p(1, 2, 2, 1)') - self.check('p(x, y, x, y)', 'p(1, 1, 1, 1)') - - def test_sequence(self): - atom1 = compile.parse1('p(x, y)') - atom2 = compile.parse1('p(1, 2)') - unifier = unify.BiUnifier() - changes = unify.match_atoms(atom1, unifier, atom2) - self.assertIsNotNone(changes) - - atom3 = compile.parse1('q(y, z)') - atom4 = compile.parse1('q(2, 3)') - changes = unify.match_atoms(atom3, unifier, atom4) - self.assertIsNotNone(changes) - - atom5 = compile.parse1('r(x, y, z, z)') - atom6 = compile.parse1('r(1, 2, 3, 3)') - changes = unify.match_atoms(atom5, unifier, atom6) - self.assertIsNotNone(changes) - - self.assertEqual(atom1.plug(unifier), atom2) - self.assertEqual(atom3.plug(unifier), atom4) - 
self.assertEqual(atom5.plug(unifier), atom6) - - -def str2form(formula_string): - return compile.parse1(formula_string) - - -def str2pol(policy_string): - return compile.parse(policy_string) - - -def pol2str(policy): - return " ".join(str(x) for x in policy) - - -def form2str(formula): - return str(formula) diff --git a/congress/tests/datalog/test_utility.py b/congress/tests/datalog/test_utility.py deleted file mode 100644 index 931ed3fe..00000000 --- a/congress/tests/datalog/test_utility.py +++ /dev/null @@ -1,500 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import contextlib -import logging as python_logging - -import six - -from congress.datalog import utility -from congress.tests import base - - -class TestGraph(base.TestCase): - def test_nodes(self): - """Test addition/deletion of nodes.""" - g = utility.Graph() - g.add_node(1) - g.add_node(2) - g.add_node(3) - g.add_node(1) - self.assertTrue(g.node_in(1)) - self.assertTrue(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 3) - - g.delete_node(1) - self.assertFalse(g.node_in(1)) - self.assertTrue(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 2) - - g.delete_node(1) - self.assertFalse(g.node_in(1)) - self.assertTrue(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 2) - - g.add_node(1) - self.assertTrue(g.node_in(1)) - self.assertTrue(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 3) - - g.delete_node(2) - self.assertTrue(g.node_in(1)) - self.assertFalse(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 2) - - def test_edges(self): - """Test addition/deletion of edges.""" - g = utility.Graph() - - g.add_edge(1, 2) - g.add_edge(2, 3) - g.add_edge(2, 4) - g.add_edge(1, 2) - self.assertTrue(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(2, 3)) - self.assertTrue(g.edge_in(2, 4)) - self.assertEqual(len(g), 7) - - g.delete_edge(2, 4) - self.assertTrue(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(2, 3)) - self.assertFalse(g.edge_in(2, 4)) - self.assertEqual(len(g), 6) - - g.delete_edge(2, 3) - self.assertTrue(g.edge_in(1, 2)) - self.assertFalse(g.edge_in(2, 3)) - self.assertFalse(g.edge_in(2, 4)) - self.assertEqual(len(g), 5) - - g.delete_edge(2, 3) - self.assertTrue(g.edge_in(1, 2)) - self.assertFalse(g.edge_in(2, 3)) - self.assertFalse(g.edge_in(2, 4)) - self.assertEqual(len(g), 5) - - g.add_edge(2, 3) - self.assertTrue(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(2, 3)) - self.assertFalse(g.edge_in(2, 4)) - self.assertEqual(len(g), 6) - - def test_or(self): - g1 = utility.Graph() - g1.add_edge(1, 2) - g1.add_edge(2, 3) - - g2 = utility.Graph() - g2.add_edge(2, 3) - g2.add_edge(3, 4) - - g = g1 | g2 - self.assertTrue(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(2, 3)) - self.assertTrue(g.edge_in(3, 4)) - self.assertIsInstance(g, utility.Graph) 
- self.assertTrue(g1.edge_in(1, 2)) - self.assertTrue(g1.edge_in(2, 3)) - self.assertFalse(g1.edge_in(3, 4)) - self.assertFalse(g2.edge_in(1, 2)) - self.assertTrue(g2.edge_in(2, 3)) - self.assertTrue(g2.edge_in(3, 4)) - - def test_ior(self): - g1 = utility.Graph() - g1.add_edge(1, 2) - g1.add_edge(2, 3) - - g2 = utility.Graph() - g2.add_edge(2, 3) - g2.add_edge(3, 4) - - g1 |= g2 - self.assertTrue(g1.edge_in(1, 2)) - self.assertTrue(g1.edge_in(2, 3)) - self.assertTrue(g1.edge_in(3, 4)) - self.assertFalse(g2.edge_in(1, 2)) - self.assertTrue(g2.edge_in(2, 3)) - self.assertTrue(g2.edge_in(3, 4)) - - def test_cycle(self): - g1 = utility.Graph() - g1.add_edge(1, 2) - self.assertFalse(g1.has_cycle()) - g1.add_edge(2, 3) - self.assertFalse(g1.has_cycle()) - g1.add_edge(2, 4) - self.assertFalse(g1.has_cycle()) - g1.add_edge(4, 1) - self.assertTrue(g1.has_cycle()) - g1.delete_edge(2, 3) - self.assertTrue(g1.has_cycle()) - g1.delete_edge(2, 4) - self.assertFalse(g1.has_cycle()) - - actual_cycle_as_set = frozenset(utility.Cycle(['p', 'q', 't', 'p'])) - expected_cycle_as_set = frozenset([('p', 'q'), ('q', 't'), ('t', 'p')]) - self.assertEqual(actual_cycle_as_set, expected_cycle_as_set) - - g = utility.Graph() - g.add_edge('p', 'q') - g.add_edge('p', 'r') - g.add_edge('q', 't') - g.add_edge('q', 's') - g.add_edge('t', 't') - g.add_edge('t', 'p') - g.add_edge('t', 'q') - self.assertTrue(g.has_cycle()) - self.assertEqual(len(g.cycles()), 3) - expected_cycle_set = set([ - utility.Cycle(['p', 'q', 't', 'p']), - utility.Cycle(['q', 't', 'q']), - utility.Cycle(['t', 't']) - ]) - actual_cycle_set = set([ - utility.Cycle(g.cycles()[0]), - utility.Cycle(g.cycles()[1]), - utility.Cycle(g.cycles()[2]) - ]) - self.assertEqual(expected_cycle_set, actual_cycle_set) - - def test_find_reachable_nodes(self): - g1 = utility.Graph() - self.assertEqual(g1.find_reachable_nodes([1]), set()) - g1.add_edge(0, 1) - g1.add_edge(1, 2) - g1.add_edge(2, 3) - g1.add_edge(2, 4) - g1.add_edge(3, 5) - g1.add_edge(0, 6) - g1.add_edge(7, 8) - g1.add_edge(8, 9) - g1.add_edge(10, 5) - g1.add_edge(11, 12) - self.assertEqual(g1.find_reachable_nodes([]), set()) - self.assertEqual(g1.find_reachable_nodes([1]), set([1, 2, 3, 4, 5])) - self.assertEqual(g1.find_reachable_nodes([10]), set([10, 5])) - self.assertEqual(g1.find_reachable_nodes([1, 10]), - set([1, 2, 3, 4, 5, 10])) - self.assertEqual(g1.find_reachable_nodes([5]), set([5])) - self.assertEqual(g1.find_reachable_nodes([11]), set([11, 12])) - g1.add_edge(5, 2) - self.assertEqual(g1.find_reachable_nodes([10]), set([10, 5, 2, 3, 4])) - - def test_dependencies(self): - g1 = utility.Graph() - self.assertIsNone(g1.dependencies(1)) - g1.add_edge(0, 1) - g1.add_edge(1, 2) - g1.add_edge(2, 3) - g1.add_edge(2, 4) - g1.add_edge(3, 5) - g1.add_edge(0, 6) - g1.add_edge(7, 8) - g1.add_edge(8, 9) - g1.add_edge(10, 5) - g1.add_edge(11, 12) - self.assertEqual(g1.dependencies(1), set([1, 2, 3, 4, 5])) - self.assertEqual(g1.dependencies(10), set([10, 5])) - self.assertEqual(g1.dependencies(5), set([5])) - self.assertEqual(g1.dependencies(11), set([11, 12])) - - def test_cyclic_dependencies(self): - g1 = utility.Graph() - self.assertIsNone(g1.dependencies(1)) - g1.add_edge(0, 1) - g1.add_edge(1, 2) - g1.add_edge(2, 3) - g1.add_edge(2, 4) - g1.add_edge(3, 5) - g1.add_edge(0, 6) - g1.add_edge(7, 8) - g1.add_edge(8, 9) - g1.add_edge(10, 5) - g1.add_edge(11, 12) - g1.add_edge(5, 2) # create cycle - self.assertEqual(g1.dependencies(1), set([1, 2, 3, 4, 5])) - self.assertEqual(g1.dependencies(10), set([10, 5, 2, 
3, 4])) - self.assertEqual(g1.dependencies(5), set([5, 2, 3, 4])) - self.assertEqual(g1.dependencies(11), set([11, 12])) - - def test_find_dependent_nodes(self): - g1 = utility.Graph() - self.assertEqual(g1.find_dependent_nodes([1]), set([1])) - g1.add_edge(0, 1) - g1.add_edge(1, 2) - g1.add_edge(2, 3) - g1.add_edge(2, 4) - g1.add_edge(3, 5) - g1.add_edge(0, 6) - g1.add_edge(7, 8) - g1.add_edge(8, 9) - g1.add_edge(10, 5) - g1.add_edge(11, 12) - self.assertEqual(g1.find_dependent_nodes([0]), set([0])) - self.assertEqual(g1.find_dependent_nodes([2]), set([2, 1, 0])) - self.assertEqual(g1.find_dependent_nodes([5]), - set([5, 0, 1, 2, 3, 10])) - self.assertEqual(g1.find_dependent_nodes([12]), set([11, 12])) - self.assertEqual(g1.find_dependent_nodes([5, 6]), - set([5, 0, 1, 2, 3, 10, 6])) - g1.add_edge(5, 2) # add cycle - self.assertEqual(g1.find_dependent_nodes([2]), - set([5, 0, 1, 2, 3, 10])) - - -class TestBagGraph(base.TestCase): - def test_nodes(self): - """Test addition/deletion of nodes.""" - g = utility.BagGraph() - g.add_node(1) - g.add_node(2) - g.add_node(3) - g.add_node(1) - self.assertTrue(g.node_in(1)) - self.assertTrue(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 4) - - g.delete_node(1) - self.assertTrue(g.node_in(1)) - self.assertTrue(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 3) - - g.delete_node(1) - self.assertFalse(g.node_in(1)) - self.assertTrue(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 2) - - g.add_node(1) - self.assertTrue(g.node_in(1)) - self.assertTrue(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 3) - - g.delete_node(2) - self.assertTrue(g.node_in(1)) - self.assertFalse(g.node_in(2)) - self.assertTrue(g.node_in(3)) - self.assertEqual(len(g), 2) - - def test_edges(self): - """Test addition/deletion of edges.""" - g = utility.BagGraph() - - g.add_edge(1, 2) - g.add_edge(2, 3) - g.add_edge(2, 4) - g.add_edge(1, 2) - self.assertTrue(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(2, 3)) - self.assertTrue(g.edge_in(2, 4)) - self.assertEqual(len(g), 12) - - g.delete_edge(1, 2) - self.assertTrue(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(2, 3)) - self.assertTrue(g.edge_in(2, 4)) - self.assertEqual(len(g), 9) - - g.delete_edge(1, 2) - self.assertFalse(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(2, 3)) - self.assertTrue(g.edge_in(2, 4)) - self.assertEqual(len(g), 6) - - g.delete_edge(2, 3) - self.assertFalse(g.edge_in(1, 2)) - self.assertFalse(g.edge_in(2, 3)) - self.assertTrue(g.edge_in(2, 4)) - self.assertEqual(len(g), 3) - - g.add_edge(1, 2) - self.assertTrue(g.edge_in(1, 2)) - self.assertFalse(g.edge_in(2, 3)) - self.assertTrue(g.edge_in(2, 4)) - self.assertEqual(len(g), 6) - - g.add_node(1) - self.assertEqual(g.node_count(1), 2) - - def test_edge_labels(self): - g = utility.BagGraph() - - g.add_edge(1, 2) - g.add_edge(1, 2, "label1") - g.add_edge(1, 2, "label1") - g.add_edge(1, 2, "label2") - self.assertTrue(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(1, 2, "label1")) - self.assertTrue(g.edge_in(1, 2, "label2")) - self.assertFalse(g.edge_in(1, 2, "non-existent")) - self.assertEqual(g.edge_count(1, 2, "label1"), 2) - - g.delete_edge(1, 2, "label1") - self.assertTrue(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(1, 2, "label1")) - self.assertTrue(g.edge_in(1, 2, "label2")) - self.assertFalse(g.edge_in(1, 2, "non-existent")) - self.assertEqual(g.edge_count(1, 2, "label1"), 1) - - g.delete_edge(1, 2, "label1") - self.assertTrue(g.edge_in(1, 2)) - 
self.assertFalse(g.edge_in(1, 2, "label1")) - self.assertTrue(g.edge_in(1, 2, "label2")) - self.assertFalse(g.edge_in(1, 2, "non-existent")) - self.assertEqual(g.edge_count(1, 2, "label1"), 0) - - g.delete_edge(1, 2, "label2") - self.assertTrue(g.edge_in(1, 2)) - self.assertFalse(g.edge_in(1, 2, "label1")) - self.assertFalse(g.edge_in(1, 2, "label2")) - self.assertFalse(g.edge_in(1, 2, "non-existent")) - self.assertEqual(g.edge_count(1, 2, "label1"), 0) - - def test_or(self): - g1 = utility.BagGraph() - g1.add_edge(1, 2) - g1.add_edge(2, 3) - - g2 = utility.BagGraph() - g2.add_edge(2, 3) - g2.add_edge(3, 4) - - g = g1 | g2 - self.assertTrue(g.edge_in(1, 2)) - self.assertTrue(g.edge_in(2, 3)) - self.assertTrue(g.edge_in(3, 4)) - self.assertEqual(g.edge_count(2, 3), 2) - self.assertIsInstance(g, utility.Graph) - self.assertTrue(g1.edge_in(1, 2)) - self.assertTrue(g1.edge_in(2, 3)) - self.assertFalse(g1.edge_in(3, 4)) - self.assertEqual(g1.edge_count(2, 3), 1) - self.assertFalse(g2.edge_in(1, 2)) - self.assertTrue(g2.edge_in(2, 3)) - self.assertTrue(g2.edge_in(3, 4)) - self.assertEqual(g2.edge_count(2, 3), 1) - - def test_ior(self): - g1 = utility.BagGraph() - g1.add_edge(1, 2) - g1.add_edge(2, 3) - - g2 = utility.BagGraph() - g2.add_edge(2, 3) - g2.add_edge(3, 4) - - g1 |= g2 - self.assertTrue(g1.edge_in(1, 2)) - self.assertTrue(g1.edge_in(2, 3)) - self.assertTrue(g1.edge_in(3, 4)) - self.assertIsInstance(g1, utility.BagGraph) - self.assertEqual(g1.edge_count(2, 3), 2) - self.assertFalse(g2.edge_in(1, 2)) - self.assertTrue(g2.edge_in(2, 3)) - self.assertTrue(g2.edge_in(3, 4)) - self.assertEqual(g2.edge_count(2, 3), 1) - - def test_cycle(self): - g1 = utility.BagGraph() - g1.add_edge(1, 2) - self.assertFalse(g1.has_cycle()) - g1.add_edge(2, 3) - self.assertFalse(g1.has_cycle()) - g1.add_edge(2, 4) - self.assertFalse(g1.has_cycle()) - g1.add_edge(2, 4) - self.assertFalse(g1.has_cycle()) - g1.add_edge(4, 1) - self.assertTrue(g1.has_cycle()) - g1.delete_edge(2, 3) - self.assertTrue(g1.has_cycle()) - g1.delete_edge(2, 4) - self.assertTrue(g1.has_cycle()) - g1.delete_edge(2, 4) - self.assertFalse(g1.has_cycle()) - - -class TestIterstr(base.TestCase): - class X(object): - def __init__(self, v): - self.v = v - - def __str__(self): - return "%s" % self.v - - def __repr__(self): - return "X:%s" % self.v - - @contextlib.contextmanager - def get_logging_fixtures(self): - stream = six.moves.StringIO() - handler = python_logging.StreamHandler(stream) - try: - logger = python_logging.getLogger(self.__class__.__name__) - logger.setLevel(python_logging.INFO) - handler.setLevel(python_logging.INFO) - logger.addHandler(handler) - try: - yield (stream, handler, logger) - finally: - logger.removeHandler(handler) - finally: - handler.close() - - def test__str__returns_informal_representation(self): - xs = map(TestIterstr.X, range(5)) - observed = utility.iterstr(xs) - self.assertEqual("[0;1;2;3;4]", str(observed)) - self.assertEqual("[0;1;2;3;4]", "{}".format(observed)) - self.assertEqual("[0;1;2;3;4]", "%s" % observed) - - def test__repr__returns_formal_representation(self): - xs = map(TestIterstr.X, range(5)) - observed = utility.iterstr(xs) - self.assertEqual("[X:0;X:1;X:2;X:3;X:4]", repr(observed)) - self.assertEqual("[X:0;X:1;X:2;X:3;X:4]", "{!r}".format(observed)) - self.assertEqual("[X:0;X:1;X:2;X:3;X:4]", "%r" % observed) - - def test_empty(self): - xs = map(TestIterstr.X, range(0)) - observed = utility.iterstr(xs) - self.assertEqual("[]", str(observed)) - self.assertEqual("[]", repr(observed)) - - def 
test_logging_basic_integration(self): - with self.get_logging_fixtures() as (stream, handler, logger): - iterable = utility.iterstr(map(TestIterstr.X, range(5))) - logger.info("some message %s", iterable) - handler.flush() - self.assertEqual("some message [0;1;2;3;4]\n", stream.getvalue()) - - def test_logging_skips_interpolation(self): - with self.get_logging_fixtures() as (stream, handler, logger): - iterable = utility.iterstr(map(TestIterstr.X, range(5))) - logger.debug("some message %s", iterable) - self.assertIsNone(iterable._str_interp) diff --git a/congress/tests/datasources.conf b/congress/tests/datasources.conf deleted file mode 100644 index 6d4dfc5f..00000000 --- a/congress/tests/datasources.conf +++ /dev/null @@ -1,22 +0,0 @@ - -[neutron] -module: datasources/neutron_driver.py -username: demo -password: password -auth_url: http://127.0.0.1:5000/v2.0 -tenant_name: demo - -[neutron2] -module: datasources/neutron_driver.py -username: demo -password: password -auth_url: http://127.0.0.1:5000/v2.0 -tenant_name: demo - -[nova] -module: datasources/nova_driver.py -username: demo -password: password -auth_url: http://127.0.0.1:5000/v2.0 -tenant_name: demo - diff --git a/congress/tests/datasources/__init__.py b/congress/tests/datasources/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/tests/datasources/fakes.py b/congress/tests/datasources/fakes.py deleted file mode 100644 index 3c848efd..00000000 --- a/congress/tests/datasources/fakes.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
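A short usage sketch for the fake client defined below; the ids and names are
taken from the canned data this module sets up:

    from congress.tests.datasources import fakes

    nova = fakes.NovaFakeClient()
    servers = nova.servers.list()            # three canned mock servers
    assert [s.id for s in servers] == [1234, 5678, 9012]
    assert nova.flavors.list()[0].name == '256 MB Server'
    assert nova.services.list()[1].disabled_reason == 'daily maintenance'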
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock - - -class NovaFakeClient(mock.MagicMock): - # TODO(rajdeepd): Replace Fake with mocks directly in test_neutron_driver - def __init__(self, *args, **kwargs): - super(NovaFakeClient, self).__init__(*args, **kwargs) - self.servers = mock.MagicMock() - self.servers.list.return_value = self.get_server_list() - self.flavors = mock.MagicMock() - self.flavors.list.return_value = self.get_flavor_list() - - self.hosts = mock.MagicMock() - self.hosts.list.return_value = self.get_host_list() - self.services = mock.MagicMock() - self.services.list.return_value = self.get_service_list() - - self.availability_zones = mock.MagicMock() - self.availability_zones.list.return_value = self.get_zone_list() - - def get_mock_server(self, id, name, host_id, status, tenant_id, user_id, - flavor, image, zone=None, host_name=None): - server = mock.MagicMock() - server.id = id - server.hostId = host_id - server.tenant_id = tenant_id - server.user_id = user_id - server.status = status - server.name = name - server.image = image - server.flavor = flavor - if zone is not None: - setattr(server, 'OS-EXT-AZ:availability_zone', zone) - else: - # This ensures that the magic mock raises an AttributeError - delattr(server, 'OS-EXT-AZ:availability_zone') - if host_name is not None: - setattr(server, 'OS-EXT-SRV-ATTR:hypervisor_hostname', - host_name) - else: - # This ensures that the magic mock raises an AttributeError - delattr(server, 'OS-EXT-SRV-ATTR:hypervisor_hostname') - return server - - def get_server_list(self): - server_one = ( - self.get_mock_server(1234, 'sample-server', - "e4d909c290d0fb1ca068ffaddf22cbd0", - 'BUILD', - '50e14867-7c64-4ec9-be8d-ed2470ca1d24', - '33ea0494-2bdf-4382-a445-9068997430b9', - {"id": 1}, {"id": 2}, 'default', 'host1')) - - server_two = ( - self.get_mock_server(5678, 'sample-server2', - "9e107d9d372bb6826bd81d3542a419d6", - 'ACTIVE', - '50e14867-7c64-4ec9-be8d-ed2470ca1d24', - '33ea0494-2bdf-4382-a445-9068997430b9', - {"id": 1}, {"id": 2})) - - server_three = ( - self.get_mock_server(9012, 'sample-server3', - "9e107d9d372bb6826bd81d3542a419d6", - 'ACTIVE', - '50e14867-7c64-4ec9-be8d-ed2470ca1d24', - '33ea0494-2bdf-4382-a445-9068997430b9', - {"id": 1}, {"id": 2}, 'foo', 'host2')) - - return [server_one, server_two, server_three] - - def get_flavor(self, id, name, vcpus, ram, disk, ephemeral, rxtx_factor): - f = mock.MagicMock() - f.id = id - f.name = name - f.vcpus = vcpus - f.ram = ram - f.disk = disk - f.ephemeral = ephemeral - f.rxtx_factor = rxtx_factor - return f - - def get_flavor_list(self): - flavor_one = self.get_flavor(1, "256 MB Server", 1, 256, 10, 10, 1.0) - flavor_two = self.get_flavor(2, "512 MB Server", 2, 512, 20, 20, 1.0) - flavor_three = self.get_flavor(3, "128 MB Server", 4, 128, 0, 0, 3.0) - flavor_four = self.get_flavor(4, "1024 MB Server", 3, 1024, 10, 10, - 2.0) - - return [flavor_one, flavor_two, flavor_three, flavor_four] - - def get_host(self, host_name, service, zone): - h = mock.MagicMock() - h.host_name = host_name - h.service = service - h.zone = zone - return h - - def get_host_list(self): - h_one = self.get_host('host1', 'nova-compute', 'nova1') - h_two = self.get_host('host2', 'nova-cert', 'nova1') - - return [h_one, h_two] - - def get_service(self, id, binary, host, zone, status, state, - updated_at, disabled_reason): - s = mock.MagicMock() - s.id = id - s.binary = binary - s.host = host - s.zone = zone - s.status = status - 
s.state = state - s.updated_at = updated_at - s.disabled_reason = disabled_reason - - return s - - def get_service_list(self): - service_one = self.get_service(1, 'nova-compute', 'nova', - 'nova1', 'enabled', 'up', - '2015-07-28T08:28:37.000000', None) - service_two = self.get_service(2, 'nova-schedule', 'nova', - 'nova1', 'disabled', 'up', - '2015-07-28T08:28:38.000000', - 'daily maintenance') - - return [service_one, service_two] - - def get_availability_zone(self, name, state): - zone = mock.MagicMock() - zone.zoneName = name - zone.zoneState = state - return zone - - def get_zone_list(self): - zone_one = self.get_availability_zone('AZ1', 'available') - zone_two = self.get_availability_zone('AZ2', 'not available') - - return [zone_one, zone_two] diff --git a/congress/tests/datasources/performance_datasource_driver.py b/congress/tests/datasources/performance_datasource_driver.py deleted file mode 100644 index d4a18a59..00000000 --- a/congress/tests/datasources/performance_datasource_driver.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.datasources import datasource_driver - - -class PerformanceTestDriver(datasource_driver.PollingDataSourceDriver): - TABLE = 'p' - - # This is the most common per-value translator, so define it once here. 
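-    # Illustration of what the translator below does (a sketch based on
-    # update_from_datasource further down): convert_objs() flattens each
-    # client dict, e.g.
-    #     {'field1': 1, 'field2': 2, ..., 'field6': 6}
-    # into a (table, row) pair such as ('p', (1, 2, 3, 4, 5, 6)),
-    # and the driver collects the rows into self.state['p'].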
- value_trans = {'translation-type': 'VALUE'} - - p_translator = { - 'translation-type': 'HDICT', - 'table-name': TABLE, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'field1', 'translator': value_trans}, - {'fieldname': 'field2', 'translator': value_trans}, - {'fieldname': 'field3', 'translator': value_trans}, - {'fieldname': 'field4', 'translator': value_trans}, - {'fieldname': 'field5', 'translator': value_trans}, - {'fieldname': 'field6', 'translator': value_trans})} - - def __init__(self, name='', args=None): - # if args is None: - # args = self._empty_openstack_credentials() - super(PerformanceTestDriver, self).__init__(name, args) - self.client_data = None - self.register_translator(PerformanceTestDriver.p_translator) - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'performance' - result['description'] = 'Datasource driver used for perf tests' - # result['config'] = ds_utils.get_openstack_required_config() - # result['config']['api_version'] = constants.OPTIONAL - result['secret'] = ['password'] - return result - - def update_from_datasource(self): - if self.client_data is not None: - self.state = {} - row_data = self.convert_objs(self.client_data, self.p_translator) - self.state[self.TABLE] = set() - for table, row in row_data: - assert table == self.TABLE - self.state[table].add(row) diff --git a/congress/tests/datasources/plexxi_fakes.py b/congress/tests/datasources/plexxi_fakes.py deleted file mode 100644 index 9cab80d7..00000000 --- a/congress/tests/datasources/plexxi_fakes.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) 2014 Marist SDN Innovation lab Joint with Plexxi Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
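A minimal wiring sketch for the mock classes defined below, following the
constructor signatures in this module (the identifiers here are invented):

    host = MockHost('uuid-h1', 'host1', 1,
                    [MockNIC('uuid-n1', '00:00:00:00:00:01')])
    vm = MockVM('uuid-vm1', '10.0.0.2', 'vm1', host,
                [MockNIC('uuid-n2', '00:00:00:00:00:02')])
    host.addvm(vm)
    assert host.getVirtualMachineCount() == 1
    assert vm.getVirtualizationHost() is host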
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - - -class MockAffinity(object): - def __init__(self, uuid, name): - self.uuid = uuid - self.name = name - - def getUuid(self): - return self.uuid - - def getName(self): - return self.name - - -class MockCoreSession(object): - def __init__(self): - pass - - def disconnect(): - pass - - -class MockHost(object): - def __init__(self, uuid, name, mac_count, pnics): - self.uuid = uuid - self.name = name - self.mac_count = mac_count - self.pnics = pnics - self.vms = [] - - def addvm(self, vm): - self.vms.append(vm) - - def getForeignUuid(self): - return self.uuid - - def getUuid(self): - return self.uuid - - def getName(self): - return self.name - - def getPhysicalNetworkInterfaces(self): - return self.pnics - - def getVirtualMachineCount(self): - return len(self.vms) - - def getVirtualMachines(self): - return self.vms - - -class MockNetworkLink(object): - def __init__(self, uuid, name, stopint, startint): - self.uuid = uuid - self.name = name - self.startint = startint - self.stopint = stopint - - def getUuid(self): - return self.uuid - - def getName(self): - return self.name - - def getStartNetworkInterface(self): - return self.startint - - def getStopNetworkInterface(self): - return self.stopint - - -class MockNIC(object): - def __init__(self, uuid, mac): - self.uuid = uuid - self.mac = mac - - def getMacAddress(self): - return self.mac - - -class MockPort(object): - def __init__(self, uuid, name, networklinks): - self.uuid = uuid - self.name = name - self.networklinks = networklinks - - def getUuid(self): - return self.uuid - - def getName(self): - return self.name - - def getNetworkLinks(self): - return self.networklinks - - -class MockSwitch(object): - def __init__(self, uuid, ip, name, status, pnics): - self.uuid = uuid - self.ip = ip - self.status = status - self.pnics = pnics - self.name = name - - def getUuid(self): - return self.uuid - - def getName(self): - return self.name - - def getIpAddress(self): - return self.ip - - def getStatus(self): - return self.status - - def getPhysicalNetworkInterfaces(self): - return self.pnics - - -class MockVM(object): - def __init__(self, uuid, ip, name, host, vnics): - self.uuid = uuid - self.ip = ip - self.host = host - self.name = name - self.vnics = vnics - - def getForeignUuid(self): - return self.uuid - - def getVirtualizationHost(self): - return self.host - - def getName(self): - return self.name - - def getIpAddress(self): - return self.ip - - def getVirtualNetworkInterfaces(self): - return self.vnics - - -class MockVSwitch(object): - def __init__(self, uuid, hosts, vnics): - self.uuid = uuid - self.hosts = hosts - self.vnics = vnics - - def getForeignUuid(self): - return self.uuid - - def getVirtualizationHosts(self): - return self.hosts - - def getVirtualNetworkInterfaces(self): - return self.vnics diff --git a/congress/tests/datasources/test_ceilometer_driver.py b/congress/tests/datasources/test_ceilometer_driver.py deleted file mode 100644 index 05b984af..00000000 --- a/congress/tests/datasources/test_ceilometer_driver.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright (c) 2014 Montavista Software, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from ceilometerclient.v2 import client -import mock - -from congress.datasources import ceilometer_driver -from congress.tests import base -from congress.tests.datasources import util -from congress.tests import helper - -ResponseObj = util.ResponseObj - - -class TestCeilometerDriver(base.TestCase): - - def setUp(self): - super(TestCeilometerDriver, self).setUp() - self.ceilometer_client = mock.MagicMock() - - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - with mock.patch.object(client.Client, '__init__', - return_value=None): - self.driver = ceilometer_driver.CeilometerDriver( - name='testceilometer', - args=args) - - def test_list_meters(self): - meters_data = [ - ResponseObj({'name': 'instance:m1.tiny', - 'type': 'gauge', - 'unit': 'instance', - 'resource_id': 'a257ba13-0b36-4a86-ae89-f78dd28b8ae5', - 'user_id': '2b01323fd71345bc8cdc5dbbd6d127ea', - 'project_id': '0020df0171ec41b597cd8b3002e21bee', - 'meter_id': 'YTI1N2JhMTMtMGIzNi00YTg2LWFlODktZjc4ZG', - 'source': 'openstack'}), - ResponseObj({'name': 'network.incoming.bytes', - 'type': 'cumulative', - 'unit': 'B', - 'resource_id': 'instance-00000001-tap437ce69c-e5', - 'user_id': '2b01323fd71345bc8cdc5dbbd6d127ea', - 'project_id': '0020df0171ec41b597cd8b3002e21bee', - 'meter_id': 'aW5zdGFuY2UtMDAwMDAwMDEtYTI1N2JhMT', - 'source': 'openstack'})] - - self.driver._translate_meters(meters_data) - meter_list = list(self.driver.state['meters']) - self.assertIsNotNone(meter_list) - self.assertEqual(2, len(meter_list)) - - for meter in meter_list: - if meter[1] == 'network.incoming.bytes': - meter1 = meter - elif meter[1] == 'instance:m1.tiny': - meter2 = meter - - # Verifying individual tuple data - self.assertEqual(('aW5zdGFuY2UtMDAwMDAwMDEtYTI1N2JhMT', - 'network.incoming.bytes', - 'cumulative', - 'B', - 'openstack', - 'instance-00000001-tap437ce69c-e5', - '2b01323fd71345bc8cdc5dbbd6d127ea', - '0020df0171ec41b597cd8b3002e21bee'), - meter1) - - self.assertEqual(('YTI1N2JhMTMtMGIzNi00YTg2LWFlODktZjc4ZG', - 'instance:m1.tiny', - 'gauge', - 'instance', - 'openstack', - 'a257ba13-0b36-4a86-ae89-f78dd28b8ae5', - '2b01323fd71345bc8cdc5dbbd6d127ea', - '0020df0171ec41b597cd8b3002e21bee'), - meter2) - - def test_list_alarms(self): - threshold_rule1 = {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3'} - threshold_rule2 = {'key4': 'value4', - 'key5': 'value5', - 'key6': 'value6'} - - alarms_data = [ - ResponseObj({'alarm_id': '7ef99553-a73f-4b18-a617-997a479c48e9', - 'name': 'cpu_high2', - 'state': 'insufficient data', - 'enabled': 'True', - 'threshold_rule': threshold_rule1, - 'type': 'threshold', - 'description': 'instance running hot', - 'time_constraints': '[]', - 'user_id': '2b01323fd71345bc8cdc5dbbd6d127ea', - 'project_id': '', - 'alarm_actions': "[u'log://']", - 'ok_actions': '[]', - 'insufficient_data_actions': '[]', - 'repeat_actions': 'False', - 'timestamp': '2014-09-30T05:00:43.351041', - 'state_timestamp': '2014-09-30T05:00:43.351041'}), - ResponseObj({'alarm_id': 
'd1b2b7a7-9512-4290-97ca-2580ed72c375', - 'name': 'cpu_high', - 'state': 'insufficient data', - 'enabled': 'True', - 'threshold_rule': threshold_rule2, - 'type': 'threshold', - 'description': 'instance running hot', - 'time_constraints': '[]', - 'user_id': '2b01323fd71345bc8cdc5dbbd6d127ea', - 'project_id': '', - 'alarm_actions': "[u'log://']", - 'ok_actions': '[]', - 'insufficient_data_actions': '[]', - 'repeat_actions': 'False', - 'timestamp': '2014-09-30T04:55:36.015925', - 'state_timestamp': '2014-09-30T04:55:36.015925'})] - - self.driver._translate_alarms(alarms_data) - alarm_list = list(self.driver.state['alarms']) - self.assertIsNotNone(alarm_list) - self.assertEqual(2, len(alarm_list)) - - alarm_threshold_rule = list(self.driver.state['alarms.threshold_rule']) - self.assertIsNotNone(alarm_threshold_rule) - self.assertEqual(6, len(alarm_threshold_rule)) - - for alarm in alarm_list: - if alarm[1] == 'cpu_high2': - alarm1 = alarm - elif alarm[1] == 'cpu_high': - alarm2 = alarm - - for thres in alarm_threshold_rule: - if thres[1] in ['key1', 'key2', 'key3']: - thresh_rule_id1 = thres[0] - elif thres[1] in ['key4', 'key5', 'key6']: - thresh_rule_id2 = thres[0] - - # Verifying individual tuple data - self.assertEqual(('7ef99553-a73f-4b18-a617-997a479c48e9', - 'cpu_high2', 'insufficient data', - 'True', - thresh_rule_id1, - 'threshold', 'instance running hot', - '[]', '2b01323fd71345bc8cdc5dbbd6d127ea', - '', "[u'log://']", '[]', '[]', 'False', - '2014-09-30T05:00:43.351041', - '2014-09-30T05:00:43.351041'), - alarm1) - self.assertEqual(('d1b2b7a7-9512-4290-97ca-2580ed72c375', - 'cpu_high', 'insufficient data', 'True', - thresh_rule_id2, - 'threshold', 'instance running hot', - '[]', '2b01323fd71345bc8cdc5dbbd6d127ea', - '', "[u'log://']", '[]', '[]', - 'False', '2014-09-30T04:55:36.015925', - '2014-09-30T04:55:36.015925'), - alarm2) - - def test_list_events(self): - trait1 = [{'name': 'value1', 'type': 'value2', 'value': 'value3'}, - {'name': 'value7', 'type': 'value8', 'value': 'value9'}] - trait2 = [{'name': 'value4', 'type': 'value5', 'value': 'value6'}] - trait3 = [] - - events_data = [ - ResponseObj({'message_id': '6834861c-ccb3-4c6f-ac00-fe8fe1ad4ed4', - 'event_type': 'image.create', - 'generated': '2014-09-29T08:19:45.556301', - 'traits': trait1}), - ResponseObj({'message_id': '3676d6d4-5c65-4442-9eda-b78d750ea91f', - 'event_type': 'compute.instance.update', - 'generated': '2014-09-30T04:54:45.395522', - 'traits': trait2}), - ResponseObj({'message_id': 'fae7b03d-b5b7-4b4f-b2ef-06d2af03f21e', - 'event_type': 'telemetry.api', - 'generated': '2015-09-02T10:12:50.338919', - 'traits': trait3})] - - self.driver._translate_events(events_data) - events_set = self.driver.state['events'] - expected_events = {('6834861c-ccb3-4c6f-ac00-fe8fe1ad4ed4', - 'image.create', '2014-09-29T08:19:45.556301'), - ('3676d6d4-5c65-4442-9eda-b78d750ea91f', - 'compute.instance.update', - '2014-09-30T04:54:45.395522'), - ('fae7b03d-b5b7-4b4f-b2ef-06d2af03f21e', - 'telemetry.api', '2015-09-02T10:12:50.338919')} - self.assertEqual(expected_events, events_set) - - event_traits_set = self.driver.state['events.traits'] - expected_traits = {('6834861c-ccb3-4c6f-ac00-fe8fe1ad4ed4', - 'value1', 'value2', 'value3'), - ('6834861c-ccb3-4c6f-ac00-fe8fe1ad4ed4', - 'value7', 'value8', 'value9'), - ('3676d6d4-5c65-4442-9eda-b78d750ea91f', - 'value4', 'value5', 'value6')} - self.assertEqual(expected_traits, event_traits_set) - - def test_list_statistics(self): - statistics_data = [ - {'meter_name': 'network', - 'period': 0, 
'groupby': - {'resource_id': '2fdef98a-8a00-4094-b6b8-b3f742076417'}, - 'period_start': '2014-12-09T12:52:39.366015', - 'period_end': '2014-12-09T12:52:56.478338', - 'max': 0.0, 'min': 0.0, 'avg': 0.0, 'sum': 0.0, - 'count': 10, 'duration': 17.112323, 'unit': 'GB', - 'duration_start': '2014-12-09T12:52:39.366015', - 'duration_end': '2014-12-09T12:52:56.478338'}, - {'meter_name': 'instance', - 'period': 0, 'groupby': - {'resource_id': '8a1340fa-fd43-4376-9deb-37c872c47e38'}, - 'period_start': '2014-12-09T12:52:39.366015', - 'period_end': '2014-12-09T13:04:34', - 'max': 1.0, 'min': 1.0, 'avg': 1.0, 'sum': 13.0, - 'count': 13, 'duration': 714.633985, - 'unit': 'instance', - 'duration_start': '2014-12-09T12:52:39.366015', - 'duration_end': '2014-12-09T13:04:34'}] - - self.driver._translate_statistics(statistics_data) - statistics_list = list(self.driver.state['statistics']) - self.assertIsNotNone(statistics_list) - self.assertEqual(2, len(statistics_list)) - - # Verifying individual tuple data - s1 = next(x for x in statistics_list if x[0] == 'network') - s2 = next(x for x in statistics_list if x[0] == 'instance') - - self.assertEqual(('network', - '2fdef98a-8a00-4094-b6b8-b3f742076417', - 0.0, 10, 17.112323, - '2014-12-09T12:52:39.366015', - '2014-12-09T12:52:56.478338', - 0.0, 0.0, 0, - '2014-12-09T12:52:56.478338', - '2014-12-09T12:52:39.366015', - 0.0, 'GB'), s1) - self.assertEqual(('instance', - '8a1340fa-fd43-4376-9deb-37c872c47e38', - 1.0, 13, 714.633985, - '2014-12-09T12:52:39.366015', - '2014-12-09T13:04:34', - 1.0, 1.0, 0, - '2014-12-09T13:04:34', - '2014-12-09T12:52:39.366015', - 13.0, 'instance'), s2) - - def test_execute(self): - class CeilometerClient(object): - def __init__(self): - self.testkey = None - - def setAlarm(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - ceilometer_client = CeilometerClient() - self.driver.ceilometer_client = ceilometer_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('setAlarm', api_args) - - self.assertEqual(ceilometer_client.testkey, expected_ans) diff --git a/congress/tests/datasources/test_cinder_driver.py b/congress/tests/datasources/test_cinder_driver.py deleted file mode 100644 index a14b681d..00000000 --- a/congress/tests/datasources/test_cinder_driver.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright (c) 2014 Montavista Software, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-from congress.datasources import cinder_driver
-from congress.tests import base
-from congress.tests.datasources import util
-from congress.tests import helper
-
-ResponseObj = util.ResponseObj
-
-
-class TestCinderDriver(base.TestCase):
-
-    def setUp(self):
-        super(TestCinderDriver, self).setUp()
-        args = helper.datasource_openstack_args()
-        args['poll_time'] = 0
-        self.driver = cinder_driver.CinderDriver(name='testcinder', args=args)
-
-    def test_list_volumes(self):
-        volumes_data = [
-            ResponseObj({'id': '8bf2eddb-0e1a-46f9-a49a-853f8016f476',
-                         'size': '1',
-                         'user_id': 'b75055d5f0834d99ae874f085cf95272',
-                         'status': 'available',
-                         'description': 'foo',
-                         'name': 'bar',
-                         'bootable': 'false',
-                         'created_at': '2014-10-09T12:16:23.000000',
-                         'volume_type': 'lvmdriver-1',
-                         'encrypted': False,
-                         'availability_zone': 'nova1',
-                         'replication_status': 'r_status1',
-                         'multiattach': True,
-                         'snapshot_id': '3b890e8a-7881-4430-b087-9e9e642e5e0d',
-                         'source_volid':
-                             'b4c36f7a-ac1b-41a6-9e83-03a6c1149669',
-                         'consistencygroup_id':
-                             '7aa9787f-285d-4d22-8211-e20af07f1044',
-                         'migration_status': 'm_status1',
-                         'attachments':
-                             ['d9655db9-640b-40a5-ae2f-1166183518a6',
-                              'fc1a3f20-9be3-431f-9cb2-670c191e4282'],
-                         'extra_attribute': ['extra']}),
-            ResponseObj({'id': '7cd8f73d-3243-49c9-a25b-a77ceb6ad1fa',
-                         'size': '1',
-                         'user_id': '6e14edb203a84aa6a5a6a90872cbae79',
-                         'status': 'creating',
-                         'description': 'wonder',
-                         'name': 'alice',
-                         'bootable': 'true',
-                         'created_at': '2014-10-12T06:54:55.000000',
-                         'volume_type': 'None',
-                         'encrypted': True,
-                         'availability_zone': 'nova2',
-                         'replication_status': 'r_status2',
-                         'multiattach': False,
-                         'snapshot_id': '658b5663-9e83-406b-8b81-4a50cafaa2d6',
-                         'source_volid':
-                             'bf789ec1-b4a2-4ea0-94f4-4a6ebcc00ad8',
-                         'consistencygroup_id':
-                             '960ec54c-c2a4-4e4c-8192-8b1d9eb65fae',
-                         'migration_status': 'm_status2',
-                         'attachments': [],
-                         'extra_attribute': ['extra']})]
-
-        volume_list = self.driver._translate_volumes(volumes_data)
-        self.assertIsNotNone(volume_list)
-        self.assertEqual(4, len(volume_list))
-
-        self.assertEqual({('8bf2eddb-0e1a-46f9-a49a-853f8016f476', '1',
-                           'b75055d5f0834d99ae874f085cf95272', 'available',
-                           'foo', 'bar', 'false', '2014-10-09T12:16:23.000000',
-                           'lvmdriver-1', 'False', 'nova1', 'r_status1',
-                           'True', '3b890e8a-7881-4430-b087-9e9e642e5e0d',
-                           'b4c36f7a-ac1b-41a6-9e83-03a6c1149669',
-                           '7aa9787f-285d-4d22-8211-e20af07f1044',
-                           'm_status1'),
-                          ('7cd8f73d-3243-49c9-a25b-a77ceb6ad1fa', '1',
-                           '6e14edb203a84aa6a5a6a90872cbae79', 'creating',
-                           'wonder', 'alice', 'true',
-                           '2014-10-12T06:54:55.000000', 'None',
-                           'True', 'nova2', 'r_status2', 'False',
-                           '658b5663-9e83-406b-8b81-4a50cafaa2d6',
-                           'bf789ec1-b4a2-4ea0-94f4-4a6ebcc00ad8',
-                           '960ec54c-c2a4-4e4c-8192-8b1d9eb65fae',
-                           'm_status2')},
-                         self.driver.state['volumes'])
-
-        self.assertEqual({('8bf2eddb-0e1a-46f9-a49a-853f8016f476',
-                           'd9655db9-640b-40a5-ae2f-1166183518a6'),
-                          ('8bf2eddb-0e1a-46f9-a49a-853f8016f476',
-                           'fc1a3f20-9be3-431f-9cb2-670c191e4282')},
-                         self.driver.state['attachments'])
-
-    def test_list_snapshots(self):
-        snapshots_data = [
-            ResponseObj({'status': 'available',
-                         'created_at': '2014-10-12T06:54:55.000000',
-                         'volume_id': 'b75055d5f0834d99ae874f085cf95272',
-                         'size': '1',
-                         'id': '7cd8f73d-3243-49c9-a25b-a77ceb6ad1fa',
-                         'name': 'foo'}),
-            ResponseObj({'status': 'creating',
-                         'created_at': '2014-10-12T06:54:55.000000',
-                         'volume_id': '6e14edb203a84aa6a5a6a90872cbae79',
-                         'size': '1',
-                         'id': '7cd8f73d-3243-49c9-a25b-a77ceb6ad1fa',
-                         'name': 'baar'})]
-
-        snapshot_list = self.driver._translate_snapshots(snapshots_data)
-        self.assertIsNotNone(snapshot_list)
-        self.assertEqual(2, len(snapshot_list))
-
-        self.assertEqual({('7cd8f73d-3243-49c9-a25b-a77ceb6ad1fa', '1',
-                           'available', 'b75055d5f0834d99ae874f085cf95272',
-                           'foo', '2014-10-12T06:54:55.000000'),
-                          ('7cd8f73d-3243-49c9-a25b-a77ceb6ad1fa', '1',
-                           'creating', '6e14edb203a84aa6a5a6a90872cbae79',
-                           'baar', '2014-10-12T06:54:55.000000')},
-                         self.driver.state['snapshots'])
-
-    def test_list_services(self):
-        services_data = [
-            ResponseObj({'status': 'enabled',
-                         'binary': 'cinder-scheduler',
-                         'zone': 'nova',
-                         'state': 'up',
-                         'updated_at': '2014-10-10T06:25:08.000000',
-                         'host': 'openstack@lvmdriver-1',
-                         'disabled_reason': 'None'}),
-            ResponseObj({'status': 'enabled',
-                         'binary': 'cinder-scheduler',
-                         'zone': 'nova',
-                         'state': 'up',
-                         'updated_at': '2014-10-10T06:25:08.000000',
-                         'host': 'openstack',
-                         'disabled_reason': 'None'})]
-
-        service_list = self.driver._translate_services(services_data)
-        self.assertIsNotNone(service_list)
-        self.assertEqual(2, len(service_list))
-
-        self.assertEqual({('enabled', 'cinder-scheduler', 'nova',
-                           'up', '2014-10-10T06:25:08.000000',
-                           'openstack@lvmdriver-1', 'None'),
-                          ('enabled', 'cinder-scheduler', 'nova',
-                           'up', '2014-10-10T06:25:08.000000',
-                           'openstack', 'None')},
-                         self.driver.state['services'])
-
-    def test_execute(self):
-        class CinderClient(object):
-            def __init__(self):
-                self.testkey = None
-
-            def createVolume(self, arg1):
-                self.testkey = 'arg1=%s' % arg1
-
-        cinder_client = CinderClient()
-        self.driver.cinder_client = cinder_client
-        api_args = {
-            'positional': ['1']
-        }
-        expected_ans = 'arg1=1'
-
-        self.driver.execute('createVolume', api_args)
-
-        self.assertEqual(expected_ans, cinder_client.testkey)
diff --git a/congress/tests/datasources/test_cloudfoundryv2_driver.py b/congress/tests/datasources/test_cloudfoundryv2_driver.py
deleted file mode 100644
index fc83acc6..00000000
--- a/congress/tests/datasources/test_cloudfoundryv2_driver.py
+++ /dev/null
@@ -1,371 +0,0 @@
-# Copyright (c) 2013 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import sys
-
-# NOTE(arosen): done to work around the fact that cloudfoundryclient
-# isn't in the openstack global requirements.
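-# Stubbing the module tree in sys.modules (below) before the driver import
-# lets `from congress.datasources import cloudfoundryv2_driver` succeed even
-# though the real cloudfoundryclient package is absent.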
-import mock -sys.modules['cloudfoundryclient.v2.client'] = mock.Mock() -sys.modules['cloudfoundryclient.v2'] = mock.Mock() -sys.modules['cloudfoundryclient'] = mock.Mock() - -from congress.datasources import cloudfoundryv2_driver -from congress.tests import base -from congress.tests import helper - - -ORG1_GUID = '5187136c-ef7d-47e6-9e6b-ac7780bab3db' -ORG_DATA = ( - {"total_results": 1, - "next_url": 'null', - "total_pages": 1, - "prev_url": 'null', - "resources": [{ - "entity": - {"status": "active", - "spaces_url": "/v2/organizations/" + ORG1_GUID + "/spaces", - "private_domains_url": - "/v2/organizations/" + ORG1_GUID + "/private_domains", - "name": "foo.com", - "domains_url": - "/v2/organizations/" + ORG1_GUID + "/domains", - "billing_enabled": 'true', - "quota_definition_guid": - "b72b1acb-ff4f-468d-99c0-05cd91012b62", - "app_events_url": - "/v2/organizations/" + ORG1_GUID + "/app_events", - "space_quota_definitions_url": - "/v2/organizations/" + ORG1_GUID + "/space_quota_definitions", - "quota_definition_url": - "/v2/quota_definitions/b72b1acb-ff4f-468d-99c0-05cd91012b62", - "auditors_url": - "/v2/organizations/" + ORG1_GUID + "/auditors", - "managers_url": - "/v2/organizations/" + ORG1_GUID + "/managers", - "users_url": - "/v2/organizations/" + ORG1_GUID + "/users", - "billing_managers_url": - "/v2/organizations/" + ORG1_GUID + "/billing_managers" - }, - "metadata": - {"url": - "/v2/organizations/5187136c-ef7d-47e6-9e6b-ac7780bab3db", - "created_at": "2015-01-21T02:17:28+00:00", - "guid": "5187136c-ef7d-47e6-9e6b-ac7780bab3db", - "updated_at": "2015-01-21T02:17:28+00:00" - } - } - ] - } -) - - -SPACE1_GUID = "8da5477d-340e-4bb4-808a-54d9f72017d1" -SPACE2_GUID = "79479021-1e77-473a-8c63-28de9d2ca697" -ORG1_SPACES_DATA = ( - {"total_results": 2, - "next_url": "null", - "total_pages": 1, - "prev_url": "null", - "resources": [{ - "entity": - {"developers_url": "/v2/spaces/" + SPACE1_GUID + "/developers", - "service_instances_url": - "/v2/spaces/" + SPACE1_GUID + "/service_instances", - "events_url": "/v2/spaces/" + SPACE1_GUID + "/events", - "name": "development", - "domains_url": "/v2/spaces/" + SPACE1_GUID + "/domains", - "app_events_url": "/v2/spaces/" + SPACE1_GUID + "/app_events", - "routes_url": "/v2/spaces/" + SPACE1_GUID + "/routes", - "organization_guid": "5187136c-ef7d-47e6-9e6b-ac7780bab3db", - "space_quota_definition_guid": "null", - "apps_url": "/v2/spaces/" + SPACE1_GUID + "/apps", - "auditors_url": "/v2/spaces/" + SPACE1_GUID + "/auditors", - "managers_url": "/v2/spaces/" + SPACE1_GUID + "/managers", - "organization_url": - "/v2/organizations/5187136c-ef7d-47e6-9e6b-ac7780bab3db", - "security_groups_url": - "/v2/spaces/" + SPACE1_GUID + "/security_groups" - }, - "metadata": - {"url": "/v2/spaces/" + SPACE1_GUID, - "created_at": "2015-01-21T02:17:28+00:00", - "guid": SPACE1_GUID, - "updated_at": "null" - } - }, - {"entity": - {"developers_url": "/v2/spaces/" + SPACE2_GUID + "/developers", - "service_instances_url": - "/v2/spaces/" + SPACE2_GUID + "/service_instances", - "events_url": "/v2/spaces/" + SPACE2_GUID + "/events", - "name": "test2", - "domains_url": "/v2/spaces/" + SPACE2_GUID + "/domains", - "app_events_url": "/v2/spaces/" + SPACE2_GUID + "/app_events", - "routes_url": "/v2/spaces/" + SPACE2_GUID + "/routes", - "organization_guid": "5187136c-ef7d-47e6-9e6b-ac7780bab3db", - "space_quota_definition_guid": "null", - "apps_url": "/v2/spaces/" + SPACE2_GUID + "/apps", - "auditors_url": "/v2/spaces/" + SPACE2_GUID + "/auditors", - "managers_url": "/v2/spaces/" + 
SPACE2_GUID + "/managers", - "organization_url": - "/v2/organizations/5187136c-ef7d-47e6-9e6b-ac7780bab3db", - "security_groups_url": - "/v2/spaces/" + SPACE2_GUID + "/security_groups" - }, - "metadata": - {"url": "/v2/spaces/" + SPACE2_GUID, - "created_at": "2015-01-22T19:02:32+00:00", - "guid": SPACE2_GUID, - "updated_at": "null" - } - } - ] - } -) - -APP1_GUID = "c3bd7fc1-73b4-4cc7-a6c8-9976c30edad5" -APP2_GUID = "f7039cca-95ac-49a6-b116-e32a53ddda69" -APPS_IN_SPACE1 = ( - {"total_results": 2, - "next_url": "null", - "total_pages": 1, - "prev_url": "null", - "resources": [{ - "entity": - {"version": "fec00ce7-a980-49e1-abec-beed5516618f", - "staging_failed_reason": "null", - "instances": 1, - "routes_url": "/v2/apps" + APP1_GUID + "routes", - "space_url": "/v2/spaces/8da5477d-340e-4bb4-808a-54d9f72017d1", - "docker_image": "null", - "console": "false", - "package_state": "STAGED", - "state": "STARTED", - "production": "false", - "detected_buildpack": "Ruby", - "memory": 256, - "package_updated_at": "2015-01-21T21:00:40+00:00", - "staging_task_id": "71f75ad3cad64884a92c4e7738eaae16", - "buildpack": "null", - "stack_url": "/v2/stacks/50688ae5-9bfc-4bf6-a4bf-caadb21a32c6", - "events_url": "/v2/apps" + APP1_GUID + "events", - "service_bindings_url": - "/v2/apps" + APP1_GUID + "service_bindings", - "detected_start_command": - "bundle exec rake db:migrate && bundle exec rails s -p $PORT", - "disk_quota": 1024, - "stack_guid": "50688ae5-9bfc-4bf6-a4bf-caadb21a32c6", - "space_guid": "8da5477d-340e-4bb4-808a-54d9f72017d1", - "name": "rails_sample_app", - "health_check_type": "port", - "command": - "bundle exec rake db:migrate && bundle exec rails s -p $PORT", - "debug": "null", - "environment_json": "null", - "health_check_timeout": "null" - }, - "metadata": - {"url": "/v2/apps/c3bd7fc1-73b4-4cc7-a6c8-9976c30edad5", - "created_at": "2015-01-21T21:01:19+00:00", - "guid": "c3bd7fc1-73b4-4cc7-a6c8-9976c30edad5", - "updated_at": "2015-01-21T21:01:19+00:00" - } - }, - {"entity": - {"version": "a1b52559-32f3-4765-9fd3-6e35293fb6d0", - "staging_failed_reason": "null", - "instances": 1, - "routes_url": "/v2/apps" + APP2_GUID + "routes", - "space_url": "/v2/spaces/8da5477d-340e-4bb4-808a-54d9f72017d1", - "docker_image": "null", - "console": "false", - "package_state": "PENDING", - "state": "STOPPED", - "production": "false", - "detected_buildpack": "null", - "memory": 1024, - "package_updated_at": "null", - "staging_task_id": "null", - "buildpack": "null", - "stack_url": "/v2/stacks/50688ae5-9bfc-4bf6-a4bf-caadb21a32c6", - "events_url": "/v2/apps" + APP2_GUID + "events", - "service_bindings_url": - "/v2/apps" + APP2_GUID + "service_bindings", - "detected_start_command": "", - "disk_quota": 1024, - "stack_guid": "50688ae5-9bfc-4bf6-a4bf-caadb21a32c6", - "space_guid": "8da5477d-340e-4bb4-808a-54d9f72017d1", - "name": "help", - "health_check_type": "port", - "command": "null", - "debug": "null", - "environment_json": "null", - "health_check_timeout": "null" - }, - "metadata": - {"url": "/v2/apps/f7039cca-95ac-49a6-b116-e32a53ddda69", - "created_at": "2015-01-21T18:48:34+00:00", - "guid": "f7039cca-95ac-49a6-b116-e32a53ddda69", - "updated_at": "null" - } - } - ] - } -) - - -APPS_IN_SPACE2 = {"total_results": 0, - "next_url": "null", - "total_pages": 1, - "prev_url": "null", - "resources": []} - -SERVICES_IN_SPACE1 = { - "guid": "8da5477d-340e-4bb4-808a-54d9f72017d1", - "name": "development", - "services": [{ - "bound_app_count": 0, - "guid": "88f61682-d78e-410f-88ee-1e0eabbbc7da", - "last_operation": None, - 
"name": "rails-postgres", - "service_plan": { - "guid": "fbcec3af-3e8d-4ee7-adfe-3f12a137ed66", - "name": "turtle", - "service": { - "guid": "34dbc753-34ed-4cf1-9a87-a224dfca569b", - "label": "elephantsql", - "provider": None, - "version": None - } - } - }] -} - -EXPECTED_STATE = { - 'organizations': set([ - ('5187136c-ef7d-47e6-9e6b-ac7780bab3db', 'foo.com', - '2015-01-21T02:17:28+00:00', '2015-01-21T02:17:28+00:00')]), - 'spaces': set([ - ('8da5477d-340e-4bb4-808a-54d9f72017d1', 'development', - '2015-01-21T02:17:28+00:00', 'null'), - ('79479021-1e77-473a-8c63-28de9d2ca697', 'test2', - '2015-01-22T19:02:32+00:00', 'null')]), - 'apps': set([ - ('8da5477d-340e-4bb4-808a-54d9f72017d1', - 'c3bd7fc1-73b4-4cc7-a6c8-9976c30edad5', 'null', - 'bundle exec rake db:migrate && bundle exec rails s -p $PORT', - 'false', 'null', 'Ruby', - 'bundle exec rake db:migrate && bundle exec rails s -p $PORT', - 1024, 'null', 'null', 'null', 1, - 256, 'rails_sample_app', 'STAGED', '2015-01-21T21:00:40+00:00', - 'false', 'null', '71f75ad3cad64884a92c4e7738eaae16', 'STARTED', - 'fec00ce7-a980-49e1-abec-beed5516618f', '2015-01-21T21:01:19+00:00', - '2015-01-21T21:01:19+00:00'), - ('8da5477d-340e-4bb4-808a-54d9f72017d1', - 'f7039cca-95ac-49a6-b116-e32a53ddda69', 'null', 'null', 'false', - 'null', 'null', '', 1024, 'null', 'null', 'null', 1, 1024, - 'help', 'PENDING', 'null', 'false', 'null', 'null', 'STOPPED', - 'a1b52559-32f3-4765-9fd3-6e35293fb6d0', - '2015-01-21T18:48:34+00:00', 'null')]), - 'service_bindings': set([]), - 'services': set([ - ('88f61682-d78e-410f-88ee-1e0eabbbc7da', - '8da5477d-340e-4bb4-808a-54d9f72017d1', 'rails-postgres', - 0, 'None', 'turtle')]), -} - - -class TestCloudFoundryV2Driver(base.TestCase): - - def setUp(self): - super(TestCloudFoundryV2Driver, self).setUp() - - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - args['client'] = mock.MagicMock() - self.driver = cloudfoundryv2_driver.CloudFoundryV2Driver(args=args) - - def test_update_from_datasource(self): - def _side_effect_get_org_spaces(org): - if org == ORG1_GUID: - return ORG1_SPACES_DATA - raise ValueError("This should occur...") - - def _side_effect_get_apps_in_space(space): - if space == SPACE1_GUID: - return APPS_IN_SPACE1 - elif space == SPACE2_GUID: - return APPS_IN_SPACE2 - else: - raise ValueError("This should not occur....") - - def _side_effect_get_spaces_summary(space): - if space == SPACE1_GUID: - return SERVICES_IN_SPACE1 - else: - return {"guid": space, - "services": []} - - def _side_effect_get_app_services(space): - return {'resources': []} - - with base.nested( - mock.patch.object(self.driver.cloudfoundry, - "get_organizations", - return_value=ORG_DATA), - mock.patch.object(self.driver.cloudfoundry, - "get_organization_spaces", - side_effect=_side_effect_get_org_spaces), - mock.patch.object(self.driver.cloudfoundry, - "get_apps_in_space", - side_effect=_side_effect_get_apps_in_space), - mock.patch.object(self.driver.cloudfoundry, - "get_spaces_summary", - side_effect=_side_effect_get_spaces_summary), - mock.patch.object(self.driver.cloudfoundry, - "get_app_service_bindings", - side_effect=_side_effect_get_app_services), - - - ) as (get_organizations, get_organization_spaces, - get_apps_in_space, get_spaces_summary, - get_app_services_guids): - self.driver.update_from_datasource() - self.assertEqual(EXPECTED_STATE, self.driver.state) - - def test_execute(self): - class CloudfoundryClient(object): - def __init__(self): - self.testkey = None - - def setServices(self, arg1): - self.testkey = 
'arg1=%s' % arg1 - - cloudfoundry_client = CloudfoundryClient() - self.driver.cloudfoundry = cloudfoundry_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('setServices', api_args) - - self.assertEqual(expected_ans, cloudfoundry_client.testkey) diff --git a/congress/tests/datasources/test_datasource_driver.py b/congress/tests/datasources/test_datasource_driver.py deleted file mode 100644 index 4fa075d8..00000000 --- a/congress/tests/datasources/test_datasource_driver.py +++ /dev/null @@ -1,2029 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import copy -import hashlib -import json - -import eventlet -import mock -from oslo_utils import uuidutils - -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils -from congress.db import db_ds_table_data -from congress import exception -from congress.tests import base -from congress.tests.datasources import util -from congress.tests import fake_datasource -from congress.tests import helper - - -class TestDatasourceDriver(base.TestCase): - - def setUp(self): - super(TestDatasourceDriver, self).setUp() - self.val_trans = {'translation-type': 'VALUE'} - - def compute_hash(self, obj): - s = json.dumps(sorted(obj, key=(lambda x: str(type(x)) + repr(x))), - sort_keys=True) - h = hashlib.md5(s.encode('ascii')).hexdigest() - return h - - def test_translator_key_elements(self): - """Test for keys of all translator.""" - expected_params = { - 'hdict': ('translation-type', 'table-name', 'parent-key', - 'id-col', 'selector-type', 'field-translators', - 'in-list', 'parent-col-name', 'objects-extract-fn', - 'parent-key-desc'), - 'vdict': ('translation-type', 'table-name', 'parent-key', - 'id-col', 'key-col', 'val-col', 'translator', - 'parent-col-name', 'objects-extract-fn'), - 'list': ('translation-type', 'table-name', 'parent-key', - 'id-col', 'val-col', 'translator', 'parent-col-name', - 'objects-extract-fn', 'parent-key-desc', 'val-col-desc'), - } - - actual_params = { - 'hdict': datasource_driver.DataSourceDriver.HDICT_PARAMS, - 'vdict': datasource_driver.DataSourceDriver.VDICT_PARAMS, - 'list': datasource_driver.DataSourceDriver.LIST_PARAMS, - } - - for key, params in actual_params.items(): - expected = expected_params[key] - self.assertEqual(params, expected) - - def test_in_list_results_hdict_hdict(self): - ports_fixed_ips_translator = { - 'translation-type': 'HDICT', - 'table-name': 'fixed-ips', - 'parent-key': 'id', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'ip_address', 'translator': self.val_trans}, - {'fieldname': 'subnet_id', 'translator': self.val_trans})} - - ports_translator = { - 'translation-type': 'HDICT', - 'table-name': 'ports', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': 
self.val_trans}, - {'fieldname': 'fixed_ips', - 'translator': ports_fixed_ips_translator})} - - driver = datasource_driver.DataSourceDriver('', None) - driver.register_translator(ports_translator) - ports = [{'id': '12345', - 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': 'aa'}, - {'ip_address': '2.2.2.2', 'subnet_id': 'bb'}]}] - row_data = driver.convert_objs(ports, ports_translator) - expected = [('fixed-ips', ('12345', '1.1.1.1', 'aa')), - ('fixed-ips', ('12345', '2.2.2.2', 'bb')), - ('ports', ('12345',))] - self.assertEqual(row_data, expected) - - def test_getting_parent_key_from_nested_tables(self): - level3_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level3', - 'parent-key': 'parent_key', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'level3_thing', 'translator': self.val_trans},)} - - level2_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level2', - 'parent-key': 'id', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'thing', 'translator': self.val_trans}, - {'fieldname': 'level3', - 'translator': level3_translator})} - - level1_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': self.val_trans}, - {'fieldname': 'level2', - 'translator': level2_translator})} - - driver = datasource_driver.DataSourceDriver('', None) - driver.register_translator(level1_translator) - data = [ - {'id': 11, 'level2': - {'thing': 'blah!', 'level3': [{'level3_thing': '12345'}]}}] - - row_data = driver.convert_objs(data, level1_translator) - expected = [('level3', (11, '12345')), - ('level2', (11, 'blah!')), - ('level1', (11,))] - self.assertEqual(row_data, expected) - - def test_parent_col_name_in_hdict(self): - level2_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level2', - 'parent-key': 'id', - 'parent-col-name': 'level1_id', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'thing', 'translator': self.val_trans},)} - - level1_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': self.val_trans}, - {'fieldname': 'level2', - 'translator': level2_translator})} - - driver = datasource_driver.DataSourceDriver('', None) - datasource_driver.DataSourceDriver.TRANSLATORS = [level1_translator] - driver.register_translator(level1_translator) - # test schema - schema = driver.get_schema() - expected = {'level1': ({'name': 'id', 'desc': None},), - 'level2': ({'name': 'level1_id', 'desc': None}, - {'name': 'thing', 'desc': None})} - self.assertEqual(schema, expected) - - # test data - data = [{'id': 11, 'level2': {'thing': 'blah!'}}] - row_data = driver.convert_objs(data, level1_translator) - expected = [('level2', (11, 'blah!')), ('level1', (11,))] - self.assertEqual(row_data, expected) - - def test_parent_col_name_in_vdict(self): - level2_translator = { - 'translation-type': 'VDICT', - 'table-name': 'level2', - 'parent-key': 'id', - 'key-col': 'id', - 'val-col': 'value', - 'parent-col-name': 'level1_id', - 'translator': self.val_trans} - - level1_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': self.val_trans}, - {'fieldname': 'level2', - 'translator': level2_translator})} - - driver = 
datasource_driver.DataSourceDriver('', None) - datasource_driver.DataSourceDriver.TRANSLATORS = [level1_translator] - # test schema - schema = driver.get_schema() - expected = {'level1': ({'name': 'id', 'desc': None},), - 'level2': ('level1_id', 'id', 'value')} - self.assertEqual(expected, schema) - - # test data - data = [{'id': 11, 'level2': {'thing': 'blah!'}}] - row_data = driver.convert_objs(data, level1_translator) - expected = [('level2', (11, 'thing', 'blah!')), ('level1', (11,))] - self.assertEqual(row_data, expected) - - def test_parent_col_name_in_list(self): - level2_translator = { - 'translation-type': 'LIST', - 'table-name': 'level2', - 'parent-key': 'id', - 'parent-key-desc': 'level1_parent-desc', - 'parent-col-name': 'level1_id', - 'val-col': 'level_1_data', - 'val-col-desc': 'level_1_desc', - 'translator': self.val_trans} - - level1_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': self.val_trans}, - {'fieldname': 'level2', - 'translator': level2_translator})} - - driver = datasource_driver.DataSourceDriver('', None) - datasource_driver.DataSourceDriver.TRANSLATORS = [level1_translator] - # test schema - schema = driver.get_schema() - expected = {'level1': ({'name': 'id', 'desc': None},), - 'level2': ({'name': 'level1_id', - 'desc': 'level1_parent-desc'}, - {'name': 'level_1_data', - 'desc': 'level_1_desc'})} - self.assertEqual(expected, schema) - - # test data - data = [{'id': 11, 'level2': ['thing']}] - row_data = driver.convert_objs(data, level1_translator) - expected = [('level2', (11, 'thing')), ('level1', (11,))] - self.assertEqual(row_data, expected) - - def test_check_for_duplicate_table_names_hdict_list(self): - translator = { - 'translation-type': 'HDICT', - 'table-name': 'table1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'tags', - 'translator': {'translation-type': 'LIST', - 'table-name': 'table1', - 'val-col': 'tag', - 'translator': self.val_trans}},)} - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.DuplicateTableName, - driver.register_translator, - translator) - - def test_check_for_duplicate_table_names_nested_list_list(self): - # Test a LIST containing a LIST with the same table name. 
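-        # Both the outer and the nested translator claim table 'testtable',
-        # so registration should fail with DuplicateTableName.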
- translator = {'translation-type': 'LIST', 'table-name': 'testtable', - 'id-col': 'id_col', 'val-col': 'value', - 'translator': {'translation-type': 'LIST', - 'table-name': 'testtable', - 'id-col': 'id', 'val-col': 'val', - 'translator': self.val_trans}} - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.DuplicateTableName, - driver.register_translator, translator) - - def test_check_for_duplicate_table_names_in_different_translator(self): - translator = { - 'translation-type': 'HDICT', - 'table-name': 'table1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'tags', - 'translator': {'translation-type': 'LIST', - 'table-name': 'table2', - 'val-col': 'tag', - 'translator': self.val_trans}},)} - driver = datasource_driver.DataSourceDriver('', None) - driver.register_translator(translator) - self.assertRaises(exception.DuplicateTableName, - driver.register_translator, - translator) - - def test_check_for_duplicate_table_names_hdict_hdict(self): - translator = { - 'translation-type': 'HDICT', - 'table-name': 'table1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'tags', - 'translator': {'translation-type': 'HDICT', - 'table-name': 'table1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'x', - 'translator': self.val_trans},)}},)} - - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.DuplicateTableName, - driver.register_translator, - translator) - - def test_invalid_translation_type(self): - translator = {'translation-type': 'YOYO', - 'table-name': 'table1'} - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidTranslationType, - driver.register_translator, - translator) - - translator = {'translation-type': 'LIS', - 'table-name': 'table1'} - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidTranslationType, - driver.register_translator, - translator) - - def test_no_key_col_in_vdict(self): - translator = {'translation-type': 'VDICT', - 'table-name': 'table1', - 'val-col': 'id-col'} - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidParamException, - driver.register_translator, - translator) - - def test_no_val_col_in_vdict(self): - translator = {'translation-type': 'VDICT', - 'table-name': 'table1', - 'key-col': 'id-col'} - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidParamException, - driver.register_translator, - translator) - - def test_no_val_col_in_list(self): - translator = {'translation-type': 'LIST', - 'table-name': 'table1'} - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidParamException, - driver.register_translator, - translator) - - def test_no_parent_key_id_col(self): - translator = {'translation-type': 'LIST', - 'table-name': 'table1', - 'id-col': 'id-col', - 'parent-key': 'parent_key_column'} - - # Test LIST - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidParamException, - driver.register_translator, - translator) - # Test HDICT - translator['translation-type'] = 'VDICT' - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidParamException, - driver.register_translator, - translator) - # Test HDICT - translator['translation-type'] = 'HDICT' - driver = datasource_driver.DataSourceDriver('', None) - 
self.assertRaises(exception.InvalidParamException, - driver.register_translator, - translator) - - def test_check_no_extra_params(self): - translator = {'translation-type': 'LIST', - 'table-name': 'table1', - 'id-col': 'id-col', - 'invalid_column': 'blah'} - - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidParamException, - driver.register_translator, - translator) - - def test_check_no_extra_params_nested_hdict(self): - translator = { - 'translation-type': 'HDICT', - 'table-name': 'table1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'tags', - 'translator': {'translation-type': 'HDICT', - 'table-name': 'table2', - 'selector-type': 'DICT_SELECTOR', - 'invalid_column': 'yaya', - 'field-translators': - ({'fieldname': 'x', - 'translator': self.val_trans},)}},)} - - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidParamException, - driver.register_translator, - translator) - - def test_check_no_extra_params_nested_list_hdict(self): - translator = { - 'translation-type': 'LIST', - 'table-name': 'table1', - 'val-col': 'fixed_ips', - 'translator': { - 'table-name': 'table2', - 'invalid-column': 'hello_there!', - 'translation-type': 'HDICT', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'ip_address', - 'translator': self.val_trans},)}} - - driver = datasource_driver.DataSourceDriver('', None) - self.assertRaises(exception.InvalidParamException, - driver.register_translator, - translator) - - def test_convert_vdict_with_id(self): - # Test a single VDICT with an id column. - resp = {'a': 'FOO', 'b': 123} - translator = {'translation-type': 'VDICT', 'table-name': 'testtable', - 'id-col': 'id_col', 'key-col': 'key', 'val-col': 'value', - 'translator': self.val_trans} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - k1 = self.compute_hash((('a', 'FOO'), ('b', 123))) - - self.assertEqual(2, len(rows)) - self.assertEqual(k1, k) - self.assertIn(('testtable', (k, 'a', 'FOO')), rows) - self.assertIn(('testtable', (k, 'b', 123)), rows) - - def test_convert_vdict_without_id(self): - # Test a single VDICT without an id column. - resp = {'a': 'FOO', 'b': 123} - translator = {'translation-type': 'VDICT', 'table-name': 'testtable', - 'key-col': 'key', 'val-col': 'value', - 'translator': self.val_trans} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(2, len(rows)) - self.assertIsNone(k) - self.assertIn(('testtable', ('a', 'FOO')), rows) - self.assertIn(('testtable', ('b', 123)), rows) - - def test_convert_vdict_with_id_function(self): - # Test a single VDICT with an id column that is a function. - resp = {'a': 'FOO', 'b': 123} - translator = {'translation-type': 'VDICT', 'table-name': 'testtable', - 'id-col': lambda obj: 'id:' + obj['a'], - 'key-col': 'key', 'val-col': 'value', - 'translator': self.val_trans} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - k1 = 'id:FOO' - - self.assertEqual(2, len(rows)) - self.assertEqual(k1, k) - self.assertIn(('testtable', (k, 'a', 'FOO')), rows) - self.assertIn(('testtable', (k, 'b', 123)), rows) - - def test_convert_vdict_list(self): - # Test a VDICT that contains lists. 
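-        # Each list should land in 'subtable' under a hashed id, with the
-        # 'testtable' rows mapping each dict key to that id, e.g.
-        # ('testtable', (k, 'foo', hash_of((1, 2, 3)))).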
- resp = {'foo': (1, 2, 3), 'bar': ('a', 'b')} - translator = {'translation-type': 'VDICT', 'table-name': 'testtable', - 'id-col': 'id_col', 'key-col': 'key', 'val-col': 'value', - 'translator': {'translation-type': 'LIST', - 'table-name': 'subtable', - 'id-col': 'id_col', 'val-col': 'val_col', - 'translator': self.val_trans}} - rows, actual_k = datasource_driver.DataSourceDriver.convert_obj( - resp, translator) - - k1 = self.compute_hash((1, 2, 3)) - k2 = self.compute_hash(('a', 'b')) - k = self.compute_hash((('foo', k1), ('bar', k2))) - - self.assertEqual(7, len(rows)) - self.assertEqual(k, actual_k) - - self.assertIn(('subtable', (k1, 1)), rows) - self.assertIn(('subtable', (k1, 2)), rows) - self.assertIn(('subtable', (k1, 3)), rows) - self.assertIn(('subtable', (k2, 'a')), rows) - self.assertIn(('subtable', (k2, 'b')), rows) - self.assertIn(('testtable', (k, 'foo', k1)), rows) - self.assertIn(('testtable', (k, 'bar', k2)), rows) - - def test_convert_vdict_is_none(self): - # Test a single VDICT with an id column. - resp = None - translator = {'translation-type': 'VDICT', 'table-name': 'testtable', - 'id-col': 'id_col', 'key-col': 'key', 'val-col': 'value', - 'translator': self.val_trans} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - self.assertIsNone(rows) - - def test_convert_list_with_id(self): - # Test a single LIST with an id_column - resp = (1, 'a', 'b', True) - translator = {'translation-type': 'LIST', 'table-name': 'testtable', - 'id-col': 'id_col', 'val-col': 'value', - 'translator': self.val_trans} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - k1 = self.compute_hash((1, 'a', 'b', 'True')) - - self.assertEqual(4, len(rows)) - self.assertEqual(k1, k) - self.assertIn(('testtable', (k, 1)), rows) - self.assertIn(('testtable', (k, 'a')), rows) - self.assertIn(('testtable', (k, 'b')), rows) - self.assertIn(('testtable', (k, 'True')), rows) - - def test_convert_list_with_id_function(self): - # Test a single LIST with an id function - resp = (1, 'a', 'b', True) - translator = {'translation-type': 'LIST', 'table-name': 'testtable', - 'id-col': lambda obj: obj[0], 'val-col': 'value', - 'translator': self.val_trans} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - k1 = 1 - - self.assertEqual(4, len(rows)) - self.assertEqual(k1, k) - self.assertIn(('testtable', (k, 1)), rows) - self.assertIn(('testtable', (k, 'a')), rows) - self.assertIn(('testtable', (k, 'b')), rows) - self.assertIn(('testtable', (k, 'True')), rows) - - def test_convert_list_without_id(self): - # Test a single LIST without an id_column - resp = (1, 'a', 'b', True) - translator = {'translation-type': 'LIST', 'table-name': 'testtable', - 'val-col': 'value', 'translator': self.val_trans} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(4, len(rows)) - self.assertIsNone(k) - self.assertIn(('testtable', (1,)), rows) - self.assertIn(('testtable', ('a',)), rows) - self.assertIn(('testtable', ('b',)), rows) - self.assertIn(('testtable', ('True',)), rows) - - def test_convert_list_with_sublist(self): - # Test a single LIST with an id_column - resp = ((1, 2, 3), ('a', 'b', 'c'), (True, False)) - translator = {'translation-type': 'LIST', 'table-name': 'testtable', - 'id-col': 'id_col', 'val-col': 'value', - 'translator': {'translation-type': 'LIST', - 'table-name': 'subtable', - 'id-col': 'id_col', 'val-col': 'val_col', - 'translator': self.val_trans}} - rows, actual_k = 
datasource_driver.DataSourceDriver.convert_obj( - resp, translator) - - k1 = self.compute_hash((1, 2, 3)) - k2 = self.compute_hash(('a', 'b', 'c')) - k3 = self.compute_hash(('True', 'False')) - k = self.compute_hash((k1, k2, k3)) - - self.assertEqual(11, len(rows)) - self.assertEqual(k, actual_k) - self.assertIn(('subtable', (k1, 1)), rows) - self.assertIn(('subtable', (k1, 2)), rows) - self.assertIn(('subtable', (k1, 3)), rows) - self.assertIn(('subtable', (k2, 'a')), rows) - self.assertIn(('subtable', (k2, 'b')), rows) - self.assertIn(('subtable', (k2, 'c')), rows) - self.assertIn(('subtable', (k3, 'True')), rows) - self.assertIn(('subtable', (k3, 'False')), rows) - self.assertIn(('testtable', (k, k1)), rows) - self.assertIn(('testtable', (k, k2)), rows) - self.assertIn(('testtable', (k, k3)), rows) - - def test_convert_recursive_hdict_single_fields(self): - # Test simple fields inside of an HDICT - resp = util.ResponseObj({'testfield1': 'FOO', - 'testfield2': 123}) - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DOT_SELECTOR', - 'field-translators': ( - {'fieldname': 'testfield1', 'col': 'col1', - 'translator': self.val_trans}, - {'fieldname': 'testfield2', 'col': 'col2', - 'translator': self.val_trans})} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(1, len(rows)) - self.assertIsNone(k) - self.assertEqual([('testtable', ('FOO', 123))], rows) - - def test_convert_recursive_hdict_single_fields_empty_fields(self): - # Test simple fields inside of an HDICT where the translator - # interprests a non-present field as None. - resp = util.ResponseObj({'testfield1': 'FOO'}) - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DOT_SELECTOR', - 'field-translators': ( - {'fieldname': 'testfield1', 'col': 'col1', - 'translator': self.val_trans}, - {'fieldname': 'testfield2', 'col': 'col2', - 'translator': self.val_trans})} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(1, len(rows)) - self.assertIsNone(k) - self.assertEqual([('testtable', ('FOO', 'None'))], rows) - - def test_convert_recursive_hdict_single_fields_default_col(self): - # Test simple fields inside of an HDICT using the default col name. - - resp = util.ResponseObj({'testfield1': 'FOO'}) - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DOT_SELECTOR', - 'field-translators': ( - {'fieldname': 'testfield1', - 'translator': self.val_trans},)} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(1, len(rows)) - self.assertIsNone(k) - self.assertEqual([('testtable', ('FOO',))], rows) - - def test_convert_recursive_hdict_extract_subfields(self): - # Test simple fields inside of an HDICT - # Also tests with and without extract-fn. 
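-        # When 'extract-fn' is present it is applied to the raw field value
-        # before the VALUE translation (here pulling 'a' out of a dict and
-        # reading the 'b' attribute off an object); without it the value is
-        # used as-is.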
- field = util.ResponseObj({'b': 123}) - resp = util.ResponseObj({'testfield1': {'a': 'FOO'}, - 'testfield2': field, - 'testfield3': 456}) - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'id-col': 'id_col', 'selector-type': 'DOT_SELECTOR', - 'field-translators': ( - {'fieldname': 'testfield1', 'col': 'col1', - 'translator': {'translation-type': 'VALUE', - 'extract-fn': lambda x: x['a']}}, - {'fieldname': 'testfield2', 'col': 'col2', - 'translator': {'translation-type': 'VALUE', - 'extract-fn': lambda x: x.b}}, - {'fieldname': 'testfield3', 'col': 'col3', - 'translator': {'translation-type': 'VALUE'}})} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(1, len(rows)) - self.assertEqual(self.compute_hash(('FOO', 123, 456)), k) - self.assertEqual([('testtable', (k, 'FOO', 123, 456))], rows) - - def test_convert_recursive_hdict_sublists(self): - # Test sublists inside of an HDICT - resp = util.ResponseObj({'testfield1': ('FOO', 'BAR'), - 'testfield2': (1, 2, 3)}) - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DOT_SELECTOR', - 'field-translators': ( - {'fieldname': 'testfield1', 'col': 'col1', - 'translator': {'translation-type': 'LIST', - 'table-name': 'subtable1', - 'id-col': 'id', 'val-col': 'value', - 'translator': self.val_trans}}, - {'fieldname': 'testfield2', 'col': 'col2', - 'translator': {'translation-type': 'LIST', - 'table-name': 'subtable2', - 'id-col': 'id', 'val-col': 'value', - 'translator': self.val_trans}})} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - k1 = self.compute_hash(('FOO', 'BAR')) - k2 = self.compute_hash((1, 2, 3)) - - self.assertIsNone(k) - self.assertEqual(6, len(rows)) - self.assertIn(('subtable1', (k1, 'FOO')), rows) - self.assertIn(('subtable1', (k1, 'BAR')), rows) - self.assertIn(('subtable2', (k2, 1)), rows) - self.assertIn(('subtable2', (k2, 2)), rows) - self.assertIn(('subtable2', (k2, 3)), rows) - self.assertIn(('testtable', (k1, k2)), rows) - - def test_convert_recursive_hdict_vdict(self): - # Test translator of an VDICT inside of an HDICT - resp = util.ResponseObj({'testfield1': {'a': 123, 'b': 456}, - 'testfield2': {'c': 'abc', 'd': 'def'}}) - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DOT_SELECTOR', - 'field-translators': ( - {'fieldname': 'testfield1', 'col': 'col1', - 'translator': {'translation-type': 'VDICT', - 'table-name': 'subtable1', - 'id-col': 'id', 'key-col': 'key', - 'val-col': 'value', - 'translator': self.val_trans}}, - {'fieldname': 'testfield2', 'col': 'col2', - 'translator': {'translation-type': 'VDICT', - 'table-name': 'subtable2', - 'id-col': 'id', 'key-col': 'key', - 'val-col': 'value', - 'translator': self.val_trans}})} - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - k1 = self.compute_hash((('a', 123), ('b', 456))) - k2 = self.compute_hash((('c', 'abc'), ('d', 'def'))) - - self.assertIsNone(k) - self.assertEqual(5, len(rows)) - self.assertIn(('subtable1', (k1, 'a', 123)), rows) - self.assertIn(('subtable1', (k1, 'b', 456)), rows) - self.assertIn(('subtable2', (k2, 'c', 'abc')), rows) - self.assertIn(('subtable2', (k2, 'd', 'def')), rows) - self.assertIn(('testtable', (k1, k2)), rows) - - def test_convert_recursive_hdict_hdict(self): - # Test translator of an HDICT inside of an HDICT. 
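-        # Each nested HDICT becomes one row in its own subtable, keyed by a
-        # hash of its values; the parent 'testtable' row stores those keys.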
- resp = util.ResponseObj({'testfield1': {'a': 123, 'b': 456}, - 'testfield2': {'c': 'abc', 'd': 'def'}}) - - subtranslator_1 = {'translation-type': 'HDICT', - 'table-name': 'subtable1', - 'selector-type': 'DICT_SELECTOR', - 'id-col': 'id', - 'field-translators': ( - {'fieldname': 'a', - 'col': 'a1', - 'translator': self.val_trans}, - {'fieldname': 'b', - 'col': 'b1', - 'translator': self.val_trans})} - - subtranslator_2 = {'translation-type': 'HDICT', - 'table-name': 'subtable2', - 'selector-type': 'DICT_SELECTOR', - 'id-col': 'id', - 'field-translators': ( - {'fieldname': 'c', - 'col': 'c1', - 'translator': self.val_trans}, - {'fieldname': 'd', - 'col': 'd1', - 'translator': self.val_trans})} - - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DOT_SELECTOR', - 'field-translators': ( - {'fieldname': 'testfield1', 'col': 'col1', - 'translator': subtranslator_1}, - {'fieldname': 'testfield2', 'col': 'col2', - 'translator': subtranslator_2})} - - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - k1 = self.compute_hash((123, 456)) - k2 = self.compute_hash(('abc', 'def')) - - self.assertIsNone(k) - self.assertEqual(3, len(rows)) - self.assertIn(('subtable1', (k1, 123, 456)), rows) - self.assertIn(('subtable2', (k2, 'abc', 'def')), rows) - self.assertIn(('testtable', (k1, k2)), rows) - - def test_convert_hdict_hdict_parent_key_without_id(self): - # Test a HDICT that contains lists using a parent_key. - resp = {'foreign-key': 100, 'foo': {'f1': 123}} - subtranslator = {'translation-type': 'HDICT', - 'table-name': 'subtable', - 'parent-key': 'foreign-key', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': ({'fieldname': 'f1', - 'translator': self.val_trans}, - )} - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': ({'fieldname': 'foreign-key', - 'translator': self.val_trans}, - {'fieldname': 'foo', - 'translator': subtranslator})} - - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(2, len(rows)) - self.assertIsNone(k) - - self.assertIn(('subtable', (100, 123)), rows) - self.assertIn(('testtable', (100,)), rows) - - def test_convert_hdict_hdict_parent_key_with_id(self): - # Test a HDICT that contains lists using a parent_key. - resp = {'foreign-key': 100, 'foo': {'f1': 123}} - subtranslator = {'translation-type': 'HDICT', - 'table-name': 'subtable', - 'parent-key': 'foreign-key', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': ({'fieldname': 'f1', - 'translator': self.val_trans}, - )} - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DICT_SELECTOR', 'id-col': 'id', - 'field-translators': ({'fieldname': 'foreign-key', - 'translator': self.val_trans}, - {'fieldname': 'foo', - 'translator': subtranslator})} - - rows, actual_k = datasource_driver.DataSourceDriver.convert_obj( - resp, translator) - - k = self.compute_hash((100,)) - self.assertEqual(2, len(rows)) - self.assertEqual(k, actual_k) - - self.assertIn(('subtable', (100, 123)), rows) - self.assertIn(('testtable', (k, 100,)), rows) - - def test_convert_hdict_vdict_parent_key_without_id(self): - # Test a HDICT that contains lists using a parent_key. 
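-        # (The nested value here is actually a VDICT; 'parent-key' makes the
-        # subtable rows join on the parent's 'foreign-key' value instead of
-        # a generated hash id.)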
- resp = util.ResponseObj({'foreign-key': 100, - 'foo': {'f1': 123, 'f2': 456}}) - subtranslator = {'translation-type': 'VDICT', - 'table-name': 'subtable', - 'parent-key': 'foreign-key', - 'key-col': 'key_col', - 'val-col': 'val_col', - 'translator': self.val_trans} - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DOT_SELECTOR', - 'field-translators': ({'fieldname': 'foreign-key', - 'translator': self.val_trans}, - {'fieldname': 'foo', - 'translator': subtranslator})} - - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(3, len(rows)) - self.assertIsNone(k) - - self.assertIn(('subtable', (100, 'f1', 123)), rows) - self.assertIn(('subtable', (100, 'f2', 456)), rows) - self.assertIn(('testtable', (100,)), rows) - - def test_convert_hdict_vdict_parent_key_with_id(self): - # Test a HDICT that contains lists using a parent_key. - resp = util.ResponseObj({'foreign-key': 100, - 'foo': {'f1': 123, 'f2': 456}}) - list_translator = {'translation-type': 'VDICT', - 'table-name': 'subtable', - 'parent-key': 'foreign-key', - 'key-col': 'key_col', - 'val-col': 'val_col', - 'translator': self.val_trans} - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'id-col': 'id', 'selector-type': 'DOT_SELECTOR', - 'field-translators': ({'fieldname': 'foreign-key', - 'translator': self.val_trans}, - {'fieldname': 'foo', - 'translator': list_translator})} - - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(3, len(rows)) - self.assertEqual(self.compute_hash((100,)), k) - - self.assertIn(('subtable', (100, 'f1', 123)), rows) - self.assertIn(('subtable', (100, 'f2', 456)), rows) - self.assertIn(('testtable', (k, 100)), rows) - - def test_convert_hdict_list_parent_key_without_id(self): - # Test a HDICT that contains lists using a parent_key. - resp = util.ResponseObj({'foreign-key': 100, 'foo': (1, 2)}) - list_translator = {'translation-type': 'LIST', - 'table-name': 'subtable', - 'parent-key': 'foreign-key', - 'val-col': 'val_col', - 'translator': self.val_trans} - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'selector-type': 'DOT_SELECTOR', - 'field-translators': ({'fieldname': 'foreign-key', - 'translator': self.val_trans}, - {'fieldname': 'foo', - 'translator': list_translator})} - - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(3, len(rows)) - self.assertIsNone(k) - - self.assertIn(('subtable', (100, 1)), rows) - self.assertIn(('subtable', (100, 2)), rows) - self.assertIn(('testtable', (100,)), rows) - - def test_convert_hdict_list_parent_key_with_id(self): - # Test a HDICT that contains lists using a parent_key. 
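-        # With 'id-col' on the parent, the 'testtable' row gains a hashed id
-        # while the 'subtable' rows still join on the raw 'foreign-key' value.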
- resp = util.ResponseObj({'foreign-key': 100, 'foo': (1, 2)}) - list_translator = {'translation-type': 'LIST', - 'table-name': 'subtable', - 'parent-key': 'foreign-key', - 'val-col': 'val_col', - 'translator': self.val_trans} - translator = {'translation-type': 'HDICT', 'table-name': 'testtable', - 'id-col': 'id', 'selector-type': 'DOT_SELECTOR', - 'field-translators': ({'fieldname': 'foreign-key', - 'translator': self.val_trans}, - {'fieldname': 'foo', - 'translator': list_translator})} - - rows, k = datasource_driver.DataSourceDriver.convert_obj(resp, - translator) - - self.assertEqual(3, len(rows)) - self.assertEqual(self.compute_hash((100,)), k) - - self.assertIn(('subtable', (100, 1)), rows) - self.assertIn(('subtable', (100, 2)), rows) - self.assertIn(('testtable', (k, 100)), rows) - - def test_convert_vdict_list_parent_key_without_id(self): - # Test a VDICT that contains lists using a parent_key. - resp = {'foo': (1, 2, 3), 'bar': ('a', 'b')} - translator = {'translation-type': 'VDICT', 'table-name': 'testtable', - 'key-col': 'key', 'val-col': 'value', - 'translator': {'translation-type': 'LIST', - 'table-name': 'subtable', - 'parent-key': 'key', - 'val-col': 'val_col', - 'translator': self.val_trans}} - rows, actual_k = datasource_driver.DataSourceDriver.convert_obj( - resp, translator) - - self.assertEqual(7, len(rows)) - self.assertIsNone(actual_k) - - self.assertIn(('subtable', ('foo', 1)), rows) - self.assertIn(('subtable', ('foo', 2)), rows) - self.assertIn(('subtable', ('foo', 3)), rows) - self.assertIn(('subtable', ('bar', 'a')), rows) - self.assertIn(('subtable', ('bar', 'b')), rows) - self.assertIn(('testtable', ('foo',)), rows) - self.assertIn(('testtable', ('bar',)), rows) - - def test_convert_vdict_list_parent_key_with_id(self): - # Test a VDICT that contains lists using a parent_key. - resp = {'foo': (1, 2, 3), 'bar': ('a', 'b')} - translator = {'translation-type': 'VDICT', 'table-name': 'testtable', - 'id-col': 'id', 'key-col': 'key', 'val-col': 'value', - 'translator': {'translation-type': 'LIST', - 'table-name': 'subtable', - 'parent-key': 'key', - 'val-col': 'val_col', - 'translator': self.val_trans}} - rows, actual_k = datasource_driver.DataSourceDriver.convert_obj( - resp, translator) - - k = self.compute_hash((('foo',), ('bar',))) - - self.assertEqual(7, len(rows)) - self.assertEqual(k, actual_k) - - self.assertIn(('subtable', ('foo', 1)), rows) - self.assertIn(('subtable', ('foo', 2)), rows) - self.assertIn(('subtable', ('foo', 3)), rows) - self.assertIn(('subtable', ('bar', 'a')), rows) - self.assertIn(('subtable', ('bar', 'b')), rows) - self.assertIn(('testtable', (k, 'foo')), rows) - self.assertIn(('testtable', (k, 'bar')), rows) - - def test_convert_bad_params(self): - def verify_invalid_params(translator, err_msg): - args = helper.datasource_openstack_args() - driver = datasource_driver.DataSourceDriver('', args=args) - try: - driver.register_translator(translator) - except exception.InvalidParamException as e: - self.assertIn(err_msg, str(e)) - else: - self.fail("Expected InvalidParamException but got none") - - # Test an invalid translation-type. 
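-        # Each case below registers a translator with a missing, misspelled,
-        # or unknown parameter and checks that the InvalidParamException
-        # message names the offending key.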
-        verify_invalid_params(
-            {'translation-typeXX': 'VDICT', 'table-name': 'testtable',
-             'id-col': 'id_col', 'key-col': 'key', 'val-col': 'value',
-             'translator': self.val_trans},
-            'Param (translation-type) must be in translator')
-
-        # Test invalid HDICT params
-        verify_invalid_params(
-            {'translation-type': 'HDICT', 'table-nameXX': 'testtable',
-             'id-col': 'id_col', 'selector-type': 'DOT_SELECTOR',
-             'field-translators': ({'fieldname': 'abc',
-                                    'translator': self.val_trans},)},
-            'Params (table-nameXX) are invalid')
-
-        # Test invalid HDICT field translator params
-        verify_invalid_params(
-            {'translation-type': 'HDICT', 'table-name': 'testtable',
-             'id-col': 'id_col', 'selector-type': 'DOT_SELECTOR',
-             'field-translators':
-                 ({'fieldname': 'abc',
-                   'translator': {'translation-typeXX': 'VALUE'}},)},
-            'Param (translation-type) must be in translator')
-
-        # Test invalid HDICT field translator params
-        verify_invalid_params(
-            {'translation-type': 'HDICT', 'table-name': 'testtable',
-             'id-col': 'id_col', 'selector-type': 'DOT_SELECTOR',
-             'field-translators':
-                 ({'fieldname': 'abc',
-                   'translator': {'translation-type': 'VALUE',
-                                  'XX': 123}},)},
-            'Params (XX) are invalid')
-
-        # Test invalid VDICT params
-        verify_invalid_params(
-            {'translation-type': 'VDICT', 'table-nameXX': 'testtable',
-             'id-col': 'id_col', 'key-col': 'key', 'val-col': 'value',
-             'translator': self.val_trans},
-            'Params (table-nameXX) are invalid')
-
-        # Test invalid VDICT sub translator params
-        verify_invalid_params(
-            {'translation-type': 'VDICT', 'table-name': 'testtable',
-             'id-col': 'id_col', 'key-col': 'key', 'val-col': 'value',
-             'translator': {'translation-typeXX': 'VALUE'}},
-            'Param (translation-type) must be in translator')
-
-        # Test invalid VDICT sub translator params
-        verify_invalid_params(
-            {'translation-type': 'VDICT', 'table-nameXX': 'testtable',
-             'id-col': 'id_col', 'key-col': 'key', 'val-col': 'value',
-             'translator': {'translation-type': 'VALUE', 'XX': 123}},
-            'Params (table-nameXX) are invalid')
-
-        # Test invalid LIST params
-        verify_invalid_params(
-            {'translation-type': 'LIST', 'table-nameXX': 'testtable',
-             'id-col': 'id_col', 'val-col': 'value',
-             'translator': self.val_trans},
-            'Params (table-nameXX) are invalid')
-
-        # Test invalid LIST sub translator params
-        verify_invalid_params(
-            {'translation-type': 'LIST', 'table-name': 'testtable',
-             'id-col': 'id_col', 'val-col': 'value',
-             'translator': {'translation-typeXX': 'VALUE'}},
-            'Param (translation-type) must be in translator')
-
-        # Test invalid LIST sub translator params
-        verify_invalid_params(
-            {'translation-type': 'LIST', 'table-name': 'testtable',
-             'id-col': 'id_col', 'val-col': 'value',
-             'translator': {'translation-type': 'VALUE', 'XX': 123}},
-            'Params (XX) are invalid')
-
-    def test_get_schema(self):
-        class TestDriver(datasource_driver.DataSourceDriver):
-            translator = {
-                'translation-type': 'HDICT',
-                'table-name': 'testtable',
-                'selector-type': 'DOT_SELECTOR',
-                'field-translators': (
-                    {'fieldname': 'testfield1',
-                     'col': 'parent_col1',
-                     'translator': {'translation-type': 'HDICT',
-                                    'table-name': 'subtable1',
-                                    'id-col': 'id1',
-                                    'field-translators': (
-                                        {'fieldname': 'a',
-                                         'col': 'a1',
-                                         'translator': self.val_trans},
-                                        {'fieldname': 'b',
-                                         'col': 'b1',
-                                         'translator': self.val_trans})}},
-                    {'fieldname': 'testfield2',
-                     'translator': {'translation-type': 'HDICT',
-                                    'table-name': 'subtable2',
-                                    'id-col': 'id2',
-                                    'field-translators': (
-                                        {'fieldname': 'c',
-                                         'col': 'c1',
-                                         'translator': self.val_trans},
-                                        {'fieldname': 'd',
-                                         'col': 'd1',
-                                         'translator': self.val_trans})}},
-                    {'fieldname': 'ztestfield3', 'col': 'zparent_col3',
-                     'translator': self.val_trans},
-                    {'fieldname': 'testfield4', 'col': 'parent_col4',
-                     'translator': {'translation-type': 'VALUE',
-                                    'extract-fn': lambda x: x.id}},
-                    {'fieldname': 'testfield5', 'col': 'parent_col5',
-                     'translator': {'translation-type': 'VDICT',
-                                    'table-name': 'subtable3', 'id-col': 'id3',
-                                    'key-col': 'key3', 'val-col': 'value3',
-                                    'translator': self.val_trans}},
-                    {'fieldname': 'testfield6', 'col': 'parent_col6',
-                     'translator': {'translation-type': 'LIST',
-                                    'table-name': 'subtable4', 'id-col': 'id4',
-                                    'val-col': 'value4',
-                                    'translator': self.val_trans}},
-                    {'fieldname': 'testfield7', 'col': 'parent_col7',
-                     'translator': {'translation-type': 'VDICT',
-                                    'table-name': 'subtable5',
-                                    'key-col': 'key5', 'val-col': 'value5',
-                                    'translator': self.val_trans}},
-                    {'fieldname': 'testfield8', 'col': 'parent_col8',
-                     'translator': {'translation-type': 'LIST',
-                                    'table-name': 'subtable6',
-                                    'val-col': 'value6',
-                                    'translator': self.val_trans}})}
-
-            TRANSLATORS = [translator]
-
-            def __init__(self):
-                super(TestDriver, self).__init__('', None)
-
-        schema = TestDriver().get_schema()
-        self.assertEqual(7, len(schema))
-
-        self.assertEqual(({'name': 'id1', 'desc': None},
-                          {'name': 'a1', 'desc': None},
-                          {'name': 'b1', 'desc': None}), schema['subtable1'])
-        self.assertEqual(({'name': 'id2', 'desc': None},
-                          {'name': 'c1', 'desc': None},
-                          {'name': 'd1', 'desc': None}), schema['subtable2'])
-        self.assertEqual(('id3', 'key3', 'value3'), schema['subtable3'])
-        self.assertEqual(
-            ({'name': 'id4', 'desc': None},
-             {'name': 'value4', 'desc': None}), schema['subtable4'])
-        self.assertEqual(('key5', 'value5'), schema['subtable5'])
-        self.assertEqual(({'name': 'value6',
-                           'desc': None},), schema['subtable6'])
-        self.assertEqual(
-            ({'name': 'parent_col1', 'desc': None},
-             {'name': 'testfield2', 'desc': None},
-             {'name': 'zparent_col3', 'desc': None},
-             {'name': 'parent_col4', 'desc': None},
-             {'name': 'parent_col5', 'desc': None},
-             {'name': 'parent_col6', 'desc': None},
-             {'name': 'parent_col7', 'desc': None},
-             {'name': 'parent_col8', 'desc': None}), schema['testtable'])
-
-    def test_get_schema_with_table_reuse(self):
-        class TestDriver(datasource_driver.DataSourceDriver):
-            translator = {'translation-type': 'LIST',
-                          'table-name': 'testtable',
-                          'id-col': 'id_col', 'val-col': 'value',
-                          'translator': {'translation-type': 'LIST',
-                                         'table-name': 'testtable',
-                                         'id-col': 'id', 'val-col': 'val',
-                                         'translator': self.val_trans}}
-
-            TRANSLATORS = [translator]
-
-            def __init__(self):
-                super(TestDriver, self).__init__('', None)
-
-        try:
-            TestDriver().get_schema()
-        except exception.DuplicateTableName as e:
-            self.assertIn('table (testtable) used twice', str(e))
-        else:
-            self.fail("Expected DuplicateTableName but got none")
-
-    def test_get_schema_with_hdict_parent(self):
-        class TestDriver(datasource_driver.DataSourceDriver):
-            subtranslator = {'translation-type': 'LIST',
-                             'table-name': 'subtable',
-                             'parent-key': 'id', 'val-col': 'val',
-                             'translator': self.val_trans}
-
-            translator = {'translation-type': 'HDICT',
-                          'table-name': 'testtable',
-                          'id-col': 'id_col',
-                          'selector-type': 'DICT_SELECTOR',
-                          'field-translators': ({'fieldname': 'unique_key',
-                                                 'translator': self.val_trans},
-                                                {'fieldname': 'sublist',
-                                                 'translator': subtranslator})}
-
-            TRANSLATORS = [translator]
-
-            def __init__(self):
-                super(TestDriver,
self).__init__('', None) - - schema = TestDriver().get_schema() - - self.assertEqual(2, len(schema)) - self.assertEqual( - ({'name': 'id_col', 'desc': None}, - {'name': 'unique_key', 'desc': None}), schema['testtable']) - self.assertEqual( - ({'name': 'parent_key', 'desc': None}, - {'name': 'val', 'desc': None}), schema['subtable']) - - def test_get_schema_with_hdict_id_function(self): - class TestDriver(datasource_driver.DataSourceDriver): - translator = { - 'translation-type': 'HDICT', - 'table-name': 'testtable', - 'id-col': lambda obj: obj, - 'selector-type': 'DICT_SELECTOR', - 'field-translators': ({'fieldname': 'field1', - 'desc': 'test-field-1', - 'translator': self.val_trans}, - {'fieldname': 'field2', - 'desc': 'test-field-2', - 'translator': self.val_trans})} - - TRANSLATORS = [translator] - - def __init__(self): - super(TestDriver, self).__init__('', None) - - schema = TestDriver().get_schema() - - self.assertEqual(1, len(schema)) - self.assertEqual( - ({'name': 'id-col', 'desc': None}, - {'name': 'field1', 'desc': 'test-field-1'}, - {'name': 'field2', 'desc': 'test-field-2'}), schema['testtable']) - - def test_get_schema_with_vdict_parent(self): - class TestDriver(datasource_driver.DataSourceDriver): - subtranslator = {'translation-type': 'LIST', - 'table-name': 'subtable', - 'parent-key': 'id_col', 'val-col': 'val', - 'translator': self.val_trans} - - translator = {'translation-type': 'VDICT', - 'table-name': 'testtable', - 'id-col': 'id_col', - 'key-col': 'key', - 'val-col': 'val', - 'translator': subtranslator} - - TRANSLATORS = [translator] - - def __init__(self): - super(TestDriver, self).__init__('', None) - - schema = TestDriver().get_schema() - - self.assertEqual(2, len(schema)) - self.assertEqual(('id_col', 'key'), schema['testtable']) - self.assertEqual( - ({'name': 'parent_key', 'desc': None}, - {'name': 'val', 'desc': None}), schema['subtable']) - - def test_get_tablename(self): - class TestDriver(datasource_driver.DataSourceDriver): - translator1 = { - 'translation-type': 'HDICT', - 'table-name': 'table-name1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'col1', 'translator': self.val_trans}, - {'fieldname': 'col2', 'translator': self.val_trans}) - } - TRANSLATORS = [translator1] - - def __init__(self): - super(TestDriver, self).__init__('', None) - - expected_ret = 'table-name1' - ret = TestDriver().get_tablename('table-name1') - self.assertEqual(expected_ret, ret) - - def test_get_tablenames(self): - class TestDriver(datasource_driver.DataSourceDriver): - translator1 = { - 'translation-type': 'HDICT', - 'table-name': 'table-name1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'col1', 'translator': self.val_trans}, - {'fieldname': 'col2', 'translator': self.val_trans}) - } - translator2 = { - 'translation-type': 'HDICT', - 'table-name': 'table-name2', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'col1', 'translator': self.val_trans}, - {'fieldname': 'col2', 'translator': self.val_trans}) - } - - TRANSLATORS = [translator1, translator2] - - def __init__(self): - super(TestDriver, self).__init__('', None) - - expected_ret = ['table-name1', 'table-name2'] - ret = TestDriver().get_tablenames() - self.assertEqual(set(expected_ret), set(ret)) - - def test_get_row_data(self): - class TestDriver(datasource_driver.DataSourceDriver): - def __init__(self): - super(TestDriver, self).__init__('', None) - - test_driver = TestDriver() - test_driver.state = {'fake_table': [('d1', 'd2'), 
('d3', 'd4')]} - result = test_driver.get_row_data('fake_table') - expected = [{'data': ('d1', 'd2')}, - {'data': ('d3', 'd4')}] - self.assertItemsEqual(expected, result) - - def test_nested_get_tables(self): - class TestDriver(datasource_driver.DataSourceDriver): - translator2 = { - 'translation-type': 'HDICT', - 'table-name': 'table-name2', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'col1', 'translator': self.val_trans}, - {'fieldname': 'col2', 'translator': self.val_trans}) - } - - translator1 = { - 'translation-type': 'HDICT', - 'table-name': 'table-name1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'col1', 'translator': self.val_trans}, - {'fieldname': 'col2', 'translator': translator2}) - } - - TRANSLATORS = [translator1] - - def __init__(self): - super(TestDriver, self).__init__('', None) - - expected_ret = ['table-name1', 'table-name2'] - ret = TestDriver().get_tablenames() - self.assertEqual(set(expected_ret), set(ret)) - - def test_update_state_on_changed(self): - mocked_self = mock.MagicMock() - mocked_self.raw_state = dict() - resource = 'fake_resource' - - @datasource_utils.update_state_on_changed(resource) - def _translate_raw_data(_self, raw_data): - return mock.sentinel.translated_data - - result = _translate_raw_data(mocked_self, mock.sentinel.raw_data) - - self.assertEqual(mock.sentinel.translated_data, result) - self.assertEqual(mock.sentinel.raw_data, - mocked_self.raw_state[resource]) - mocked_self._update_state.assert_called_once_with( - resource, mock.sentinel.translated_data) - - # raw data is not changed, don't translate anything. - result = _translate_raw_data(mocked_self, mock.sentinel.raw_data) - - self.assertEqual([], result) - self.assertEqual(mock.sentinel.raw_data, - mocked_self.raw_state[resource]) - mocked_self._update_state.assert_called_once_with( - resource, mock.sentinel.translated_data) - - def test_update_state_on_changed_with_changed_raw_data(self): - mocked_self = mock.MagicMock() - mocked_self.raw_state = dict() - resource = 'fake_resource' - mocked_self.raw_state[resource] = mock.sentinel.last_data - - @datasource_utils.update_state_on_changed(resource) - def _translate_raw_data(_self, raw_data): - return mock.sentinel.translated_data - - result = _translate_raw_data(mocked_self, mock.sentinel.new_data) - - self.assertEqual(mock.sentinel.translated_data, result) - self.assertEqual(mock.sentinel.new_data, - mocked_self.raw_state[resource]) - mocked_self._update_state.assert_called_once_with( - resource, mock.sentinel.translated_data) - - def test_update_state_on_changed_with_empty_raw_data(self): - mocked_self = mock.MagicMock() - mocked_self.raw_state = dict() - resource = 'fake_resource' - mocked_self.raw_state[resource] = mock.sentinel.last_data - - @datasource_utils.update_state_on_changed(resource) - def _translate_raw_data(_self, raw_data): - return [] - - result = _translate_raw_data(mocked_self, []) - - self.assertEqual([], result) - self.assertEqual([], mocked_self.raw_state[resource]) - mocked_self._update_state.assert_called_once_with(resource, []) - - # The test case should be removed, once oslo-incubator bug/1499369 is - # resolved. 
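Editor's aside: a minimal illustration of the equality pitfall behind the next test (plain Python, unrelated to Congress internals): if cached objects compare equal to everything, `old == new` can never signal a change, so the driver must also replace the cached objects themselves.

```python
# An object whose __eq__ always answers True defeats list comparison as a
# change check; only identity still distinguishes old data from new.
class AlwaysEqual(object):
    def __eq__(self, other):
        return True

old, new = [AlwaysEqual()], [AlwaysEqual()]
assert old == new            # looks "unchanged" despite different objects
assert old[0] is not new[0]  # identity reveals the replacement
```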
- def test_update_state_on_changed_with_wrong_eq(self): - class EqObject(object): - def __eq__(self, other): - return True - - mocked_self = mock.MagicMock() - mocked_self.raw_state = dict() - resource = 'fake_resource' - cached_data = EqObject() - mocked_self.raw_state[resource] = [cached_data] - - @datasource_utils.update_state_on_changed(resource) - def _translate_raw_data(_self, raw_data): - return [] - - new_data = EqObject() - _translate_raw_data(mocked_self, [new_data]) - mocked_self._update_state.assert_called_once_with(resource, []) - self.assertIs(new_data, mocked_self.raw_state[resource][0]) - - def test_update_state(self): - class TestDriver(datasource_driver.DataSourceDriver): - def __init__(self): - super(TestDriver, self).__init__('', None) - - test_driver = TestDriver() - test_driver.state = {'fake_table': set(), 'foo_table': set(), - 'unchanged_table': {mock.sentinel.data}} - test_driver._table_deps = {'fake_table': ['fake_table', 'foo_table'], - 'unchanged_table': ['unchanged_table']} - - row_data = [('fake_table', mock.sentinel.data1), - ('fake_table', mock.sentinel.data2), - ('foo_table', mock.sentinel.data3)] - expected_state = {'fake_table': {mock.sentinel.data1, - mock.sentinel.data2}, - 'foo_table': {mock.sentinel.data3}, - 'unchanged_table': {mock.sentinel.data}} - - test_driver._update_state('fake_table', row_data) - - self.assertEqual(expected_state, test_driver.state) - - def test_update_state_with_undefined_table(self): - class TestDriver(datasource_driver.DataSourceDriver): - def __init__(self): - super(TestDriver, self).__init__('', None) - - test_driver = TestDriver() - test_driver.state = {'fake_table': set(), 'foo_table': set()} - test_driver._table_deps = {'fake_table': ['fake_table', 'foo_table']} - - row_data = [('fake_table', mock.sentinel.data1), - ('foo_table', mock.sentinel.data2), - ('undefined_table', mock.sentinel.data3)] - expected_state = {'fake_table': {mock.sentinel.data1}, - 'foo_table': {mock.sentinel.data2}} - - test_driver._update_state('fake_table', row_data) - - self.assertEqual(expected_state, test_driver.state) - - def test_update_state_with_none_row_data(self): - class TestDriver(datasource_driver.DataSourceDriver): - def __init__(self): - super(TestDriver, self).__init__('', None) - - test_driver = TestDriver() - test_driver.state = {'fake_table': {mock.sentinel.data1}, - 'foo_table': {mock.sentinel.data2}} - test_driver._table_deps = {'fake_table': ['fake_table', 'foo_table']} - - expected_state = {'fake_table': set(), 'foo_table': set()} - test_driver._update_state('fake_table', []) - - self.assertEqual(expected_state, test_driver.state) - - def test_update_state_with_part_none_row_data(self): - class TestDriver(datasource_driver.DataSourceDriver): - def __init__(self): - super(TestDriver, self).__init__('', None) - - test_driver = TestDriver() - test_driver.state = {'fake_table': set(), - 'foo_table': {mock.sentinel.data3}} - test_driver._table_deps = {'fake_table': ['fake_table', 'foo_table']} - - row_data = [('fake_table', mock.sentinel.data1), - ('fake_table', mock.sentinel.data2)] - expected_state = {'fake_table': {mock.sentinel.data1, - mock.sentinel.data2}, - 'foo_table': set()} - - test_driver._update_state('fake_table', row_data) - - self.assertEqual(expected_state, test_driver.state) - - def test_build_table_deps(self): - level10_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level10', - 'parent-key': 'parent_key', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - 
({'fieldname': 'level3_thing', 'translator': self.val_trans},)} - - level3_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level3', - 'parent-key': 'parent_key', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'field-translators': - ({'fieldname': 'level3_thing', 'translator': self.val_trans},)} - - level2_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level2', - 'parent-key': 'id', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'thing', 'translator': self.val_trans}, - {'fieldname': 'level3', - 'translator': level3_translator})} - - level1_translator = { - 'translation-type': 'HDICT', - 'table-name': 'level1', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': self.val_trans}, - {'fieldname': 'level2', - 'translator': level2_translator})} - - driver = datasource_driver.DataSourceDriver('', None) - driver.register_translator(level1_translator) - driver.register_translator(level10_translator) - expected_table_deps = {'level1': ['level1', 'level2', 'level3'], - 'level10': ['level10']} - self.assertEqual(expected_table_deps, driver._table_deps) - - @mock.patch.object(eventlet, 'spawn') - def test_init_consistence_with_exception(self, mock_spawn): - class TestDriver(datasource_driver.DataSourceDriver): - def __init__(self): - super(TestDriver, self).__init__('', None) - self.do_something() - self._init_end_start_poll() - - def do_something(self): - pass - - with mock.patch.object(TestDriver, "do_something", - side_effect=Exception()): - test_driver = None - try: - test_driver = TestDriver() - self.fail("Exception should be raised") - except Exception: - self.assertEqual(0, mock_spawn.call_count) - self.assertIsNone(test_driver) - - def test_objects_extract_func(self): - def translate_json_str_to_list(objs): - result = [] - data_list = objs['result'] - for k, v in data_list.items(): - dict_obj = json.loads(v) - for key, value in dict_obj.items(): - obj = { - 'key': key, - 'value': value - } - result.append(obj) - - return result - - test_translator = { - 'translation-type': 'HDICT', - 'table-name': 'test', - 'selector-type': 'DICT_SELECTOR', - 'objects-extract-fn': translate_json_str_to_list, - 'field-translators': - ({'fieldname': 'key', 'translator': self.val_trans}, - {'fieldname': 'value', 'translator': self.val_trans}) - } - - objs = { - "result": { - "data1": """{"key1": "value1", "key2": "value2"}""", - } - } - - expected_ret = [('test', ('key1', 'value1')), - ('test', ('key2', 'value2'))] - - driver = datasource_driver.DataSourceDriver('', None) - driver.register_translator(test_translator) - - ret = driver.convert_objs(objs, test_translator) - - for row in ret: - self.assertIn(row, expected_ret) - expected_ret.remove(row) - self.assertEqual([], expected_ret) - - def test_recursive_objects_extract_func(self): - def translate_json_str_to_list(objs): - result = [] - data_str = objs['data'] - dict_list = json.loads(data_str) - for key, value in dict_list.items(): - obj = { - 'key': key, - 'value': value - } - result.append(obj) - return result - - test_child_translator = { - 'translation-type': 'HDICT', - 'table-name': 'test-child', - 'parent-key': 'id', - 'parent-col-name': 'result', - 'selector-type': 'DICT_SELECTOR', - 'in-list': True, - 'objects-extract-fn': translate_json_str_to_list, - 'field-translators': - ({'fieldname': 'key', 'translator': self.val_trans}, - {'fieldname': 'value', 'translator': self.val_trans}) - } - - test_parent_translator = { - 'translation-type': 
'HDICT', - 'table-name': 'test-parent', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': self.val_trans}, - {'fieldname': 'result', 'translator': test_child_translator}) - } - - expected_ret = [('test-parent', ('id-1', )), - ('test-child', ('id-1', 'key1', 'value1')), - ('test-child', ('id-1', 'key2', 'value2'))] - - objs = [{ - "id": "id-1", - "result": { - "data": """{"key1": "value1", "key2": "value2"}""", - } - }] - - driver = datasource_driver.DataSourceDriver('', None) - driver.register_translator(test_parent_translator) - - ret = driver.convert_objs(objs, test_parent_translator) - - for row in ret: - self.assertIn(row, expected_ret) - expected_ret.remove(row) - self.assertEqual([], expected_ret) - -# Old version -# class TestPollingDataSourceDriver(base.TestCase): -# class TestDriver(datasource_driver.PollingDataSourceDriver): -# def __init__(self): -# super(TestPollingDataSourceDriver.TestDriver, self).__init__( -# '', '', None, None, None) -# self._init_end_start_poll() - -# def setUp(self): -# super(TestPollingDataSourceDriver, self).setUp() - -# @mock.patch.object(eventlet, 'spawn') -# def test_init_consistence(self, mock_spawn): -# test_driver = TestPollingDataSourceDriver.TestDriver() -# mock_spawn.assert_called_once_with(test_driver.poll_loop, -# test_driver.poll_time) -# self.assertTrue(test_driver.initialized) -# self.assertIsNotNone(test_driver.worker_greenthread) - -# @mock.patch.object(eventlet.greenthread, 'kill') -# @mock.patch.object(eventlet, 'spawn') -# def test_cleanup(self, mock_spawn, mock_kill): -# dummy_thread = dict() -# mock_spawn.return_value = dummy_thread - -# test_driver = TestPollingDataSourceDriver.TestDriver() - -# self.assertEqual(test_driver.worker_greenthread, dummy_thread) - -# test_driver.cleanup() - -# mock_kill.assert_called_once_with(dummy_thread) -# self.assertIsNone(test_driver.worker_greenthread) - - -class TestPollingDataSourceDriver(base.TestCase): - class TestDriver(datasource_driver.PollingDataSourceDriver): - def __init__(self): - super(TestPollingDataSourceDriver.TestDriver, self).__init__() - self.node = 'node' - self._rpc_server = mock.MagicMock() - self._init_end_start_poll() - - def setUp(self): - super(TestPollingDataSourceDriver, self).setUp() - - @mock.patch.object(eventlet, 'spawn') - def test_init_consistence(self, mock_spawn): - test_driver = TestPollingDataSourceDriver.TestDriver() - mock_spawn.assert_not_called() - self.assertIsNone(test_driver.worker_greenthread) - test_driver.start() - mock_spawn.assert_called_once_with(test_driver.poll_loop, - test_driver.poll_time) - self.assertTrue(test_driver.initialized) - self.assertIsNotNone(test_driver.worker_greenthread) - - @mock.patch.object(eventlet.greenthread, 'kill') - @mock.patch.object(eventlet, 'spawn') - def test_cleanup(self, mock_spawn, mock_kill): - dummy_thread = mock.MagicMock() - mock_spawn.return_value = dummy_thread - - test_driver = TestPollingDataSourceDriver.TestDriver() - test_driver.start() - - self.assertEqual(test_driver.worker_greenthread, dummy_thread) - - test_driver.stop() - - mock_kill.assert_called_once_with(dummy_thread) - self.assertIsNone(test_driver.worker_greenthread) - - def test_evaluate_lazy_table(self): - args = {'lazy_tables': ['fake_table']} - test_driver = fake_datasource.FakeDataSource(args=args) - - self.assertNotIn('fake_table', test_driver._table_deps) - test_driver.update_from_datasource() - self.assertEqual(test_driver.update_number, 0) - - test_driver.get_snapshot('fake_table') - - 
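Editor's aside: a hedged sketch of the lazy-table behaviour the assertions below depend on (names are illustrative, not the FakeDataSource API): a table listed in lazy_tables is left out of the polling dependency map until a snapshot of it is first requested.

```python
# Sketch only: lazy tables join the update set on first access.
class LazyTableSketch(object):
    def __init__(self, lazy_tables):
        self.lazy = set(lazy_tables)
        self.active = set()      # stands in for _table_deps membership

    def get_snapshot(self, table):
        self.active.add(table)   # first read activates polling updates

sketch = LazyTableSketch(['fake_table'])
assert 'fake_table' not in sketch.active
sketch.get_snapshot('fake_table')
assert 'fake_table' in sketch.active
```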
self.assertIn('fake_table', test_driver._table_deps) - test_driver.update_from_datasource() - # update happens twice before the check. First one is in get_snapshot. - self.assertEqual(test_driver.update_number, 2) - - def test_add_update_method(self): - class TestDriver(datasource_driver.PollingDataSourceDriver): - test_translator = { - 'table-name': 'test' - } - - def __init__(self): - super(TestDriver, self).__init__('', None) - self.add_update_method(self.update_method, - self.test_translator) - - def update_method(self): - pass - - test_driver = TestDriver() - self.assertEqual(test_driver.update_methods['test'], - test_driver.update_method) - - def test_add_duplicated_update_method(self): - class TestDriver(datasource_driver.PollingDataSourceDriver): - test_translator = { - 'table-name': 'test' - } - - def __init__(self): - super(TestDriver, self).__init__('', None) - self.add_update_method(self.update_method, - self.test_translator) - - def update_method(self): - pass - - test_driver = TestDriver() - self.assertRaises(exception.Conflict, test_driver.add_update_method, - test_driver.update_method, - test_driver.test_translator) - - -class TestPushedDriver(base.SqlTestCase): - class TestDriver(datasource_driver.PushedDataSourceDriver): - value_trans = {'translation-type': 'VALUE'} - test_translator = { - 'translation-type': 'HDICT', - 'table-name': 'test_translator', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'status', 'translator': value_trans}) - } - - TRANSLATORS = [test_translator] - - def __init__(self, args=None): - super(TestPushedDriver.TestDriver, self).__init__('test-pushed', - args) - - def setUp(self): - super(TestPushedDriver, self).setUp() - - def test_init_push_driver(self): - test_driver = TestPushedDriver.TestDriver() - self.assertTrue(test_driver.initialized) - - @mock.patch.object(datasource_driver.DataSourceDriver, 'publish') - def test_push_entire_data(self, mock_publish): - test_driver = TestPushedDriver.TestDriver() - obj = [ - {'id': 1, 'name': 'column1', 'status': 'up'}, - {'id': 2, 'name': 'column2', 'status': 'down'} - ] - test_driver.update_entire_data('test_translator', obj) - expected_state = set([ - (1, 'column1', 'up'), - (2, 'column2', 'down')]) - - mock_publish.assert_called_with('test_translator', - test_driver.state['test_translator']) - self.assertEqual(expected_state, test_driver.state['test_translator']) - - @mock.patch.object(datasource_driver.DataSourceDriver, 'publish') - def test_persist_data(self, mock_publish): - ds_id = uuidutils.generate_uuid() - obj = [ - {'id': 1, 'name': 'column1', 'status': 'up'}, - {'id': 2, 'name': 'column2', 'status': 'down'} - ] - - # test no persist if not enabled - test_driver = TestPushedDriver.TestDriver( - args={'ds_id': ds_id, 'persist_data': False}) - test_driver.update_entire_data('test_translator', obj) - expected_state = set([ - (1, 'column1', 'up'), - (2, 'column2', 'down')]) - self.assertEqual(expected_state, test_driver.state['test_translator']) - self.assertEqual( - [], db_ds_table_data.get_ds_table_data(test_driver.ds_id)) - - # test data persisted in DB - test_driver = TestPushedDriver.TestDriver( - args={'ds_id': ds_id, 'persist_data': True}) - test_driver.update_entire_data('test_translator', obj) - expected_state = set([ - (1, 'column1', 'up'), - (2, 'column2', 'down')]) - self.assertEqual(expected_state, test_driver.state['test_translator']) - self.assertEqual( - 
[{'tablename': 'test_translator', 'tabledata': expected_state}],
-            db_ds_table_data.get_ds_table_data(test_driver.ds_id))
-
-        # test no restoring persisted data if not enabled
-        del test_driver
-        test_driver = TestPushedDriver.TestDriver(
-            args={'ds_id': ds_id, 'persist_data': False})
-        self.assertEqual(0, len(test_driver.state['test_translator']))
-        self.assertEqual(set([]), test_driver.state['test_translator'])
-
-        # test restoring persisted data
-        del test_driver
-        test_driver = TestPushedDriver.TestDriver(
-            args={'ds_id': ds_id, 'persist_data': True})
-        self.assertEqual(expected_state, test_driver.state['test_translator'])
-
-
-class TestExecutionDriver(base.TestCase):
-    class ExtendedExecutionDriver(datasource_driver.ExecutionDriver):
-        """Subclass of test target.
-
-        ExecutionDriver is an add-on class for a datasource driver, so it
-        is assumed to have the heartbeat_callbacks variable defined in the
-        DataService class.
-        """
-        def __init__(self):
-            # A variable defined in datasource_driver
-            self.heartbeat_callbacks = {}
-            super(TestExecutionDriver.ExtendedExecutionDriver, self).__init__()
-
-    def setUp(self):
-        super(TestExecutionDriver, self).setUp()
-        self.exec_driver = TestExecutionDriver.ExtendedExecutionDriver()
-
-    def test_get_method_nested(self):
-        class server(object):
-            def nested_method(self):
-                return True
-
-        class NovaClient(object):
-            def __init__(self):
-                self.servers = server()
-
-            def top_method(self):
-                return True
-
-        nova_client = NovaClient()
-        method = self.exec_driver._get_method(nova_client,
-                                              "servers.nested_method")
-        self.assertTrue(method())
-
-    def test_get_method_top(self):
-        class NovaClient(object):
-            def top_method(self):
-                return True
-
-        nova_client = NovaClient()
-        method = self.exec_driver._get_method(nova_client, "top_method")
-        self.assertTrue(method())
-
-    def test_execute_api(self):
-        class NovaClient(object):
-            def action(self, arg1, arg2, arg3):
-                return "arg1=%s arg2=%s arg3=%s" % (arg1, arg2, arg3)
-
-        nova_client = NovaClient()
-        arg = {"positional": ["value1", "value2"], "named": {"arg3": "value3"}}
-        # _execute_api raises an exception if it fails to locate the API
-        # method, so simply calling it here is the assertion
-        self.exec_driver._execute_api(nova_client, "action", arg)
-
-    def test_get_actions_order_by_name(self):
-        mock_methods = {'funcA': mock.MagicMock(),
-                        'funcH': mock.MagicMock(),
-                        'funcF': mock.MagicMock()}
-        with mock.patch.dict(self.exec_driver.executable_methods,
-                             mock_methods):
-            action_list = self.exec_driver.get_actions().get('results')
-            expected_list = copy.deepcopy(action_list)
-            expected_list.sort(key=lambda item: item['name'])
-            self.assertEqual(expected_list, action_list)
-
-    def test_add_executable_client_methods(self):
-        class FakeNovaClient(object):
-
-            def _internal_action(self, arg1, arg2):
-                """internal action with docs.
-
-                :param arg1: internal test arg1
-                :param arg2: internal test arg2
-                """
-                pass
-
-            def action_no_doc(self, arg1, arg2):
-                pass
-
-            def action_doc(self, arg1, arg2):
-                """action with docs.
-
-                :param arg1: test arg1
-                :param arg2: test arg2
-                """
-                pass
-
-        expected_methods = {'action_doc': [[{'desc': 'arg1: test arg1',
-                                             'name': 'arg1'},
-                                            {'desc': 'arg2: test arg2',
-                                             'name': 'arg2'}],
-                                           'action with docs. 
'], - 'action_no_doc': [[], '']} - - nova_client = FakeNovaClient() - api_prefix = 'congress.tests.datasources.test_datasource_driver' - self.exec_driver.add_executable_client_methods(nova_client, api_prefix) - self.assertEqual(expected_methods, self.exec_driver.executable_methods) - - @mock.patch.object(eventlet, 'spawn') - def test_request_execute_with_wait(self, mock_spawn): - thread = mock.MagicMock() - mock_spawn.return_value = thread - - context = {'node_id': 'fake_node'} - action = 'test-action' - action_args = () - - test_driver = fake_datasource.FakeDataSource() - test_driver.request_execute(context, action, action_args, True) - mock_spawn.assert_called_once_with(test_driver.execute, - action, action_args) - thread.wait.assert_called_once() - - @mock.patch.object(eventlet, 'spawn') - @mock.patch.object(eventlet.greenthread.GreenThread, 'wait') - def test_request_execute_without_wait(self, mock_wait, mock_spawn): - mock_wait.return_value = mock.MagicMock() - mock_spawn.return_value = mock.MagicMock() - - context = {'node_id': 'fake_node'} - action = 'test-action' - action_args = [] - - test_driver = fake_datasource.FakeDataSource() - test_driver.request_execute(context, action, action_args, False) - mock_spawn.assert_called_once_with(test_driver.execute, - action, action_args) - mock_wait.assert_not_called() diff --git a/congress/tests/datasources/test_doctor_driver.py b/congress/tests/datasources/test_doctor_driver.py deleted file mode 100644 index a5201eb5..00000000 --- a/congress/tests/datasources/test_doctor_driver.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2016 NTT All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import mock
-
-from congress.datasources import doctor_driver
-from congress.tests import base
-
-
-class TestDoctorDriver(base.TestCase):
-
-    def setUp(self):
-        super(TestDoctorDriver, self).setUp()
-        self.doctor = doctor_driver.DoctorDriver('test-doctor')
-
-    def numbered_string(self, string, number):
-        return string + str(number)
-
-    def generate_events_objects(self, row_number):
-        objects = []
-        for i in range(0, row_number):
-            obj = {
-                "time": self.numbered_string('time', i),
-                "type": self.numbered_string('type', i),
-                "details": {
-                    "hostname": self.numbered_string('hostname', i),
-                    "status": self.numbered_string('status', i),
-                    "monitor": self.numbered_string('monitor', i),
-                    "monitor_event_id": self.numbered_string('event_id', i),
-                }
-            }
-            objects.append(obj)
-        return objects
-
-    @mock.patch.object(doctor_driver.DoctorDriver, 'publish')
-    def test_events_table(self, mocked_publish):
-        objs = self.generate_events_objects(3)
-        self.doctor.update_entire_data('events', objs)
-
-        self.assertEqual(3, len(self.doctor.state['events']))
-
-        # convert the state['events'] set to a list sorted by the first
-        # (time) column, so rows line up with their generation index
-        sorted_state = sorted(list(self.doctor.state['events']),
-                              key=lambda x: x[0])
-        for i, row in enumerate(sorted_state):
-            self.assertEqual(self.numbered_string('time', i), row[0])
-            self.assertEqual(self.numbered_string('type', i), row[1])
-            self.assertEqual(self.numbered_string('hostname', i), row[2])
-            self.assertEqual(self.numbered_string('status', i), row[3])
-            self.assertEqual(self.numbered_string('monitor', i), row[4])
-            self.assertEqual(self.numbered_string('event_id', i), row[5])
diff --git a/congress/tests/datasources/test_glancev2_driver.py b/congress/tests/datasources/test_glancev2_driver.py
deleted file mode 100644
index 868a01f6..00000000
--- a/congress/tests/datasources/test_glancev2_driver.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright (c) 2013 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock - -from congress.datasources import glancev2_driver -from congress.tests import base -from congress.tests import helper - - -class TestGlanceV2Driver(base.TestCase): - - def setUp(self): - super(TestGlanceV2Driver, self).setUp() - self.keystone_client_p = mock.patch( - "keystoneclient.v2_0.client.Client") - self.keystone_client_p.start() - self.glance_client_p = mock.patch("glanceclient.v2.client.Client") - self.glance_client_p.start() - - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - args['client'] = mock.MagicMock() - self.driver = glancev2_driver.GlanceV2Driver(args=args) - - self.mock_images = {'images': [ - {u'checksum': u'9e486c3bf76219a6a37add392e425b36', - u'container_format': u'bare', - u'created_at': u'2014-10-01T20:28:08Z', - u'disk_format': u'qcow2', - u'file': u'/v2/images/c42736e7-8b09-4906-abd2-d6dc8673c297/file', - u'id': u'c42736e7-8b09-4906-abd2-d6dc8673c297', - u'min_disk': 0, - u'min_ram': 0, - u'name': u'Fedora-x86_64-20-20140618-sda', - u'owner': u'4dfdcf14a20940799d89c7a5e7345978', - u'protected': False, - u'schema': u'/v2/schemas/image', - u'size': 209649664, - u'status': u'active', - u'tags': ['type=xen2', 'type=xen'], - u'updated_at': u'2014-10-01T20:28:09Z', - u'visibility': u'public'}, - {u'checksum': u'4eada48c2843d2a262c814ddc92ecf2c', - u'container_format': u'ami', - u'created_at': u'2014-10-01T20:28:06Z', - u'disk_format': u'ami', - u'file': u'/v2/images/6934941f-3eef-43f7-9198-9b3c188e4aab/file', - u'id': u'6934941f-3eef-43f7-9198-9b3c188e4aab', - u'kernel_id': u'15ed89b8-588d-47ad-8ee0-207ed8010569', - u'min_disk': 0, - u'min_ram': 0, - u'name': u'cirros-0.3.2-x86_64-uec', - u'owner': u'4dfdcf14a20940799d89c7a5e7345978', - u'protected': False, - u'ramdisk_id': u'c244d5c7-1c83-414c-a90d-af7cea1dd3b5', - u'schema': u'/v2/schemas/image', - u'size': 25165824, - u'status': u'active', - u'tags': [], - u'updated_at': u'2014-10-01T20:28:07Z', - u'visibility': u'public'}]} - - def test_update_from_datasource(self): - with mock.patch.object(self.driver.glance.images, "list") as img_list: - img_list.return_value = self.mock_images['images'] - self.driver.update_from_datasource() - expected = {'images': set([ - (u'6934941f-3eef-43f7-9198-9b3c188e4aab', - u'active', - u'cirros-0.3.2-x86_64-uec', - u'ami', - u'2014-10-01T20:28:06Z', - u'2014-10-01T20:28:07Z', - u'ami', - u'4dfdcf14a20940799d89c7a5e7345978', - 'False', - 0, - 0, - u'4eada48c2843d2a262c814ddc92ecf2c', - 25165824, - u'/v2/images/6934941f-3eef-43f7-9198-9b3c188e4aab/file', - u'15ed89b8-588d-47ad-8ee0-207ed8010569', - u'c244d5c7-1c83-414c-a90d-af7cea1dd3b5', - u'/v2/schemas/image', - u'public'), - (u'c42736e7-8b09-4906-abd2-d6dc8673c297', - u'active', - u'Fedora-x86_64-20-20140618-sda', - u'bare', - u'2014-10-01T20:28:08Z', - u'2014-10-01T20:28:09Z', - u'qcow2', - u'4dfdcf14a20940799d89c7a5e7345978', - 'False', - 0, - 0, - u'9e486c3bf76219a6a37add392e425b36', - 209649664, - u'/v2/images/c42736e7-8b09-4906-abd2-d6dc8673c297/file', - 'None', - 'None', - u'/v2/schemas/image', - u'public')]), - 'tags': set([ - (u'c42736e7-8b09-4906-abd2-d6dc8673c297', 'type=xen'), - (u'c42736e7-8b09-4906-abd2-d6dc8673c297', 'type=xen2')])} - self.assertEqual(expected, self.driver.state) - - def test_execute(self): - class GlanceClient(object): - def __init__(self): - self.testkey = None - - def createSnapshot(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - glance_client = 
GlanceClient() - self.driver.glance = glance_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('createSnapshot', api_args) - - self.assertEqual(expected_ans, glance_client.testkey) diff --git a/congress/tests/datasources/test_heatv1_driver.py b/congress/tests/datasources/test_heatv1_driver.py deleted file mode 100644 index ae2dff8c..00000000 --- a/congress/tests/datasources/test_heatv1_driver.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock - -from heatclient.v1 import events -from heatclient.v1 import resources -from heatclient.v1 import software_deployments as deployments -from heatclient.v1 import stacks - -from congress.datasources import datasource_utils as ds_utils -from congress.datasources import heatv1_driver -from congress.tests import base -from congress.tests import helper - - -class TestHeatV1Driver(base.TestCase): - - def setUp(self): - super(TestHeatV1Driver, self).setUp() - self.heat_client_p = mock.patch("heatclient.v1.client.Client") - self.heat_client_p.start() - ds_utils.get_keystone_session = mock.MagicMock() - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - args['client'] = mock.MagicMock() - self.driver = heatv1_driver.HeatV1Driver(args=args) - - self.mock_stacks = {'stacks': [ - {u'id': u'da4e63e2-f79b-4cbb-bee8-33b2a9bd1ac8', - u'stack_name': u'my-stack', - u'description': - u'Simple template to deploy a single compute instance ', - u'creation_time': u'2015-04-25T21:20:35Z', - u'updated_time': u'None', - u'stack_status': u'CREATE_COMPLETE', - u'stack_status_reason': u'Stack CREATE completed successfully', - u'stack_owner': u'demo', - u'parent': u'None', - u'links': [ - {u'href': u'http://192.168.123.200:8004/v1', - u'rel': u'self'}]}]} - - self.mock_software_deployments = {'deployments': [ - {u'status': u'COMPLETE', - u'server_id': u'ec14c864-096e-4e27-bb8a-2c2b4dc6f3f5', - u'config_id': u'8da95794-2ad9-4979-8ae5-739ce314c5cd', - u'action': u'CREATE', - u'status_reason': u'Outputs received', - u'id': u'ef422fa5-719a-419e-a10c-72e3a367b0b8', - u'output_values': { - u'deploy_stdout': u'Writing to /tmp/barmy\n', - u'deploy_stderr': u'+ echo Writing to /tmp/barmy\n', - u'deploy_status_code': u'0', - u'result': u'The file /tmp/barmy contains fu for server'}}]} - - self.mock_resources = {'resources': [ - {u'physical_resource_id': u'3eaa34ea-5c14-49b8-8386-c1ec1b93a29e', - u'logical_resource_id': u'server', - u'stack_id': u'da4e63e2-f79b-4cbb-bee8-33b2a9bd1ac8', - u'resource_name': u'server', - u'resource_type': u'OS::Nova::Server', - u'creation_time': u'2016-01-16T15:45:34', - u'updated_time': u'2016-01-16T15:45:34', - u'resource_status': u'CREATE_COMPLETE', - u'resource_status_reason': u'state changed', - u'links': [ - {u'href': u'http://10.0.2.15:8004/v1', u'rel': u'self'}]}]} - - 
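Editor's aside: the mock_value helper defined a little further down wraps raw dicts like these in heatclient object instances before the driver translates them. A minimal sketch of the same hydration idea, assuming nothing about heatclient internals beyond attribute access:

```python
# Sketch: hydrate plain dicts into attribute-style objects, the shape the
# driver's field translators read from real client resources.
class FakeResource(object):
    def __init__(self, attrs):
        self.__dict__.update(attrs)

def hydrate(mock_data, key):
    return [FakeResource(d) for d in mock_data[key] if d]

stacks = hydrate({'stacks': [{'id': 'abc', 'stack_name': 'my-stack'}]},
                 'stacks')
assert stacks[0].stack_name == 'my-stack'
```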
self.mock_events = {'events': [ - {u'id': u'd85ea275-6842-468a-b36d-3d99719dcf0e', - u'physical_resource_id': u'49dfc907-30db-4f2c-9cc0-844dc327f0f2', - u'logical_resource_id': u'test', - u'stack_id': u'da4e63e2-f79b-4cbb-bee8-33b2a9bd1ac8', - u'resource_name': u'test', - u'event_time': u'2016-01-17T11:22:31', - u'resource_status': u'CREATE_COMPLETE', - u'resource_status_reason': u'Stack CREATE completed successfully', - u'links': [ - {u'href': u'http://10.0.2.15:8004/v1', u'rel': u'self'}]}]} - - def mock_value(self, mock_data, key, obj_class): - data = mock_data[key] - return [obj_class(self, res, loaded=True) for res in data if res] - - def test_update_from_datasource(self): - dep = self.mock_software_deployments - with base.nested( - mock.patch.object(self.driver.heat.stacks, - "list", - return_value=self.mock_value( - self.mock_stacks, - "stacks", - stacks.Stack)), - mock.patch.object(self.driver.heat.resources, - "list", - return_value=self.mock_value( - self.mock_resources, - "resources", - resources.Resource)), - mock.patch.object(self.driver.heat.events, - "list", - return_value=self.mock_value( - self.mock_events, - "events", - events.Event)), - mock.patch.object(self.driver.heat.software_deployments, - "list", - return_value=self.mock_value( - dep, - 'deployments', - deployments.SoftwareDeployment)), - ) as (list, list, list, list): - self.driver.update_from_datasource() - expected = { - 'stacks': set([ - (u'da4e63e2-f79b-4cbb-bee8-33b2a9bd1ac8', - u'my-stack', - u'Simple template to deploy a single compute instance ', - u'2015-04-25T21:20:35Z', - u'None', - u'CREATE_COMPLETE', - u'Stack CREATE completed successfully', - u'demo', - u'None')]), - 'stacks_links': set([ - (u'da4e63e2-f79b-4cbb-bee8-33b2a9bd1ac8', - u'http://192.168.123.200:8004/v1', - u'self')]), - 'deployments': set([ - (u'COMPLETE', - u'ec14c864-096e-4e27-bb8a-2c2b4dc6f3f5', - u'8da95794-2ad9-4979-8ae5-739ce314c5cd', - u'CREATE', - u'Outputs received', - u'ef422fa5-719a-419e-a10c-72e3a367b0b8')]), - 'deployment_output_values': set([ - (u'ef422fa5-719a-419e-a10c-72e3a367b0b8', - u'Writing to /tmp/barmy\n', - u'+ echo Writing to /tmp/barmy\n', - u'0', - u'The file /tmp/barmy contains fu for server')]), - 'resources_links': set([ - (u'3eaa34ea-5c14-49b8-8386-c1ec1b93a29e', - u'http://10.0.2.15:8004/v1', - u'self')]), - 'resources': set([ - (u'3eaa34ea-5c14-49b8-8386-c1ec1b93a29e', - u'server', - u'da4e63e2-f79b-4cbb-bee8-33b2a9bd1ac8', - u'server', - u'OS::Nova::Server', - u'2016-01-16T15:45:34', - u'2016-01-16T15:45:34', - u'CREATE_COMPLETE', - u'state changed')]), - 'events_links': set([ - (u'd85ea275-6842-468a-b36d-3d99719dcf0e', - u'http://10.0.2.15:8004/v1', - u'self')]), - 'events': set([ - (u'd85ea275-6842-468a-b36d-3d99719dcf0e', - u'49dfc907-30db-4f2c-9cc0-844dc327f0f2', - u'test', - u'da4e63e2-f79b-4cbb-bee8-33b2a9bd1ac8', - u'test', - u'2016-01-17T11:22:31', - u'CREATE_COMPLETE', - u'Stack CREATE completed successfully')])} - self.assertEqual(expected, self.driver.state) - - def test_execute(self): - class HeatClient(object): - def __init__(self): - self.testkey = None - - def abandanStack(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - heat_client = HeatClient() - self.driver.heat = heat_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('abandanStack', api_args) - - self.assertEqual(expected_ans, heat_client.testkey) diff --git a/congress/tests/datasources/test_ironic_driver.py b/congress/tests/datasources/test_ironic_driver.py deleted file mode 100644 
index fe398ade..00000000 --- a/congress/tests/datasources/test_ironic_driver.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import ironicclient.v1.chassis as IrChassis -import ironicclient.v1.driver as IrDriver -import ironicclient.v1.node as IrNode -import ironicclient.v1.port as IrPort -import mock - -from congress.datasources import ironic_driver -from congress.tests import base -from congress.tests import helper - - -class TestIronicDriver(base.TestCase): - - def setUp(self): - super(TestIronicDriver, self).setUp() - self.ironic_client_p = mock.patch("ironicclient.client.get_client") - self.ironic_client_p.start() - - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - args['client'] = mock.MagicMock() - - self.driver = ironic_driver.IronicDriver(args=args) - - self.mock_chassis = {"chassis": [ - {"uuid": "89a15e07-5c80-48a4-b440-9c61ddb7e652", - "extra": {}, - "created_at": "2015-01-13T06:52:01+00:00", - "updated_at": None, - "description": "ironic test chassis"}]} - - self.mock_nodes = {"nodes": [ - {"instance_uuid": "2520745f-b4da-4e10-9d32-84451cfa8b33", - "uuid": "9cf035f0-351c-43d5-8968-f9fe2c41787b", - "chassis_uuid": "89a15e07-5c80-48a4-b440-9c61ddb7e652", - "properties": {"memory_mb": "512", "cpu_arch": "x86_64", - "local_gb": "10", "cpus": "1"}, - "driver": "pxe_ssh", - "maintenance": False, - "console_enabled": False, - "created_at": "2015-01-13T06:52:02+00:00", - "updated_at": "2015-02-10T07:55:23+00:00", - "provision_updated_at": "2015-01-13T07:55:24+00:00", - "provision_state": "active", - "power_state": "power on"}, - {"instance_uuid": None, - "uuid": "7a95ebf5-f213-4427-b669-010438f43e87", - "chassis_uuid": "89a15e07-5c80-48a4-b440-9c61ddb7e652", - "properties": {"memory_mb": "512", "cpu_arch": "x86_64", - "local_gb": "10", "cpus": "1"}, - "driver": "pxe_ssh", - "maintenance": False, - "console_enabled": False, - "created_at": "2015-01-13T06:52:04+00:00", - "updated_at": "2015-02-10T07:55:24+00:00", - "provision_updated_at": None, - "provision_state": None, - "power_state": "power off"}]} - - self.mock_ports = {"ports": [ - {"uuid": "43190aae-d5fe-444f-9d50-155fca4bad82", - "node_uuid": "9cf035f0-351c-43d5-8968-f9fe2c41787b", - "extra": {"vif_port_id": "9175f72b-5783-4cea-8ae0-55df69fee568"}, - "created_at": "2015-01-13T06:52:03+00:00", - "updated_at": "2015-01-30T03:17:23+00:00", - "address": "52:54:00:7f:e7:2e"}, - {"uuid": "49f3205a-db1e-4497-9371-6011ef572981", - "node_uuid": "7a95ebf5-f213-4427-b669-010438f43e87", - "extra": {}, - "created_at": "2015-01-13T06:52:05+00:00", - "updated_at": None, - "address": "52:54:00:98:f2:4e"}]} - - self.mock_drivers = {"drivers": [ - {"hosts": ["localhost"], "name": "pxe_ssh"}, - {"hosts": ["localhost"], "name": "pxe_ipmitool"}, - {"hosts": ["localhost"], "name": "fake"}]} - - self.expected_state = { - 
'drivers': set([ - ('pxe_ipmitool',), - ('fake',), - ('pxe_ssh',)]), - 'node_properties': set([ - ('7a95ebf5-f213-4427-b669-010438f43e87', - '512', 'x86_64', '10', '1'), - ('9cf035f0-351c-43d5-8968-f9fe2c41787b', - '512', 'x86_64', '10', '1')]), - 'chassises': set([ - ('89a15e07-5c80-48a4-b440-9c61ddb7e652', - '2015-01-13T06:52:01+00:00', 'None')]), - 'active_hosts': set([ - ('pxe_ipmitool', 'localhost'), - ('pxe_ssh', 'localhost'), - ('fake', 'localhost')]), - 'nodes': set([ - ('9cf035f0-351c-43d5-8968-f9fe2c41787b', - '89a15e07-5c80-48a4-b440-9c61ddb7e652', - 'power on', - 'False', - 'pxe_ssh', - '2520745f-b4da-4e10-9d32-84451cfa8b33', - '2015-01-13T06:52:02+00:00', - '2015-01-13T07:55:24+00:00', - '2015-02-10T07:55:23+00:00'), - ('7a95ebf5-f213-4427-b669-010438f43e87', - '89a15e07-5c80-48a4-b440-9c61ddb7e652', - 'power off', - 'False', - 'pxe_ssh', - 'None', - '2015-01-13T06:52:04+00:00', - 'None', - '2015-02-10T07:55:24+00:00')]), - 'ports': set([ - ('49f3205a-db1e-4497-9371-6011ef572981', - '7a95ebf5-f213-4427-b669-010438f43e87', - '52:54:00:98:f2:4e', '', - '2015-01-13T06:52:05+00:00', 'None'), - ('43190aae-d5fe-444f-9d50-155fca4bad82', - '9cf035f0-351c-43d5-8968-f9fe2c41787b', - '52:54:00:7f:e7:2e', - '9175f72b-5783-4cea-8ae0-55df69fee568', - '2015-01-13T06:52:03+00:00', - '2015-01-30T03:17:23+00:00')]) - } - - def mock_value(self, mock_data, key, obj_class): - data = mock_data[key] - return [obj_class(self, res, loaded=True) for res in data if res] - - def test_driver_called(self): - self.assertIsNotNone(self.driver.ironic_client) - - def test_update_from_datasource(self): - with base.nested( - mock.patch.object(self.driver.ironic_client.chassis, - "list", - return_value=self.mock_value(self.mock_chassis, - "chassis", - IrChassis.Chassis)), - mock.patch.object(self.driver.ironic_client.node, - "list", - return_value=self.mock_value(self.mock_nodes, - "nodes", - IrNode.Node)), - mock.patch.object(self.driver.ironic_client.port, - "list", - return_value=self.mock_value(self.mock_ports, - "ports", - IrPort.Port)), - mock.patch.object(self.driver.ironic_client.driver, - "list", - return_value=self.mock_value(self.mock_drivers, - "drivers", - IrDriver.Driver)), - ) as (self.driver.ironic_client.chassis.list, - self.driver.ironic_client.node.list, - self.driver.ironic_client.port.list, - self.driver.ironic_client.driver.list): - self.driver.update_from_datasource() - self.assertEqual(self.expected_state, self.driver.state) - - def test_execute(self): - class IronicClient(object): - def __init__(self): - self.testkey = None - - def updateNode(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - ironic_client = IronicClient() - self.driver.ironic_client = ironic_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('updateNode', api_args) - - self.assertEqual(expected_ans, ironic_client.testkey) diff --git a/congress/tests/datasources/test_keystone_driver.py b/congress/tests/datasources/test_keystone_driver.py deleted file mode 100644 index 12a5a339..00000000 --- a/congress/tests/datasources/test_keystone_driver.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock - -from congress.datasources import keystone_driver -from congress.tests import base -from congress.tests.datasources import util -from congress.tests import helper - -ResponseObj = util.ResponseObj - - -class TestKeystoneDriver(base.TestCase): - - def setUp(self): - super(TestKeystoneDriver, self).setUp() - - class FakeClient(object): - def __init__(self): - self.users = mock.MagicMock() - self.roles = mock.MagicMock() - self.tenants = mock.MagicMock() - - self.users_data = [ - ResponseObj({'username': 'alice', - 'name': 'alice foo', - 'enabled': True, - 'tenantId': '019b18a15f2a44c1880d57704b2c4009', - 'id': '00f2c34a156c40058004ee8eb3320e04', - 'email': 'alice@foo.com'}), - ResponseObj({'username': 'bob', - 'name': 'bob bar', - 'enabled': False, - 'tenantId': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', - 'id': 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', - 'email': 'bob@bar.edu'})] - - self.roles_data = [ - ResponseObj({'id': 'cccccccccccccccccccccccccccccccc', - 'name': 'admin'}), - ResponseObj({'id': 'dddddddddddddddddddddddddddddddd', - 'name': 'viewer'})] - - self.tenants_data = [ - ResponseObj({'enabled': True, - 'description': 'accounting team', - 'name': 'accounting', - 'id': '00000000000000000000000000000001'}), - ResponseObj({'enabled': False, - 'description': 'eng team', - 'name': 'eng', - 'id': '00000000000000000000000000000002'})] - - self.keystone_client = mock.patch("keystoneclient.v2_0.client.Client", - return_value=FakeClient()) - self.keystone_client.start() - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - self.driver = keystone_driver.KeystoneDriver(args=args) - self.driver.client.users.list.return_value = self.users_data - self.driver.client.roles.list.return_value = self.roles_data - self.driver.client.tenants.list.return_value = self.tenants_data - - def test_list_users(self): - """Test conversion of complex user objects to tables.""" - self.driver.update_from_datasource() - user_list = self.driver.state[keystone_driver.KeystoneDriver.USERS] - self.assertIsNotNone(user_list) - self.assertEqual(2, len(user_list)) - - # Check an individual user entry - self.assertIn(('alice', 'alice foo', 'True', - '019b18a15f2a44c1880d57704b2c4009', - '00f2c34a156c40058004ee8eb3320e04', - 'alice@foo.com'), user_list) - self.assertIn(('bob', 'bob bar', 'False', - 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', - 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', - 'bob@bar.edu'), user_list) - - def test_list_roles(self): - """Test conversion of complex role objects to tables.""" - self.driver.update_from_datasource() - roles_list = self.driver.state[keystone_driver.KeystoneDriver.ROLES] - self.assertIsNotNone(roles_list) - self.assertEqual(2, len(roles_list)) - - # Check an individual role entry - self.assertIn(('cccccccccccccccccccccccccccccccc', 'admin'), - roles_list) - self.assertIn(('dddddddddddddddddddddddddddddddd', 'viewer'), - roles_list) - - def test_list_tenants(self): - """Test conversion of complex tenant objects to tables.""" - 
-        self.driver.update_from_datasource()
-        tenants_key = keystone_driver.KeystoneDriver.TENANTS
-        tenants_list = self.driver.state[tenants_key]
-        self.assertIsNotNone(tenants_list)
-        self.assertEqual(2, len(tenants_list))
-
-        # Check an individual tenant entry
-        self.assertIn(('True', 'accounting team', 'accounting',
-                       '00000000000000000000000000000001'),
-                      tenants_list)
-        self.assertIn(('False', 'eng team', 'eng',
-                       '00000000000000000000000000000002'),
-                      tenants_list)
-
-    def test_execute(self):
-        class KeystoneClient(object):
-            def __init__(self):
-                self.testkey = None
-
-            def enableProject(self, arg1):
-                self.testkey = 'arg1=%s' % arg1
-
-        keystone_client = KeystoneClient()
-        self.driver.client = keystone_client
-        api_args = {
-            'positional': ['1']
-        }
-        expected_ans = 'arg1=1'
-
-        self.driver.execute('enableProject', api_args)
-
-        self.assertEqual(expected_ans, keystone_client.testkey)
diff --git a/congress/tests/datasources/test_keystonev3_driver.py b/congress/tests/datasources/test_keystonev3_driver.py
deleted file mode 100644
index a84af213..00000000
--- a/congress/tests/datasources/test_keystonev3_driver.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# Copyright (c) 2014 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import mock
-
-from congress.datasources import keystonev3_driver
-from congress.tests import base
-from congress.tests.datasources import util
-from congress.tests import helper
-
-ResponseObj = util.ResponseObj
-
-
-class TestKeystoneDriver(base.TestCase):
-
-    def setUp(self):
-        super(TestKeystoneDriver, self).setUp()
-
-        class FakeClient(object):
-            def __init__(self):
-                self.users = mock.MagicMock()
-                self.roles = mock.MagicMock()
-                self.projects = mock.MagicMock()
-                self.domains = mock.MagicMock()
-
-        self.users_data = [
-            ResponseObj({
-                'id': '00f2c34a156c40058004ee8eb3320e04',
-                'name': 'alice',
-                'enabled': True,
-                'default_project_id': '019b18a15f2a44c1880d57704b2c4009',
-                'domain_id': 'default'}),
-            ResponseObj({
-                'id': 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
-                'name': 'bob',
-                'enabled': False,
-                'default_project_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
-                'domain_id': 'default'})]
-
-        self.roles_data = [
-            ResponseObj({'id': 'cccccccccccccccccccccccccccccccc',
-                         'name': 'admin'}),
-            ResponseObj({'id': 'dddddddddddddddddddddddddddddddd',
-                         'name': 'viewer'})]
-
-        self.projects_data = [
-            ResponseObj({'enabled': True,
-                         'description': 'accounting team',
-                         'name': 'accounting',
-                         'domain_id': 'default',
-                         'id': '00000000000000000000000000000001'}),
-            ResponseObj({'enabled': False,
-                         'description': 'eng team',
-                         'domain_id': 'default',
-                         'name': 'eng',
-                         'id': '00000000000000000000000000000002'})]
-
-        self.domains_data = [
-            ResponseObj({'enabled': True,
-                         'description': 'domain 1',
-                         'name': 'default',
-                         'id': '1fbe4e6fedb34050ad56c6e5dd225998'}),
-
-            ResponseObj({'enabled': False,
-                         'description': 'domain 2',
-                         'name': 'test domain',
-                         'id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'})]
-
-        self.keystone_client = mock.patch("keystoneclient.v3.client.Client",
-                                          return_value=FakeClient())
-        self.keystone_client.start()
-        args = helper.datasource_openstack_args()
-        args['poll_time'] = 0
-        self.driver = keystonev3_driver.KeystoneV3Driver(args=args)
-        self.driver.client.users.list.return_value = self.users_data
-        self.driver.client.roles.list.return_value = self.roles_data
-        self.driver.client.projects.list.return_value = self.projects_data
-        self.driver.client.domains.list.return_value = self.domains_data
-
-    def test_list_users(self):
-        """Test conversion of complex user objects to tables."""
-        self.driver.update_from_datasource()
-        user_list = self.driver.state[keystonev3_driver.KeystoneV3Driver.USERS]
-        self.assertIsNotNone(user_list)
-        self.assertEqual(2, len(user_list))
-
-        # Check an individual user entry
-        self.assertIn(('00f2c34a156c40058004ee8eb3320e04',
-                       'alice', 'True', '019b18a15f2a44c1880d57704b2c4009',
-                       'default'), user_list)
-        self.assertIn(('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
-                       'bob', 'False', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
-                       'default'), user_list)
-
-    def test_list_roles(self):
-        """Test conversion of complex role objects to tables."""
-        self.driver.update_from_datasource()
-        roles_table = keystonev3_driver.KeystoneV3Driver.ROLES
-        roles_list = self.driver.state[roles_table]
-        self.assertIsNotNone(roles_list)
-        self.assertEqual(2, len(roles_list))
-
-        # Check an individual role entry
-        self.assertIn(('cccccccccccccccccccccccccccccccc', 'admin'),
-                      roles_list)
-        self.assertIn(('dddddddddddddddddddddddddddddddd', 'viewer'),
-                      roles_list)
-
-    def test_list_domains(self):
-        self.driver.update_from_datasource()
-        domains_table = keystonev3_driver.KeystoneV3Driver.DOMAINS
-        domains_list = self.driver.state[domains_table]
-        self.assertIsNotNone(domains_list)
-        self.assertEqual(2, len(domains_list))
-
-        # Check an individual domain entry
-        self.assertIn(('False', 'domain 2', 'test domain',
-                       'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), domains_list)
-        self.assertIn(('True', 'domain 1', 'default',
-                       '1fbe4e6fedb34050ad56c6e5dd225998'), domains_list)
-
-    def test_list_projects(self):
-        """Test conversion of complex project objects to tables."""
-        self.driver.update_from_datasource()
-        projects_table = keystonev3_driver.KeystoneV3Driver.PROJECTS
-        projects_list = self.driver.state[projects_table]
-        self.assertIsNotNone(projects_list)
-        self.assertEqual(2, len(projects_list))
-
-        # Check an individual project entry
-        self.assertIn(('True', 'accounting team', 'accounting', 'default',
-                       '00000000000000000000000000000001'), projects_list)
-        self.assertIn(('False', 'eng team', 'eng', 'default',
-                       '00000000000000000000000000000002'), projects_list)
-
-    def test_execute(self):
-        class KeystoneClient(object):
-            def __init__(self):
-                self.testkey = None
-
-            def enableProject(self, arg1):
-                self.testkey = 'arg1=%s' % arg1
-
-        keystone_client = KeystoneClient()
-        self.driver.client = keystone_client
-        api_args = {
-            'positional': ['1']
-        }
-        expected_ans = 'arg1=1'
-
-        self.driver.execute('enableProject', api_args)
-
-        self.assertEqual(expected_ans, keystone_client.testkey)
diff --git a/congress/tests/datasources/test_monasca_driver.py b/congress/tests/datasources/test_monasca_driver.py
deleted file mode 100644
index e46e50a9..00000000
--- a/congress/tests/datasources/test_monasca_driver.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) 2015 Cisco, Inc. All rights reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import mock -import sys - -sys.modules['monascaclient.client'] = mock.Mock() -sys.modules['monascaclient'] = mock.Mock() - -from congress.datasources import monasca_driver -from congress.tests import base -from congress.tests import helper - - -class TestMonascaDriver(base.TestCase): - - def setUp(self): - super(TestMonascaDriver, self).setUp() - self.keystone_client_p = mock.patch( - "keystoneclient.v3.client.Client") - self.keystone_client_p.start() - self.monasca_client_p = mock.patch("monascaclient.client.Client") - self.monasca_client_p.start() - - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - args['client'] = mock.MagicMock() - self.driver = monasca_driver.MonascaDriver(args=args) - - self.mock_metrics = {"links": [{ - "rel": "self", - "href": "http://192.168.10.4:8070/v2.0/metrics"}], - "elements": [{ - "id": "0", - "name": "mem.used_buffers", - "dimensions": { - "component": "monasca-persister", - "service": "monitoring", - "hostname": "ceilosca", - "url": "http://localhost:8091/metrics"} - }]} - - self.mock_statistics = {"links": [{ - "rel": "self", - "href": "http://192.168.10.4:8070/v2.0/metrics/statistics"}], - "elements": [{ - "id": "2015-11-30T18:00:00Z", - "name": "mem.used_buffers", - "dimensions": {}, - "columns": ["timestamp", "avg"], - "statistics": [ - ["2015-11-24T00:00:00Z", 56], - ["2015-11-24T06:00:00Z", 46], - ["2015-11-24T12:00:00Z", 70], - ["2015-11-24T18:00:00Z", 60]] - }]} - - def test_statistics_update_from_datasource(self): - self.driver._translate_statistics(self.mock_statistics['elements']) - stats_list = list(self.driver.state[self.driver.STATISTICS]) - stats_data_list = list(self.driver.state[self.driver.DATA]) - self.assertIsNotNone(stats_list) - self.assertIsNotNone(stats_data_list) - - expected_stats = [ - ('mem.used_buffers', 'd1fea02438d17fb7446255573bf54d45')] - self.assertEqual(expected_stats, stats_list) - - expected_stats_data = [ - ('d1fea02438d17fb7446255573bf54d45', - "['2015-11-24T00:00:00Z', 56]"), - ('d1fea02438d17fb7446255573bf54d45', - "['2015-11-24T12:00:00Z', 70]"), - ('d1fea02438d17fb7446255573bf54d45', - "['2015-11-24T18:00:00Z', 60]"), - ('d1fea02438d17fb7446255573bf54d45', - "['2015-11-24T06:00:00Z', 46]")] - self.assertEqual(sorted(expected_stats_data), sorted(stats_data_list)) - - def test_metrics_update_from_datasource(self): - with mock.patch.object(self.driver.monasca.metrics, "list") as metrics: - metrics.return_value = self.mock_metrics['elements'] - self.driver.update_from_datasource() - - expected = { - 'dimensions': set([ - ('e138b5d90a4265c7525f480dd988210b', - 'component', 'monasca-persister'), - ('e138b5d90a4265c7525f480dd988210b', - 'service', 'monitoring'), - ('e138b5d90a4265c7525f480dd988210b', - 'hostname', 'ceilosca'), - ('e138b5d90a4265c7525f480dd988210b', - 'url', 'http://localhost:8091/metrics')]), - 'metrics': set([ - ('0', 'mem.used_buffers', - 'e138b5d90a4265c7525f480dd988210b')]), - 'statistics': set([]), - 'statistics.data': set([]) - } - 
self.assertEqual(expected, self.driver.state) - - def test_execute(self): - class MonascaClient(object): - def __init__(self): - self.testkey = None - - def getStatistics(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - monasca_client = MonascaClient() - self.driver.monasca = monasca_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('getStatistics', api_args) - - self.assertEqual(expected_ans, monasca_client.testkey) diff --git a/congress/tests/datasources/test_murano_driver.py b/congress/tests/datasources/test_murano_driver.py deleted file mode 100644 index d727b135..00000000 --- a/congress/tests/datasources/test_murano_driver.py +++ /dev/null @@ -1,795 +0,0 @@ -# Copyright (c) 2015 Hewlett-Packard. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock - -# mocking muranoclient so that python-muranoclient -# doesn't need to be included in requirements.txt. -# (Including python-muranoclient in requirements.txt will -# cause failures in Jenkins because python-muranoclient is not -# included in global_requirements.txt at this point) -import sys - -sys.modules['muranoclient'] = mock.Mock() -sys.modules['muranoclient.client'] = mock.Mock() -sys.modules['muranoclient.common'] = mock.Mock() -sys.modules['muranoclient.common.exceptions'] = mock.Mock() - -from congress.datasources import murano_driver -from congress.tests import base -from congress.tests.datasources import util -from congress.tests import helper - - -class TestMuranoDriver(base.TestCase): - def setUp(self): - super(TestMuranoDriver, self).setUp() - self.keystone_client_p = mock.patch( - "keystoneclient.v2_0.client.Client") - self.keystone_client_p.start() - self.murano_client = mock.MagicMock() - self.murano_client.environments.list.return_value = env_response - self.murano_client.services.list.return_value = service_response - self.murano_client.deployments.list.return_value = deployment_response - self.murano_client.packages.list.return_value = package_response - self.murano_client.actions.call.return_value = action_response - args = helper.datasource_openstack_args() - self.driver = murano_driver.MuranoDriver(args=args) - self.driver.murano_client = self.murano_client - - def test_list_environments(self): - """Test conversion of environments objects to tables.""" - self.driver.state[self.driver.STATES] = set() - self.driver.state[self.driver.PROPERTIES] = set() - self.driver.state[self.driver.PARENT_TYPES] = set() - envs = self.driver.murano_client.environments.list() - self.driver._translate_environments(envs) - - # datasource tables - states = list(self.driver.state[self.driver.STATES]) - properties = list(self.driver.state[self.driver.PROPERTIES]) - parent_types = list(self.driver.state[self.driver.PARENT_TYPES]) - - # verify tables - self.assertIsNotNone(states) - self.assertIsNotNone(properties) - for row in expected_states: - 
self.assertIn(row, states, ("%s not in states" % str(row))) - for row in expected_env_properties: - self.assertIn(row, properties, - ("%s not in properties" % str(row))) - for row in expected_environment_parent_types: - self.assertIn(row, parent_types, - ("%s not in parent_types" % str(row))) - - def test_translate_services(self): - """Test conversion of environment services to tables.""" - self.driver.state[self.driver.OBJECTS] = set() - self.driver.state[self.driver.PROPERTIES] = set() - self.driver.state[self.driver.PARENT_TYPES] = set() - self.driver.state[self.driver.RELATIONSHIPS] = set() - envs = self.driver.murano_client.environments.list() - pkgs = self.driver.murano_client.packages.list() - # package properties are needed for mapping parent_types - self.driver._translate_packages(pkgs) - self.driver._translate_services(envs) - - # datasource tables - objects = list(self.driver.state[self.driver.OBJECTS]) - properties = list(self.driver.state[self.driver.PROPERTIES]) - parent_types = list(self.driver.state[self.driver.PARENT_TYPES]) - relationships = list(self.driver.state[self.driver.RELATIONSHIPS]) - - # verify tables - self.assertIsNotNone(objects) - self.assertIsNotNone(properties) - self.assertIsNotNone(parent_types) - self.assertIsNotNone(relationships) - for row in expected_service_objects: - self.assertIn(row, objects, ("%s not in objects" % str(row))) - for row in expected_service_properties: - self.assertIn(row, properties, - ("%s not in properties" % str(row))) - for row in expected_service_parent_types: - self.assertIn(row, parent_types, - ("%s not in parent_types" % str(row))) - for row in expected_service_relationships: - self.assertIn(row, relationships, - ("%s not in relationships" % str(row))) - - def test_translate_environment_services(self): - """Test conversion of environment services to tables.""" - self.driver.state[self.driver.OBJECTS] = set() - self.driver.state[self.driver.PROPERTIES] = set() - self.driver.state[self.driver.PARENT_TYPES] = set() - self.driver.state[self.driver.RELATIONSHIPS] = set() - envs = self.driver.murano_client.environments.list() - pkgs = self.driver.murano_client.packages.list() - # package properties are needed for mapping parent_types - self.driver._translate_packages(pkgs) - - for env in envs: - services = self.murano_client.services.list(env.id) - self.driver._translate_environment_services(services, env.id) - - # datasource tables - objects = list(self.driver.state[self.driver.OBJECTS]) - properties = list(self.driver.state[self.driver.PROPERTIES]) - parent_types = list(self.driver.state[self.driver.PARENT_TYPES]) - relationships = list(self.driver.state[self.driver.RELATIONSHIPS]) - - # verify tables - self.assertIsNotNone(objects) - self.assertIsNotNone(properties) - self.assertIsNotNone(parent_types) - self.assertIsNotNone(relationships) - for row in expected_service_objects: - self.assertIn(row, objects, ("%s not in objects" % str(row))) - for row in expected_service_properties: - self.assertIn(row, properties, - ("%s not in properties" % str(row))) - for row in expected_service_parent_types: - self.assertIn(row, parent_types, - ("%s not in parent_types" % str(row))) - for row in expected_service_relationships: - self.assertIn(row, relationships, - ("%s not in relationships" % str(row))) - - def test_translate_packages(self): - """Test conversion of application packages to tables.""" - self.driver.state[self.driver.OBJECTS] = set() - self.driver.state[self.driver.PROPERTIES] = set() - pkgs = 
self.driver.murano_client.packages.list() - self.driver._translate_packages(pkgs) - - # datasource tables - objects = list(self.driver.state[self.driver.OBJECTS]) - properties = list(self.driver.state[self.driver.PROPERTIES]) - - # verify tables - self.assertIsNotNone(objects) - self.assertIsNotNone(properties) - for row in expected_package_objects: - self.assertIn(row, objects, ("%s not in objects" % str(row))) - for row in expected_package_properties: - self.assertIn(row, properties, - ("%s not in properties" % str(row))) - - def test_translate_deployments(self): - """Test conversion of deployments to tables.""" - self.driver.state[self.driver.OBJECTS] = set() - self.driver.state[self.driver.PROPERTIES] = set() - self.driver.state[self.driver.PARENT_TYPES] = set() - self.driver.state[self.driver.RELATIONSHIPS] = set() - envs = self.driver.murano_client.environments.list() - pkgs = self.driver.murano_client.packages.list() - # package properties are needed for mapping parent_types - self.driver._translate_packages(pkgs) - self.driver._translate_deployments(envs) - - # datasource tables - objects = list(self.driver.state[self.driver.OBJECTS]) - properties = list(self.driver.state[self.driver.PROPERTIES]) - parent_types = list(self.driver.state[self.driver.PARENT_TYPES]) - - # verify tables - self.assertIsNotNone(objects) - self.assertIsNotNone(properties) - self.assertIsNotNone(parent_types) - for row in expected_deployment_objects: - self.assertIn(row, objects, ("%s not in objects" % str(row))) - for row in expected_deployment_properties: - self.assertIn(row, properties, - ("%s not in properties" % str(row))) - for row in expected_deployment_parent_types: - self.assertIn(row, parent_types, - ("%s not in parent_types" % str(row))) - - def test_translate_environment_deployments(self): - """Test conversion of deployments to tables.""" - self.driver.state[self.driver.OBJECTS] = set() - self.driver.state[self.driver.PROPERTIES] = set() - self.driver.state[self.driver.PARENT_TYPES] = set() - self.driver.state[self.driver.RELATIONSHIPS] = set() - envs = self.driver.murano_client.environments.list() - pkgs = self.driver.murano_client.packages.list() - # package properties are needed for mapping parent_types - self.driver._translate_packages(pkgs) - - for env in envs: - deps = self.murano_client.deployments.list(env.id) - self.driver._translate_environment_deployments(deps, env.id) - - # datasource tables - objects = list(self.driver.state[self.driver.OBJECTS]) - properties = list(self.driver.state[self.driver.PROPERTIES]) - parent_types = list(self.driver.state[self.driver.PARENT_TYPES]) - - # verify tables - self.assertIsNotNone(objects) - self.assertIsNotNone(properties) - self.assertIsNotNone(parent_types) - for row in expected_deployment_objects: - self.assertIn(row, objects, ("%s not in objects" % str(row))) - for row in expected_deployment_properties: - self.assertIn(row, properties, - ("%s not in properties" % str(row))) - for row in expected_deployment_parent_types: - self.assertIn(row, parent_types, - ("%s not in parent_types" % str(row))) - - def test_translate_connected(self): - """Test translation of relationships to connected table.""" - self.driver.state[self.driver.OBJECTS] = set() - self.driver.state[self.driver.PROPERTIES] = set() - self.driver.state[self.driver.PARENT_TYPES] = set() - self.driver.state[self.driver.RELATIONSHIPS] = set() - self.driver.state[self.driver.CONNECTED] = set() - envs = self.driver.murano_client.environments.list() - 
self.driver._translate_services(envs) # to populate relationships - self.driver._translate_connected() - - # datasource tables - connected = list(self.driver.state[self.driver.CONNECTED]) - - # verify tables - self.assertIsNotNone(connected) - for row in expected_connected: - self.assertIn(row, connected, ("%s not in connected" % str(row))) - - def test_execute(self): - """Test action execution.""" - self.driver.state[self.driver.OBJECTS] = set() - self.driver.state[self.driver.PROPERTIES] = set() - self.driver.state[self.driver.PARENT_TYPES] = set() - self.driver.state[self.driver.RELATIONSHIPS] = set() - envs = self.driver.murano_client.environments.list() - pkgs = self.driver.murano_client.packages.list() - # package properties are needed for mapping parent_types - self.driver._translate_packages(pkgs) - self.driver._translate_services(envs) - - action = 'muranoaction' - action_args = {'positional': ['ad9762b2d82f44ca8b8a6ce4a19dd1cc', - '769af50c-9629-4694-b623-e9b392941279', - 'restartVM']} - self.driver.execute(action, action_args) - self.assertIn(action_response, self.driver.action_call_returns) - - -# Sample responses from murano-client -env_response = [ - util.ResponseObj({ - u'created': u'2015-03-24T18:35:14', - u'id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'name': u'quick-env-2', - u'networking': {}, - u'status': u'deploy failure', - u'tenant_id': u'610c6afc1fc54d23a58d316bf76e5f42', - u'updated': u'2015-03-24T18:46:56', - u'version': 1})] - -service_response = [ - util.ResponseObj({ - u'?': {u'_26411a1861294160833743e45d0eaad9': {u'name': u'MySQL'}, - u'_actions': {u'74f5b2d2-1f8d-4b1a-8238-4155ce2cadb2_restartVM': - {u'enabled': True, u'name': u'restartVM'}}, - u'id': u'769af50c-9629-4694-b623-e9b392941279', - u'status': u'deploy failure', - u'type': u'io.murano.databases.MySql'}, - u'database': u'', - u'instance': {u'?': {u'_actions': {}, - u'id': u'76b9ca88-c668-4e37-a830-5845adc10b0e', - u'type': - u'io.murano.resources.LinuxMuranoInstance'}, - u'assignFloatingIp': True, - u'availabilityZone': u'nova', - u'flavor': u'm1.small', - u'floatingIpAddress': u'172.24.4.4', - u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3', - u'ipAddresses': [u'10.0.11.3', u'172.24.4.4'], - u'keyname': u'', - u'name': u'bcnfli7nn738y1', - u'networks': {u'customNetworks': [], - u'primaryNetwork': None, - u'useEnvironmentNetwork': True, - u'useFlatNetwork': False}, - u'securityGroupName': None, - u'sharedIps': []}, - u'name': u'MySqlDB', - u'password': u'Passw0rd.', - u'username': u''}), - util.ResponseObj({ - u'?': {u'_26411a1861294160833743e45d0eaad9': - {u'name': u'Apache Tomcat'}, - u'_actions': {}, - u'id': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', - u'status': u'deploy failure', - u'type': u'io.murano.apps.apache.Tomcat'}, - u'instance': {u'?': {u'_actions': {}, - u'id': u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', - u'type': - u'io.murano.resources.LinuxMuranoInstance'}, - u'assignFloatingIp': True, - u'availabilityZone': u'nova', - u'flavor': u'm1.small', - u'floatingIpAddress': u'172.24.4.4', - u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3', - u'ipAddresses': [u'10.0.11.4', u'172.24.4.4'], - u'keyname': u'', - u'name': u'woydqi7nn7ipc2', - u'networks': {u'customNetworks': [], - u'primaryNetwork': None, - u'useEnvironmentNetwork': True, - u'useFlatNetwork': False}, - u'securityGroupName': None, - u'sharedIps': []}, - u'name': u'Tomcat'}), - util.ResponseObj({ - u'?': {u'_26411a1861294160833743e45d0eaad9': {u'name': u'PetClinic'}, - u'_actions': {}, - u'id': 
u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'status': u'deploy failure', - u'type': u'io.murano.apps.java.PetClinic'}, - u'database': u'769af50c-9629-4694-b623-e9b392941279', - u'dbName': u'pet_db', - u'dbPassword': u'Passw0rd.', - u'dbUser': u'pet_user', - u'name': u'PetClinic', - u'tomcat': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', - u'warLocation': - u'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'})] - -deployment_response = [ - util.ResponseObj({ - u'action': {u'args': {}, - u'method': u'deploy', - u'object_id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc'}, - u'created': u'2015-03-24T18:36:23', - u'description': - {u'?': {u'id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'type': u'io.murano.Environment'}, - u'defaultNetworks': - {u'environment': - {u'?': {u'id': - u'a2be8265b01743c0bdf645772d632bf0', - u'type': u'io.murano.resources.NeutronNetwork'}, - u'name': u'quick-env-2-network'}, - u'flat': None}, - u'name': u'quick-env-2', - u'services': - [{u'?': - {u'_26411a1861294160833743e45d0eaad9': - {u'name': u'MySQL'}, - u'id': u'769af50c-9629-4694-b623-e9b392941279', - u'type': u'io.murano.databases.MySql'}, - u'database': u'', - u'instance': - {u'?': {u'id': u'76b9ca88-c668-4e37-a830-5845adc10b0e', - u'type': u'io.murano.resources.LinuxMuranoInstance'}, - u'assignFloatingIp': True, - u'availabilityZone': u'nova', - u'flavor': u'm1.small', - u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3', - u'keyname': u'', - u'name': u'bcnfli7nn738y1'}, - u'name': u'MySqlDB', - u'password': u'*** SANITIZED ***', - u'username': u''}, - {u'?': - {u'_26411a1861294160833743e45d0eaad9': {u'name': u'Apache Tomcat'}, - u'id': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', - u'type': u'io.murano.apps.apache.Tomcat'}, - u'instance': - {u'?': {u'id': u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', - u'type': u'io.murano.resources.LinuxMuranoInstance'}, - u'assignFloatingIp': True, - u'availabilityZone': u'nova', - u'flavor': u'm1.small', - u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3', - u'keyname': u'', - u'name': u'woydqi7nn7ipc2'}, - u'name': u'Tomcat'}, - {u'?': {u'_26411a1861294160833743e45d0eaad9': - {u'name': u'PetClinic'}, - u'id': u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'type': u'io.murano.apps.java.PetClinic'}, - u'database': u'769af50c-9629-4694-b623-e9b392941279', - u'dbName': u'pet_db', - u'dbPassword': u'*** SANITIZED ***', - u'dbUser': u'pet_user', - u'name': u'PetClinic', - u'tomcat': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', - u'warLocation': - u'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'}]}, - u'environment_id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'finished': u'2015-03-24T18:46:56', - u'id': u'4aa60b31d8ce434284e03aa13c6e11e0', - u'result': {u'isException': True, - u'result': - {u'details': u'murano.common.exceptions.TimeoutException:' - ' The Agent does not respondwithin 600 seconds', - u'message': u'[murano.common.exceptions.TimeoutException]' - ': The Agent does not respondwithin 600 seconds'}}, - u'started': u'2015-03-24T18:36:23', - u'state': u'completed_w_errors', - u'updated': u'2015-03-24T18:46:56'})] - -package_response = [ - util.ResponseObj({ - u'author': u'Mirantis, Inc', - u'categories': [], - u'class_definitions': [u'io.murano.apps.apache.Tomcat'], - u'created': u'2015-03-23T21:28:11', - u'description': u'Apache Tomcat is an open source software ' - 'implementation of the Java Servlet and JavaServer Pages ' - 'technologies.\n', - u'enabled': True, - u'fully_qualified_name': u'io.murano.apps.apache.Tomcat', - u'id': u'a7d64980999948dc96401cdce5ae2141', - 
u'is_public': False, - u'name': u'Apache Tomcat', - u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42', - u'supplier': {}, - u'tags': [u'Servlets', u'Server', u'Pages', u'Java'], - u'type': u'Application', - u'updated': u'2015-03-23T21:28:11'}), - util.ResponseObj({ - u'author': u'Mirantis, Inc', - u'categories': [], - u'class_definitions': [u'io.murano.apps.linux.Git'], - u'created': u'2015-03-23T21:26:56', - u'description': u'Simple Git repo hosted on Linux VM.\n', - u'enabled': True, - u'fully_qualified_name': u'io.murano.apps.linux.Git', - u'id': u'3ff58cdfeb27487fb3127fb8fd45109c', - u'is_public': False, - u'name': u'Git', - u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42', - u'supplier': {}, - u'tags': [u'Linux', u'connection'], - u'type': u'Application', - u'updated': u'2015-03-23T21:26:56'}), - util.ResponseObj({ - u'author': u'Mirantis, Inc', - u'categories': [], - u'class_definitions': [u'io.murano.databases.MySql'], - u'created': u'2015-03-23T21:28:58', - u'description': u'MySql is a relational database management system ' - '(RDBMS), and ships with\nno GUI tools to administer MySQL databases ' - 'or manage data contained within\nthe databases.\n', - u'enabled': True, - u'fully_qualified_name': u'io.murano.databases.MySql', - u'id': u'884b764c0ce6439d8566b3b2da967687', - u'is_public': False, - u'name': u'MySQL', - u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42', - u'supplier': {}, - u'tags': [u'Database', u'MySql', u'SQL', u'RDBMS'], - u'type': u'Application', - u'updated': u'2015-03-23T21:28:58'}), - util.ResponseObj({ - u'author': u'Mirantis, Inc', - u'categories': [], - u'class_definitions': [u'io.murano.apps.java.PetClinic'], - u'created': u'2015-03-24T18:25:24', - u'description': u'An example of a Java app running on a ' - 'Apache Tomcat Servlet container and using the either Postgre SQL, ' - 'or MySql database\n', - u'enabled': True, - u'fully_qualified_name': u'io.murano.apps.java.PetClinic', - u'id': u'9f7c9e2ed8f9462a8f9037032ab64755', - u'is_public': False, - u'name': u'PetClinic', - u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42', - u'supplier': {}, - u'tags': [u'Servlets', u'Server', u'Pages', u'Java'], - u'type': u'Application', - u'updated': u'2015-03-24T18:25:24'}), - util.ResponseObj({ - u'author': u'Mirantis, Inc', - u'categories': [], - u'class_definitions': [u'io.murano.databases.PostgreSql'], - u'created': u'2015-03-23T21:29:10', - u'description': u'PostgreSQL is a powerful, open source ' - 'object-relational database system.\nIt has more than 15 years ' - 'of active development and a proven architecture\nthat has earned ' - 'it a strong reputation for reliability, data integrity,\nand ' - 'correctness.\n', - u'enabled': True, - u'fully_qualified_name': u'io.murano.databases.PostgreSql', - u'id': u'4b9c6a24c2e64f928156e0c87324c394', - u'is_public': False, - u'name': u'PostgreSQL', - u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42', - u'supplier': {}, - u'tags': [u'Database', u'Postgre', u'SQL', u'RDBMS'], - u'type': u'Application', - u'updated': u'2015-03-23T21:29:10'}), - util.ResponseObj({ - u'author': u'Mirantis, Inc', - u'categories': [], - u'class_definitions': [u'io.murano.databases.SqlDatabase'], - u'created': u'2015-03-24T18:26:32', - u'description': u'This is the interface defining API for different ' - 'SQL - RDBMS databases\n', - u'enabled': True, - u'fully_qualified_name': u'io.murano.databases', - u'id': u'5add5a561da341c4875495c5887957a8', - u'is_public': False, - u'name': u'SQL Library', - u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42', - 
u'supplier': {}, - u'tags': [u'SQL', u'RDBMS'], - u'type': u'Library', - u'updated': u'2015-03-24T18:26:32'})] - -action_response = 'c79eb72600024fa1995345a2b2eb3acd' - -# Expected datasource table content -expected_states = [ - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'deploy failure'), -] - -expected_environment_parent_types = [ - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', 'io.murano.Object'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', 'io.murano.Environment'), -] - -expected_env_properties = [ - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'created', '2015-03-24T18:35:14'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'version', 1), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'status', 'deploy failure'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'name', 'quick-env-2'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'updated', '2015-03-24T18:46:56'), -] - -expected_service_properties = [ - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'ipAddresses', '10.0.11.3'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'ipAddresses', '172.24.4.4'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', - u'networks.useFlatNetwork', 'False'), - (u'769af50c-9629-4694-b623-e9b392941279', u'name', 'MySqlDB'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', - u'networks.useEnvironmentNetwork', 'True'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', - u'floatingIpAddress', '172.24.4.4'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbPassword', 'Passw0rd.'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'database', '769af50c-9629-4694-b623-e9b392941279'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'tomcat', 'ea6a7d9b-7799-4d00-9db3-4573cb94daec'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'warLocation', - 'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'availabilityZone', 'nova'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'name', 'bcnfli7nn738y1'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbUser', 'pet_user'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', - u'image', '66e015aa-33c5-41ff-9b81-d8d17f9b02c3'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'flavor', 'm1.small'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'ipAddresses', '10.0.11.4'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'name', 'woydqi7nn7ipc2'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'name', 'PetClinic'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'assignFloatingIp', 'True'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'assignFloatingIp', 'True'), - (u'769af50c-9629-4694-b623-e9b392941279', u'password', 'Passw0rd.'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'flavor', 'm1.small'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbName', 'pet_db'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', - u'networks.useFlatNetwork', 'False'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', - u'networks.useEnvironmentNetwork', 'True'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'availabilityZone', 'nova'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', - u'floatingIpAddress', '172.24.4.4'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'ipAddresses', '172.24.4.4'), - (u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', u'name', 'Tomcat'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', - u'image', '66e015aa-33c5-41ff-9b81-d8d17f9b02c3'), -] - -expected_package_properties = [ - (u'4b9c6a24c2e64f928156e0c87324c394', u'is_public', 'False'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'tags', 'connection'), - (u'884b764c0ce6439d8566b3b2da967687', u'created', '2015-03-23T21:28:58'), - (u'884b764c0ce6439d8566b3b2da967687', u'tags', 'SQL'), - 
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Servlets'), - (u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Servlets'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'created', '2015-03-23T21:29:10'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'fully_qualified_name', - 'io.murano.apps.java.PetClinic'), - (u'884b764c0ce6439d8566b3b2da967687', u'type', 'Application'), - (u'5add5a561da341c4875495c5887957a8', u'created', '2015-03-24T18:26:32'), - (u'884b764c0ce6439d8566b3b2da967687', u'name', 'MySQL'), - (u'884b764c0ce6439d8566b3b2da967687', u'tags', 'Database'), - (u'5add5a561da341c4875495c5887957a8', u'enabled', 'True'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Pages'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'Database'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'type', 'Application'), - (u'5add5a561da341c4875495c5887957a8', u'type', 'Library'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'type', 'Application'), - (u'884b764c0ce6439d8566b3b2da967687', u'tags', 'MySql'), - (u'5add5a561da341c4875495c5887957a8', u'fully_qualified_name', - 'io.murano.databases'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'author', 'Mirantis, Inc'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'is_public', 'False'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'SQL'), - (u'884b764c0ce6439d8566b3b2da967687', u'enabled', 'True'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'updated', '2015-03-23T21:29:10'), - (u'884b764c0ce6439d8566b3b2da967687', u'fully_qualified_name', - 'io.murano.databases.MySql'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'name', 'PetClinic'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'fully_qualified_name', - 'io.murano.databases.PostgreSql'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Java'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'Postgre'), - (u'a7d64980999948dc96401cdce5ae2141', u'is_public', 'False'), - (u'a7d64980999948dc96401cdce5ae2141', u'type', 'Application'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'name', 'PostgreSQL'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'tags', 'Linux'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'author', 'Mirantis, Inc'), - (u'5add5a561da341c4875495c5887957a8', u'is_public', 'False'), - (u'5add5a561da341c4875495c5887957a8', u'tags', 'SQL'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'author', 'Mirantis, Inc'), - (u'5add5a561da341c4875495c5887957a8', u'class_definitions', - 'io.murano.databases.SqlDatabase'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'updated', '2015-03-23T21:26:56'), - (u'5add5a561da341c4875495c5887957a8', u'tags', 'RDBMS'), - (u'a7d64980999948dc96401cdce5ae2141', u'enabled', 'True'), - (u'5add5a561da341c4875495c5887957a8', u'updated', '2015-03-24T18:26:32'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'class_definitions', - 'io.murano.apps.java.PetClinic'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'enabled', 'True'), - (u'a7d64980999948dc96401cdce5ae2141', u'class_definitions', - 'io.murano.apps.apache.Tomcat'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'created', '2015-03-24T18:25:24'), - (u'5add5a561da341c4875495c5887957a8', u'author', 'Mirantis, Inc'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'is_public', 'False'), - (u'884b764c0ce6439d8566b3b2da967687', u'class_definitions', - 'io.murano.databases.MySql'), - (u'884b764c0ce6439d8566b3b2da967687', u'is_public', 'False'), - (u'884b764c0ce6439d8566b3b2da967687', u'tags', 'RDBMS'), - (u'a7d64980999948dc96401cdce5ae2141', u'author', 'Mirantis, Inc'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'name', 'Git'), - (u'a7d64980999948dc96401cdce5ae2141', u'fully_qualified_name', - 
'io.murano.apps.apache.Tomcat'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Server'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'RDBMS'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'class_definitions', - 'io.murano.databases.PostgreSql'), - (u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Pages'), - (u'4b9c6a24c2e64f928156e0c87324c394', u'enabled', 'True'), - (u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Server'), - (u'a7d64980999948dc96401cdce5ae2141', u'updated', '2015-03-23T21:28:11'), - (u'884b764c0ce6439d8566b3b2da967687', u'updated', '2015-03-23T21:28:58'), - (u'a7d64980999948dc96401cdce5ae2141', u'name', 'Apache Tomcat'), - (u'884b764c0ce6439d8566b3b2da967687', u'author', 'Mirantis, Inc'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'enabled', 'True'), - (u'a7d64980999948dc96401cdce5ae2141', u'created', '2015-03-23T21:28:11'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'created', '2015-03-23T21:26:56'), - (u'5add5a561da341c4875495c5887957a8', u'name', 'SQL Library'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'type', 'Application'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'fully_qualified_name', - 'io.murano.apps.linux.Git'), - (u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Java'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', u'updated', '2015-03-24T18:25:24'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', u'class_definitions', - 'io.murano.apps.linux.Git'), -] - -expected_service_objects = [ - (u'769af50c-9629-4694-b623-e9b392941279', - u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'io.murano.databases.MySql'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'io.murano.apps.java.PetClinic'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', - u'769af50c-9629-4694-b623-e9b392941279', - u'io.murano.resources.LinuxMuranoInstance'), - (u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', - u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'io.murano.apps.apache.Tomcat'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', - u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', - u'io.murano.resources.LinuxMuranoInstance'), -] - -expected_package_objects = [ - (u'5add5a561da341c4875495c5887957a8', - u'610c6afc1fc54d23a58d316bf76e5f42', u'Library'), - (u'4b9c6a24c2e64f928156e0c87324c394', - u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'), - (u'3ff58cdfeb27487fb3127fb8fd45109c', - u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'), - (u'a7d64980999948dc96401cdce5ae2141', - u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'), - (u'9f7c9e2ed8f9462a8f9037032ab64755', - u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'), - (u'884b764c0ce6439d8566b3b2da967687', - u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'), -] - -expected_service_parent_types = [ - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', 'io.murano.resources.Instance'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', - 'io.murano.resources.LinuxInstance'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', 'io.murano.Object'), - (u'76b9ca88-c668-4e37-a830-5845adc10b0e', - 'io.murano.resources.LinuxMuranoInstance'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', - 'io.murano.resources.LinuxInstance'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', - 'io.murano.resources.LinuxMuranoInstance'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', 'io.murano.Object'), - (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', 'io.murano.resources.Instance'), -] - -expected_service_relationships = [ - (u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'769af50c-9629-4694-b623-e9b392941279', u'database'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - 
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', 'services'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', u'tomcat'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'769af50c-9629-4694-b623-e9b392941279', 'services'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'fda74653-8b66-42e2-be16-12ebc87d7570', 'services'), - (u'769af50c-9629-4694-b623-e9b392941279', - u'76b9ca88-c668-4e37-a830-5845adc10b0e', 'instance'), - (u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', - u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', 'instance'), -] - -expected_connected = [ - (u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'ea6a7d9b-7799-4d00-9db3-4573cb94daec'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'769af50c-9629-4694-b623-e9b392941279'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'ea6a7d9b-7799-4d00-9db3-4573cb94daec'), - (u'769af50c-9629-4694-b623-e9b392941279', - u'76b9ca88-c668-4e37-a830-5845adc10b0e'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'fda74653-8b66-42e2-be16-12ebc87d7570'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'769af50c-9629-4694-b623-e9b392941279'), - (u'fda74653-8b66-42e2-be16-12ebc87d7570', - u'76b9ca88-c668-4e37-a830-5845adc10b0e'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'76b9ca88-c668-4e37-a830-5845adc10b0e'), - (u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', - u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'), - (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'), -] - -expected_deployment_objects = [ - (u'a2be8265b01743c0bdf645772d632bf0', u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', - u'io.murano.resources.NeutronNetwork') -] - -expected_deployment_properties = [ - (u'a2be8265b01743c0bdf645772d632bf0', u'name', 'quick-env-2-network') -] - -expected_deployment_parent_types = [ - (u'a2be8265b01743c0bdf645772d632bf0', 'io.murano.Object'), - (u'a2be8265b01743c0bdf645772d632bf0', 'io.murano.resources.Network'), - (u'a2be8265b01743c0bdf645772d632bf0', 'io.murano.resources.NeutronNetwork') -] diff --git a/congress/tests/datasources/test_neutron_driver.py b/congress/tests/datasources/test_neutron_driver.py deleted file mode 100644 index 49c5dd4e..00000000 --- a/congress/tests/datasources/test_neutron_driver.py +++ /dev/null @@ -1,699 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import datetime - -import mock -from mox3 import mox -import neutronclient.v2_0.client -from oslo_config import cfg -from six.moves import range - -from congress.datalog import compile -from congress.datasources import neutron_driver -from congress import harness -from congress.tests import base -from congress.tests import helper - - -class TestNeutronDriver(base.TestCase): - - def setUp(self): - super(TestNeutronDriver, self).setUp() - self.neutron_client = mock.MagicMock() - self.neutron_client.list_networks.return_value = network_response - self.neutron_client.list_ports.return_value = port_response - self.neutron_client.list_routers.return_value = router_response - self.neutron_client.list_security_groups.return_value = ( - security_group_response) - args = helper.datasource_openstack_args() - self.driver = neutron_driver.NeutronDriver(args=args) - self.driver.neutron = self.neutron_client - - def test_list_networks(self): - """Test conversion of complex network objects to tables.""" - network_list = self.neutron_client.list_networks() - self.driver._translate_networks(network_list) - network_tuples = self.driver.state[self.driver.NETWORKS] - network_subnet_tuples = self.driver.state[ - self.driver.NETWORKS_SUBNETS] - - # size of networks/subnets - self.assertIsNotNone(network_tuples) - self.assertEqual(1, len(network_tuples)) - self.assertEqual(1, len(network_subnet_tuples)) - - # properties of first network - key_to_index = self.driver.get_column_map( - self.driver.NETWORKS) - network_tuple = network_tuples.pop() - subnet_tuple_guid = network_tuple[key_to_index['subnet_group_id']] - name = network_tuple[key_to_index['name']] - status = network_tuple[key_to_index['status']] - provider_physical_network = ( - network_tuple[key_to_index['provider:physical_network']]) - admin_state_up = network_tuple[key_to_index['admin_state_up']] - tenant_id = network_tuple[key_to_index['tenant_id']] - provider_network_type = ( - network_tuple[key_to_index['provider:network_type']]) - router_external = network_tuple[key_to_index['router:external']] - shared = network_tuple[key_to_index['shared']] - id = network_tuple[key_to_index['id']] - provider_segmentation_id = ( - network_tuple[key_to_index['provider:segmentation_id']]) - - # properties of first subnet - network_subnet_tuple = network_subnet_tuples.pop() - guid_key = network_subnet_tuple[0] - guid_value = network_subnet_tuple[1] - - # tests for network/subnet - self.assertEqual('ACTIVE', status) - self.assertIsNotNone(subnet_tuple_guid) - self.assertEqual(guid_key, subnet_tuple_guid) - self.assertEqual('4cef03d0-1d02-40bb-8c99-2f442aac6ab0', - guid_value) - self.assertEqual('test-network', - name) - self.assertEqual('None', provider_physical_network) - self.assertEqual('True', admin_state_up) - self.assertEqual('570fe78a1dc54cffa053bd802984ede2', - tenant_id) - self.assertEqual('gre', provider_network_type) - self.assertEqual('False', router_external) - self.assertEqual('False', shared) - self.assertEqual('240ff9df-df35-43ae-9df5-27fae87f2492', - id) - self.assertEqual(4, provider_segmentation_id) - - def test_list_ports(self): - """Test conversion of complex port objects to tuples.""" - # setup - self.driver._translate_ports(self.neutron_client.list_ports()) - d = self.driver.get_column_map(self.driver.PORTS) - - # number of ports - ports = self.driver.state[self.driver.PORTS] - self.assertIsNotNone(ports) - self.assertEqual(1, 
len(ports))
-
-        # simple properties of a port
-        port = ports.pop()
-        self.assertEqual('ACTIVE', port[d['status']])
-        self.assertEqual('havana', port[d['binding:host_id']])
-        self.assertEqual('', port[d['name']])
-        self.assertEqual('True', port[d['admin_state_up']])
-        self.assertEqual('240ff9df-df35-43ae-9df5-27fae87f2492',
-                         port[d['network_id']])
-        self.assertEqual('570fe78a1dc54cffa053bd802984ede2',
-                         port[d['tenant_id']])
-        self.assertEqual('ovs', port[d['binding:vif_type']])
-        self.assertEqual('network:router_interface', port[d['device_owner']])
-        self.assertEqual('fa:16:3e:ab:90:df', port[d['mac_address']])
-        self.assertEqual('0a2ce569-85a8-45ec-abb3-0d4b34ff69ba',
-                         port[d['id']])
-        self.assertEqual('864e4acf-bf8e-4664-8cf7-ad5daa95681e',
-                         port[d['device_id']])
-
-        # complex property: allowed_address_pairs
-        # TODO(thinrichs): add representative allowed_address_pairs
-        address_pairs = self.driver.state[
-            self.driver.PORTS_ADDR_PAIRS]
-        self.assertEqual(0, len(address_pairs))
-
-        # complex property: extra_dhcp_opts
-        # TODO(thinrichs): add representative port_extra_dhcp_opts
-        dhcp_opts = self.driver.state[
-            self.driver.PORTS_EXTRA_DHCP_OPTS]
-        self.assertEqual(0, len(dhcp_opts))
-
-        # complex property: binding:capabilities
-        binding_caps = self.driver.state[
-            self.driver.PORTS_BINDING_CAPABILITIES]
-        cap_id = port[d['binding:capabilities_id']]
-        self.assertEqual(1, len(binding_caps))
-        self.assertEqual((cap_id, 'port_filter', 'True'), binding_caps.pop())
-
-        # complex property: security_groups
-        sec_grps = self.driver.state[self.driver.PORTS_SECURITY_GROUPS]
-        self.assertEqual(2, len(sec_grps))
-        security_grp_grp = port[d['security_groups_id']]
-        security_grp1 = '15ea0516-11ec-46e9-9e8e-7d1b6e3d7523'
-        security_grp2 = '25ea0516-11ec-46e9-9e8e-7d1b6e3d7523'
-        security_data = set([(security_grp_grp, security_grp1),
-                             (security_grp_grp, security_grp2)])
-        self.assertEqual(security_data, set(sec_grps))
-
-        # complex property: fixed_ips
-        # Need to show we have the following
-        # port(..., <fixed_ips>, ...)
-        # fixed_ips_groups(<fixed_ips>, <fixed_ip1>)
-        # fixed_ips_groups(<fixed_ips>, <fixed_ip2>)
-        # fixedips(<fixed_ip1>, "subnet_id", "4cef03d0-1d02-40bb-8c99-2f442aac6ab0")
-        # fixedips(<fixed_ip1>, "ip_address", "90.0.0.1")
-        # fixedips(<fixed_ip2>, "subnet_id", "5cef03d0-1d02-40bb-8c99-2f442aac6ab0")
-        # fixedips(<fixed_ip2>, "ip_address", "100.0.0.1")
-        # TODO(thinrichs): use functionality of policy-engine
-        # to make this test simpler to understand/write
-        fixed_ip_groups = self.driver.state[
-            self.driver.PORTS_FIXED_IPS_GROUPS]
-        fixed_ips = self.driver.state[self.driver.PORTS_FIXED_IPS]
-        fixed_ip_grp = port[d['fixed_ips']]
-        # ensure groups of IPs are correct
-        self.assertEqual(2, len(fixed_ip_groups))
-        groups = set([x[0] for x in fixed_ip_groups])
-        self.assertEqual(set([fixed_ip_grp]), groups)
-        # ensure the IDs for fixed_ips are the right ones
-        fixed_ips_from_grp = [x[1] for x in fixed_ip_groups]
-        fixed_ips_from_ips = [x[0] for x in fixed_ips]
-        self.assertEqual(set(fixed_ips_from_grp), set(fixed_ips_from_ips))
-        # ensure actual fixed_ips are right
-        self.assertEqual(4, len(fixed_ips))
-        ips = [x for x in fixed_ips if x[1] == 'ip_address']
-        subnets = [x for x in fixed_ips if x[1] == 'subnet_id']
-        if ips[0][0] == subnets[0][0]:
-            ip0 = ips[0][2]
-            subnet0 = subnets[0][2]
-            ip1 = ips[1][2]
-            subnet1 = subnets[1][2]
-        else:
-            ip0 = ips[0][2]
-            subnet0 = subnets[1][2]
-            ip1 = ips[1][2]
-            subnet1 = subnets[0][2]
-        if ip0 == "90.0.0.1":
-            self.assertEqual("4cef03d0-1d02-40bb-8c99-2f442aac6ab0", subnet0)
-            self.assertEqual("90.0.0.1", ip0)
-            self.assertEqual("5cef03d0-1d02-40bb-8c99-2f442aac6ab0", subnet1)
-            self.assertEqual("100.0.0.1", ip1)
-        else:
-            self.assertEqual("4cef03d0-1d02-40bb-8c99-2f442aac6ab0", subnet1)
-            self.assertEqual("90.0.0.1", ip1)
-            self.assertEqual("5cef03d0-1d02-40bb-8c99-2f442aac6ab0", subnet0)
-            self.assertEqual("100.0.0.1", ip0)
-
-    def test_list_routers(self):
-        self.driver._translate_routers(self.neutron_client.list_routers())
-        d = self.driver.get_column_map(self.driver.ROUTERS)
-
-        # number of routers
-        routers = self.driver.state[self.driver.ROUTERS]
-        self.assertIsNotNone(routers)
-        self.assertEqual(1, len(routers))
-
-        # simple properties of a router
-        router = routers.pop()
-        self.assertEqual('ACTIVE', router[d['status']])
-        self.assertEqual('router1', router[d['name']])
-        self.assertEqual('True', router[d['admin_state_up']])
-        self.assertEqual('abb53cc6636848218f46d01f22bf1060',
-                         router[d['tenant_id']])
-        self.assertEqual('4598c424-d608-4366-9beb-139adbd7cff5',
-                         router[d['id']])
-
-        # external gateway info
-        gateway_info = self.driver.state[
-            self.driver.ROUTERS_EXTERNAL_GATEWAYS]
-        gateway_id = router[d['external_gateway_info']]
-        self.assertEqual(2, len(gateway_info))
-        row1 = (gateway_id, 'network_id',
-                'a821b8d3-af1f-4d79-9b8e-3da9674338ae')
-        row2 = (gateway_id, 'enable_snat', 'True')
-        self.assertEqual(set([row1, row2]), gateway_info)
-
-    def test_list_security_groups(self):
-        self.driver._translate_security_groups(
-            self.neutron_client.list_security_groups())
-        d = self.driver.get_column_map(self.driver.SECURITY_GROUPS)
-
-        # number of security groups
-        sec_grps = self.driver.state[self.driver.SECURITY_GROUPS]
-        self.assertIsNotNone(sec_grps)
-        self.assertEqual(1, len(sec_grps))
-
-        # simple properties
-        sec_grp = sec_grps.pop()
-        self.assertEqual('abb53cc6636848218f46d01f22bf1060',
-                         sec_grp[d['tenant_id']])
-        self.assertEqual('default', sec_grp[d['name']])
-        self.assertEqual('default', sec_grp[d['description']])
-        self.assertEqual('9f3860a5-87b1-499c-bf93-5ca3ef247517',
-                         sec_grp[d['id']])
-
-    def test_execute(self):
- class NeutronClient(object): - def __init__(self): - self.testkey = None - - def connectNetwork(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - neutron_client = NeutronClient() - self.driver.neutron = neutron_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('connectNetwork', api_args) - - self.assertEqual(expected_ans, neutron_client.testkey) - - -class TestDataSourceDriver(base.TestCase): - - def setUp(self): - """Setup polling tests.""" - super(TestDataSourceDriver, self).setUp() - cfg.CONF.set_override( - 'drivers', - ['congress.datasources.neutron_driver.NeutronDriver']) - - # Create mock of Neutron client so we can control data - mock_factory = mox.Mox() - neutron_client = mock_factory.CreateMock( - neutronclient.v2_0.client.Client) - neutron_client.list_networks().InAnyOrder(1).AndReturn(network1) - neutron_client.list_ports().InAnyOrder(1).AndReturn(port_response) - neutron_client.list_routers().InAnyOrder(1).AndReturn(router_response) - neutron_client.list_security_groups().InAnyOrder(1).AndReturn( - security_group_response) - neutron_client.list_networks().InAnyOrder(2).AndReturn(network2) - neutron_client.list_ports().InAnyOrder(2).AndReturn(port_response) - neutron_client.list_routers().InAnyOrder(2).AndReturn(router_response) - neutron_client.list_security_groups().InAnyOrder(2).AndReturn( - security_group_response) - mock_factory.ReplayAll() - - node = helper.make_dsenode_new_partition('neutron_ds_node') - - engine = harness.create_policy_engine() - node.register_service(engine) - - neutron_args = { - 'name': 'neutron', - 'driver': 'neutron', - 'description': None, - 'type': None, - 'enabled': '1' - } - neutron_args['config'] = helper.datasource_openstack_args() - neutron_args['config']['poll_time'] = 0 - neutron_args['config']['client'] = neutron_client - neutron_ds = node.create_datasource_service(neutron_args) - node.register_service(neutron_ds) - - engine.create_policy('neutron') - engine.set_schema('neutron', neutron_ds.get_schema()) - neutron_ds.neutron = neutron_client - engine.debug_mode() - - # insert rule into policy to make testing easier. 
- # (Some of the IDs are auto-generated each time we convert) - engine.insert(create_network_group('p')) - - # create some garbage data - args = helper.datasource_openstack_args() - driver = neutron_driver.NeutronDriver(args=args) - network_key_to_index = driver.get_column_map( - neutron_driver.NeutronDriver.NETWORKS) - network_max_index = max(network_key_to_index.values()) - args1 = ['1'] * (network_max_index + 1) - args2 = ['2'] * (network_max_index + 1) - args1 = ",".join(args1) - args2 = ",".join(args2) - fake_networks = [ - 'neutron:networks({})'.format(args1), - 'neutron:networks({})'.format(args2)] - - # answer to query above for network1 - datalog1 = ( - 'p("240ff9df-df35-43ae-9df5-27fae87f2492") ' - 'p("340ff9df-df35-43ae-9df5-27fae87f2492") ' - 'p("440ff9df-df35-43ae-9df5-27fae87f2492")') - - # answer to query above for network2 - datalog2 = ( - 'p("240ff9df-df35-43ae-9df5-27fae87f2492") ' - 'p("640ff9df-df35-43ae-9df5-27fae87f2492") ' - 'p("540ff9df-df35-43ae-9df5-27fae87f2492")') - - # return value - self.info = {} - self.info['node'] = node - self.info['datalog1'] = datalog1 - self.info['datalog2'] = datalog2 - self.info['fake_networks'] = fake_networks - - def test_last_updated(self): - """Test the last_updated timestamping.""" - node = self.info['node'] - neutron = node.service_object('neutron') - - # initial value - last_updated = neutron.get_last_updated_time() - self.assertIsNone(last_updated) - - # first time updated - before_time = datetime.datetime.now() - neutron.poll() - last_updated = neutron.get_last_updated_time() - self.assertLess(before_time, last_updated) - self.assertLess(last_updated, datetime.datetime.now()) - - # second time updated - before_time = datetime.datetime.now() - neutron.poll() - last_updated = neutron.get_last_updated_time() - self.assertLess(before_time, last_updated) - self.assertLess(last_updated, datetime.datetime.now()) - - # TODO(dse2): port using generic test driver instead of Neutron - # def test_subscribe_poll(self): - # """Test subscribing before polling. The common case.""" - # cage = self.info['cage'] - # policy = cage.service_object('policy') - # neutron = cage.service_object('neutron') - # datalog1 = self.info['datalog1'] - # datalog2 = self.info['datalog2'] - - # # subscribe - # policy.subscribe('neutron', 'networks', callback=policy.receive_data) - # helper.retry_check_subscribers(neutron, [(policy.name, 'networks')]) - - # # poll 1 - # neutron.poll() - # helper.retry_check_db_equal(policy, 'p(x)', datalog1) - - # # poll 2 - # neutron.poll() - # helper.retry_check_db_equal(policy, 'p(x)', datalog2) - - # TODO(dse2): port using generic test driver instead of Neutron - # def test_policy_initialization(self): - # """Test subscribing before polling. 
The common case.""" - # cage = self.info['cage'] - # policy = cage.service_object('policy') - # neutron = cage.service_object('neutron') - # datalog1 = self.info['datalog1'] - # fake_networks = self.info['fake_networks'] - - # # add garbage to policy - # for formula in fake_networks: - # policy.insert(formula) - - # # subscribe - # policy.subscribe('neutron', 'networks', callback=policy.receive_data) - # helper.retry_check_subscribers(neutron, [(policy.name, 'networks')]) - - # # poll 1 - # neutron.poll() - # helper.retry_check_db_equal(policy, 'p(x)', datalog1) - - # TODO(dse2): port using generic test driver instead of Neutron - # def test_poll_subscribe(self): - # """Test polling before subscribing.""" - # cage = self.info['cage'] - # policy = cage.service_object('policy') - # neutron = cage.service_object('neutron') - # datalog1 = self.info['datalog1'] - # datalog2 = self.info['datalog2'] - # fake_networks = self.info['fake_networks'] - - # # add garbage to policy - # for formula in fake_networks: - # policy.insert(formula) - - # # poll 1 and then subscribe; should still see first result - # neutron.poll() - # helper.retry_check_number_of_updates(neutron, 1) - # policy.subscribe('neutron', 'networks', callback=policy.receive_data) - # helper.retry_check_db_equal(policy, 'p(x)', datalog1) - - # # poll 2 - # neutron.poll() - # helper.retry_check_db_equal(policy, 'p(x)', datalog2) - - # TODO(dse2): port using generic test driver instead of Neutron - # def test_double_poll_subscribe(self): - # """Test double polling before subscribing.""" - # cage = self.info['cage'] - # policy = cage.service_object('policy') - # neutron = cage.service_object('neutron') - # datalog2 = self.info['datalog2'] - - # # poll twice and then subscribe: should see 2nd result - # neutron.poll() - # helper.retry_check_number_of_updates(neutron, 1) - # neutron.poll() - # helper.retry_check_number_of_updates(neutron, 2) - # policy.subscribe('neutron', 'networks', callback=policy.receive_data) - # helper.retry_check_db_equal(policy, 'p(x)', datalog2) - - # TODO(dse2): port using generic test driver instead of Neutron - # def test_policy_recovery(self): - # """Test policy crashing and recovering (sort of).""" - # cage = self.info['cage'] - # policy = cage.service_object('policy') - # neutron = cage.service_object('neutron') - # datalog1 = self.info['datalog1'] - - # # get initial data - # policy.subscribe('neutron', 'networks', callback=policy.receive_data) - # helper.retry_check_subscribers(neutron, [(policy.name, 'networks')]) - # neutron.poll() - # helper.retry_check_db_equal(policy, 'p(x)', datalog1) - - # # clear out policy's neutron:networks data (to simulate crashing) - # policy.initialize_tables(['neutron:networks'], []) - # # subscribe again (without unsubscribing) - # policy.subscribe('neutron', 'networks', callback=policy.receive_data) - # helper.retry_check_db_equal(policy, 'p(x)', datalog1) - - -def create_network_group(tablename, full_neutron_tablename=None): - driver = neutron_driver.NeutronDriver( - args=helper.datasource_openstack_args()) - if full_neutron_tablename is None: - full_neutron_tablename = 'neutron:networks' - network_key_to_index = driver.get_column_map( - neutron_driver.NeutronDriver.NETWORKS) - network_id_index = network_key_to_index['id'] - network_max_index = max(network_key_to_index.values()) - network_args = ['x' + str(i) for i in range(0, network_max_index + 1)] - formula = compile.parse1( - '{}({}) :- {}({})'.format( - tablename, - 'x' + str(network_id_index), - 
full_neutron_tablename, - ",".join(network_args))) - return formula - - -def create_networkXnetwork_group(tablename): - """Return rule of the form: - - TABLENAME(x,y) :- neutron:network(...,x,...),neutron:network(...,y,...) - """ - driver = neutron_driver.NeutronDriver( - args=helper.datasource_openstack_args()) - network_key_to_index = driver.get_column_map( - neutron_driver.NeutronDriver.NETWORKS) - network_id_index = network_key_to_index['id'] - network_max_index = max(network_key_to_index.values()) - net1_args = ['x' + str(i) for i in range(0, network_max_index + 1)] - net2_args = ['y' + str(i) for i in range(0, network_max_index + 1)] - formula = compile.parse1( - '{}({},{}) :- neutron:networks({}), neutron2:networks({})'.format( - tablename, - 'x' + str(network_id_index), - 'y' + str(network_id_index), - ",".join(net1_args), - ",".join(net2_args))) - return formula - -# Only diffs between network1 and network2 are the IDs -network1 = {'networks': [ - {'status': 'ACTIVE', - 'subnets': ['4cef03d0-1d02-40bb-8c99-2f442aac6ab0'], - 'name': 'test-network', - 'provider:physical_network': None, - 'admin_state_up': True, - 'tenant_id': '570fe78a1dc54cffa053bd802984ede2', - 'provider:network_type': 'gre', - 'router:external': False, - 'shared': False, - 'id': '240ff9df-df35-43ae-9df5-27fae87f2492', - 'provider:segmentation_id': 4}, - {'status': 'ACTIVE', - 'subnets': ['4cef03d0-1d02-40bb-8c99-2f442aac6ab0'], - 'name': 'test-network', - 'provider:physical_network': None, - 'admin_state_up': True, - 'tenant_id': '570fe78a1dc54cffa053bd802984ede2', - 'provider:network_type': 'gre', - 'router:external': False, - 'shared': False, - 'id': '340ff9df-df35-43ae-9df5-27fae87f2492', - 'provider:segmentation_id': 4}, - {'status': 'ACTIVE', - 'subnets': ['4cef03d0-1d02-40bb-8c99-2f442aac6ab0'], - 'name': 'test-network', - 'provider:physical_network': None, - 'admin_state_up': True, - 'tenant_id': '570fe78a1dc54cffa053bd802984ede2', - 'provider:network_type': 'gre', - 'router:external': False, - 'shared': False, - 'id': '440ff9df-df35-43ae-9df5-27fae87f2492', - 'provider:segmentation_id': 4}]} - -network2 = {'networks': [ - {'status': 'ACTIVE', - 'subnets': ['4cef03d0-1d02-40bb-8c99-2f442aac6ab0'], - 'name': 'test-network', - 'provider:physical_network': None, - 'admin_state_up': True, - 'tenant_id': '570fe78a1dc54cffa053bd802984ede2', - 'provider:network_type': 'gre', - 'router:external': False, - 'shared': False, - 'id': '240ff9df-df35-43ae-9df5-27fae87f2492', - 'provider:segmentation_id': 4}, - {'status': 'ACTIVE', - 'subnets': ['4cef03d0-1d02-40bb-8c99-2f442aac6ab0'], - 'name': 'test-network', - 'provider:physical_network': None, - 'admin_state_up': True, - 'tenant_id': '570fe78a1dc54cffa053bd802984ede2', - 'provider:network_type': 'gre', - 'router:external': False, - 'shared': False, - 'id': '540ff9df-df35-43ae-9df5-27fae87f2492', - 'provider:segmentation_id': 4}, - {'status': 'ACTIVE', - 'subnets': ['4cef03d0-1d02-40bb-8c99-2f442aac6ab0'], - 'name': 'test-network', - 'provider:physical_network': None, - 'admin_state_up': True, - 'tenant_id': '570fe78a1dc54cffa053bd802984ede2', - 'provider:network_type': 'gre', - 'router:external': False, - 'shared': False, - 'id': '640ff9df-df35-43ae-9df5-27fae87f2492', - 'provider:segmentation_id': 4}]} - - -# Sample responses from neutron-client, after parsing -network_response = { - 'networks': - [{'status': 'ACTIVE', - 'subnets': ['4cef03d0-1d02-40bb-8c99-2f442aac6ab0'], - 'name': 'test-network', - 'provider:physical_network': None, - 'admin_state_up': True, - 
'tenant_id': '570fe78a1dc54cffa053bd802984ede2', - 'provider:network_type': 'gre', - 'router:external': False, - 'shared': False, - 'id': '240ff9df-df35-43ae-9df5-27fae87f2492', - 'provider:segmentation_id': 4}]} - -port_response = { - "ports": - [{"status": "ACTIVE", - "binding:host_id": "havana", - "name": "", - "allowed_address_pairs": [], - "admin_state_up": True, - "network_id": "240ff9df-df35-43ae-9df5-27fae87f2492", - "tenant_id": "570fe78a1dc54cffa053bd802984ede2", - "extra_dhcp_opts": [], - "binding:vif_type": "ovs", - "device_owner": "network:router_interface", - "binding:capabilities": {"port_filter": True}, - "mac_address": "fa:16:3e:ab:90:df", - "fixed_ips": [ - {"subnet_id": "4cef03d0-1d02-40bb-8c99-2f442aac6ab0", - "ip_address": "90.0.0.1"}, - {"subnet_id": "5cef03d0-1d02-40bb-8c99-2f442aac6ab0", - "ip_address": "100.0.0.1"}], - "id": "0a2ce569-85a8-45ec-abb3-0d4b34ff69ba", - "security_groups": ['15ea0516-11ec-46e9-9e8e-7d1b6e3d7523', - '25ea0516-11ec-46e9-9e8e-7d1b6e3d7523'], - "device_id": "864e4acf-bf8e-4664-8cf7-ad5daa95681e"}]} - -router_response = { - 'routers': - [{u'status': u'ACTIVE', - u'external_gateway_info': - {u'network_id': u'a821b8d3-af1f-4d79-9b8e-3da9674338ae', - u'enable_snat': True}, - u'name': u'router1', - u'admin_state_up': True, - u'tenant_id': u'abb53cc6636848218f46d01f22bf1060', - u'routes': [], - u'id': u'4598c424-d608-4366-9beb-139adbd7cff5'}]} - -security_group_response = { - 'security_groups': - [{u'tenant_id': u'abb53cc6636848218f46d01f22bf1060', - u'name': u'default', - u'description': u'default', - u'security_group_rules': [ - {u'remote_group_id': u'9f3860a5-87b1-499c-bf93-5ca3ef247517', - u'direction': u'ingress', - u'remote_ip_prefix': None, - u'protocol': None, - u'tenant_id': u'abb53cc6636848218f46d01f22bf1060', - u'port_range_max': None, - u'security_group_id': u'9f3860a5-87b1-499c-bf93-5ca3ef247517', - u'port_range_min': None, - u'ethertype': u'IPv6', - u'id': u'15ea0516-11ec-46e9-9e8e-7d1b6e3d7523'}, - {u'remote_group_id': None, u'direction': u'egress', - u'remote_ip_prefix': None, - u'protocol': None, - u'tenant_id': u'abb53cc6636848218f46d01f22bf1060', - u'port_range_max': None, - u'security_group_id': u'9f3860a5-87b1-499c-bf93-5ca3ef247517', - u'port_range_min': None, - u'ethertype': u'IPv6', - u'id': u'5a2a86c5-c63c-4f17-b625-f9cd809c8331'}, - {u'remote_group_id': u'9f3860a5-87b1-499c-bf93-5ca3ef247517', - u'direction': u'ingress', - u'remote_ip_prefix': None, - u'protocol': None, - u'tenant_id': u'abb53cc6636848218f46d01f22bf1060', - u'port_range_max': None, - u'security_group_id': u'9f3860a5-87b1-499c-bf93-5ca3ef247517', - u'port_range_min': None, - u'ethertype': u'IPv4', - u'id': u'6499e807-af24-4486-9fa4-4897da2eb1dd'}, - {u'remote_group_id': None, - u'direction': u'egress', - u'remote_ip_prefix': None, - u'protocol': None, - u'tenant_id': u'abb53cc6636848218f46d01f22bf1060', - u'port_range_max': None, - u'security_group_id': u'9f3860a5-87b1-499c-bf93-5ca3ef247517', - u'port_range_min': None, - u'ethertype': u'IPv4', - u'id': u'bb03ea93-b984-48de-8752-d816f1c4fbfa'}], - u'id': u'9f3860a5-87b1-499c-bf93-5ca3ef247517'}]} diff --git a/congress/tests/datasources/test_neutronv2_driver.py b/congress/tests/datasources/test_neutronv2_driver.py deleted file mode 100644 index 5db231e0..00000000 --- a/congress/tests/datasources/test_neutronv2_driver.py +++ /dev/null @@ -1,494 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock - -from congress.datasources import neutronv2_driver -from congress.tests import base -from congress.tests import helper - - -class TestNeutronV2Driver(base.TestCase): - - def setUp(self): - super(TestNeutronV2Driver, self).setUp() - self.neutron_client_p = mock.patch( - "neutronclient.v2_0.client.Client") - self.neutron_client_p.start() - - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - args['client'] = mock.MagicMock() - self.driver = neutronv2_driver.NeutronV2Driver(args=args) - - self.mock_networks = {'networks': [ - {u'admin_state_up': True, - u'id': u'63ce8fbb-12e9-4ecd-9b56-1bbf8b51217d', - u'name': u'private', - u'router:external': False, - u'shared': False, - u'status': u'ACTIVE', - u'subnets': [u'3c0eb3a3-4d16-4b1b-b327-44417182d0bb'], - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}, - {u'admin_state_up': True, - u'id': u'ecdea1af-7197-43c8-b3b0-34d90f72a2a8', - u'name': u'public', - u'router:external': True, - u'shared': False, - u'status': u'ACTIVE', - u'subnets': [u'10d20df9-e8ba-4756-ba30-d573ceb2e99a'], - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}]} - - self.mock_floatingips = {'floatingips': [ - {u"router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f", - u"tenant_id": "4969c491a3c74ee4af974e6d800c62de", - u"floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", - u"fixed_ip_address": "10.0.0.3", - u"floating_ip_address": "172.24.4.228", - u"port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab", - u"id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7", - u"status": "ACTIVE"}, - {u"router_id": None, - u"tenant_id": "4969c491a3c74ee4af974e6d800c62de", - u"floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", - u"fixed_ip_address": None, - u"floating_ip_address": "172.24.4.227", - u"port_id": None, - u"id": "61cea855-49cb-4846-997d-801b70c71bdd", - u"status": "DOWN"}]} - - self.mock_ports = {'ports': [ - {u'admin_state_up': True, - u'allowed_address_pairs': [], - u'binding:host_id': None, - u'binding:vif_details': {u'port_filter': True}, - u'binding:vif_type': u'ovs', - u'binding:vnic_type': u'normal', - u'device_id': u'f42dc4f1-f371-48cc-95be-cf1b97112ab8', - u'device_owner': u'network:router_gateway', - u'fixed_ips': [ - {u'ip_address': u'1.1.1.2', - u'subnet_id': u'10d20df9-e8ba-4756-ba30-d573ceb2e99a'}], - u'id': u'04627c85-3553-436c-a7c5-0a64f5b87bb9', - u'mac_address': u'fa:16:3e:f3:19:e5', - u'name': u'', - u'network_id': u'ecdea1af-7197-43c8-b3b0-34d90f72a2a8', - u'port_security_enabled': False, - u'security_groups': [], - u'status': u'DOWN', - u'tenant_id': u''}, - {u'admin_state_up': True, - u'allowed_address_pairs': [], - u'binding:host_id': None, - u'binding:vif_details': {u'port_filter': True}, - u'binding:vif_type': u'ovs', - u'binding:vnic_type': u'normal', - u'device_id': u'f42dc4f1-f371-48cc-95be-cf1b97112ab8', - u'device_owner': 
u'network:router_interface', - u'fixed_ips': [ - {u'ip_address': u'169.254.169.253', - u'subnet_id': u'aa9ad4f7-baf0-4a41-85c3-1cc8a3066db6'}], - u'id': u'87f8933a-9582-48d8-ad16-9abf6e545002', - u'mac_address': u'fa:16:3e:b7:78:e8', - u'name': u'', - u'network_id': u'6743ff85-2cfd-48a7-9d3f-472cd418783e', - u'port_security_enabled': False, - u'security_groups': [], - u'status': u'DOWN', - u'tenant_id': u''}, - {u'admin_state_up': True, - u'allowed_address_pairs': [], - u'binding:host_id': None, - u'binding:vif_details': {u'port_filter': True}, - u'binding:vif_type': u'ovs', - u'binding:vnic_type': u'normal', - u'device_id': u'f42dc4f1-f371-48cc-95be-cf1b97112ab8', - u'device_owner': u'network:router_interface', - u'fixed_ips': [ - {u'ip_address': u'10.0.0.1', - u'subnet_id': u'3c0eb3a3-4d16-4b1b-b327-44417182d0bb'}], - u'id': u'c58c3246-6c2e-490a-b4d9-3b8d5191b465', - u'mac_address': u'fa:16:3e:08:31:6e', - u'name': u'', - u'network_id': u'63ce8fbb-12e9-4ecd-9b56-1bbf8b51217d', - u'port_security_enabled': False, - u'security_groups': [], - u'status': u'DOWN', - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}, - {u'admin_state_up': True, - u'allowed_address_pairs': [], - u'binding:host_id': None, - u'binding:vif_details': {u'port_filter': True}, - u'binding:vif_type': u'ovs', - u'binding:vnic_type': u'normal', - u'device_id': u'', - u'device_owner': u'', - u'fixed_ips': [ - {u'ip_address': u'10.0.0.2', - u'subnet_id': u'3c0eb3a3-4d16-4b1b-b327-44417182d0bb'}], - u'id': u'eb50003b-a081-4533-92aa-1cbd97f526a8', - u'mac_address': u'fa:16:3e:af:56:fa', - u'name': u'', - u'network_id': u'63ce8fbb-12e9-4ecd-9b56-1bbf8b51217d', - u'port_security_enabled': True, - u'security_groups': [u'e0239062-4243-4798-865f-7055f03786d6'], - u'status': u'DOWN', - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}]} - - self.mock_subnets = {'subnets': [ - {u'allocation_pools': [{u'end': u'1.1.1.254', - u'start': u'1.1.1.2'}], - u'cidr': u'1.1.1.0/24', - u'dns_nameservers': [], - u'enable_dhcp': True, - u'gateway_ip': u'1.1.1.1', - u'host_routes': [], - u'id': u'10d20df9-e8ba-4756-ba30-d573ceb2e99a', - u'ip_version': 4, - u'ipv6_address_mode': None, - u'ipv6_ra_mode': None, - u'name': u'', - u'network_id': u'ecdea1af-7197-43c8-b3b0-34d90f72a2a8', - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}, - {u'allocation_pools': [{u'end': u'10.0.0.254', - u'start': u'10.0.0.2'}], - u'cidr': u'10.0.0.0/24', - u'dns_nameservers': [u'8.8.8.8'], - u'enable_dhcp': True, - u'gateway_ip': u'10.0.0.1', - u'host_routes': [{u'destination': u'10.10.0.2/32', - u'nexthop': u'10.0.0.1'}], - u'id': u'3c0eb3a3-4d16-4b1b-b327-44417182d0bb', - u'ip_version': 4, - u'ipv6_address_mode': None, - u'ipv6_ra_mode': None, - u'name': u'private-subnet', - u'network_id': u'63ce8fbb-12e9-4ecd-9b56-1bbf8b51217d', - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}, - {u'allocation_pools': [{u'end': u'169.254.169.254', - u'start': u'169.254.169.254'}], - u'cidr': u'169.254.169.252/30', - u'dns_nameservers': [], - u'enable_dhcp': True, - u'gateway_ip': u'169.254.169.253', - u'host_routes': [], - u'id': u'aa9ad4f7-baf0-4a41-85c3-1cc8a3066db6', - u'ip_version': 4, - u'ipv6_address_mode': None, - u'ipv6_ra_mode': None, - u'name': u'meta-f42dc4f1-f371-48cc-95be-cf1b97112ab8', - u'network_id': u'6743ff85-2cfd-48a7-9d3f-472cd418783e', - u'tenant_id': u''}]} - - self.mock_routers = {'routers': [ - {u'admin_state_up': True, - u'distributed': False, - u'external_gateway_info': { - u'enable_snat': True, - u'external_fixed_ips': [ - {u'ip_address': 
u'1.1.1.2', - u'subnet_id': u'10d20df9-e8ba-4756-ba30-d573ceb2e99a'}], - u'network_id': u'ecdea1af-7197-43c8-b3b0-34d90f72a2a8'}, - u'id': u'f42dc4f1-f371-48cc-95be-cf1b97112ab8', - u'name': u'myrouter', - u'routes': [], - u'status': u'DOWN', - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}]} - - self.mock_security_groups = {'security_groups': [ - {u'description': u'Default security group', - u'id': u'a268fc32-1a59-4154-9a7c-f453ef92560c', - u'name': u'default', - u'security_group_rules': [ - {u'direction': u'egress', - u'ethertype': u'IPv4', - u'id': u'1d943e83-e4e6-472a-9655-f74eb22f3668', - u'port_range_max': None, - u'port_range_min': None, - u'protocol': None, - u'remote_group_id': None, - u'remote_ip_prefix': None, - u'security_group_id': - u'a268fc32-1a59-4154-9a7c-f453ef92560c', - u'tenant_id': u''}, - {u'direction': u'ingress', - u'ethertype': u'IPv4', - u'id': u'30be5ee1-5b0a-4929-aca5-0c25f1c6b733', - u'port_range_max': None, - u'port_range_min': None, - u'protocol': None, - u'remote_group_id': u'a268fc32-1a59-4154-9a7c-f453ef92560c', - u'remote_ip_prefix': None, - u'security_group_id': - u'a268fc32-1a59-4154-9a7c-f453ef92560c', - u'tenant_id': u''}, - {u'direction': u'ingress', - u'ethertype': u'IPv6', - u'id': u'639995b8-c3ac-44a3-a4f3-c74f9172ad54', - u'port_range_max': None, - u'port_range_min': None, - u'protocol': None, - u'remote_group_id': u'a268fc32-1a59-4154-9a7c-f453ef92560c', - u'remote_ip_prefix': None, - u'security_group_id': - u'a268fc32-1a59-4154-9a7c-f453ef92560c', - u'tenant_id': u''}, - {u'direction': u'egress', - u'ethertype': u'IPv6', - u'id': u'ed7fd9f6-e390-448a-9f5f-8dd4659282f7', - u'port_range_max': None, - u'port_range_min': None, - u'protocol': None, - u'remote_group_id': None, - u'remote_ip_prefix': None, - u'security_group_id': - u'a268fc32-1a59-4154-9a7c-f453ef92560c', - u'tenant_id': u''}], - u'tenant_id': u''}, - {u'description': u'Default security group', - u'id': u'e0239062-4243-4798-865f-7055f03786d6', - u'name': u'default', - u'security_group_rules': [ - {u'direction': u'ingress', - u'ethertype': u'IPv6', - u'id': u'8a81fecc-ecc7-48ca-bccc-195799667e23', - u'port_range_max': None, - u'port_range_min': None, - u'protocol': None, - u'remote_group_id': u'e0239062-4243-4798-865f-7055f03786d6', - u'remote_ip_prefix': None, - u'security_group_id': - u'e0239062-4243-4798-865f-7055f03786d6', - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}, - {u'direction': u'ingress', - u'ethertype': u'IPv4', - u'id': u'8f4d9e99-1fe8-4816-9f07-c4ecddea9427', - u'port_range_max': None, - u'port_range_min': None, - u'protocol': None, - u'remote_group_id': u'e0239062-4243-4798-865f-7055f03786d6', - u'remote_ip_prefix': None, - u'security_group_id': - u'e0239062-4243-4798-865f-7055f03786d6', - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}, - {u'direction': u'egress', - u'ethertype': u'IPv4', - u'id': u'e70cf243-3389-4f80-82dc-92a3ec1f2d2a', - u'port_range_max': None, - u'port_range_min': None, - u'protocol': None, - u'remote_group_id': None, - u'remote_ip_prefix': None, - u'security_group_id': - u'e0239062-4243-4798-865f-7055f03786d6', - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}, - {u'direction': u'egress', - u'ethertype': u'IPv6', - u'id': u'eca1df0f-b222-4208-8f96-8a8024fd6834', - u'port_range_max': None, - u'port_range_min': None, - u'protocol': None, - u'remote_group_id': None, - u'remote_ip_prefix': None, - u'security_group_id': - u'e0239062-4243-4798-865f-7055f03786d6', - u'tenant_id': u'feee0a965cc34274917fb753623dd57d'}], - u'tenant_id': 
u'feee0a965cc34274917fb753623dd57d'}]} - - self.expected_state = { - 'subnets': set([ - ('3c0eb3a3-4d16-4b1b-b327-44417182d0bb', - 'feee0a965cc34274917fb753623dd57d', 'private-subnet', - '63ce8fbb-12e9-4ecd-9b56-1bbf8b51217d', 4, '10.0.0.0/24', - '10.0.0.1', 'True', 'None', 'None'), - ('aa9ad4f7-baf0-4a41-85c3-1cc8a3066db6', '', - 'meta-f42dc4f1-f371-48cc-95be-cf1b97112ab8', - '6743ff85-2cfd-48a7-9d3f-472cd418783e', 4, - '169.254.169.252/30', - '169.254.169.253', 'True', 'None', 'None'), - ('10d20df9-e8ba-4756-ba30-d573ceb2e99a', - 'feee0a965cc34274917fb753623dd57d', '', - 'ecdea1af-7197-43c8-b3b0-34d90f72a2a8', 4, '1.1.1.0/24', - '1.1.1.1', 'True', 'None', 'None')]), - 'floating_ips': set([ - ("2f245a7b-796b-4f26-9cf9-9e82d248fda7", - "d23abc8d-2991-4a55-ba98-2aaea84cc72f", - "4969c491a3c74ee4af974e6d800c62de", - "376da547-b977-4cfe-9cba-275c80debf57", "10.0.0.3", - "172.24.4.228", "ce705c24-c1ef-408a-bda3-7bbd946164ab", - "ACTIVE"), - ("61cea855-49cb-4846-997d-801b70c71bdd", 'None', - "4969c491a3c74ee4af974e6d800c62de", - "376da547-b977-4cfe-9cba-275c80debf57", 'None', - "172.24.4.227", 'None', "DOWN")]), - 'routers': - set([('f42dc4f1-f371-48cc-95be-cf1b97112ab8', - 'feee0a965cc34274917fb753623dd57d', 'DOWN', 'True', - 'myrouter', 'False')]), - 'dns_nameservers': - set([('3c0eb3a3-4d16-4b1b-b327-44417182d0bb', '8.8.8.8')]), - 'security_group_rules': - set([('e0239062-4243-4798-865f-7055f03786d6', - 'e70cf243-3389-4f80-82dc-92a3ec1f2d2a', - 'feee0a965cc34274917fb753623dd57d', 'None', 'egress', - 'IPv4', 'None', 'None', 'None', 'None'), - ('a268fc32-1a59-4154-9a7c-f453ef92560c', - 'ed7fd9f6-e390-448a-9f5f-8dd4659282f7', '', 'None', - 'egress', 'IPv6', 'None', 'None', 'None', 'None'), - ('a268fc32-1a59-4154-9a7c-f453ef92560c', - '1d943e83-e4e6-472a-9655-f74eb22f3668', '', 'None', - 'egress', 'IPv4', 'None', 'None', 'None', 'None'), - ('a268fc32-1a59-4154-9a7c-f453ef92560c', - '30be5ee1-5b0a-4929-aca5-0c25f1c6b733', '', - 'a268fc32-1a59-4154-9a7c-f453ef92560c', 'ingress', - 'IPv4', 'None', 'None', 'None', 'None'), - ('e0239062-4243-4798-865f-7055f03786d6', - '8a81fecc-ecc7-48ca-bccc-195799667e23', - 'feee0a965cc34274917fb753623dd57d', - 'e0239062-4243-4798-865f-7055f03786d6', 'ingress', - 'IPv6', 'None', 'None', 'None', 'None'), - ('a268fc32-1a59-4154-9a7c-f453ef92560c', - '639995b8-c3ac-44a3-a4f3-c74f9172ad54', '', - 'a268fc32-1a59-4154-9a7c-f453ef92560c', 'ingress', - 'IPv6', 'None', 'None', 'None', 'None'), - ('e0239062-4243-4798-865f-7055f03786d6', - '8f4d9e99-1fe8-4816-9f07-c4ecddea9427', - 'feee0a965cc34274917fb753623dd57d', - 'e0239062-4243-4798-865f-7055f03786d6', - 'ingress', 'IPv4', 'None', 'None', 'None', 'None'), - ('e0239062-4243-4798-865f-7055f03786d6', - 'eca1df0f-b222-4208-8f96-8a8024fd6834', - 'feee0a965cc34274917fb753623dd57d', 'None', 'egress', - 'IPv6', 'None', 'None', 'None', 'None')]), - 'ports': - set([('c58c3246-6c2e-490a-b4d9-3b8d5191b465', - 'feee0a965cc34274917fb753623dd57d', '', - '63ce8fbb-12e9-4ecd-9b56-1bbf8b51217d', - 'fa:16:3e:08:31:6e', 'True', 'DOWN', - 'f42dc4f1-f371-48cc-95be-cf1b97112ab8', - 'network:router_interface'), - ('87f8933a-9582-48d8-ad16-9abf6e545002', '', '', - '6743ff85-2cfd-48a7-9d3f-472cd418783e', - 'fa:16:3e:b7:78:e8', 'True', 'DOWN', - 'f42dc4f1-f371-48cc-95be-cf1b97112ab8', - 'network:router_interface'), - ('eb50003b-a081-4533-92aa-1cbd97f526a8', - 'feee0a965cc34274917fb753623dd57d', '', - '63ce8fbb-12e9-4ecd-9b56-1bbf8b51217d', - 'fa:16:3e:af:56:fa', 'True', 'DOWN', '', ''), - ('04627c85-3553-436c-a7c5-0a64f5b87bb9', '', '', - 
'ecdea1af-7197-43c8-b3b0-34d90f72a2a8', - 'fa:16:3e:f3:19:e5', 'True', 'DOWN', - 'f42dc4f1-f371-48cc-95be-cf1b97112ab8', - 'network:router_gateway')]), - 'allocation_pools': - set([('10d20df9-e8ba-4756-ba30-d573ceb2e99a', '1.1.1.2', - '1.1.1.254'), - ('3c0eb3a3-4d16-4b1b-b327-44417182d0bb', '10.0.0.2', - '10.0.0.254'), - ('aa9ad4f7-baf0-4a41-85c3-1cc8a3066db6', - '169.254.169.254', '169.254.169.254')]), - 'host_routes': - set([('3c0eb3a3-4d16-4b1b-b327-44417182d0bb', - '10.10.0.2/32', '10.0.0.1')]), - 'security_group_port_bindings': - set([('eb50003b-a081-4533-92aa-1cbd97f526a8', - 'e0239062-4243-4798-865f-7055f03786d6')]), - 'external_gateway_infos': - set([('f42dc4f1-f371-48cc-95be-cf1b97112ab8', - 'ecdea1af-7197-43c8-b3b0-34d90f72a2a8', 'True')]), - 'fixed_ips': - set([('c58c3246-6c2e-490a-b4d9-3b8d5191b465', '10.0.0.1', - '3c0eb3a3-4d16-4b1b-b327-44417182d0bb'), - ('eb50003b-a081-4533-92aa-1cbd97f526a8', '10.0.0.2', - '3c0eb3a3-4d16-4b1b-b327-44417182d0bb'), - ('87f8933a-9582-48d8-ad16-9abf6e545002', - '169.254.169.253', - 'aa9ad4f7-baf0-4a41-85c3-1cc8a3066db6'), - ('04627c85-3553-436c-a7c5-0a64f5b87bb9', '1.1.1.2', - '10d20df9-e8ba-4756-ba30-d573ceb2e99a')]), - 'networks': - set([('ecdea1af-7197-43c8-b3b0-34d90f72a2a8', - 'feee0a965cc34274917fb753623dd57d', 'public', - 'ACTIVE', 'True', 'False'), - ('63ce8fbb-12e9-4ecd-9b56-1bbf8b51217d', - 'feee0a965cc34274917fb753623dd57d', 'private', - 'ACTIVE', 'True', 'False')]), - 'security_groups': - set([('e0239062-4243-4798-865f-7055f03786d6', - 'feee0a965cc34274917fb753623dd57d', 'default', - 'Default security group'), - ('a268fc32-1a59-4154-9a7c-f453ef92560c', '', - 'default', 'Default security group')]), - 'external_fixed_ips': - set([('f42dc4f1-f371-48cc-95be-cf1b97112ab8', - '10d20df9-e8ba-4756-ba30-d573ceb2e99a', '1.1.1.2')])} - - def test_update_from_datasource(self): - with base.nested( - mock.patch.object(self.driver.neutron, - "list_networks", - return_value=self.mock_networks), - mock.patch.object(self.driver.neutron, - "list_ports", - return_value=self.mock_ports), - mock.patch.object(self.driver.neutron, - "list_subnets", - return_value=self.mock_subnets), - mock.patch.object(self.driver.neutron, - "list_routers", - return_value=self.mock_routers), - mock.patch.object(self.driver.neutron, - "list_security_groups", - return_value=self.mock_security_groups), - mock.patch.object(self.driver.neutron, - "list_floatingips", - return_value=self.mock_floatingips), - ) as (list_networks, list_ports, list_subnets, list_routers, - list_security_groups, list_floatingips): - self.driver.update_from_datasource() - self.assertEqual(self.expected_state, self.driver.state) - - def test_execute(self): - class NeutronClient(object): - def __init__(self): - self.testkey = None - - def connectNetwork(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - neutron_client = NeutronClient() - self.driver.neutron = neutron_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('connectNetwork', api_args) - - self.assertEqual(expected_ans, neutron_client.testkey) - - def test_update_resource_attrs(self): - args = {'positional': ['port', '1', 'key1', 'val1']} - action_args = {'named': {'port': '1', - 'body': {'port': {'key1': 'val1'}}}} - with mock.patch.object(self.driver, '_execute_api') as mock_ea: - self.driver.update_resource_attrs(args) - mock_ea.assert_called_with(self.driver.neutron, 'update_port', - action_args) diff --git a/congress/tests/datasources/test_nova_driver.py 
b/congress/tests/datasources/test_nova_driver.py deleted file mode 100644 index e5092e74..00000000 --- a/congress/tests/datasources/test_nova_driver.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock -import novaclient - -from congress.datasources import nova_driver -from congress import exception -from congress.tests import base -from congress.tests.datasources import fakes -from congress.tests import helper - - -class TestNovaDriver(base.TestCase): - - def setUp(self): - super(TestNovaDriver, self).setUp() - nova_client = mock.MagicMock() - self.nova = fakes.NovaFakeClient() - with mock.patch.object(novaclient.client.Client, '__init__', - return_value=nova_client): - self.driver = nova_driver.NovaDriver( - name='nova', - args=helper.datasource_openstack_args()) - - def test_driver_called(self): - self.assertIsNotNone(self.driver.nova_client) - - def test_servers(self): - servers_raw = self.nova.servers.list(detailed=True) - self.driver._translate_servers(servers_raw) - server_tuples = self.driver.state[self.driver.SERVERS] - - self.assertEqual(3, len(server_tuples)) - # tuple = (s.id, s.name, s.hostId, s.status, s.tenant_id, - # s.user_id, image, flavor) - for t in server_tuples: - id = t[0] - name = t[1] - host_id = t[2] - status = t[3] - tenant_id = t[4] - user_id = t[5] - image_id = t[6] - flavor_id = t[7] - zone = t[8] - host_name = t[9] - self.assertIn(id, [1234, 5678, 9012]) - # see congress.datasources.tests.unit.fakes for actual values - if id == 1234: - self.assertEqual("sample-server", name) - self.assertEqual("e4d909c290d0fb1ca068ffaddf22cbd0", host_id) - self.assertEqual("BUILD", status) - self.assertEqual("33ea0494-2bdf-4382-a445-9068997430b9", - user_id) - self.assertEqual("50e14867-7c64-4ec9-be8d-ed2470ca1d24", - tenant_id) - self.assertEqual(2, image_id) - self.assertEqual(1, flavor_id) - self.assertEqual('default', zone) - self.assertEqual('host1', host_name) - - elif id == 5678: - self.assertEqual("sample-server2", name) - self.assertEqual("9e107d9d372bb6826bd81d3542a419d6", host_id) - self.assertEqual("ACTIVE", status) - self.assertEqual("33ea0494-2bdf-4382-a445-9068997430b9", - user_id) - self.assertEqual("50e14867-7c64-4ec9-be8d-ed2470ca1d24", - tenant_id) - self.assertEqual(2, image_id) - self.assertEqual(1, flavor_id) - self.assertEqual('None', zone) - self.assertEqual('None', host_name) - - elif id == 9012: - self.assertEqual("sample-server3", name) - self.assertEqual("9e107d9d372bb6826bd81d3542a419d6", host_id) - self.assertEqual("ACTIVE", status) - self.assertEqual("33ea0494-2bdf-4382-a445-9068997430b9", - user_id) - self.assertEqual("50e14867-7c64-4ec9-be8d-ed2470ca1d24", - tenant_id) - self.assertEqual(2, image_id) - self.assertEqual(1, flavor_id) - self.assertEqual('foo', zone) - self.assertEqual('host2', host_name) - - def test_flavors(self): - 
flavor_raw = self.nova.flavors.list(detailed=True)
-        self.driver._translate_flavors(flavor_raw)
-
-        flavor_tuples = self.driver.state[self.driver.FLAVORS]
-
-        self.assertEqual(4, len(flavor_tuples))
-        # ("id", "name", "vcpus", "ram", "disk", "ephemeral",
-        #  "rxtx_factor")
-        for f in flavor_tuples:
-            id = f[0]
-            name = f[1]
-            vcpus = f[2]
-            ram = f[3]
-            disk = f[4]
-            ephemeral = f[5]
-            rxtx_factor = f[6]
-
-            self.assertIn(id, [1, 2, 3, 4])
-
-            # {'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10,
-            #  'vcpus': 1, 'OS-FLV-EXT-DATA:ephemeral': 10,
-            #  'os-flavor-access:is_public': True, 'rxtx_factor': 1.0,
-            #  'links': {}},
-            if id == 1:
-                self.assertEqual('256 MB Server', name)
-                self.assertEqual(256, ram)
-                self.assertEqual(10, disk)
-                self.assertEqual(1, vcpus)
-                self.assertEqual(10, ephemeral)
-                self.assertEqual(1.0, rxtx_factor)
-            # {'id': 2, 'name': '512 MB Server', 'ram': 512, 'disk': 20,
-            #  'vcpus': 2, 'OS-FLV-EXT-DATA:ephemeral': 20,
-            #  'os-flavor-access:is_public': False, 'rxtx_factor': 1.0,
-            #  'links': {}},
-            elif id == 2:
-                self.assertEqual('512 MB Server', name)
-                self.assertEqual(512, ram)
-                self.assertEqual(20, disk)
-                self.assertEqual(2, vcpus)
-                self.assertEqual(20, ephemeral)
-                self.assertEqual(1.0, rxtx_factor)
-            # {'id': 3, 'name': '128 MB Server', 'ram': 128, 'disk': 0,
-            #  'vcpus': 4, 'OS-FLV-EXT-DATA:ephemeral': 0,
-            #  'os-flavor-access:is_public': True, 'rxtx_factor': 3.0,
-            #  'links': {}}
-            elif id == 3:
-                self.assertEqual('128 MB Server', name)
-                self.assertEqual(128, ram)
-                self.assertEqual(0, disk)
-                self.assertEqual(4, vcpus)
-                self.assertEqual(0, ephemeral)
-                self.assertEqual(3.0, rxtx_factor)
-            # {'id': 4, 'name': '1024 MB Server', 'ram': 1024, 'disk': 10,
-            #  'vcpus': 3, 'OS-FLV-EXT-DATA:ephemeral': 10,
-            #  'os-flavor-access:is_public': True, 'rxtx_factor': 2.0,
-            #  'links': {}},
-            elif id == 4:
-                self.assertEqual('1024 MB Server', name)
-                self.assertEqual(1024, ram)
-                self.assertEqual(10, disk)
-                self.assertEqual(3, vcpus)
-                self.assertEqual(10, ephemeral)
-                self.assertEqual(2.0, rxtx_factor)
-
-    def test_hosts(self):
-        host_list = self.nova.hosts.list()
-        self.driver._translate_hosts(host_list)
-        host_tuples = self.driver.state[self.driver.HOSTS]
-        self.assertEqual(2, len(host_tuples))
-        # {'hosts':
-        #  [{'host_name': 'host1',
-        #    'service': 'nova-compute',
-        #    'zone': zone},
-        #   {'host_name': 'host2',
-        #    'service': 'nova-cert',
-        #    'zone': zone}]}
-        for host in host_tuples:
-            host_name = host[0]
-            service = host[1]
-            zone = host[2]
-
-            if host_name == 'host1':
-                self.assertEqual('nova-compute', service)
-                self.assertEqual('nova1', str(zone))
-            elif host_name == 'host2':
-                self.assertEqual('nova-cert', service)
-                self.assertEqual('nova1', str(zone))
-
-    def test_services(self):
-        service_list = self.nova.services.list()
-        self.driver._translate_services(service_list)
-        expected_ret = {
-            1: [1, 'nova-compute', 'nova', 'nova1', 'enabled', 'up',
-                '2015-07-28T08:28:37.000000', 'None'],
-            2: [2, 'nova-schedule', 'nova', 'nova1', 'disabled', 'up',
-                '2015-07-28T08:28:38.000000', 'daily maintenance']
-        }
-        service_tuples = self.driver.state[self.driver.SERVICES]
-
-        self.assertEqual(2, len(service_tuples))
-
-        for s in service_tuples:
-            for expected, actual in zip(expected_ret[s[0]], s):
-                self.assertEqual(expected, actual)
-
-    def test_availability_zones(self):
-        az_list = self.nova.availability_zones.list()
-        self.driver._translate_availability_zones(az_list)
-        expected_ret = {
-            'AZ1': ['AZ1', 'available'],
-            'AZ2': ['AZ2', 'not available']
-        }
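Both test_services and test_availability_zones compare each translated row against an expected row element by element. A bare map(self.assertEqual, expected, actual) would be a silent no-op on Python 3, because map returns a lazy iterator and nothing consumes it; an explicit loop forces every comparison. A self-contained illustration:

    expected = [1, 'nova-compute', 'nova']
    actual = (1, 'nova-compute', 'nova')

    def check(a, b):
        assert a == b

    map(check, expected, actual)            # Python 3: lazy, nothing runs
    for exp, act in zip(expected, actual):  # the loop actually executes
        check(exp, act)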
-        az_tuples = self.driver.state[self.driver.AVAILABILITY_ZONES]
-
-        self.assertEqual(2, len(az_tuples))
-
-        for az in az_tuples:
-            for expected, actual in zip(expected_ret[az[0]], az):
-                self.assertEqual(expected, actual)
-
-    # TODO(dse2): port or not. Unclear why we're doing this with Nova.
-    # def test_communication(self):
-    #     """Test for communication.
-
-    #     Test the module's ability to be loaded into the DSE
-    #     by checking its ability to communicate on the message bus.
-    #     """
-    #     cage = d6cage.d6Cage()
-
-    #     # Create modules.
-    #     # Turn off polling so we don't need to deal with real data.
-    #     args = helper.datasource_openstack_args()
-    #     args['poll_time'] = 0
-    #     cage.loadModule("NovaDriver",
-    #                     helper.data_module_path("nova_driver.py"))
-    #     cage.loadModule("PolicyDriver", helper.policy_module_path())
-    #     cage.createservice(name="policy", moduleName="PolicyDriver",
-    #                        args={'d6cage': cage,
-    #                              'rootdir': helper.data_module_path(''),
-    #                              'log_actions_only': True})
-    #     cage.createservice(name="nova", moduleName="NovaDriver", args=args)
-
-    #     # Check that data gets sent from nova to policy as expected
-    #     nova = cage.service_object('nova')
-    #     policy = cage.service_object('policy')
-    #     policy.debug_mode()
-    #     policy.create_policy('nova')
-    #     policy.set_schema('nova', compile.Schema({'server': (1,)}))
-    #     policy.subscribe('nova', 'server',
-    #                      callback=policy.receive_data)
-
-    #     # publishing is slightly convoluted b/c deltas are computed
-    #     # automatically. (Not just convenient--useful so that DSE
-    #     # properly handles the initial state problem.)
-    #     # Need to set nova.state and nova.prior_state and then publish
-    #     # anything.
-
-    #     # publish server(1), server(2), server(3)
-    #     helper.retry_check_subscribers(nova, [(policy.name, 'server')])
-    #     nova.prior_state = {}
-    #     nova.state['server'] = set([(1,), (2,), (3,)])
-    #     nova.publish('server', None)
-    #     helper.retry_check_db_equal(
-    #         policy, 'nova:server(x)',
-    #         'nova:server(1) nova:server(2) nova:server(3)')
-
-    #     # publish server(1), server(4), server(5)
-    #     nova.prior_state['server'] = nova.state['server']
-    #     nova.state['server'] = set([(1,), (4,), (5,)])
-    #     nova.publish('server', None)
-    #     helper.retry_check_db_equal(
-    #         policy, 'nova:server(x)',
-    #         'nova:server(1) nova:server(4) nova:server(5)')
-
-    # TODO(thinrichs): test that Nova's polling functionality
-    # works properly. Or perhaps could bundle this into the
-    # tests above if we check self.state results.
- # See Neutron's test_polling - - def test_execute(self): - class NovaClient(object): - def __init__(self): - self.testkey = None - - def connectNetwork(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - nova_client = NovaClient() - self.driver.nova_client = nova_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('connectNetwork', api_args) - - self.assertEqual(expected_ans, nova_client.testkey) - - def test_execute_servers_set_meta(self): - args = {'positional': ['1', 'meta-key1', 'meta-value1']} - action_args = {'named': {'server': '1', - 'metadata': {'meta-key1': 'meta-value1'}}} - with mock.patch.object(self.driver, '_execute_api') as mock_ea: - self.driver.servers_set_meta(args) - mock_ea.assert_called_with(self.driver.nova_client, - 'servers.set_meta', - action_args) - - def test_execute_with_non_executable_method(self): - action_args = {'positional': ['1', 'meta-key1', 'meta-value1']} - self.assertRaises(exception.CongressException, - self.driver.execute, - 'get_nova_credentials_v2', action_args) diff --git a/congress/tests/datasources/test_plexxi_driver.py b/congress/tests/datasources/test_plexxi_driver.py deleted file mode 100644 index 17fddee7..00000000 --- a/congress/tests/datasources/test_plexxi_driver.py +++ /dev/null @@ -1,169 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
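The test_execute cases in these driver tests all rely on the same convention: execute(action, action_args) looks the action up on the wrapped client and applies the 'positional' argument list, while methods never registered as executable (such as get_nova_credentials_v2 above) are rejected with a CongressException. A minimal sketch of that dispatch under those assumptions -- the names here are simplified, and the real logic lives in Congress's datasource driver base classes:

    class FakeClient(object):
        def connectNetwork(self, arg1):
            self.testkey = 'arg1=%s' % arg1

    def execute(client, action, action_args, executable=('connectNetwork',)):
        if action not in executable:
            raise Exception('method is not executable: %s' % action)
        getattr(client, action)(*action_args.get('positional', []))

    client = FakeClient()
    execute(client, 'connectNetwork', {'positional': ['1']})
    assert client.testkey == 'arg1=1'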
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.datasources import plexxi_driver -from congress.tests import base -from congress.tests.datasources import plexxi_fakes -from congress.tests import helper - - -class TestPlexxiDriver(base.TestCase): - def setUp(self): - super(TestPlexxiDriver, self).setUp() - args = helper.datasource_openstack_args() - args['unique_names'] = 'False' - session = plexxi_fakes.MockCoreSession() - self.driver = plexxi_driver.PlexxiDriver(args=args, session=session) - self.driver.exchange = True - vnic1 = plexxi_fakes.MockNIC( - uuid='f318ac0a-9255-4af0-8a41-6f3fbc06c8aa', - mac='B8:ED:0A:4D:82:91') - vnic2 = plexxi_fakes.MockNIC( - uuid='f318ac0a-9255-4af0-8a41-6f3fbc06c8a2', - mac='B8:ED:0A:4D:82:99') - pnic1 = plexxi_fakes.MockNIC( - uuid='f318ac0a-9255-4af0-8a41-6f3fbc06c8ab', - mac='B8:ED:0A:4D:82:92') - pnic2 = plexxi_fakes.MockNIC( - uuid='f318ac0a-9255-4af0-8a41-6f3fbc06c8ac', - mac='B8:ED:0A:4D:82:93') - host1 = plexxi_fakes.MockHost('eed4ebfc-25e5-4a65-9f37-b70b8e8219d3', - 'mock1', - 1, - [pnic1]) - vm1 = plexxi_fakes.MockVM('2ca924f6-90aa-4ce8-a986-f62f8f64d14b', - '192.168.90.2', - 'namevm', - host1, - [vnic1]) - host1.addvm(vm1) - switch1 = plexxi_fakes.MockSwitch( - '12da13e3-ecb2-4c26-98a0-26cb07f9c33d', - '192.168.90.3', - 'switch1', - 'HEALTHY', - [pnic2]) - affinity = plexxi_fakes.MockAffinity( - 'fd487ecf-5279-4d3c-9378-7fb214f5dd5a', 'Testfinnity') - affinity2 = plexxi_fakes.MockAffinity( - 'fd487ecf-5279-4d3c-9378-7fb214f5dd5b', 'Testfinnity2') - vswitch = plexxi_fakes.MockVSwitch( - 'fd487ecf-5279-4d3c-9378-7fb214f5dd5c', - [host1], - [vnic2]) - link1 = plexxi_fakes.MockNetworkLink( - 'fd487ecf-5279-4d3c-9378-7fb214f5dd5f', - 'Link1', - host1, - switch1) - port = plexxi_fakes.MockPort('fd487ecf-5279-4d3c-9378-7fb214f5dd5d', - 'Port1', - [link1]) - port2 = plexxi_fakes.MockPort('fd487ecf-5279-4d3c-9378-7fb214f5dd5e', - 'Port2', - None) - - self.hosts = [host1] - self.pswitches = [switch1] - self.affinites = [affinity, affinity2] - self.vswitches = [vswitch] - self.vms = [vm1] - self.ports = [port, port2] - - def test_translate_hosts(self): - self.driver._translate_hosts(self.hosts) - ExpectedHosts = [('eed4ebfc-25e5-4a65-9f37-b70b8e8219d3', - 'mock1', - 1, - 1)] - self.assertEqual(ExpectedHosts, self.driver.hosts) - ExpectedHost_Macs = [('eed4ebfc-25e5-4a65-9f37-b70b8e8219d3', - 'B8:ED:0A:4D:82:92')] - self.assertEqual(ExpectedHost_Macs, self.driver.mac_list) - ExpectedGuests = [('eed4ebfc-25e5-4a65-9f37-b70b8e8219d3', - '2ca924f6-90aa-4ce8-a986-f62f8f64d14b')] - self.assertEqual(ExpectedGuests, self.driver.guest_list) - - def test_translate_pswitches(self): - self.driver._translate_pswitches(self.pswitches) - ExpectedpSwitches = [('12da13e3-ecb2-4c26-98a0-26cb07f9c33d', - '192.168.90.3', - 'HEALTHY')] - self.assertEqual(self.driver.plexxi_switches, ExpectedpSwitches) - ExpectedPSmacs = [('12da13e3-ecb2-4c26-98a0-26cb07f9c33d', - 'B8:ED:0A:4D:82:93')] - self.assertEqual(ExpectedPSmacs, self.driver.ps_macs) - - def test_translate_affinites(self): - self.driver._translate_affinites(self.affinites) - ExpectedAffinities = [('fd487ecf-5279-4d3c-9378-7fb214f5dd5a', - 'Testfinnity'), - ('fd487ecf-5279-4d3c-9378-7fb214f5dd5b', - 'Testfinnity2')] - - self.assertEqual(ExpectedAffinities, self.driver.affinities) - - def test_translate_vswitches(self): - self.driver._translate_vswitches(self.vswitches) - ExpectedvSwitches = [('fd487ecf-5279-4d3c-9378-7fb214f5dd5c', - 1, - 
1)] - self.assertEqual(ExpectedvSwitches, self.driver.vswitches) - ExpectedvSwitch_macs = [('fd487ecf-5279-4d3c-9378-7fb214f5dd5c', - 'B8:ED:0A:4D:82:99')] - self.assertEqual(ExpectedvSwitch_macs, self.driver.vswitch_macs) - ExpectedvSwitch_hosts = [('fd487ecf-5279-4d3c-9378-7fb214f5dd5c', - 'eed4ebfc-25e5-4a65-9f37-b70b8e8219d3')] - - self.assertEqual(ExpectedvSwitch_hosts, self.driver.vswitch_hosts) - - def test_translate_vms(self): - self.driver._translate_vms(self.vms) - ExpectedVMs = [('2ca924f6-90aa-4ce8-a986-f62f8f64d14b', - 'namevm', - 'eed4ebfc-25e5-4a65-9f37-b70b8e8219d3', - '192.168.90.2', - 1)] - - self.assertEqual(ExpectedVMs, self.driver.vms) - Expectedvm_macs = [('2ca924f6-90aa-4ce8-a986-f62f8f64d14b', - 'B8:ED:0A:4D:82:91')] - self.assertEqual(Expectedvm_macs, self.driver.vm_macs) - - def test_translate_ports(self): - self.driver._translate_ports(self.ports) - ExpectedPorts = [('fd487ecf-5279-4d3c-9378-7fb214f5dd5d', - 'Port1'), - ('fd487ecf-5279-4d3c-9378-7fb214f5dd5e', - 'Port2')] - self.assertEqual(ExpectedPorts, self.driver.ports) - ExpectedLinks = [('fd487ecf-5279-4d3c-9378-7fb214f5dd5f', - 'Link1', - 'fd487ecf-5279-4d3c-9378-7fb214f5dd5d', - '12da13e3-ecb2-4c26-98a0-26cb07f9c33d', - 'switch1', - 'eed4ebfc-25e5-4a65-9f37-b70b8e8219d3', - 'mock1')] - self.assertEqual(ExpectedLinks, self.driver.network_links) - - def test_execute(self): - self.driver.add_executable_method('_translate_hosts', []) - self.driver.execute('_translate_hosts', self.hosts) - ExpectedHosts = [('eed4ebfc-25e5-4a65-9f37-b70b8e8219d3', - 'mock1', - 1, - 1)] - self.assertEqual(ExpectedHosts, self.driver.hosts) diff --git a/congress/tests/datasources/test_swift_driver.py b/congress/tests/datasources/test_swift_driver.py deleted file mode 100644 index bdd8ec84..00000000 --- a/congress/tests/datasources/test_swift_driver.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) 2014 Montavista Software, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock - -from congress.datasources import swift_driver -from congress.tests import base -from congress.tests import helper - - -class TestSwiftDriver(base.TestCase): - - def setUp(self): - super(TestSwiftDriver, self).setUp() - self.swift_client = mock.MagicMock() - - args = helper.datasource_openstack_args() - self.driver = swift_driver.SwiftDriver(name='testswift', args=args) - - def test_list_containers(self): - containers_data = [{'count': '1', - 'bytes': '1048', - 'name': 'container1'}, - {'count': '2', - 'bytes': '2086', - 'name': 'container2'}] - - self.driver._translate_containers(containers_data) - container_list = list(self.driver.state[self.driver.CONTAINERS]) - self.assertIsNotNone(container_list) - self.assertEqual(2, len(container_list)) - - if container_list[0][2] == 'container1': - self.assertEqual(('1', '1048', 'container1'), container_list[0]) - self.assertEqual(('2', '2086', 'container2'), container_list[1]) - if container_list[1][2] == 'container1': - self.assertEqual(('1', '1048', 'container1'), container_list[1]) - self.assertEqual(('2', '2086', 'container2'), container_list[0]) - - def test_list_objects(self): - objects_data = [{'bytes': '2200', - 'last_modified': '2014-11-06T05:40:34.052100', - 'hash': '9204776814ca62c92c7996de725ecc6b', - 'name': 'file-1', - 'content_type': 'application/octet-stream', - 'container_name': 'container1'}, - {'bytes': '2350', - 'last_modified': '2014-11-06T05:39:57.424800', - 'hash': 'c2b86044dd50a29d60c0e92e23e3ceea', - 'name': 'file-2', - 'content_type': 'application/octet-stream', - 'container_name': 'container2'}] - - self.driver._translate_objects(objects_data) - object_list = list(self.driver.state[self.driver.OBJECTS]) - self.assertIsNotNone(object_list) - self.assertEqual(2, len(object_list)) - - if object_list[0][5] == 'container1': - self.assertEqual(('2200', '2014-11-06T05:40:34.052100', - '9204776814ca62c92c7996de725ecc6b', 'file-1', - 'application/octet-stream', - 'container1'), object_list[0]) - - self.assertEqual(('2350', '2014-11-06T05:39:57.424800', - 'c2b86044dd50a29d60c0e92e23e3ceea', 'file-2', - 'application/octet-stream', - 'container2'), object_list[1]) - - if object_list[1][5] == 'container1': - self.assertEqual(('2200', '2014-11-06T05:40:34.052100', - '9204776814ca62c92c7996de725ecc6b', 'file-1', - 'application/octet-stream', - 'container1'), object_list[1]) - - self.assertEqual(('2350', '2014-11-06T05:39:57.424800', - 'c2b86044dd50a29d60c0e92e23e3ceea', 'file-2', - 'application/octet-stream', - 'container2'), object_list[0]) - - def test_execute(self): - class SwiftClient(object): - def __init__(self): - self.testkey = None - - def updateObject(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - swift_client = SwiftClient() - self.driver.swift_service = swift_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('updateObject', api_args) - - self.assertEqual(expected_ans, swift_client.testkey) diff --git a/congress/tests/datasources/test_vCenter_driver.py b/congress/tests/datasources/test_vCenter_driver.py deleted file mode 100644 index 56782ce7..00000000 --- a/congress/tests/datasources/test_vCenter_driver.py +++ /dev/null @@ -1,325 +0,0 @@ -# Copyright (c) 2014 Marist SDN Innovation lab Joint with Plexxi Inc. -# All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock - -from congress.datasources import vCenter_driver -from congress.tests import base -from congress.tests.datasources import vCenter_fakes -from congress.tests import helper - - -class TestvCenterDriver(base.TestCase): - def setUp(self): - super(TestvCenterDriver, self).setUp() - args = helper.datasource_openstack_args() - args['max_hosts'] = 999 - args['max_vms'] = 999 - self.driver = vCenter_driver.VCenterDriver(args=args, - session="Testing") - self.mock_rawhosts = {} - h1_obj = {} - h1_obj['value'] = 'Host1' - mock_host1 = vCenter_fakes.MockvCenterHost(h1_obj) - h1_uuid = vCenter_fakes.MockProperty( - '9912c61d-79e0-4423-bb43-d79926e0d1f0', - 'hardware.systemInfo.uuid') - h1_name = vCenter_fakes.MockProperty('Host1', 'name') - h1_DNS_obj = vCenter_fakes.MockDNSInfo(['10.11.12.1', '10.11.12.2']) - h1_DNS = vCenter_fakes.MockProperty(h1_DNS_obj, - 'config.network.dnsConfig.address') - h1_pnic1 = {} - h1_pnic1['uuid'] = '9912c61d-79e0-4423-bb43-d79926e0d1f1' - h1_pnic1['mac'] = '3F-0B-DD-8A-F3-B9' - h1_pnic1['device'] = 'vmnic1' - h1_pnic1['spec'] = {} - h1_pnic1['spec']['ip'] = {} - h1_pnic1['spec']['ip']['ipAddress'] = '10.11.13.1' - h1_pnic1['spec']['ip']['subnetMask'] = '255.255.255.0' - h1_pnic2 = {} - h1_pnic2['uuid'] = '9912c61d-79e0-4423-bb43-d79926e0d1f2' - h1_pnic2['mac'] = '3F-0B-DD-8A-F3-BA' - h1_pnic2['device'] = 'vmnic2' - h1_pnic2['spec'] = {} - h1_pnic2['spec']['ip'] = {} - h1_pnic2['spec']['ip']['ipAddress'] = '10.11.13.2' - h1_pnic2['spec']['ip']['subnetMask'] = '255.255.255.0' - h1_pnic_list = (h1_pnic1, h1_pnic2) - h1_pnic_obj = vCenter_fakes.MockNicContainer(h1_pnic_list) - h1_pnics = vCenter_fakes.MockProperty(h1_pnic_obj, - 'config.network.pnic') - h1_vnic1 = {} - h1_vnic1['uuid'] = '9912c61d-79e0-4423-bb43-d79926e0d1f3' - h1_vnic1['device'] = 'vmk1' - h1_vnic1['portgroup'] = 'Management' - h1_vnic1['spec'] = {} - h1_vnic1['spec']['mac'] = '3F-0B-DD-8A-F3-BB' - h1_vnic1['spec']['ip'] = {} - h1_vnic1['spec']['ip']['ipAddress'] = '10.11.13.3' - h1_vnic1['spec']['ip']['subnetMask'] = '255.255.255.0' - h1_vnic2 = {} - h1_vnic2['uuid'] = '9912c61d-79e0-4423-bb43-d79926e0d1f4' - h1_vnic2['device'] = 'vmk2' - h1_vnic2['portgroup'] = 'Public' - h1_vnic2['spec'] = {} - h1_vnic2['spec']['mac'] = '3F-0B-DD-8A-F3-BC' - h1_vnic2['spec']['ip'] = {} - h1_vnic2['spec']['ip']['ipAddress'] = '10.11.13.4' - h1_vnic2['spec']['ip']['subnetMask'] = '255.255.255.0' - h1_vnic_list = [h1_vnic1, h1_vnic2] - h1_vnic_obj = vCenter_fakes.MockNicContainer(h1_vnic_list) - h1_vnics = vCenter_fakes.MockProperty(h1_vnic_obj, - 'config.network.vnic') - mock_host1['propSet'] = [h1_uuid, h1_name, h1_DNS, h1_pnics, h1_vnics] - h2_obj = {} - h2_obj['value'] = 'Host2' - mock_host2 = vCenter_fakes.MockvCenterHost(h2_obj) - h2_uuid = vCenter_fakes.MockProperty( - '9912c61d-79e0-4423-bb43-d79926e0d1f5', - 'hardware.systemInfo.uuid') - 
h2_name = vCenter_fakes.MockProperty('Host2', 'name') - h2_DNS_obj = vCenter_fakes.MockDNSInfo(['10.11.12.1', '10.11.12.2']) - h2_DNS = vCenter_fakes.MockProperty(h2_DNS_obj, - 'config.network.dnsConfig.address') - h2_pnic1 = {} - h2_pnic1['uuid'] = '9912c61d-79e0-4423-bb43-d79926e0d1f6' - h2_pnic1['mac'] = '3F-0B-DD-8A-F3-BD' - h2_pnic1['device'] = 'vmnic1' - h2_pnic1['spec'] = {} - h2_pnic1['spec']['ip'] = {} - h2_pnic1['spec']['ip']['ipAddress'] = '10.11.14.1' - h2_pnic1['spec']['ip']['subnetMask'] = '255.255.255.0' - h2_pnic2 = {} - h2_pnic2['uuid'] = '9912c61d-79e0-4423-bb43-d79926e0d1f7' - h2_pnic2['mac'] = '3F-0B-DD-8A-F3-BE' - h2_pnic2['device'] = 'vmnic2' - h2_pnic2['spec'] = {} - h2_pnic2['spec']['ip'] = {} - h2_pnic2['spec']['ip']['ipAddress'] = '10.11.14.2' - h2_pnic2['spec']['ip']['subnetMask'] = '255.255.255.0' - h2_pnic_list = (h2_pnic1, h2_pnic2) - h2_pnic_obj = vCenter_fakes.MockNicContainer(h2_pnic_list) - h2_pnics = vCenter_fakes.MockProperty(h2_pnic_obj, - 'config.network.pnic') - h2_vnic1 = {} - h2_vnic1['uuid'] = '9912c61d-79e0-4423-bb43-d79926e0d1f8' - h2_vnic1['device'] = 'vmk1' - h2_vnic1['portgroup'] = 'Management' - h2_vnic1['spec'] = {} - h2_vnic1['spec']['mac'] = '3F-0B-DD-8A-F3-BF' - h2_vnic1['spec']['ip'] = {} - h2_vnic1['spec']['ip']['ipAddress'] = '10.11.14.3' - h2_vnic1['spec']['ip']['subnetMask'] = '255.255.255.0' - h2_vnic2 = {} - h2_vnic2['uuid'] = '9912c61d-79e0-4423-bb43-d79926e0d1f9' - h2_vnic2['device'] = 'vmk2' - h2_vnic2['portgroup'] = 'Public' - h2_vnic2['spec'] = {} - h2_vnic2['spec']['mac'] = '3F-0B-DD-8A-F3-C0' - h2_vnic2['spec']['ip'] = {} - h2_vnic2['spec']['ip']['ipAddress'] = '10.11.14.4' - h2_vnic2['spec']['ip']['subnetMask'] = '255.255.255.0' - h2_vnic_list = [h2_vnic1, h2_vnic2] - h2_vnic_obj = vCenter_fakes.MockNicContainer(h2_vnic_list) - h2_vnics = vCenter_fakes.MockProperty(h2_vnic_obj, - 'config.network.vnic') - mock_host2['propSet'] = [h2_uuid, h2_name, h2_DNS, h2_pnics, h2_vnics] - mock_hostlist = [mock_host1, mock_host2] - self.mock_rawhosts['objects'] = mock_hostlist - self.mock_rawvms = {} - mock_vm1 = {} - mock_vm1['value'] = 'VM1' - vm1_name = vCenter_fakes.MockProperty('VM1', 'name') - vm1_uuid = vCenter_fakes.MockProperty( - '9912c61d-79e0-4423-bb43-d79926e0d200', - 'config.uuid') - vm1_annotation = vCenter_fakes.MockProperty('First VM', - 'config.annotation') - vm1_path = vCenter_fakes.MockProperty('[Datastore] VM1/VM1.vmtx', - 'summary.config.vmPathName') - vm1_memSize = vCenter_fakes.MockProperty(4096, - 'summary.config.memorySizeMB') - vm1_status = vCenter_fakes.MockProperty('green', - 'summary.overallStatus') - host1_referance = {} - host1_referance['value'] = 'Host1' - vm1_host = vCenter_fakes.MockProperty(host1_referance, 'runtime.host') - vm1_quickstats = {} - vm1_quickstats['guestMemoryUsage'] = 245 - vm1_quickstats['overallCpuDemand'] = 216 - vm1_quickstats['overallCpuUsage'] = 192 - vm1_quickstats_property = vCenter_fakes.MockProperty( - vm1_quickstats, 'summary.quickStats') - vm1_storage = {} - vm1_storage['committed'] = 25294964803 - vm1_storage['uncommitted'] = 32812040762 - vm1_storage_property = vCenter_fakes.MockProperty(vm1_storage, - 'summary.storage') - mock_vm1['propSet'] = [vm1_name, vm1_uuid, vm1_annotation, vm1_path, - vm1_memSize, vm1_status, vm1_host, - vm1_quickstats_property, vm1_storage_property] - mock_vm2 = {} - mock_vm2['value'] = 'VM2' - vm2_name = vCenter_fakes.MockProperty('VM2', 'name') - vm2_uuid = vCenter_fakes.MockProperty( - '9912c61d-79e0-4423-bb43-d79926e0d201', - 'config.uuid') - 
vm2_annotation = vCenter_fakes.MockProperty('Second VM',
-                                                    'config.annotation')
-        vm2_path = vCenter_fakes.MockProperty('[Datastore] VM2/VM2.vmtx',
-                                              'summary.config.vmPathName')
-        vm2_memSize = vCenter_fakes.MockProperty(4096,
-                                                 'summary.config.memorySizeMB')
-        vm2_status = vCenter_fakes.MockProperty('green',
-                                                'summary.overallStatus')
-        host2_referance = {}
-        host2_referance['value'] = 'Host2'
-        vm2_host = vCenter_fakes.MockProperty(host2_referance, 'runtime.host')
-        vm2_quickstats = {}
-        vm2_quickstats['guestMemoryUsage'] = 0
-        vm2_quickstats['overallCpuDemand'] = 0
-        vm2_quickstats['overallCpuUsage'] = 0
-        vm2_quickstats_property = vCenter_fakes.MockProperty(
-            vm2_quickstats,
-            'summary.quickStats')
-        vm2_storage = {}
-        vm2_storage['committed'] = 6271694636
-        vm2_storage['uncommitted'] = 34110177822
-        vm2_storage_property = vCenter_fakes.MockProperty(vm2_storage,
-                                                          'summary.storage')
-        mock_vm2['propSet'] = [vm2_name, vm2_uuid, vm2_annotation, vm2_path,
-                               vm2_memSize, vm2_status, vm2_host,
-                               vm2_quickstats_property, vm2_storage_property]
-        mock_vmlist = [mock_vm1, mock_vm2]
-        self.mock_rawvms['objects'] = mock_vmlist
-
-    def test_translators(self):
-        with mock.patch.object(self.driver, '_get_hosts_from_vcenter',
-                               return_value=self.mock_rawhosts):
-            hosts, pnics, vnics = self.driver._get_hosts_and_nics()
-            self.driver._translate_hosts(hosts)
-            self.driver._translate_pnics(pnics)
-            self.driver._translate_vnics(vnics)
-            expected_hosts = set([('Host1',
-                                   '9912c61d-79e0-4423-bb43-d79926e0d1f0',
-                                   '895f69d340dac8cd4c9550e745703c77'),
-                                  ('Host2',
-                                   '9912c61d-79e0-4423-bb43-d79926e0d1f5',
-                                   '895f69d340dac8cd4c9550e745703c77')])
-            self.assertEqual(expected_hosts, self.driver.state['hosts'])
-            expected_DNS = set([('895f69d340dac8cd4c9550e745703c77',
-                                 '10.11.12.1'),
-                                ('895f69d340dac8cd4c9550e745703c77',
-                                 '10.11.12.2')])
-            self.assertEqual(expected_DNS, self.driver.state['host.DNS_IPs'])
-            expected_pnics = set([('9912c61d-79e0-4423-bb43-d79926e0d1f0',
-                                   'vmnic1',
-                                   '3F-0B-DD-8A-F3-B9',
-                                   '10.11.13.1',
-                                   '255.255.255.0'),
-                                  ('9912c61d-79e0-4423-bb43-d79926e0d1f0',
-                                   'vmnic2',
-                                   '3F-0B-DD-8A-F3-BA',
-                                   '10.11.13.2',
-                                   '255.255.255.0'),
-                                  ('9912c61d-79e0-4423-bb43-d79926e0d1f5',
-                                   'vmnic1',
-                                   '3F-0B-DD-8A-F3-BD',
-                                   '10.11.14.1',
-                                   '255.255.255.0'),
-                                  ('9912c61d-79e0-4423-bb43-d79926e0d1f5',
-                                   'vmnic2',
-                                   '3F-0B-DD-8A-F3-BE',
-                                   '10.11.14.2',
-                                   '255.255.255.0')])
-            self.assertEqual(expected_pnics, self.driver.state['host.PNICs'])
-            expected_vnics = set([('9912c61d-79e0-4423-bb43-d79926e0d1f0',
-                                   'vmk1',
-                                   '3F-0B-DD-8A-F3-BB',
-                                   'Management',
-                                   '10.11.13.3',
-                                   '255.255.255.0'),
-                                  ('9912c61d-79e0-4423-bb43-d79926e0d1f0',
-                                   'vmk2',
-                                   '3F-0B-DD-8A-F3-BC',
-                                   'Public',
-                                   '10.11.13.4',
-                                   '255.255.255.0'),
-                                  ('9912c61d-79e0-4423-bb43-d79926e0d1f5',
-                                   'vmk1',
-                                   '3F-0B-DD-8A-F3-BF',
-                                   'Management',
-                                   '10.11.14.3',
-                                   '255.255.255.0'),
-                                  ('9912c61d-79e0-4423-bb43-d79926e0d1f5',
-                                   'vmk2',
-                                   '3F-0B-DD-8A-F3-C0',
-                                   'Public',
-                                   '10.11.14.4',
-                                   '255.255.255.0')])
-            self.assertEqual(expected_vnics, self.driver.state['host.VNICs'])
-        with mock.patch.object(self.driver, '_get_vms_from_vcenter',
-                               return_value=self.mock_rawvms):
-            vms = self.driver._get_vms()
-            self.driver._translate_vms(vms)
-            expected_vms = set([('VM1',
-                                 '9912c61d-79e0-4423-bb43-d79926e0d200',
-                                 '9912c61d-79e0-4423-bb43-d79926e0d1f0',
-                                 '[Datastore] VM1/VM1.vmtx',
-                                 'green',
-                                 216,
-                                 192,
-                                 4096,
-                                 245,
-                                 25294964803,
-                                 32812040762,
-                                 'First VM'),
-                                ('VM2',
-                                 '9912c61d-79e0-4423-bb43-d79926e0d201',
-                                 '9912c61d-79e0-4423-bb43-d79926e0d1f5',
-
'[Datastore] VM2/VM2.vmtx', - 'green', - 0, - 0, - 4096, - 0, - 6271694636, - 34110177822, - 'Second VM')]) - self.assertEqual(expected_vms, self.driver.state['vms']) - - def test_execute(self): - class vCenterClient(object): - def __init__(self): - self.testkey = None - - def connectNetwork(self, arg1): - self.testkey = 'arg1=%s' % arg1 - - vcenter_client = vCenterClient() - self.driver.session = vcenter_client - api_args = { - 'positional': ['1'] - } - expected_ans = 'arg1=1' - - self.driver.execute('connectNetwork', api_args) - - self.assertEqual(expected_ans, vcenter_client.testkey) diff --git a/congress/tests/datasources/util.py b/congress/tests/datasources/util.py deleted file mode 100644 index dda7e404..00000000 --- a/congress/tests/datasources/util.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - - -class ResponseObj(object): - """Allows callers to use dot notation to access a dictionary.""" - def __init__(self, values): - self.values = values - - def __getattr__(self, name): - return self.values.get(name) - - def to_dict(self): - return self.values diff --git a/congress/tests/datasources/vCenter_fakes.py b/congress/tests/datasources/vCenter_fakes.py deleted file mode 100644 index 6e813d35..00000000 --- a/congress/tests/datasources/vCenter_fakes.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) 2014 Marist SDN Innovation lab Joint with Plexxi Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - - -class MockProperty(object): - def __init__(self, val, name): - self.val = val - self.name = name - - -class MockDNSInfo(object): - def __init__(self, string): - self.string = string - - -class MockvCenterHost(dict): - def __init__(self, obj): - self.obj = obj - - -class MockNicContainer(object): - def __init__(self, nicList): - self.PhysicalNic = nicList - self.HostVirtualNic = nicList diff --git a/congress/tests/db/__init__.py b/congress/tests/db/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/tests/db/test_datasources.py b/congress/tests/db/test_datasources.py deleted file mode 100644 index 32d02418..00000000 --- a/congress/tests/db/test_datasources.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_utils import uuidutils - -from congress.db import datasources -from congress.db import db_ds_table_data -from congress.tests import base - - -class TestDbDatasource(base.SqlTestCase): - - def test_add_datasource(self): - id_ = uuidutils.generate_uuid() - source = datasources.add_datasource( - id_=id_, - name="hiya", - driver="foo", - config='{user: foo}', - description="hello", - enabled=True) - self.assertEqual(id_, source.id) - self.assertEqual("hiya", source.name) - self.assertEqual("foo", source.driver) - self.assertEqual("hello", source.description) - self.assertEqual('"{user: foo}"', source.config) - self.assertTrue(source.enabled) - - def test_delete_datasource(self): - id_ = uuidutils.generate_uuid() - datasources.add_datasource( - id_=id_, - name="hiya", - driver="foo", - config='{user: foo}', - description="hello", - enabled=True) - self.assertTrue(datasources.delete_datasource(id_)) - self.assertIsNone(datasources.get_datasource(id_)) - - def test_delete_non_existing_datasource(self): - self.assertFalse(datasources.delete_datasource('no_id')) - - def test_delete_datasource_with_data(self): - id_ = uuidutils.generate_uuid() - datasources.add_datasource( - id_=id_, - name="hiya", - driver="foo", - config='{user: foo}', - description="hello", - enabled=True) - db_ds_table_data.store_ds_table_data( - ds_id=id_, - tablename='bar', - tabledata=set([('a1', 'b1'), ('a2', 'b2'), ('a3', 'a4')]) - ) - self.assertTrue(datasources.delete_datasource_with_data(id_)) - self.assertEqual(db_ds_table_data.get_ds_table_data(id_), []) - - def test_get_datasource_by_name(self): - id_ = uuidutils.generate_uuid() - datasources.add_datasource( - id_=id_, - name="hiya", - driver="foo", - config='{user: foo}', - description="hello", - enabled=True) - source = datasources.get_datasource_by_name('hiya') - self.assertEqual(id_, source.id) - self.assertEqual("hiya", source.name) - self.assertEqual("foo", source.driver) - self.assertEqual("hello", source.description) 
- self.assertEqual('"{user: foo}"', source.config) - self.assertTrue(source.enabled) - - def test_get_datasource_by_id(self): - id_ = uuidutils.generate_uuid() - datasources.add_datasource( - id_=id_, - name="hiya", - driver="foo", - config='{user: foo}', - description="hello", - enabled=True) - source = datasources.get_datasource(id_) - self.assertEqual(id_, source.id) - self.assertEqual("hiya", source.name) - self.assertEqual("foo", source.driver) - self.assertEqual("hello", source.description) - self.assertEqual('"{user: foo}"', source.config) - self.assertTrue(source.enabled) - - def test_get_datasource(self): - id_ = uuidutils.generate_uuid() - datasources.add_datasource( - id_=id_, - name="hiya", - driver="foo", - config='{user: foo}', - description="hello", - enabled=True) - sources = datasources.get_datasources() - self.assertEqual(id_, sources[0].id) - self.assertEqual("hiya", sources[0].name) - self.assertEqual("foo", sources[0].driver) - self.assertEqual("hello", sources[0].description) - self.assertEqual('"{user: foo}"', sources[0].config) - self.assertTrue(sources[0].enabled) diff --git a/congress/tests/db/test_db_ds_table_data.py b/congress/tests/db/test_db_ds_table_data.py deleted file mode 100644 index f836c88a..00000000 --- a/congress/tests/db/test_db_ds_table_data.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) 2016 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_utils import uuidutils - -from congress.db import db_ds_table_data -from congress.tests import base - - -class TestDbDsTableData(base.SqlTestCase): - - def test_store_ds_table_data(self): - ds_id = uuidutils.generate_uuid() - - # store new data - source = db_ds_table_data.store_ds_table_data( - ds_id=ds_id, - tablename='table1', - tabledata=set([('a', 0)])) - self.assertEqual(ds_id, source.ds_id) - self.assertEqual('table1', source.tablename) - self.assertEqual('[["a", 0]]', source.tabledata) - - # update existing data - db_ds_table_data.store_ds_table_data( - ds_id=ds_id, - tablename='table1', - tabledata=set([('a', 0), ('new', 1)])) - data = db_ds_table_data.get_ds_table_data(ds_id, 'table1') - self.assertEqual(set([('a', 0), ('new', 1)]), data) - - def test_delete_ds_table_data(self): - ds_id = uuidutils.generate_uuid() - db_ds_table_data.store_ds_table_data( - ds_id=ds_id, - tablename='table1', - tabledata=set([('a', 0), ('b', 1)])) - db_ds_table_data.store_ds_table_data( - ds_id=ds_id, - tablename='table2', - tabledata=set([('a', 0), ('b', 2)])) - self.assertTrue(db_ds_table_data.delete_ds_table_data(ds_id, 'table1')) - self.assertIsNone(db_ds_table_data.get_ds_table_data(ds_id, 'table1')) - self.assertEqual(set([('a', 0), ('b', 2)]), - db_ds_table_data.get_ds_table_data(ds_id, 'table2')) - - def test_delete_ds_table_data_by_ds(self): - ds_id = uuidutils.generate_uuid() - ds2_id = uuidutils.generate_uuid() - db_ds_table_data.store_ds_table_data( - ds_id=ds_id, - tablename='table1', - tabledata=set([('a', 0), ('b', 1)])) - db_ds_table_data.store_ds_table_data( - ds_id=ds_id, - tablename='table2', - tabledata=set([('a', 0), ('b', 2)])) - db_ds_table_data.store_ds_table_data( - ds_id=ds2_id, - tablename='table3', - tabledata=set([('a', 0), ('b', 3)])) - self.assertTrue(db_ds_table_data.delete_ds_table_data(ds_id)) - self.assertIsNone(db_ds_table_data.get_ds_table_data(ds_id, 'table1')) - self.assertIsNone(db_ds_table_data.get_ds_table_data(ds_id, 'table2')) - self.assertEqual(set([('a', 0), ('b', 3)]), - db_ds_table_data.get_ds_table_data(ds2_id, 'table3')) - - def test_delete_non_existing_ds_table_data(self): - self.assertFalse(db_ds_table_data.delete_ds_table_data('none', 'none')) - - def test_get_ds_table_data(self): - ds_id = uuidutils.generate_uuid() - ds2_id = uuidutils.generate_uuid() - db_ds_table_data.store_ds_table_data( - ds_id=ds_id, - tablename='table1', - tabledata=set([('a', 0), ('b', 1)])) - db_ds_table_data.store_ds_table_data( - ds_id=ds_id, - tablename='table2', - tabledata=set([('a', 0), ('b', 2)])) - db_ds_table_data.store_ds_table_data( - ds_id=ds2_id, - tablename='table3', - tabledata=set([('a', 0), ('b', 3)])) - data = db_ds_table_data.get_ds_table_data(ds_id, 'table1') - self.assertEqual(set([('a', 0), ('b', 1)]), data) - - data = db_ds_table_data.get_ds_table_data(ds_id) - self.assertEqual(2, len(data)) - self.assertIn( - {'tablename': 'table1', - 'tabledata': set([('a', 0), ('b', 1)])}, - data) - self.assertIn( - {'tablename': 'table2', - 'tabledata': set([('a', 0), ('b', 2)])}, - data) diff --git a/congress/tests/db/test_db_library_policies.py b/congress/tests/db/test_db_library_policies.py deleted file mode 100644 index 571d90da..00000000 --- a/congress/tests/db/test_db_library_policies.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright 2017 VMware Inc. All rights reserved. -# All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from congress.db import db_library_policies -from congress.tests import base - - -class TestDbLibraryPolicies(base.SqlTestCase): - - def setUp(self): - super(TestDbLibraryPolicies, self).setUp() - db_library_policies.delete_policies() # delete preloaded policies - - def test_add_policy_no_name(self): - self.assertRaises( - KeyError, db_library_policies.add_policy, {'rules': []}) - - def test_add_policy_no_rules(self): - self.assertRaises(KeyError, db_library_policies.add_policy, - {'name': 'policy1'}) - - def test_add_policy(self): - res = db_library_policies.add_policy( - {'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - self.assertEqual(res.to_dict(include_rules=True), - {'id': res['id'], - 'abbreviation': 'abbr', - 'kind': 'database', - 'name': 'policy1', - 'description': 'descrip', - 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - def test_add_policy_duplicate(self): - db_library_policies.add_policy( - {'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': []}) - self.assertRaises( - KeyError, db_library_policies.add_policy, - {'name': 'policy1', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - def test_get_policy_empty(self): - res = db_library_policies.get_policies() - self.assertEqual(res, []) - - self.assertRaises(KeyError, db_library_policies.get_policy, - 'nosuchpolicy') - - def test_create_get_policy(self): - policy1 = db_library_policies.add_policy( - {'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - res = db_library_policies.get_policies() - self.assertEqual([p.to_dict(include_rules=True) for p in res], - [{'id': policy1['id'], - 'abbreviation': 'abbr', - 'kind': 'database', - 'name': 'policy1', - 'description': 'descrip', - 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}]) - - res = db_library_policies.get_policy(policy1['id']) - self.assertEqual(res.to_dict(include_rules=True), - {'id': policy1['id'], - 'abbreviation': 'abbr', - 'kind': 'database', - 'name': 'policy1', - 'description': 'descrip', - 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - self.assertRaises(KeyError, db_library_policies.get_policy, - 'no_such_policy') - - def test_delete_policy(self): - db_library_policies.delete_policy('policy1') - - policy1 = db_library_policies.add_policy( - {'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test
comment', - 'name': 'testname'}]}) - - policy2 = db_library_policies.add_policy( - {'name': 'policy2', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - res = db_library_policies.get_policies() - self.assertEqual(len(res), 2) - - db_library_policies.delete_policy('no_such_policy') - - res = db_library_policies.get_policies() - self.assertEqual(len(res), 2) - - db_library_policies.delete_policy(policy1['id']) - - res = db_library_policies.get_policies() - self.assertEqual(len(res), 1) - - db_library_policies.delete_policy(policy2['id']) - - res = db_library_policies.get_policies() - self.assertEqual(len(res), 0) - - def test_delete_policies(self): - db_library_policies.delete_policies() - res = db_library_policies.get_policies() - self.assertEqual(len(res), 0) - - db_library_policies.add_policy( - {'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - db_library_policies.add_policy( - {'name': 'policy2', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - db_library_policies.delete_policies() - res = db_library_policies.get_policies() - self.assertEqual(len(res), 0) diff --git a/congress/tests/db/test_db_policy_rules.py b/congress/tests/db/test_db_policy_rules.py deleted file mode 100644 index 0e8adc34..00000000 --- a/congress/tests/db/test_db_policy_rules.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_utils import uuidutils - -from congress.db import db_policy_rules -from congress.tests import base - - -class TestPolicyRulesDb(base.SqlTestCase): - - def test_add_policy_rule(self): - id = uuidutils.generate_uuid() - rule_str = "p(x) :- q(x)" - policy_name = "classification" - comment = "None" - rule = db_policy_rules.add_policy_rule(id=id, - policy_name=policy_name, - rule=rule_str, - comment=comment) - self.assertEqual(id, rule.id) - self.assertEqual(policy_name, rule.policy_name) - self.assertEqual(rule_str, rule.rule) - self.assertEqual(comment, rule.comment) - - def test_add_policy_rule_with_name(self): - id = uuidutils.generate_uuid() - rule_str = "p(x) :- q(x)" - policy_name = "classification" - comment = "None" - rule_name = "classification_rule" - rule = db_policy_rules.add_policy_rule(id=id, - policy_name=policy_name, - rule=rule_str, - comment=comment, - rule_name=rule_name) - self.assertEqual(id, rule.id) - self.assertEqual(policy_name, rule.policy_name) - self.assertEqual(rule_str, rule.rule) - self.assertEqual(comment, rule.comment) - self.assertEqual(rule_name, rule.name) - - def test_add_get_policy_rule(self): - id = uuidutils.generate_uuid() - rule_str = "p(x) :- q(x)" - policy_name = "classification" - comment = "None" - db_policy_rules.add_policy_rule(id=id, - policy_name=policy_name, - rule=rule_str, - comment=comment) - rule = db_policy_rules.get_policy_rule(id, policy_name) - self.assertEqual(id, rule.id) - self.assertEqual(policy_name, rule.policy_name) - self.assertEqual(rule_str, rule.rule) - self.assertEqual(comment, rule.comment) - - def test_add_delete_get_policy_rule(self): - id = uuidutils.generate_uuid() - rule_str = "p(x) :- q(x)" - policy_name = "classification" - comment = "None" - db_policy_rules.add_policy_rule(id=id, - policy_name=policy_name, - rule=rule_str, - comment=comment) - db_policy_rules.delete_policy_rule(id) - rule = db_policy_rules.get_policy_rule(id, policy_name) - self.assertIsNone(rule) - - def test_add_delete_get_deleted_policy_rule(self): - id = uuidutils.generate_uuid() - rule_str = "p(x) :- q(x)" - policy_name = "classification" - comment = "None" - rule1 = db_policy_rules.add_policy_rule(id=id, - policy_name=policy_name, - rule=rule_str, - comment=comment) - db_policy_rules.delete_policy_rule(id) - rule2 = db_policy_rules.get_policy_rule(id, policy_name, deleted=True) - self.assertEqual(rule1.id, rule2.id) - self.assertNotEqual(rule1.deleted, rule2.deleted) - - def test_add_two_rules_and_get(self): - id1 = uuidutils.generate_uuid() - rule1_str = "p(x) :- q(x)" - id2 = uuidutils.generate_uuid() - rule2_str = "z(x) :- q(x)" - policy_name = "classification" - comment = "None" - db_policy_rules.add_policy_rule(id=id1, - policy_name=policy_name, - rule=rule1_str, - comment=comment) - - db_policy_rules.add_policy_rule(id=id2, - policy_name=policy_name, - rule=rule2_str, - comment=comment) - - rules = db_policy_rules.get_policy_rules(policy_name) - self.assertEqual(len(rules), 2) - self.assertEqual(id1, rules[0].id) - self.assertEqual(policy_name, rules[0].policy_name) - self.assertEqual(rule1_str, rules[0].rule) - self.assertEqual(comment, rules[0].comment) - self.assertEqual(id2, rules[1].id) - self.assertEqual(policy_name, rules[1].policy_name) - self.assertEqual(rule2_str, rules[1].rule) - self.assertEqual(comment, rules[1].comment) - self.assertEqual(len(db_policy_rules.get_policy_rules()), 2) - - def 
test_is_soft_deleted_not_deleted(self): - uuid = uuidutils.generate_uuid() - self.assertEqual('', db_policy_rules.is_soft_deleted(uuid, False)) - - def test_is_soft_deleted_is_deleted(self): - uuid = uuidutils.generate_uuid() - self.assertEqual(uuid, db_policy_rules.is_soft_deleted(uuid, True)) diff --git a/congress/tests/dse2/__init__.py b/congress/tests/dse2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/tests/dse2/test_data_service.py b/congress/tests/dse2/test_data_service.py deleted file mode 100644 index 12a1ce4c..00000000 --- a/congress/tests/dse2/test_data_service.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) 2016 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import json -import mock -import time - -from congress.dse2 import data_service -from congress.tests import base - - -class TestDataServiceInfo(base.TestCase): - TESTDATA = {'service_id': 'test-service-id', 'node_id': 'test-node-id', - 'published_tables': ['t1', 't2'], - 'subscribed_tables': ['s1', 's2', 's3'], - 'rpc_endpoints_info': ['call1', 'call2']} - - def test_from_json(self): - s = data_service.DataServiceInfo.from_json(json.dumps(self.TESTDATA)) - for a in data_service.DataServiceInfo.MARSHALL_ATTRS: - self.assertEqual(getattr(s, a), self.TESTDATA[a], - "Attr '%s' set properly in from_json" % a) - self.assertRaises(KeyError, data_service.DataServiceInfo.from_json, - '{"bad_attr": 123}') - - def test_to_json(self): - s = data_service.DataServiceInfo(**self.TESTDATA) - self.assertEqual(json.loads(s.to_json()), self.TESTDATA, - 'JSON representation matches constructed data') - s.last_hb_time = time.time() - self.assertEqual(json.loads(s.to_json()), self.TESTDATA, - 'JSON representation ignores last_hb_time') - - def test_from_dict(self): - s = data_service.DataServiceInfo.from_dict(self.TESTDATA) - for a in data_service.DataServiceInfo.MARSHALL_ATTRS: - self.assertEqual(getattr(s, a), self.TESTDATA[a], - "Attr '%s' set properly in from_dict" % a) - self.assertRaises(KeyError, data_service.DataServiceInfo.from_dict, - {'bad_attr': 123}) - - def test_to_dict(self): - s = data_service.DataServiceInfo(**self.TESTDATA) - self.assertEqual(s.to_dict(), self.TESTDATA, - 'dict representation matches constructed data') - s.last_hb_time = time.time() - self.assertEqual(s.to_dict(), self.TESTDATA, - 'dict representation ignores last_hb_time') - - -class TestDataService(base.TestCase): - - def test_info(self): - ds = data_service.DataService("svc1") - node = mock.MagicMock() - node.node_id = 'testnode' - ds.node = node - info = ds.info - self.assertEqual(info.service_id, 'svc1') - self.assertEqual(info.node_id, 'testnode') - self.assertEqual(info.published_tables, []) - self.assertEqual(info.subscribed_tables, []) - self.assertEqual(info.rpc_endpoints_info, []) - - def test_start_stop(self): - ds = data_service.DataService("svc1") - ds.node = mock.MagicMock() - ds._rpc_server = mock.MagicMock() - self.assertFalse(ds._running, - "Newly created service is
marked as not running") - ds.start() - self.assertTrue(ds._running, "Started service is marked as running") - ds.stop() - self.assertFalse(ds._running, - "Stopped service is marked as not running") - - def test_service_info(self): - ds = data_service.DataService("svc1") - ds.node = mock.MagicMock() - ds.node.node_id = 'node-id' - ds._published_tables_with_subscriber = set(['table1']) - - expected_result = { - 'service_id': 'svc1', - 'node_id': 'node-id', - 'published_tables': [], - 'subscribed_tables': set(['table1']), - 'rpc_endpoints_info': [] - } - - self.assertEqual(expected_result, ds.info.to_dict()) - - -# TODO(pballand): replace with congress unit test framework when convenient -if __name__ == '__main__': - import unittest - unittest.main(verbosity=2) diff --git a/congress/tests/dse2/test_datasource.py b/congress/tests/dse2/test_datasource.py deleted file mode 100644 index 6da7d55a..00000000 --- a/congress/tests/dse2/test_datasource.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock -from oslo_config import cfg -from oslo_db import exception as db_exc - -from congress.db import datasources as datasource_db -from congress.dse2 import dse_node -from congress import exception as congressException -from congress.tests.api import base as api_base -from congress.tests import base -from congress.tests import fake_datasource - - -class TestDataSource(base.SqlTestCase): - - def setUp(self): - super(TestDataSource, self).setUp() - config = api_base.setup_config(with_fake_datasource=False, api=False, - policy=False) - self.dseNode = config['node'] - self.ds_manager = config['ds_manager'] - - def _get_datasource_request(self): - # leave ID out--generated during creation - return {'name': 'aaron', - 'driver': 'fake_datasource', - 'description': 'hello world!', - 'enabled': True, - 'type': None, - 'config': {'auth_url': 'foo', - 'username': 'armax', - 'password': 'password', - 'tenant_name': 'armax'}} - - def test_add_datasource(self): - req = self._get_datasource_request() - result = self.ds_manager.add_datasource(req) - # test equality of return value except for 'id' field - del(result['id']) - self.assertEqual(req, result) - # check that service actually on dseNode - services = self.dseNode.get_services() - self.assertEqual(len(services), 1) - self.assertEqual(services[0].service_id, req['name']) - self.assertIsInstance(services[0], - fake_datasource.FakeDataSource) - obj = self.dseNode.invoke_service_rpc( - req['name'], 'get_status', {'source_id': None, 'params': None}) - self.assertIsNotNone(obj) - - @mock.patch.object(datasource_db, 'add_datasource') - def test_add_datasource_db_error(self, add_ds): - add_ds.side_effect = db_exc.DBError('Error in db.') - - req = self._get_datasource_request() - self.assertRaises(congressException.DatasourceCreationError, - self.ds_manager.add_datasource, 
req) - - @mock.patch.object(dse_node.DseNode, 'register_service') - def test_add_datasource_synchronizer_error(self, register_ds): - register_ds.side_effect = Exception('Error in registering service') - - req = self._get_datasource_request() - self.assertRaises(congressException.DatasourceCreationError, - self.ds_manager.add_datasource, req) - ds = datasource_db.get_datasource_by_name(req['name']) - self.assertIsNone(ds) - - def test_get_datasource(self): - req = self._get_datasource_request() - ds = self.ds_manager.add_datasource(req) - result = self.dseNode.get_datasource(ds['id']) - # test equality except for 'id' field - del(result['id']) - self.assertEqual(req, result) - - def test_get_datasources(self): - req = self._get_datasource_request() - self.ds_manager.add_datasource(req) - result = self.dseNode.get_datasources() - self.assertEqual(len(result), 1) - result = result[0] - # test equality except for 'id' field - del(result['id']) - self.assertEqual(req, result) - - def test_get_datasources2(self): - req1 = self._get_datasource_request() - req1['name'] = 'datasource1' - result1 = self.ds_manager.add_datasource(req1) - req2 = self._get_datasource_request() - req2['name'] = 'datasource2' - result2 = self.ds_manager.add_datasource(req2) - # check results of add_datasource - for key, value in req1.items(): - self.assertEqual(value, result1[key]) - for key, value in req2.items(): - self.assertEqual(value, result2[key]) - # check services actually on dseNode - services = self.dseNode.get_services() - self.assertEqual(len(services), 2) - self.assertEqual(set([s.service_id for s in services]), - set(['datasource1', 'datasource2'])) - self.assertIsInstance(services[0], - fake_datasource.FakeDataSource) - self.assertIsInstance(services[1], - fake_datasource.FakeDataSource) - # check results of get_datasources - resultall = self.dseNode.get_datasources() - self.assertEqual(len(resultall), 2) - # check equality except for 'id' field - byname = {x['name']: x for x in resultall} - for x in byname.values(): - del(x['id']) - self.assertEqual(byname, {'datasource1': req1, 'datasource2': req2}) - - def test_get_datasources_hide_secret(self): - req = self._get_datasource_request() - self.ds_manager.add_datasource(req) - result = self.dseNode.get_datasources(filter_secret=True) - result = result[0] - # check equality except that 'config'/'password' is hidden - req['config']['password'] = "" - del(result['id']) - self.assertEqual(result, req) - - def test_create_datasource_duplicate_name(self): - req = self._get_datasource_request() - self.ds_manager.add_datasource(req) - self.assertRaises(congressException.DatasourceNameInUse, - self.ds_manager.add_datasource, req) - - def test_delete_datasource(self): - req = self._get_datasource_request() - result = self.ds_manager.add_datasource(req) - self.ds_manager.delete_datasource(result) - # check that service is actually deleted - services = self.dseNode.get_services() - self.assertEqual(len(services), 0) - self.assertRaises( - congressException.NotFound, self.dseNode.invoke_service_rpc, - req['name'], 'get_status', {'source_id': None, 'params': None}) - # TODO(thinrichs): test that we've actually removed - # the row from the DB - - # TODO(dse2): this test relies on coordination between dseNode and - # policy engine. Much harder in distributed system. Need to decide - # if we want that kind of invariant and if so implement it. 
- # def test_delete_datasource_error(self): - # req = self._get_datasource_request() - # req['driver'] = 'fake_datasource' - # req['config'] = {'auth_url': 'foo', - # 'username': 'armax', - # 'password': 'password', - # 'tenant_name': 'armax'} - # # let driver generate this for us. - # del req['id'] - # result = self.datasource_mgr.add_datasource(req) - # engine = self.dseNode.service_object('engine') - # engine.create_policy('alice') - # engine.insert('p(x) :- %s:q(x)' % req['name'], 'alice') - # self.assertRaises(exception.DanglingReference, - # self.datasource_mgr.delete_datasource, - # result['id']) - - def test_delete_invalid_datasource(self): - req = self._get_datasource_request() - req['id'] = 'fake-id' - self.assertRaises(congressException.DatasourceNotFound, - self.ds_manager.delete_datasource, req) - - # TODO(dse2): Doesn't seem like we need this (or it will be moved to API). - # def test_get_driver_schema(self): - # schema = self.datasource_mgr.get_driver_schema( - # 'fake_datasource') - # self.assertEqual( - # schema, - # fake_datasource.FakeDataSource.get_schema()) - - def test_duplicate_driver_name_raises(self): - # Load the driver twice - cfg.CONF.set_override( - 'drivers', - ['congress.tests.fake_datasource.FakeDataSource', - 'congress.tests.fake_datasource.FakeDataSource']) - self.assertRaises(congressException.BadConfig, - self.dseNode.load_drivers) diff --git a/congress/tests/dse2/test_dse2.py b/congress/tests/dse2/test_dse2.py deleted file mode 100644 index 60d2e9d6..00000000 --- a/congress/tests/dse2/test_dse2.py +++ /dev/null @@ -1,584 +0,0 @@ -# Copyright (c) 2013 VMware, Styra. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import mock -import tenacity -import time - -# Note(ekcs): this is needed for direct unit test because Dse2Runtime import, -# which takes place before the confFixture is setup, fails w/o it -from novaclient import client as nova_client -from oslo_config import cfg -cfg.CONF.datasource_sync_period = 0 -from oslo_messaging import conffixture - -from congress.api import base as api_base -from congress.datalog import base as datalog_base -from congress.datalog import compile -from congress.datasources import nova_driver -from congress import exception as congressException -from congress.policy_engines import agnostic -from congress.tests.api import base as test_api_base -from congress.tests import base -from congress.tests import fake_datasource -from congress.tests import helper - - -class TestDSE(base.TestCase): - - def setUp(self): - super(TestDSE, self).setUp() - mc_fixture = conffixture.ConfFixture(cfg.CONF) - mc_fixture.conf.transport_url = 'kombu+memory://' - self.messaging_config = mc_fixture.conf - self.messaging_config.rpc_response_timeout = 1 - - def test_intranode_pubsub(self): - node = helper.make_dsenode_new_partition('testnode') - test1 = fake_datasource.FakeDataSource('test1') - test2 = fake_datasource.FakeDataSource('test2') - node.register_service(test1) - node.register_service(test2) - - test1.subscribe('test2', 'p') - helper.retry_check_function_return_value( - lambda: hasattr(test1, 'last_msg'), True) - test2.publish('p', 42) - helper.retry_check_function_return_value( - lambda: test1.last_msg['data'], 42) - self.assertFalse(hasattr(test2, "last_msg")) - node.stop() - - def test_intranode_pubsub2(self): - # same as test_intranode_pubsub but with opposite ordering. - # (Ordering does matter with internode_pubsub). - node = helper.make_dsenode_new_partition('testnode') - test1 = fake_datasource.FakeDataSource('test1') - test2 = fake_datasource.FakeDataSource('test2') - node.register_service(test1) - node.register_service(test2) - - test2.subscribe('test1', 'p') - helper.retry_check_function_return_value( - lambda: hasattr(test2, 'last_msg'), True) - test1.publish('p', 42) - helper.retry_check_function_return_value( - lambda: test2.last_msg['data'], 42) - self.assertFalse(hasattr(test1, "last_msg")) - node.stop() - - def test_intranode_partial_unsub(self): - node = helper.make_dsenode_new_partition('testnode') - test1 = fake_datasource.FakeDataSource('test1') - test2 = fake_datasource.FakeDataSource('test2') - node.register_service(test1) - node.register_service(test2) - - test1.subscribe('test2', 'p') - test1.subscribe('test2', 'q') - test1.unsubscribe('test2', 'q') # unsub from q should not affect p - helper.retry_check_function_return_value( - lambda: hasattr(test1, 'last_msg'), True) - test2.publish('p', 42) - helper.retry_check_function_return_value( - lambda: test1.last_msg['data'], 42) - self.assertFalse(hasattr(test2, "last_msg")) - node.stop() - - def test_sub_before_service_exists(self): - node = helper.make_dsenode_new_partition('testnode') - test1 = fake_datasource.FakeDataSource('test1') - node.register_service(test1) - - test1.subscribe('test2', 'p') - self.assertFalse(hasattr(test1, "last_msg")) - test2 = fake_datasource.FakeDataSource('test2') - node.register_service(test2) - test2.publish('p', 42) - helper.retry_check_function_return_value( - lambda: test1.last_msg['data'], 42) - self.assertFalse(hasattr(test2, "last_msg")) - node.stop() - node.wait() - - def test_internode_pubsub(self): - node1 = helper.make_dsenode_new_partition('testnode1') - test1 = 
fake_datasource.FakeDataSource('test1') - node1.register_service(test1) - node2 = helper.make_dsenode_same_partition(node1, 'testnode2') - test2 = fake_datasource.FakeDataSource('test2') - node2.register_service(test2) - - test1.subscribe('test2', 'p') - helper.retry_check_function_return_value( - lambda: hasattr(test1, 'last_msg'), True) - test2.publish('p', 42) - helper.retry_check_function_return_value( - lambda: test1.last_msg['data'], 42) - self.assertFalse(hasattr(test2, "last_msg")) - node1.stop() - node2.stop() - - def test_internode_partial_unsub(self): - node1 = helper.make_dsenode_new_partition('testnode1') - node2 = helper.make_dsenode_same_partition(node1, 'testnode2') - test1 = fake_datasource.FakeDataSource('test1') - test2 = fake_datasource.FakeDataSource('test2') - node1.register_service(test1) - node2.register_service(test2) - - test1.subscribe('test2', 'p') - test1.subscribe('test2', 'q') - test1.unsubscribe('test2', 'q') # unsub from q should not affect p - helper.retry_check_function_return_value( - lambda: hasattr(test1, 'last_msg'), True) - test2.publish('p', 42) - helper.retry_check_function_return_value( - lambda: test1.last_msg['data'], 42) - self.assertFalse(hasattr(test2, "last_msg")) - node1.stop() - node2.stop() - - def test_multiservice_pubsub(self): - node1 = helper.make_dsenode_new_partition('testnode1') - test1 = fake_datasource.FakeDataSource('test1') - test2 = fake_datasource.FakeDataSource('test2') - node1.register_service(test1) - node1.register_service(test2) - node2 = helper.make_dsenode_same_partition(node1, 'testnode2') - test3 = fake_datasource.FakeDataSource('test3') - node2.register_service(test3) - - test1.subscribe('test3', 'p') - helper.retry_check_function_return_value( - lambda: hasattr(test1, 'last_msg'), True) - test3.publish('p', 42) - helper.retry_check_function_return_value( - lambda: test1.last_msg['data'], 42) - self.assertFalse(hasattr(test2, "last_msg")) - self.assertFalse(hasattr(test3, "last_msg")) - node1.stop() - node2.stop() - - def test_subscribe_snapshot(self): - node = helper.make_dsenode_new_partition('testnode') - test1 = fake_datasource.FakeDataSource('test1') - test2 = fake_datasource.FakeDataSource('test2') - node.register_service(test1) - node.register_service(test2) - - test1.subscribe('test2', 'fake_table') - helper.retry_check_function_return_value( - lambda: hasattr(test1, 'last_msg'), True) - self.assertEqual(test1.last_msg['data'], test2.state['fake_table']) - node.stop() - - @mock.patch.object(nova_client, 'Client', spec_set=True, autospec=True) - def test_datasource_sub(self, nova_mock): - node = helper.make_dsenode_new_partition('testnode') - nova = nova_driver.NovaDriver( - name='nova', args=helper.datasource_openstack_args()) - test = fake_datasource.FakeDataSource('test') - node.register_service(nova) - node.register_service(test) - - nova.subscribe('test', 'p') - helper.retry_check_function_return_value( - lambda: hasattr(nova, 'last_msg'), True) - test.publish('p', 42) - helper.retry_check_function_return_value( - lambda: nova.last_msg['data'], 42) - self.assertFalse(hasattr(test, "last_msg")) - node.stop() - - @mock.patch.object(nova_client, 'Client', spec_set=True, autospec=True) - def test_datasource_unsub(self, nova_mock): - node = helper.make_dsenode_new_partition('testnode') - nova = nova_driver.NovaDriver( - name='nova', args=helper.datasource_openstack_args()) - test = fake_datasource.FakeDataSource('test') - node.register_service(nova) - node.register_service(test) - - nova.subscribe('test', 
'p') - helper.retry_check_function_return_value( - lambda: hasattr(nova, 'last_msg'), True) - test.publish('p', 42) - helper.retry_check_function_return_value( - lambda: nova.last_msg['data'], 42) - self.assertFalse(hasattr(test, "last_msg")) - nova.unsubscribe('test', 'p') - test.publish('p', 43) - # hard to test that the message is never delivered - time.sleep(0.2) - self.assertEqual(nova.last_msg['data'], 42) - node.stop() - - @mock.patch.object(nova_client, 'Client', spec_set=True, autospec=True) - def test_datasource_pub(self, nova_mock): - node = helper.make_dsenode_new_partition('testnode') - nova = nova_driver.NovaDriver( - name='nova', args=helper.datasource_openstack_args()) - test = fake_datasource.FakeDataSource('test') - node.register_service(nova) - node.register_service(test) - - test.subscribe('nova', 'p') - helper.retry_check_function_return_value( - lambda: hasattr(test, 'last_msg'), True) - nova.publish('p', 42) - helper.retry_check_function_return_value( - lambda: test.last_msg['data'], 42) - self.assertFalse(hasattr(nova, "last_msg")) - node.stop() - - def test_auto_resub(self): - config = test_api_base.setup_config(with_fake_datasource=False, - api=False, policy=False) - node = config['node'] - config['ds_manager'].synchronizer.start() - sub = fake_datasource.FakeDataSource('sub') - pub = fake_datasource.FakeDataSource('pub') - node.register_service(sub) - node.register_service(pub) - sub.subscribe('pub', 'p') - - helper.retry_check_function_return_value( - lambda: hasattr(sub, 'last_msg'), True) - helper.retry_check_function_return_value( - lambda: sub.last_msg['data'], set([])) - - sub.receive_data_sequenced( - 'pub', 'p', [[1, 1]], 1, is_snapshot=True) - helper.retry_check_function_return_value( - lambda: sub.last_msg['data'], set([(1, 1)])) - # skipping seqnum 2 - sub.receive_data_sequenced( - 'pub', 'p', [[3, 3]], 3, is_snapshot=True) - # check that out-of-sequence update not applied - self.assertRaises( - tenacity.RetryError, - helper.retry_check_function_return_value, - lambda: sub.last_msg['data'], set([(3, 3)])) - # check that resub takes place, setting data to initial state - helper.retry_check_function_return_value( - lambda: sub.last_msg['data'], set([])) - node.stop() - - def test_datasource_poll(self): - node = helper.make_dsenode_new_partition('testnode') - node.always_snapshot = True # Note(ekcs): this test expects snapshot - pub = fake_datasource.FakeDataSource('pub') - sub = fake_datasource.FakeDataSource('sub') - node.register_service(pub) - node.register_service(sub) - - sub.subscribe('pub', 'fake_table') - pub.state = {'fake_table': set([(1, 2)])} - pub.poll() - helper.retry_check_function_return_value( - lambda: sub.last_msg['data'], set(pub.state['fake_table'])) - self.assertFalse(hasattr(pub, "last_msg")) - node.stop() - - def test_policy_data(self): - """Test policy correctly processes initial data snapshot.""" - node = helper.make_dsenode_new_partition('testnode') - node.always_snapshot = False - data = fake_datasource.FakeDataSource('data') - engine = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - node.register_service(data) - node.register_service(engine) - - engine.create_policy('policy1') - engine.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE) - self.insert_rule(engine, 'p(x) :- data:fake_table(x)', 'policy1') - data.state = {'fake_table': set([(1,), (2,)])} - data.poll() - helper.retry_check_db_equal( - engine, 'p(x)', 'p(1) p(2)', target='policy1') - self.assertFalse(hasattr(engine, "last_msg")) - node.stop() - - def 
test_policy_data_update(self): - """Test policy correctly processes initial data snapshot and update.""" - node = helper.make_dsenode_new_partition('testnode') - node.always_snapshot = False - data = fake_datasource.FakeDataSource('data') - engine = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - node.register_service(data) - node.register_service(engine) - - engine.create_policy('policy1') - engine.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE) - self.insert_rule(engine, 'p(x) :- data:fake_table(x)', 'policy1') - data.state = {'fake_table': set([(1,), (2,)])} - data.poll() - helper.retry_check_db_equal( - engine, 'p(x)', 'p(1) p(2)', target='policy1') - data.state = {'fake_table': set([(1,), (2,), (3,)])} - data.poll() - helper.retry_check_db_equal( - engine, 'p(x)', 'p(1) p(2) p(3)', target='policy1') - self.assertFalse(hasattr(engine, "last_msg")) - node.stop() - - def test_policy_data_late_sub(self): - """Test policy correctly processes data on late subscribe.""" - node = helper.make_dsenode_new_partition('testnode') - node.always_snapshot = False - data = fake_datasource.FakeDataSource('data') - engine = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - node.register_service(data) - node.register_service(engine) - - engine.create_policy('policy1') - engine.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE) - data.state = {'fake_table': set([(1,), (2,)])} - data.poll() - self.insert_rule(engine, 'p(x) :- data:fake_table(x)', 'policy1') - helper.retry_check_db_equal( - engine, 'p(x)', 'p(1) p(2)', target='policy1') - data.state = {'fake_table': set([(1,), (2,), (3,)])} - data.poll() - helper.retry_check_db_equal( - engine, 'p(x)', 'p(1) p(2) p(3)', target='policy1') - self.assertFalse(hasattr(engine, "last_msg")) - node.stop() - - def insert_rule(self, engine, statement, target=None): - statement = compile.parse1(statement) - if target is None: - e = compile.Event(statement) - else: - e = compile.Event(statement, target=target) - engine.process_policy_update([e]) - - def test_unregister(self): - node = helper.make_dsenode_new_partition('testnode') - test1 = fake_datasource.FakeDataSource('test1') - node.register_service(test1) - obj = node.invoke_service_rpc( - 'test1', 'get_status', {'source_id': None, 'params': None}) - self.assertIsNotNone(obj) - node.unregister_service('test1') - helper.retry_til_exception( - congressException.NotFound, - lambda: node.invoke_service_rpc( - 'test1', 'get_status', {'source_id': None, 'params': None})) - node.stop() - - def _create_node_with_services(self, nodes, services, num, partition_id): - nid = 'cbd_node%s' % num - nodes.append(helper.make_dsenode_same_partition(partition_id, nid)) - ns = [] - for s in range(num): - # intentionally starting different number services - ns.append( - fake_datasource.FakeDataSource('cbd-%d_svc-%d' % (num, s))) - nodes[-1].register_service(ns[-1]) - services.append(ns) - return nodes[-1] - - def test_subs_list_update_aggregated_by_service(self): - part = helper.get_new_partition() - nodes = [] - services = [] - num_nodes = 3 - - for i in range(num_nodes): - n = self._create_node_with_services(nodes, services, i, part) - n.start() - - # add subscriptions - for i in range(2, num_nodes): - for s2 in services[i]: - for s1 in services[i-1]: - s1.subscribe(s2.service_id, 'table-A') - s2.subscribe(s1.service_id, 'table-B') - services[1][0].subscribe(services[2][0].service_id, 'table-C') - services[2][1].subscribe(services[2][0].service_id, 'table-D') - - # constructed expected results - 
expected_subbed_tables = {} - expected_subbed_tables[nodes[1].node_id] = {} - expected_subbed_tables[nodes[2].node_id] = {} - expected_subbed_tables[nodes[1].node_id][ - services[1][0].service_id] = set(['table-B']) - expected_subbed_tables[nodes[2].node_id][ - services[2][0].service_id] = set(['table-A', 'table-C', 'table-D']) - expected_subbed_tables[nodes[2].node_id][ - services[2][1].service_id] = set(['table-A']) - - # validate - def _validate_subbed_tables(node): - for s in node.get_services(): - sid = s.service_id - subscribed_tables = node.service_object( - sid)._published_tables_with_subscriber - self.assertEqual( - subscribed_tables, - expected_subbed_tables[node.node_id][sid], - '%s has incorrect subscribed tables list' % sid) - return True - for n in nodes: - helper.retry_check_function_return_value( - lambda: _validate_subbed_tables(n), True) - - # selectively unsubscribe - services[1][0].unsubscribe(services[2][0].service_id, 'table-A') - # note that services[2][1] still subscribes to 'table-B' - services[2][0].unsubscribe(services[1][0].service_id, 'table-B') - # extraneous unsubscribe - services[2][0].unsubscribe(services[1][0].service_id, 'table-None') - - # update expected results - expected_subbed_tables[nodes[2].node_id][ - services[2][0].service_id] = set(['table-C', 'table-D']) - - for n in nodes: - helper.retry_check_function_return_value( - lambda: _validate_subbed_tables(n), True) - - # resubscribe - services[1][0].subscribe(services[2][0].service_id, 'table-A') - services[2][0].subscribe(services[1][0].service_id, 'table-B') - - # update expected results - expected_subbed_tables[nodes[2].node_id][ - services[2][0].service_id] = set(['table-A', 'table-C', 'table-D']) - - for n in nodes: - helper.retry_check_function_return_value( - lambda: _validate_subbed_tables(n), True) - - def test_policy_table_publish(self): - """Policy table result publish - - Test basic DSE functionality with policy engine and table result - publish. 
- """ - node = helper.make_dsenode_new_partition('testnode') - data = fake_datasource.FakeDataSource('data') - policy = agnostic.DseRuntime('policy') - policy2 = agnostic.DseRuntime('policy2') - node.register_service(data) - node.register_service(policy) - node.register_service(policy2) - policy.synchronizer = mock.MagicMock() - policy2.synchronizer = mock.MagicMock() - - policy.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE) - policy.create_policy('classification') - policy.set_schema('data', compile.Schema({'q': (1,)})) - policy.insert('p(x):-data:q(x),gt(x,2)', target='classification') - - policy.insert('q(3)', target='data') - # TODO(ekcs): test that no publish triggered (because no subscribers) - - policy2.create_policy('policy') - policy2.subscribe('policy', 'classification:p') - helper.retry_check_function_return_value( - lambda: 'classification:p' in - policy._published_tables_with_subscriber, True) - self.assertEqual(list(policy.policySubData.keys()), - [('p', 'classification', None)]) - - helper.retry_check_db_equal( - policy2, 'policy:classification:p(x)', - 'policy:classification:p(3)') - - policy.insert('q(4)', target='data') - helper.retry_check_db_equal( - policy2, 'policy:classification:p(x)', - ('policy:classification:p(3)' - ' policy:classification:p(4)')) - - # test that no change to p means no publish triggered - policy.insert('q(2)', target='data') - # TODO(ekcs): test no publish triggered - - policy.delete('q(4)', target='data') - helper.retry_check_db_equal( - policy2, 'policy:classification:p(x)', - 'policy:classification:p(3)') - - policy2.unsubscribe('policy', 'classification:p') - # trigger removed - helper.retry_check_function_return_value( - lambda: len(policy._published_tables_with_subscriber) == 0, True) - self.assertEqual(list(policy.policySubData.keys()), []) - - policy.insert('q(4)', target='data') - # TODO(ekcs): test that no publish triggered (because no subscribers) - node.stop() - - def test_replicated_pe_exec(self): - """Test correct local leader behavior with 2 PEs requesting exec""" - node1 = helper.make_dsenode_new_partition('testnode1') - node2 = helper.make_dsenode_same_partition(node1, 'testnode2') - dsd = fake_datasource.FakeDataSource('dsd') - # faster time-out for testing - dsd.LEADER_TIMEOUT = 2 - pe1 = agnostic.DseRuntime('pe1') - pe2 = agnostic.DseRuntime('pe2') - node1.register_service(pe1) - node2.register_service(pe2) - node1.register_service(dsd) - assert dsd._running - assert node1._running - assert node2._running - assert node1._control_bus._running - - # first exec request obeyed and leader set - pe2.rpc('dsd', 'request_execute', - {'action': 'fake_act', 'action_args': {'name': 'testnode2'}, - 'wait': True}) - helper.retry_check_function_return_value( - lambda: len(dsd.exec_history), 1) - self.assertEqual(dsd._leader_node_id, 'testnode2') - - # second exec request from leader obeyed and leader remains - pe2.rpc('dsd', 'request_execute', - {'action': 'fake_act', 'action_args': {'name': 'testnode2'}, - 'wait': True}) - helper.retry_check_function_return_value( - lambda: len(dsd.exec_history), 2) - self.assertEqual(dsd._leader_node_id, 'testnode2') - - # exec request from non-leader not obeyed - pe1.rpc('dsd', 'request_execute', - {'action': 'fake_act', 'action_args': {'name': 'testnode1'}, - 'wait': True}) - self.assertRaises( - tenacity.RetryError, - helper.retry_check_function_return_value, - lambda: len(dsd.exec_history), 3) - - # leader vacated after heartbeat stops - node2.stop() - node2.wait() - 
helper.retry_check_function_return_value( - lambda: dsd._leader_node_id, None) - - # next exec request obeyed and new leader set - pe1.rpc('dsd', 'request_execute', - {'action': 'fake_act', 'action_args': {'name': 'testnode1'}, - 'wait': True}) - helper.retry_check_function_return_value( - lambda: len(dsd.exec_history), 3) - self.assertEqual(dsd._leader_node_id, 'testnode1') - node1.stop() - node2.stop() diff --git a/congress/tests/dse2/test_dse_node.py b/congress/tests/dse2/test_dse_node.py deleted file mode 100644 index c286808e..00000000 --- a/congress/tests/dse2/test_dse_node.py +++ /dev/null @@ -1,363 +0,0 @@ -# Copyright (c) 2016 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import eventlet -import mock - -from oslo_config import cfg -from oslo_messaging import conffixture - -from congress import exception - -from congress.dse2 import data_service -from congress.dse2 import datasource_manager as ds_manager -from congress.dse2 import dse_node -from congress.tests.api import base as api_base -from congress.tests import base -from congress.tests import helper - - -# Leave this in place for manual testing. -# For manual testing, support using rabbit driver instead of fake -USE_RABBIT = False -# if len(sys.argv) > 1: -# driver_flg = sys.argv[1].lower() -# if driver_flg == '--rabbit': -# USE_RABBIT = True -# elif driver_flg != '--fake': -# print("Usage: %s [--fake | --rabbit]" % sys.argv[0]) -# sys.exit(1) -# sys.argv[1:] = sys.argv[2:] - - -class _PingRpcEndpoint(object): - def __init__(self, node_id): - self.node_id = node_id - self.ping_receive_count = 0 - self.ping_received_from = [] - - def ping(self, client_ctxt, **args): - return args - - def ping_test(self, client_ctxt, **args): - self.ping_receive_count += 1 - self.ping_received_from.append(client_ctxt) - return args - - -class _PingRpcService(data_service.DataService): - def __init__(self, service_id, node_id): - self.endpoints = [_PingRpcEndpoint(node_id)] - super(_PingRpcService, self).__init__(service_id) - - def rpc_endpoints(self): - return self.endpoints - - -class TestDseNode(base.SqlTestCase): - - def setUp(self): - super(TestDseNode, self).setUp() - - if USE_RABBIT: - self.messaging_config = cfg.CONF - else: - mc_fixture = conffixture.ConfFixture(cfg.CONF) - mc_fixture.conf.transport_url = 'kombu+memory://' - self.messaging_config = mc_fixture.conf - self.messaging_config.rpc_response_timeout = 1 - - def test_start_stop(self): - # create node and register services - node = helper.make_dsenode_new_partition('test_node', - self.messaging_config, []) - services = [] - for i in range(2): - service = data_service.DataService('test-service-%s' % i) - node.register_service(service) - services.append(service) - for s in node.get_services(True): - self.assertTrue(s._running, - "Service '%s' started" % str(s)) - self.assertEqual(set(services), set(node.get_services()), - "All services accounted for on node.") - self.assertTrue(node._rpc_server._started, - "RPC server is started") - 
self.assertTrue(node._control_bus._running, - "Control Bus is started") - - # stop node - node.stop() - node.wait() - self.assertFalse(node._running, - "Node is stopped after node stop") - for idx, s in enumerate(node.get_services(True)): - self.assertFalse(s._running, - "Service '%s' stopped after node stop" % str(s)) - # TODO(pballand): fix bug - # self.assertFalse(node._rpc_server._started, - # "RPC server is stopped after node stop") - self.assertFalse(node._control_bus._running, - "Control Bus is stopped after node stop") - - # restart node - node.start() - for s in node.get_services(True): - self.assertTrue(s._running, - "Service '%s' started" % str(s)) - self.assertEqual(set(services), set(node.get_services()), - "All services accounted for on node.") - self.assertTrue(node._rpc_server._started, - "RPC server is started") - self.assertTrue(node._control_bus._running, - "Control Bus is started") - - def test_context(self): - # Context must not rely on node_id alone, in order to prohibit - # multiple instances of a node_id on the DSE - part = helper.get_new_partition() - n1 = helper.make_dsenode_same_partition(part, 'node_id', - self.messaging_config, []) - n2 = helper.make_dsenode_same_partition(part, 'node_id', - self.messaging_config, []) - self.assertEqual(n1._message_context, n1._message_context, - "Comparison of context from the same node is equal") - self.assertNotEqual(n1._message_context, n2._message_context, - "Comparison of context from different nodes " - "is not equal") - - def test_node_rpc(self): - """Validate calling RPCs on DseNode""" - part = helper.get_new_partition() - nodes = [] - endpoints = [] - for i in range(3): - nid = 'rpcnode%s' % i - endpoints.append(_PingRpcEndpoint(nid)) - nodes.append( - helper.make_dsenode_same_partition( - part, nid, self.messaging_config, [endpoints[-1]])) - - # Send from each node to each other node - for i, source in enumerate(nodes): - # intentionally including self in RPC target - for j, target in enumerate(nodes): - scount = endpoints[j].ping_receive_count - args = {'arg1': 1, 'arg2': 'a'} - ret = source.invoke_node_rpc(target.node_id, 'ping_test', args) - self.assertEqual(ret, args, "Ping echoed arguments") - ecount = endpoints[j].ping_receive_count - self.assertEqual(ecount - scount, 1, - "Node %s received ping (%s was sending)" - % (nodes[j].node_id, nodes[i].node_id)) - self.assertEqual( - endpoints[j].ping_received_from[-1]['node_id'], - nodes[i].node_id, - "Last ping received on %s was from %s" % ( - nodes[j].node_id, nodes[i].node_id)) - - def test_node_broadcast_rpc(self): - """Validate broadcasting RPCs to all DseNodes""" - part = helper.get_new_partition() - nodes = [] - endpoints = [] - for i in range(3): - nid = 'rpcnode%s' % i - endpoints.append(_PingRpcEndpoint(nid)) - nodes.append( - helper.make_dsenode_same_partition( - part, nid, self.messaging_config, [endpoints[-1]])) - - # Send from each node to all other nodes - for i, source in enumerate(nodes): - scounts = [] - for j, target in enumerate(nodes): - scounts.append(endpoints[j].ping_receive_count) - source.broadcast_node_rpc('ping_test', {'arg1': 1, 'arg2': 'a'}) - eventlet.sleep(0.5) # wait for async delivery - for j, target in enumerate(nodes): - ecount = endpoints[j].ping_receive_count - self.assertEqual(ecount - scounts[j], 1, - "Node %s received ping (%s was sending)" - % (nodes[j].node_id, source.node_id)) - self.assertEqual( - endpoints[j].ping_received_from[-1]['node_id'], - source.node_id, - "Last ping received on %s was from %s" % ( - nodes[j].node_id,
source.node_id)) - - def test_service_rpc(self): - part = helper.get_new_partition() - nodes = [] - services = [] - for i in range(3): - nid = 'svc_rpc_node%s' % i - node = helper.make_dsenode_same_partition( - part, nid, self.messaging_config) - service = _PingRpcService('srpc_node_svc%s' % i, nid) - node.register_service(service) - nodes.append(node) - services.append(service) - - # Send from each node to each other node - for i, source in enumerate(nodes): - # intentionally including self in RPC target - for j, service in enumerate(services): - ep = nodes[j]._services[-1].endpoints[0] - scount = ep.ping_receive_count - args = {'arg1': 1, 'arg2': 'a'} - ret = source.invoke_service_rpc( - service.service_id, 'ping_test', args) - self.assertEqual(ret, args, "Ping echoed arguments") - ecount = ep.ping_receive_count - self.assertEqual(ecount - scount, 1, - "Node %s received ping (%s was sending)" - % (nodes[j].node_id, nodes[i].node_id)) - self.assertEqual( - ep.ping_received_from[-1]['node_id'], - nodes[i].node_id, - "Last ping received on %s was from %s" % ( - nodes[j].node_id, nodes[i].node_id)) - - def test_broadcast_service_rpc(self): - part = helper.get_new_partition() - nodes = [] - services = [] - for i in range(3): - nid = 'svc_rpc_node%s' % i - node = helper.make_dsenode_same_partition( - part, nid, self.messaging_config) - service = _PingRpcService('tbsr_svc', nid) - node.register_service(service) - nodes.append(node) - services.append(service) - - # Send from each node to all services - for i, source in enumerate(nodes): - scounts = [] - for j, target in enumerate(nodes): - ep = nodes[j]._services[-1].endpoints[0] - scounts.append(ep.ping_receive_count) - source.broadcast_service_rpc( - 'tbsr_svc', 'ping_test', {'arg1': 1, 'arg2': 'a'}) - eventlet.sleep(0.5) # wait for async delivery - for j, target in enumerate(nodes): - ep = nodes[j]._services[-1].endpoints[0] - ecount = ep.ping_receive_count - self.assertEqual(ecount - scounts[j], 1, - "Node %s received ping (%s was sending)" - % (nodes[j].node_id, source.node_id)) - self.assertEqual( - ep.ping_received_from[-1]['node_id'], - source.node_id, - "Last ping received on %s was from %s" % ( - nodes[j].node_id, source.node_id)) - - def test_get_global_service_names(self): - node = helper.make_dsenode_new_partition('test_node', - self.messaging_config, []) - test1 = _PingRpcService('test1', 'test1') - test2 = _PingRpcService('test2', 'test2') - node.register_service(test1) - node.register_service(test2) - actual = set(node.get_global_service_names()) - self.assertEqual(actual, set(['test1', 'test2'])) - - def test_unregister_service(self): - node = helper.make_dsenode_new_partition('test_node', - self.messaging_config, []) - test1 = _PingRpcService('test1', 'test1') - uuid1 = '1c5d6da0-64ae-11e6-8852-000c29242e6f' - test1.ds_id = uuid1 - test2 = _PingRpcService('test2', 'test2') - uuid2 = 'd36d3781-e9e4-4278-bbf4-9f5fef7c5101' - test2.ds_id = uuid2 - node.register_service(test1) - node.register_service(test2) - actual = set(node.get_global_service_names()) - self.assertEqual(actual, set(['test1', 'test2'])) - - # unregister by service_id - node.unregister_service(service_id='test1') - actual = set(node.get_global_service_names()) - self.assertEqual(actual, set(['test2'])) - - # unregister by uuid - node.unregister_service(uuid_=uuid2) - actual = set(node.get_global_service_names()) - self.assertEqual(actual, set()) - - def _get_datasource_request(self): - # leave ID out--generated during creation - return {'name': 'datasource1', - 
'driver': 'fake_datasource', - 'description': 'hello world!', - 'enabled': True, - 'type': None, - 'config': {'auth_url': 'foo', - 'username': 'armax', - 'password': '', - 'tenant_name': 'armax'}} - - @mock.patch.object(dse_node.DseNode, 'get_driver_info') - def test_missing_driver_datasources(self, mock_driver_info): - services = api_base.setup_config(api=False, policy=False) - node = services['node'] - ds_manager = services['ds_manager'] - ds = self._get_datasource_request() - ds_manager.add_datasource(ds) - mock_driver_info.side_effect = [exception.DriverNotFound] - node.delete_missing_driver_datasources() - self.assertRaises(exception.DatasourceNotFound, - node.get_datasource, 'datasource1') - - -class TestDSManagerService(base.TestCase): - - def setUp(self): - super(TestDSManagerService, self).setUp() - - def test_ds_manager_endpoints_add_ds(self): - ds_manager_service = ds_manager.DSManagerService('test_mgr') - node_mock = mock.MagicMock() - ds_manager_service.add_datasource = mock.MagicMock() - ds_manager_service.add_datasource.return_value = 'add_datasource' - ds_manager_service.node = node_mock - endpoints = ds_manager.DSManagerEndpoints(ds_manager_service) - - expect_ret = 'add_datasource' - self.assertEqual(expect_ret, endpoints.add_datasource('context', {})) - - ds_manager_service.add_datasource.assert_called_with({}) - - def test_ds_manager_endpoints_delete_ds(self): - ds_manager_service = ds_manager.DSManagerService('test_mgr') - node_mock = mock.MagicMock() - ds_manager_service.delete_datasource = mock.MagicMock() - ds_manager_service.delete_datasource.return_value = 'delete_datasource' - ds_manager_service.node = node_mock - endpoints = ds_manager.DSManagerEndpoints(ds_manager_service) - - expect_ret = 'delete_datasource' - self.assertEqual(expect_ret, - endpoints.delete_datasource('context', 'ds-id')) - - ds_manager_service.delete_datasource.assert_called_with('ds-id') - - -# Leave this to make manual testing with RabbitMQ easy -# if __name__ == '__main__': -# import unittest -# unittest.main(verbosity=2) diff --git a/congress/tests/etc/api-paste.ini b/congress/tests/etc/api-paste.ini deleted file mode 100644 index 39be570b..00000000 --- a/congress/tests/etc/api-paste.ini +++ /dev/null @@ -1,34 +0,0 @@ -[composite:congress] -use = egg:Paste#urlmap -/: congressversions -/v1: congress_api_v1 - -[pipeline:congressversions] -pipeline = cors catch_errors congressversionapp - -[app:congressversionapp] -paste.app_factory = congress.api.versions:Versions.factory - -[composite:congress_api_v1] -use = call:congress.auth:pipeline_factory -keystone = cors request_id catch_errors authtoken keystonecontext congress_api -noauth = cors request_id catch_errors congress_api - -[app:congress_api] -paste.app_factory = congress.service:congress_app_factory - -[filter:request_id] -paste.filter_factory = oslo_middleware:RequestId.factory - -[filter:catch_errors] -paste.filter_factory = oslo_middleware:CatchErrors.factory - -[filter:keystonecontext] -paste.filter_factory = congress.auth:CongressKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -oslo_config_project = congress diff --git a/congress/tests/etc/congress.conf.test b/congress/tests/etc/congress.conf.test deleted file mode 100644 index 5a10dd39..00000000 --- a/congress/tests/etc/congress.conf.test +++ /dev/null @@ -1,3 +0,0 @@ -[database] -connection = 'sqlite://' -# connection = 
mysql+pymysql://root:password@127.0.0.1/congress?charset=utf8 \ No newline at end of file diff --git a/congress/tests/etc/congress.conf.test.ha_pe1 b/congress/tests/etc/congress.conf.test.ha_pe1 deleted file mode 100644 index 71619d41..00000000 --- a/congress/tests/etc/congress.conf.test.ha_pe1 +++ /dev/null @@ -1,10 +0,0 @@ -[DEFAULT] -bind_port = 4001 -auth_strategy = noauth -datasource_sync_period = 5 -debug = True -replicated_policy_engine = True - -[database] -# connection = mysql+pymysql://root:password@127.0.0.1/congress?charset=utf8 -connection = 'sqlite:///congress/tests/haht/test.db' \ No newline at end of file diff --git a/congress/tests/etc/congress.conf.test.ha_pe2 b/congress/tests/etc/congress.conf.test.ha_pe2 deleted file mode 100644 index 40b16a5a..00000000 --- a/congress/tests/etc/congress.conf.test.ha_pe2 +++ /dev/null @@ -1,10 +0,0 @@ -[DEFAULT] -bind_port = 4002 -auth_strategy = noauth -datasource_sync_period = 5 -debug = True -replicated_policy_engine = True - -[database] -# connection = mysql+pymysql://root:password@127.0.0.1/congress?charset=utf8 -connection = 'sqlite:///congress/tests/haht/test.db' \ No newline at end of file diff --git a/congress/tests/fake_datasource.py b/congress/tests/fake_datasource.py deleted file mode 100644 index 2a500cb0..00000000 --- a/congress/tests/fake_datasource.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
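The DSManagerEndpoints tests above pin down a thin delegation pattern: the RPC endpoint object receives the oslo.messaging context plus a payload, drops the context, and forwards only the payload to the owning service. A minimal sketch of that pattern (class and attribute names here are illustrative, not the exact Congress classes):

class Endpoints(object):
    """RPC-facing wrapper that delegates each call to a service object."""

    def __init__(self, service):
        self.service = service

    def add_datasource(self, context, req):
        # 'context' comes from oslo.messaging; only the payload is passed on
        return self.service.add_datasource(req)

    def delete_datasource(self, context, ds_id):
        return self.service.delete_datasource(ds_id)

This mirrors what the mocks assert: endpoints.add_datasource('context', {}) must end up calling service.add_datasource({}).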
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_log import log as logging - -from congress.datasources import datasource_driver -from congress.datasources import datasource_utils - -LOG = logging.getLogger(__name__) - - -class FakeDataSource(datasource_driver.PollingDataSourceDriver, - datasource_driver.PushedDataSourceDriver, - datasource_driver.ExecutionDriver): - - value_trans = {'translation-type': 'VALUE'} - fake_translator = { - 'translation-type': 'HDICT', - 'table-name': 'fake_table', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans})} - - TRANSLATORS = [fake_translator] - - def __init__(self, name='', args=None): - super(FakeDataSource, self).__init__(name, args) - datasource_driver.ExecutionDriver.__init__(self) - self.add_executable_method('fake_act', - [{'name': 'server_id', - 'description': 'server to act'}], - 'fake action') - - self.update_number = 0 - self.initialize_update_method() - self.exec_history = [] - self._init_end_start_poll() - - @staticmethod - def get_datasource_info(): - result = {} - result['id'] = 'fake_datasource' - result['description'] = 'This is a fake driver used for testing' - result['config'] = datasource_utils.get_openstack_required_config() - result['secret'] = ['password'] - return result - - def initialize_update_method(self): - self.add_update_method(self.update_fake_table, self.fake_translator) - - def update_fake_table(self): - LOG.info("fake:: update_from_datasource") - self.update_number += 1 - - def execute(self, action, action_args): - self.exec_history.append((action, action_args)) diff --git a/congress/tests/fake_policy.py b/congress/tests/fake_policy.py deleted file mode 100644 index d84ba4ea..00000000 --- a/congress/tests/fake_policy.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - - -policy_data = """ -{ - "context_is_admin": "role:admin", - "admin_only": "rule:context_is_admin", - "regular_user": "", - "default": "rule:admin_only" -} -""" diff --git a/congress/tests/fake_wsgi.py b/congress/tests/fake_wsgi.py deleted file mode 100644 index 32517a01..00000000 --- a/congress/tests/fake_wsgi.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2015 Huawei, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import paste.urlmap - -from congress.api import application -from congress.api import versions - - -def wsgi_app(): - - mapper = paste.urlmap.URLMap() - mapper['/'] = versions.Versions() - - api_resource_mgr = application.ResourceManager() - api_resource_mgr.register_handler(versions.VersionV1Handler(r'/v1[/]?')) - app = application.ApiApplication(api_resource_mgr) - mapper['/v1'] = app - - return mapper diff --git a/congress/tests/haht/__init__.py b/congress/tests/haht/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/tests/haht/test.db.clean b/congress/tests/haht/test.db.clean deleted file mode 100644 index 8e92f21f..00000000 Binary files a/congress/tests/haht/test.db.clean and /dev/null differ diff --git a/congress/tests/haht/test_congress_haht.py b/congress/tests/haht/test_congress_haht.py deleted file mode 100644 index c264a1c1..00000000 --- a/congress/tests/haht/test_congress_haht.py +++ /dev/null @@ -1,511 +0,0 @@ -# -# Copyright (c) 2016 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_congress_haht ----------------------------------- - -Replicated policy engine high availability tests for `congress` module. 
-""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import shutil -import subprocess -import sys -import tempfile -import time - -# Note: monkey patch to allow running this test standalone under 'nose' -import eventlet -eventlet.monkey_patch() -from oslo_log import log as logging -import requests -import tenacity - -from congress.db import api as db -from congress.db import db_policy_rules -from congress.tests import base -from congress.tests import helper - - -LOG = logging.getLogger(__name__) - - -class TestCongressHAHT(base.SqlTestCase): - - class client(object): - version = '/v1' - - def __init__(self, port, host='0.0.0.0'): - self.host = host - self.port = port - self.base_url = 'http://' + host + ':%d' % port - - def url(self, suffix=None): - if suffix is None: - return self.base_url - else: - return self.base_url + self.version + '/' + suffix - - def get(self, suffix=None): - return requests.get(self.url(suffix)) - - def delete(self, suffix=None): - return requests.delete(self.url(suffix)) - - def post(self, suffix=None, json=None): - x = requests.post(self.url(suffix), json=json) - # print("status: %s, text: %s" % (x.status_code, x.text)) - return x - - def setUp(self): - super(TestCongressHAHT, self).setUp() - assert sys.executable is not None,\ - 'test cannot proceed when sys.executable is None' - - # establish clean starting DB - self.clean_db() - shutil.copy(helper.test_path('haht/test.db.clean'), - helper.test_path('haht/test.db')) - - self.clients = [] - self.procs = [] - self.outfiles = {} - self.errfiles = {} - - self.pe1 = self.start_pe(1, 4001) - self.pe2 = self.start_pe(2, 4002) - - def dump_nodes_logs(self): - LOG.error('PE1 process output:\n%s' % - self.read_output_file(self.outfiles[1])) - LOG.error('PE2 process output:\n%s' % - self.read_output_file(self.outfiles[2])) - - def clean_db(self): - session = db.get_session() - with session.begin(subtransactions=True): - session.query(db_policy_rules.Policy).delete() - session.query(db_policy_rules.PolicyRule).delete() - - def start_pe(self, num, port): - self.outfiles[num] = tempfile.NamedTemporaryFile( - mode='a+', suffix='.out', - prefix='congress-pe%d-%d-' % (num, port), - dir='/tmp') - - self.errfiles[num] = tempfile.NamedTemporaryFile( - mode='a+', suffix='.err', - prefix='congress-pe%d-%d-' % (num, port), - dir='/tmp') - - args = [sys.executable, - 'congress/server/congress_server.py', - '--node-id', - 'node_%d' % num, - '--api', - '--policy-engine', - '--config-file', - 'congress/tests/etc/congress.conf.test.ha_pe%d' % num] - pe = subprocess.Popen(args, - stdout=self.outfiles[num], - stderr=self.outfiles[num], - cwd=helper.root_path()) - self.addCleanup(pe.kill) - pe = self.client(port) - try: - helper.retry_check_function_return_value( - lambda: pe.get().status_code, 200) - except tenacity.RetryError: - out = self.read_output_file(self.outfiles[num]) - LOG.error('PE%d failed to start. 
Process output:\n%s' % (num, out)) - raise - return pe - - def read_output_file(self, file): - file.flush() - file.seek(0) - return ''.join(file.readlines()) - - def tail(self, thing, length=20): - lines = thing.split('\n') - return '\n'.join(lines[-length:]) - - def test_policy_create_delete(self): - # create policy alice in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'alice'}).status_code, 201) - # check policy alice in PE1 - self.assertEqual(self.pe1.get('policies/alice').status_code, 200) - # check policy alice in PE2 - helper.retry_check_function_return_value( - lambda: self.pe2.get('policies/alice').status_code, 200) - # create policy bob in PE2 - self.assertEqual(self.pe2.post( - suffix='policies', json={'name': 'bob'}).status_code, 201) - # check policy bob in PE2 - self.assertEqual(self.pe2.get('policies/bob').status_code, 200) - # check policy bob in PE1 - helper.retry_check_function_return_value( - lambda: self.pe1.get('policies/bob').status_code, 200) - - # check policy listings - self.assertEqual(len(self.pe1.get('policies').json()['results']), 4) - self.assertEqual(len(self.pe2.get('policies').json()['results']), 4) - - # delete policy alice in PE2, and check deleted on both PE - self.assertEqual(self.pe2.delete('policies/alice').status_code, 200) - self.assertEqual(self.pe2.get('policies/alice').status_code, 404) - helper.retry_check_function_return_value( - lambda: self.pe1.get('policies/alice').status_code, 404) - - # delete policy bob in PE2, and check deleted on both PE - self.assertEqual(self.pe2.delete('policies/bob').status_code, 200) - self.assertEqual(self.pe2.get('policies/bob').status_code, 404) - helper.retry_check_function_return_value( - lambda: self.pe1.get('policies/bob').status_code, 404) - - def test_policy_rule_crud(self): - try: - # create policy alice in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'alice'}).status_code, 201) - # add rule to PE1 - j = {'rule': 'p(x) :- q(x)', 'name': 'rule1'} - self.assertEqual(self.pe1.post( - suffix='policies/alice/rules', json=j).status_code, 201) - self.assertEqual( - self.pe1.get('policies/alice/rules').status_code, 200) - self.assertEqual( - len(self.pe1.get('policies/alice/rules'). - json()['results']), 1) - # retry necessary because of synchronization - helper.retry_check_function_return_value( - lambda: len(self.pe2.get('policies/alice/rules'). 
- json()['results']), 1) - # add rule to PE2 - j = {'rule': 'q(1)', 'name': 'rule2'} - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=j).status_code, 201) - # check 2 rule in each pe - self.assertEqual(len( - self.pe2.get('policies/alice/rules').json()['results']), 2) - self.assertEqual(len( - self.pe1.get('policies/alice/rules').json()['results']), 2) - - # grab rule IDs - rules = self.pe2.get('policies/alice/rules').json()['results'] - id1 = next(x['id'] for x in rules if x['name'] == 'rule1') - id2 = next(x['id'] for x in rules if x['name'] == 'rule2') - - # show rules by id - self.assertEqual( - self.pe1.get('policies/alice/rules/%s' % id1).status_code, 200) - self.assertEqual( - self.pe2.get('policies/alice/rules/%s' % id1).status_code, 200) - self.assertEqual( - self.pe1.get('policies/alice/rules/%s' % id2).status_code, 200) - self.assertEqual( - self.pe2.get('policies/alice/rules/%s' % id2).status_code, 200) - - # list tables - self.assertEqual(len( - self.pe1.get('policies/alice/tables').json()['results']), 2) - self.assertEqual(len( - self.pe2.get('policies/alice/tables').json()['results']), 2) - - # show tables - self.assertEqual( - self.pe1.get('policies/alice/tables/p').status_code, 200) - self.assertEqual( - self.pe2.get('policies/alice/tables/p').status_code, 200) - self.assertEqual( - self.pe1.get('policies/alice/tables/q').status_code, 200) - self.assertEqual( - self.pe2.get('policies/alice/tables/q').status_code, 200) - - # delete from PE1 and check both have 1 rule left - self.assertEqual(self.pe1.delete( - suffix='policies/alice/rules/%s' % id1).status_code, 200) - self.assertEqual( - len(self.pe1.get('policies/alice/rules'). - json()['results']), 1) - self.assertEqual( - len(self.pe2.get('policies/alice/rules'). - json()['results']), 1) - # delete from PE2 and check both have 0 rules left - self.assertEqual(self.pe2.delete( - suffix='policies/alice/rules/%s' % id2).status_code, 200) - self.assertEqual( - len(self.pe1.get('policies/alice/rules'). - json()['results']), 0) - self.assertEqual( - len(self.pe2.get('policies/alice/rules'). - json()['results']), 0) - except Exception: - self.dump_nodes_logs() - raise - - def test_conflicting_policy_create_delete(self): - try: - # create policy alice in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'alice'}).status_code, 201) - self.assertEqual(self.pe2.post( - suffix='policies', json={'name': 'alice'}).status_code, 409) - - # create policy bob in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'bob'}).status_code, 201) - self.assertEqual(self.pe2.delete( - suffix='policies/bob').status_code, 200) - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'bob'}).status_code, 201) - except Exception: - LOG.error('PE1 process output:\n%s' % - self.read_output_file(self.outfiles[1])) - LOG.error('PE2 process output:\n%s' % - self.read_output_file(self.outfiles[2])) - raise - - def test_policy_rule_create_delete(self): - try: - # create policy alice in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'alice'}).status_code, 201) - # add rule to PE1 (retry since 500 on first attempt) - j = {'rule': 'p(x) :- q(x)', 'name': 'rule1'} - self.assertEqual(self.pe1.post( - suffix='policies/alice/rules', json=j).status_code, 201) - self.assertEqual( - self.pe1.get('policies/alice/rules').status_code, 200) - self.assertEqual( - len(self.pe1.get('policies/alice/rules'). 
- json()['results']), 1) - time.sleep(10) # wait for sync before reading from PE2 - self.assertEqual( - len(self.pe2.get('policies/alice/rules'). - json()['results']), 1) - # add rule to PE2 - j = {'rule': 'q(1)', 'name': 'rule2'} - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=j).status_code, 201) - # check 2 rule in each pe - self.assertEqual(len( - self.pe2.get('policies/alice/rules').json()['results']), 2) - self.assertEqual(len( - self.pe1.get('policies/alice/rules').json()['results']), 2) - # grab rule IDs - rules = self.pe2.get('policies/alice/rules').json()['results'] - id1 = next(x['id'] for x in rules if x['name'] == 'rule1') - id2 = next(x['id'] for x in rules if x['name'] == 'rule2') - # delete from PE1 and check both have 1 rule left - self.assertEqual(self.pe1.delete( - suffix='policies/alice/rules/%s' % id1).status_code, 200) - self.assertEqual( - len(self.pe1.get('policies/alice/rules'). - json()['results']), 1) - self.assertEqual( - len(self.pe2.get('policies/alice/rules'). - json()['results']), 1) - # delete from PE2 and check both have 0 rules left - self.assertEqual(self.pe2.delete( - suffix='policies/alice/rules/%s' % id2).status_code, 200) - self.assertEqual( - len(self.pe1.get('policies/alice/rules'). - json()['results']), 0) - self.assertEqual( - len(self.pe2.get('policies/alice/rules'). - json()['results']), 0) - except Exception: - self.dump_nodes_logs() - raise - - def test_policy_rule_create_delete_interference(self): - try: - # create policy alice in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'alice'}).status_code, 201) - j = {'rule': 'p(x) :- q(x)', 'name': 'rule1'} - - rule_create_res = self.pe2.post( - suffix='policies/alice/rules', json=j) - self.assertEqual(rule_create_res.status_code, 201) - rule_id = rule_create_res.json()['id'] - self.assertEqual(self.pe1.delete( - suffix='policies/alice/rules/%s' % rule_id).status_code, 200) - - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=j).status_code, 201) - - except Exception: - self.dump_nodes_logs() - raise - - def test_policy_rule_duplicate(self): - try: - # create policy alice in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'alice'}).status_code, 201) - j = {'rule': 'p(x) :- q(x)', 'name': 'rule1'} - - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=j).status_code, 201) - - self.assertEqual(self.pe1.post( - suffix='policies/alice/rules', json=j).status_code, 409) - - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=j).status_code, 409) - - self.assertEqual( - self.pe1.get('policies/alice/rules').status_code, 200) - self.assertLessEqual( - len(self.pe1.get('policies/alice/rules').json()['results']), - 1) - self.assertEqual( - len(self.pe2.get('policies/alice/rules'). 
- json()['results']), 1) - except Exception: - self.dump_nodes_logs() - raise - - def test_policy_rule_recursion(self): - try: - # create policy alice in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'alice'}).status_code, 201) - r1 = {'rule': 'p(x) :- q(x)', 'name': 'rule1'} - r2 = {'rule': 'q(x) :- p(x)', 'name': 'rule2'} - - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=r1).status_code, 201) - - self.assertEqual(self.pe1.post( - suffix='policies/alice/rules', json=r2).status_code, 400) - - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=r2).status_code, 400) - - self.assertEqual( - self.pe1.get('policies/alice/rules').status_code, 200) - self.assertLessEqual( - len(self.pe1.get('policies/alice/rules').json()['results']), - 1) - self.assertEqual( - len(self.pe2.get('policies/alice/rules'). - json()['results']), 1) - except Exception: - self.dump_nodes_logs() - raise - - def test_policy_rule_schema_mismatch(self): - try: - # create policy alice in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'alice'}).status_code, 201) - r1 = {'rule': 'p(x) :- q(x)', 'name': 'rule1'} - r2 = {'rule': 'p(x) :- q(x, x)', 'name': 'rule2'} - - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=r1).status_code, 201) - - self.assertEqual(self.pe1.post( - suffix='policies/alice/rules', json=r2).status_code, 400) - - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=r2).status_code, 400) - - self.assertEqual( - self.pe1.get('policies/alice/rules').status_code, 200) - self.assertLessEqual( - len(self.pe1.get('policies/alice/rules').json()['results']), - 1) - self.assertEqual( - self.pe2.get('policies/alice/rules').status_code, 200) - self.assertEqual( - len(self.pe2.get('policies/alice/rules'). 
- json()['results']), 1) - except Exception: - self.dump_nodes_logs() - raise - - def test_policy_rule_evaluation(self): - try: - # create policy alice in PE1 - self.assertEqual(self.pe1.post( - suffix='policies', json={'name': 'alice'}).status_code, 201) - # add rule to PE1 - j = {'rule': 'p(x) :- q(x)', 'name': 'rule0'} - res = self.pe1.post( - suffix='policies/alice/rules', json=j) - self.assertEqual(res.status_code, 201) - r_id = res.json()['id'] - - # add data to PE1 - j = {'rule': ' q( 1 ) ', 'name': 'rule1'} - res = self.pe1.post( - suffix='policies/alice/rules', json=j) - self.assertEqual(res.status_code, 201) - q1_id = res.json()['id'] - - # add data to PE2 - j = {'rule': ' q ( 2 ) ', 'name': 'rule2'} - self.assertEqual(self.pe2.post( - suffix='policies/alice/rules', json=j).status_code, 201) - - # eval on PE1 - helper.retry_check_function_return_value_table( - lambda: [x['data'] for x in - self.pe1.get('policies/alice/tables/p/rows').json()[ - 'results']], - [[1], [2]]) - - # eval on PE2 - helper.retry_check_function_return_value_table( - lambda: [x['data'] for x in - self.pe2.get('policies/alice/tables/p/rows').json()[ - 'results']], - [[1], [2]]) - - self.assertEqual(self.pe1.delete( - suffix='policies/alice/rules/%s' % q1_id).status_code, 200) - - # eval on PE1 - helper.retry_check_function_return_value_table( - lambda: [x['data'] for x in - self.pe1.get('policies/alice/tables/p/rows').json()[ - 'results']], - [[2]]) - - # eval on PE2 - helper.retry_check_function_return_value_table( - lambda: [x['data'] for x in - self.pe2.get('policies/alice/tables/p/rows').json()[ - 'results']], - [[2]]) - - self.assertEqual(self.pe2.delete( - suffix='policies/alice/rules/%s' % r_id).status_code, 200) - helper.retry_check_function_return_value(lambda: self.pe1.get( - 'policies/alice/tables/p/rows').status_code, 404) - helper.retry_check_function_return_value(lambda: self.pe2.get( - 'policies/alice/tables/p/rows').status_code, 404) - - except Exception: - self.dump_nodes_logs() - raise diff --git a/congress/tests/helper.py b/congress/tests/helper.py deleted file mode 100644 index 2ba5e6e2..00000000 --- a/congress/tests/helper.py +++ /dev/null @@ -1,460 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
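The HA tests above never read replicated state just once: every cross-engine assertion polls until the second policy engine has synchronized, using the tenacity-based helpers defined in helper.py just below. A simplified stand-in showing the shape of that pattern (wait_for is a hypothetical name for illustration):

import tenacity

@tenacity.retry(stop=tenacity.stop_after_attempt(20),
                wait=tenacity.wait_fixed(1))
def wait_for(f, expected):
    """Re-invoke f every second until it returns expected (20 tries max)."""
    if f() != expected:
        raise AssertionError("condition not met yet")

# usage, mirroring the tests above:
# wait_for(lambda: pe2.get('policies/alice').status_code, 200)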
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json -import os - -import tenacity -import time - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_messaging import conffixture - -from congress.datalog import compile -from congress.datalog import unify -from congress.policy_engines import agnostic - -from congress.dse2 import dse_node - - -LOG = logging.getLogger(__name__) - -ROOTDIR = os.path.dirname(__file__) -ETCDIR = os.path.join(ROOTDIR, 'etc') - -# single, global variable used to ensure different tests from -# different subclasses of TestCase all can get a unique ID -# so that the tests do not interact on oslo-messaging -partition_counter = 0 - - -def make_dsenode_new_partition(node_id, - messaging_config=None, - node_rpc_endpoints=None): - """Get new DseNode in it's own new DSE partition.""" - messaging_config = messaging_config or generate_messaging_config() - node_rpc_endpoints = node_rpc_endpoints or [] - return dse_node.DseNode(messaging_config, node_id, node_rpc_endpoints, - partition_id=get_new_partition()) - - -def make_dsenode_same_partition(existing, - node_id, - messaging_config=None, - node_rpc_endpoints=None): - """Get new DseNode in the same DSE partition as existing (node or part).""" - partition_id = (existing.partition_id if - isinstance(existing, dse_node.DseNode) else existing) - - messaging_config = messaging_config or generate_messaging_config() - node_rpc_endpoints = node_rpc_endpoints or [] - return dse_node.DseNode( - messaging_config, node_id, node_rpc_endpoints, partition_id) - - -def get_new_partition(): - """Create a new partition number, unique within each process.""" - global partition_counter - old = partition_counter - partition_counter += 1 - return old - - -def generate_messaging_config(): - mc_fixture = conffixture.ConfFixture(cfg.CONF) - mc_fixture.conf.transport_url = 'kombu+memory://' - messaging_config = mc_fixture.conf - messaging_config.rpc_response_timeout = 10 - return messaging_config - - -def etcdir(*p): - return os.path.join(ETCDIR, *p) - - -def root_path(): - """Return path to root of source code.""" - x = os.path.realpath(__file__) - x, y = os.path.split(x) # drop "helper.py" - x, y = os.path.split(x) # drop "tests" - x, y = os.path.split(x) # drop "congress" - return x - - -def source_path(): - """Return path to root of source code.""" - x = os.path.realpath(__file__) - x, y = os.path.split(x) # drop "helper.py" - x, y = os.path.split(x) # drop "tests" - return x - - -def data_module_path(file): - """Return path to dataservice module with given FILEname.""" - path = source_path() - path = os.path.join(path, "datasources") - path = os.path.join(path, file) - return path - - -def policy_module_path(): - """Return path to policy engine module.""" - path = source_path() - path = os.path.join(path, "policy_engines") - path = os.path.join(path, "agnostic.py") - return path - - -def api_module_path(): - """Return path to api module.""" - path = source_path() - path = os.path.join(path, "datasources") - path = os.path.join(path, "test_driver.py") - return path - - -def test_path(file=None): - """Return path to root of top-level tests. 
Joined with file if provided.""" - path = source_path() - path = os.path.join(path, "tests") - if file is not None: - path = os.path.join(path, file) - return path - - -def datasource_config_path(): - """Return path to configuration info for datasources.""" - path = test_path() - path = os.path.join(path, "datasources.conf") - return path - - -def datasource_openstack_args(): - """Return basic args for creating an openstack datasource.""" - return {'username': '', - 'password': '', - 'auth_url': '', - 'tenant_name': '', - 'poll_time': 1} - - -def pause(factor=1): - """Timeout so other threads can run.""" - time.sleep(factor * 1) - - -def datalog_same(actual_code, correct_code, msg=None): - return datalog_equal( - actual_code, correct_code, msg=msg, - equal=lambda x, y: unify.same(x, y) is not None) - - -def datalog_equal(actual_code, correct_code, - msg=None, equal=None, theories=None, - output_diff=True): - """Check equality. - - Check if the strings given by actual_code - and CORRECT_CODE represent the same datalog. - """ - def minus(iter1, iter2, invert=False): - extra = [] - for i1 in iter1: - found = False - for i2 in iter2: - # for asymmetric equality checks - if invert: - test_result = equal(i2, i1) - else: - test_result = equal(i1, i2) - if test_result: - found = True - break - if not found: - extra.append(i1) - return extra - if equal is None: - equal = lambda x, y: x == y - - LOG.debug("** Checking equality: %s **", msg) - actual = compile.parse(actual_code, theories=theories) - correct = compile.parse(correct_code, theories=theories) - extra = minus(actual, correct) - # in case EQUAL is asymmetric, always supply actual as the first arg - # and set INVERT to true - missing = minus(correct, actual, invert=True) - if output_diff: - output_diffs(extra, missing, msg) - LOG.debug("** Finished equality: %s **", msg) - is_equal = len(extra) == 0 and len(missing) == 0 - if not is_equal: - LOG.debug('datalog_equal failed, extras: %s, missing: %s', extra, - missing) - return is_equal - - -def db_equal(actual_string, correct_string, output_diff=True): - """Check if two strings representing data theories are the same.""" - actual = agnostic.string_to_database(actual_string) - correct = agnostic.string_to_database(correct_string) - return check_db_diffs(actual, correct, output_diff=output_diff) - - -def check_db_diffs(actual, correct, output_diff=True): - extra = actual - correct - missing = correct - actual - extra = [e for e in extra if not e[0].startswith("___")] - missing = [m for m in missing if not m[0].startswith("___")] - if output_diff: - output_diffs(extra, missing, actual=actual) - return len(extra) == 0 and len(missing) == 0 - - -def output_diffs(extra, missing, actual=None): - if len(extra) > 0: - print("Extra tuples") - print(", ".join([str(x) for x in extra])) - if len(missing) > 0: - print("Missing tuples") - print(", ".join([str(x) for x in missing])) - if len(extra) > 0 or len(missing) > 0: - print("Resulting database: {}".format(str(actual))) - - -def str2form(formula_string, theories=None): - return compile.parse1(formula_string, theories=theories) - - -def str2pol(policy_string, theories=None): - return compile.parse(policy_string, theories=theories) - - -def pol2str(policy): - return " ".join(str(x) for x in policy) - - -def form2str(formula): - return str(formula) - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_for_last_message(obj): - if not hasattr(obj, "last_msg"): - raise AttributeError("Missing 
'last_msg' attribute") - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_for_message_to_arrive(obj): - if not hasattr(obj.msg, "body"): - raise AttributeError("Missing 'body' attribute") - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_for_message_data(obj, data): - if not hasattr(obj.msg, "body"): - raise AttributeError("Missing 'body' attribute") - if obj.get_msg_data() != data: - raise TestFailureException("Missing expected data in msg") - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_nonempty_last_policy_change(obj): - if not hasattr(obj, "last_policy_change"): - raise AttributeError("Missing 'last_policy_change' attribute") - if obj.last_policy_change is None: - raise TestFailureException("last_policy_change == None") - if len(obj.last_policy_change) == 0: - raise TestFailureException("last_policy_change == 0") - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_empty_last_policy_change(obj): - if not hasattr(obj, "last_policy_change"): - raise AttributeError("Missing 'last_policy_change' attribute") - if len(obj.last_policy_change) != 0: - raise TestFailureException("last_policy_change != 0") - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_db_equal(policy, query, correct, target=None): - if not hasattr(policy, "select"): - raise AttributeError("Missing 'select' attribute") - if target is None: - actual = policy.select(query) - else: - actual = policy.select(query, target=target) - if not db_equal(actual, correct, output_diff=False): - raise TestFailureException( - "Query {} produces {}, should produce {}".format( - str(query), str(actual), str(correct))) - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_number_of_updates(deepsix, value): - if not hasattr(deepsix, "number_of_updates"): - raise AttributeError("Missing 'number_of_updates' attribute") - if deepsix.number_of_updates != value: - raise TestFailureException("number_of_updates is {}, not {}".format( - deepsix.number_of_updates, value)) - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_subscriptions(deepsix, subscription_list): - if not check_subscriptions(deepsix, subscription_list): - raise TestFailureException( - "{} does not have subscription list {}".format( - deepsix.name, str(subscription_list))) - - -def check_subscriptions(deepsix, subscription_list): - """Check subscriptions. - - Check that the instance DEEPSIX is subscribed to all of the - (key, dataindex) pairs in KEY_DATAINDEX_LIST. Return True if - all subscriptions exists; otherwise returns False. 
- """ - actual = set([(value.key, value.dataindex) - for value in deepsix.subdata.values()]) - correct = set(subscription_list) - missing = correct - actual - if missing: - LOG.debug("Missing key/dataindex subscriptions: %s", missing) - return not missing - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_subscribers(deepsix, subscriber_list): - if not check_subscribers(deepsix, subscriber_list): - raise TestFailureException( - "{} does not have subscriber list {}".format( - deepsix.name, str(subscriber_list))) - - -@tenacity.retry(stop=tenacity.stop_after_attempt(1000), - wait=tenacity.wait_fixed(0.1)) -def retry_check_no_subscribers(deepsix, subscriber_list): - """Check that deepsix has none of the subscribers in subscriber_list""" - if check_subscribers(deepsix, subscriber_list, any_=True): - raise TestFailureException( - "{} still has some subscribers in list {}".format( - deepsix.name, str(subscriber_list))) - - -def check_subscribers(deepsix, subscriber_list, any_=False): - """Check subscribers. - - Check that the instance DEEPSIX includes subscriptions for all of - the (name, dataindex) pairs in SUBSCRIBER_LIST. Return True if - all subscribers exist; otherwise returns False. - - If any_=True, then return True if ANY subscribers exist in subscriber_list - """ - actual = set([(name, pubdata.dataindex) - for pubdata in deepsix.pubdata.copy().values() - for name in pubdata.subscribers]) - correct = set(subscriber_list) - missing = correct - actual - if missing: - LOG.debug("Missing name/dataindex subscribers: %s", missing) - if any_: - return (len(missing) < len(actual)) - return not missing - - -@tenacity.retry(stop=tenacity.stop_after_attempt(20), - wait=tenacity.wait_fixed(1)) -def retry_check_function_return_value(f, expected_value): - """Check if function f returns expected key.""" - result = f() - if result != expected_value: - raise TestFailureException( - "Expected value '%s' not received. " - "Got %s instead." 
% (expected_value, result)) - - -@tenacity.retry(stop=tenacity.stop_after_attempt(10), - wait=tenacity.wait_fixed(0.5)) -def retry_check_function_return_value_not_eq(f, value): - """Check if function f does not return expected value.""" - result = f() - if result == value: - raise TestFailureException( - "Actual value '%s' should be different " - "from '%s'" % (result, value)) - - -@tenacity.retry(stop=tenacity.stop_after_attempt(10), - wait=tenacity.wait_fixed(0.5)) -def retry_til_exception(expected_exception, f): - """Check if function f does not return expected value.""" - try: - val = f() - raise TestFailureException("No exception thrown; received %s" % val) - except expected_exception: - return - except Exception as e: - raise TestFailureException("Wrong exception thrown: %s" % e) - - -@tenacity.retry(stop=tenacity.stop_after_attempt(20), - wait=tenacity.wait_fixed(1)) -def retry_check_function_return_value_table(f, expected_values): - """Check if function f returns expected table.""" - result = f() - actual = set(tuple(x) for x in result) - correct = set(tuple(x) for x in expected_values) - extra = actual - correct - missing = correct - actual - if len(extra) > 0 or len(missing) > 0: - s = "Actual: %s\nExpected: %s\n" % (result, expected_values) - if len(extra) > 0: - s += "Extra: %s\n" % extra - if len(missing) > 0: - s += "Missing: %s\n" % missing - raise TestFailureException(s) - - -class FakeRequest(object): - def __init__(self, body): - self.body = json.dumps(body) - - -class FakeServiceObj(object): - def __init__(self): - self.state = {} - - -class TestFailureException(Exception): - """Custom exception thrown on test failure - - Facilitates using assertRaises to check for failure on retry tests - (generic Exception in assertRaises disallowed by pep8 check/gate) - """ - def __init__(self, *args, **kwargs): - Exception.__init__(self, *args, **kwargs) diff --git a/congress/tests/library_service/test_library_service.py b/congress/tests/library_service/test_library_service.py deleted file mode 100644 index 29ccec61..00000000 --- a/congress/tests/library_service/test_library_service.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2017 VMware Inc. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
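Because each helper above is wrapped in @tenacity.retry, a condition that never becomes true surfaces as tenacity.RetryError once the attempts are exhausted. Tests that need to prove something never happens lean on exactly that, as in this usage sketch taken from the replicated-PE test earlier in this diff (run inside a TestCase method):

import tenacity
from congress.tests import helper

# If dsd.exec_history never reaches length 3, the retry loop inside
# retry_check_function_return_value exhausts its attempts and tenacity
# raises RetryError, which the test counts as success.
self.assertRaises(tenacity.RetryError,
                  helper.retry_check_function_return_value,
                  lambda: len(dsd.exec_history), 3)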
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import copy - -from congress import exception -from congress.library_service import library_service -from congress.tests import base - - -class TestLibraryService(base.SqlTestCase): - - def setUp(self): - super(TestLibraryService, self).setUp() - self.library = library_service.LibraryService('lib-test') - self.library.delete_all_policies() # clear pre-loaded library policies - - self.policy1 = {'name': 'policy1', 'abbreviation': 'abbr', - 'kind': 'database', 'description': 'descrip', - 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]} - - self.policy2 = {'name': 'policy2', 'abbreviation': 'abbr', - 'kind': 'database', 'description': 'descrip', - 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]} - - self.policy1_meta = copy.deepcopy(self.policy1) - self.policy2_meta = copy.deepcopy(self.policy2) - del self.policy1_meta['rules'] - del self.policy2_meta['rules'] - - def test_create_policy_no_name(self): - self.assertRaises(exception.InvalidPolicyInput, - self.library.create_policy, {'rules': []}) - - def test_create_policy_no_rules(self): - self.assertRaises(exception.InvalidPolicyInput, - self.library.create_policy, {'name': 'policy1'}) - - def test_create_policy_bad_name(self): - self.assertRaises(exception.PolicyException, - self.library.create_policy, - {'name': 'disallowed-hyphen', 'rules': []}) - - def test_create_policy_default(self): - res = self.library.create_policy({'name': 'policy1', 'rules': []}) - self.assertEqual(res, {'id': res['id'], 'abbreviation': 'polic', - 'kind': 'nonrecursive', 'name': 'policy1', - 'description': '', 'rules': []}) - - def test_create_policy(self): - policy_obj = self.library.create_policy(self.policy1) - self.policy1['id'] = policy_obj['id'] - self.assertEqual(policy_obj, self.policy1) - - def test_create_policy_duplicate(self): - self.library.create_policy({'name': 'policy1', 'rules': []}) - self.assertRaises(KeyError, self.library.create_policy, - {'name': 'policy1', 'rules': []}) - res = self.library.get_policies() - self.assertEqual(len(res), 1) - - def test_get_policy_empty(self): - res = self.library.get_policies() - self.assertEqual(res, []) - - self.assertRaises(KeyError, self.library.get_policy, - 'nosuchpolicy') - - def test_create_get_policy(self): - policy_obj = self.library.create_policy(self.policy1) - self.policy1['id'] = policy_obj['id'] - self.policy1_meta['id'] = policy_obj['id'] - res = self.library.get_policies() - self.assertEqual(res, [self.policy1]) - - res = self.library.get_policy(policy_obj['id']) - self.assertEqual(res, self.policy1) - - res = self.library.get_policies(include_rules=True) - self.assertEqual(res, [self.policy1]) - - res = self.library.get_policy(policy_obj['id'], include_rules=False) - self.assertEqual(res, self.policy1_meta) - - self.assertRaises(KeyError, self.library.get_policy, 'no_such_policy') - - def test_delete_policy(self): - self.assertRaises(KeyError, self.library.delete_policy, - 'policy1') - - policy_obj = self.library.create_policy(self.policy1) - self.policy1['id'] = policy_obj['id'] - - policy_obj = self.library.create_policy(self.policy2) - self.policy2['id'] = policy_obj['id'] - - res = self.library.get_policies() - self.assertEqual(len(res), 2) - self.assertTrue(all(p in res - for p in [self.policy1, self.policy2])) - - self.assertRaises(KeyError, self.library.delete_policy, - 'no_such_policy') - - res 
= self.library.delete_policy(self.policy1['id']) - self.assertEqual(res, self.policy1) - - res = self.library.get_policies() - self.assertEqual(len(res), 1) - self.assertEqual(res[0], self.policy2) - - res = self.library.delete_policy(self.policy2['id']) - self.assertEqual(res, self.policy2) - - res = self.library.get_policies() - self.assertEqual(len(res), 0) - - def test_delete_policies(self): - self.library.delete_all_policies() - res = self.library.get_policies() - self.assertEqual(len(res), 0) - - self.library.create_policy( - {'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - self.library.create_policy( - {'name': 'policy2', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - self.library.delete_all_policies() - res = self.library.get_policies() - self.assertEqual(len(res), 0) - - def test_replace_policy(self): - policy1 = self.library.create_policy( - {'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - policy2 = self.library.create_policy( - {'name': 'policy2', 'abbreviation': 'abbr', 'kind': 'database', - 'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)', - 'comment': 'test comment', - 'name': 'testname'}]}) - - replacement_policy = { - "name": "new_name", - "description": "new test policy2 description", - "kind": "nonrecursive", - "abbreviation": "newab", - "rules": [{"rule": "r(x) :- c(x)", "comment": "test comment", - "name": "test name"}] - } - - # update non-existent item - self.assertRaises(KeyError, - self.library.replace_policy, 'no_such_id', - replacement_policy) - - # update existing item - self.library.replace_policy(policy2['id'], replacement_policy) - - replacement_policy_w_id = copy.deepcopy(replacement_policy) - replacement_policy_w_id['id'] = policy2['id'] - - ret = self.library.get_policies() - self.assertEqual(len(ret), 2) - self.assertTrue(all(p in ret - for p in [policy1, - replacement_policy_w_id])) diff --git a/congress/tests/policy_engines/__init__.py b/congress/tests/policy_engines/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress/tests/policy_engines/brokentest_agnostic.py b/congress/tests/policy_engines/brokentest_agnostic.py deleted file mode 100644 index 8a892095..00000000 --- a/congress/tests/policy_engines/brokentest_agnostic.py +++ /dev/null @@ -1,422 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
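Taken together, the library_service tests above fix the service's CRUD contract: create_policy fills in defaults and returns the stored object with a generated 'id', duplicate names raise KeyError, and replace_policy swaps the entire document stored under an existing id. A condensed usage sketch of that contract, using only calls exercised by the tests:

from congress.library_service import library_service

lib = library_service.LibraryService('lib-test')
lib.delete_all_policies()                     # start from an empty library

policy = lib.create_policy({'name': 'policy1', 'rules': []})
assert policy['kind'] == 'nonrecursive'       # default kind
assert policy['abbreviation'] == 'polic'      # derived from the name

lib.replace_policy(policy['id'],
                   {'name': 'new_name', 'kind': 'nonrecursive',
                    'abbreviation': 'newab', 'description': '',
                    'rules': []})
deleted = lib.delete_policy(policy['id'])     # returns the deleted policy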
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import os - -from oslo_log import log as logging - -from congress.datalog import compile -from congress.datalog import unify -from congress.datalog import utility -from congress.policy_engines import agnostic -from congress.tests import base - -LOG = logging.getLogger(__name__) - -NREC_THEORY = 'non-recursive theory' -DB_THEORY = 'database' -MAT_THEORY = 'materialized' - - -# This file contains tests that are likely broken. But the tests -# are good ones once we get the underlying data structures fixed. -# TODO(thinrichs): fix tests so they are working again. - -class TestRuntime(base.TestCase): - - def prep_runtime(self, code=None, msg=None, target=None): - # compile source - if msg is not None: - LOG.debug(msg) - if code is None: - code = "" - if target is None: - target = MAT_THEORY - run = agnostic.Runtime() - run.theory[NREC_THEORY] = agnostic.NonrecursiveRuleTheory() - run.theory[DB_THEORY] = agnostic.Database() - run.theory[MAT_THEORY] = agnostic.MaterializedViewTheory() - run.debug_mode() - run.insert(code, target=target) - return run - - def check_class(self, run, correct_database_code, msg=None): - """Test MAT_THEORY. - - Check that runtime RUN's MAT_THEORY theory - has exactly the same contents as CORRECT_DATABASE_CODE. - """ - self.open(msg) - db_class = run.theory[MAT_THEORY].database - # self.showdb(run) - correct = agnostic.string_to_database(correct_database_code) - self.check_db_diffs(db_class, correct, msg) - self.close(msg) - - def check_db(self, run, correct_database_code, msg=None): - """Test DB_THEORY. - - Check that runtime RUN.theory[DB_THEORY] is - equal to CORRECT_DATABASE_CODE. - """ - # extract correct answer from correct_database_code - self.open(msg) - correct_database = agnostic.string_to_database(correct_database_code) - self.check_db_diffs(run.theory[DB_THEORY], - correct_database, msg) - self.close(msg) - - def check_db_diffs(self, actual, correct, msg): - extra = actual - correct - missing = correct - actual - extra = [e for e in extra if not e[0].startswith("___")] - missing = [m for m in missing if not m[0].startswith("___")] - self.output_diffs(extra, missing, msg, actual=actual) - - def output_diffs(self, extra, missing, msg, actual=None): - if len(extra) > 0: - LOG.debug("Extra tuples") - LOG.debug(", ".join([str(x) for x in extra])) - if len(missing) > 0: - LOG.debug("Missing tuples") - LOG.debug(", ".join([str(x) for x in missing])) - if len(extra) > 0 or len(missing) > 0: - LOG.debug("Resulting database: %s", actual) - self.assertEqual(0, len(extra), msg) - self.assertEqual(0, len(missing), msg) - - def check_equal(self, actual_code, correct_code, msg=None, equal=None): - def minus(iter1, iter2, invert=False): - extra = [] - for i1 in iter1: - found = False - for i2 in iter2: - # for asymmetric equality checks - if invert: - test_result = equal(i2, i1) - else: - test_result = equal(i1, i2) - if test_result: - found = True - break - if not found: - extra.append(i1) - return extra - if equal is None: - equal = lambda x, y: x == y - LOG.debug("** Checking equality: %s **", msg) - actual = compile.parse(actual_code) - correct = compile.parse(correct_code) - extra = minus(actual, correct) - # in case EQUAL is asymmetric, always supply actual as the first arg - missing = minus(correct, actual, invert=True) - self.output_diffs(extra, missing, msg) - LOG.debug("** Finished equality: %s **", msg) - - def check_same(self, actual_code, 
correct_code, msg=None): - """Checks if ACTUAL_CODE is a variable-renaming of CORRECT_CODE.""" - return self.check_equal( - actual_code, correct_code, msg=msg, - equal=lambda x, y: unify.same(x, y) is not None) - - def check_instance(self, actual_code, correct_code, msg=None): - """Checks if ACTUAL_CODE is an instance of CORRECT_CODE.""" - return self.check_equal( - actual_code, correct_code, msg=msg, - equal=lambda x, y: unify.instance(x, y) is not None) - - def check_proofs(self, run, correct, msg=None): - """Test proofs. - - Check that the proofs stored in runtime RUN are exactly - those in CORRECT. - """ - # example - # check_proofs(run, {'q': {(1,): - # Database.ProofCollection([{'x': 1, 'y': 2}])}}) - - errs = [] - checked_tables = set() - for table in run.database.table_names(): - if table in correct: - checked_tables.add(table) - for dbtuple in run.database[table]: - if dbtuple.tuple in correct[table]: - if dbtuple.proofs != correct[table][dbtuple.tuple]: - errs.append( - "For table {} tuple {}\n Computed: {}\n " - "Correct: {}".format( - table, str(dbtuple), - str(dbtuple.proofs), - str(correct[table][dbtuple.tuple]))) - for table in set(correct.keys()) - checked_tables: - errs.append("Table {} had a correct answer but did not exist " - "in the database".format(table)) - if len(errs) > 0: - # LOG.debug("Check_proof errors:\n%s", "\n".join(errs)) - self.fail("\n".join(errs)) - - def showdb(self, run): - LOG.debug("Resulting DB: %s", - run.theory[run.CLASSIFY_THEORY].database | - run.theory[run.DATABASE] | - run.theory[run.ENFORCEMENT_THEORY].database) - - def insert(self, run, alist, target=None): - if target is None: - target = MAT_THEORY - run.insert(tuple(alist), target=target) - - def delete(self, run, alist): - run.delete(tuple(alist)) - - def test_remediation(self): - """Test remediation computation.""" - def check(action_code, classify_code, query, correct, msg): - run = self.prep_runtime() - actth = run.ACTION_THEORY - clsth = run.CLASSIFY_THEORY - run.insert(action_code, target=actth) - run.insert(classify_code, target=clsth) - self.showdb(run) - self.check_equal(run.remediate(query), correct, msg) - - # simple - action_code = ('action("a")' - 'p-(x) :- a(x)') - class_code = ('err(x) :- p(x)' - 'p(1)') - check(action_code, class_code, 'err(1)', 'p-(1) :- a(1)', 'Monadic') - - # rules in action theory - action_code = ('action("a")' - 'p-(x) :- q(x)' - 'q(x) :- a(x)') - class_code = ('err(x) :- p(x)' - 'p(1)') - check(action_code, class_code, 'err(1)', 'p-(1) :- a(1)', - 'Monadic, indirect') - - # multiple conditions in error - action_code = ('action("a")' - 'action("b")' - 'p-(x) :- a(x)' - 'q-(x) :- b(x)') - class_code = ('err(x) :- p(x), q(x)' - 'p(1)' - 'q(1)') - check(action_code, class_code, 'err(1)', - 'p-(1) :- a(1) q-(1) :- b(1)', - 'Monadic, two conditions, two actions') - - def test_access_control(self): - """Test access control: whether a given action is permitted.""" - def create(ac_code, class_code): - run = self.prep_runtime() - - acth = run.ACCESSCONTROL_THEORY - permitted, errors = run.insert(ac_code, target=acth) - self.assertTrue(permitted, - "Error in access control policy: {}".format( - utility.iterstr(errors))) - - clsth = run.CLASSIFY_THEORY - permitted, errors = run.insert(class_code, target=clsth) - self.assertTrue(permitted, "Error in classifier policy: {}".format( - utility.iterstr(errors))) - return run - - def check_true(run, query, support='', msg=None): - result = run.access_control(query, support) - self.assertTrue(result, - "Error in access control test
{}".format(msg)) - - def check_false(run, query, support='', msg=None): - result = run.access_control(query, support) - self.assertFalse(result, - "Error in access control test {}".format(msg)) - - # Only checking basic I/O interface for the access_control request. - # Basic inference algorithms are tested elsewhere. - - # Simple - ac_code = ('action(x) :- q(x)') - classify_code = 'q(2)' - run = create(ac_code, classify_code) - check_true(run, "action(2)", msg="Simple true action") - check_false(run, "action(1)", msg="Simple false action") - - # Options - ac_code = ('action(x, y) :- q(x), options:value(y, "name", name), ' - 'r(name)') - classify_code = 'q(2) r("alice")' - run = create(ac_code, classify_code) - check_true(run, 'action(2,18)', 'options:value(18, "name", "alice")', - msg="Single option true") - check_false(run, 'action(2,18)', 'options:value(18, "name", "bob")', - msg="Single option false") - - # Multiple Options - ac_code = ('action(x, y) :- q(x), options:value(y, "name", name), ' - 'r(name), options:value(y, "age", 30)') - classify_code = 'q(2) r("alice")' - run = create(ac_code, classify_code) - check_true(run, 'action(2,18)', 'options:value(18, "name", "alice") ' - 'options:value(18, "age", 30)', msg="Multiple option true") - check_false(run, 'action(2, 18)', 'options:value(18, "name", "bob") ' - 'options:value(18, "age", 30)', - msg="Multiple option false") - - def test_enforcement(self): - """Test enforcement.""" - def prep_runtime(enforce_theory, action_theory, class_theory): - run = agnostic.Runtime() - run.insert(enforce_theory, target=run.ENFORCEMENT_THEORY) - run.insert(action_theory, target=run.ACTION_THEORY) - run.insert(class_theory, target=run.CLASSIFY_THEORY) - return run - enforce = 'act(x) :- p(x)' - action = 'action("act")' - run = prep_runtime(enforce, action, "") - run.insert('p(1)') - self.check_equal(run.logger.content(), 'act(1)', 'Insert') - run.logger.empty() - run.insert('p(1)') - self.check_equal(run.logger.content(), '', 'Insert again') - run.insert('p(2)') - self.check_equal(run.logger.content(), 'act(2)', 'Insert different') - run.logger.empty() - run.delete('p(2)') - self.check_equal(run.logger.content(), '', 'Delete') - - def test_neutron_actions(self): - """Test our encoding of the Neutron actions basics by simulation.""" - def check(query, action_sequence, correct, msg): - actual = run.simulate(query, action_sequence) - LOG.debug("Simulate results: %s", actual) - self.check_instance(actual, correct, msg) - - full_path = os.path.realpath(__file__) - path = os.path.dirname(full_path) - neutron_path = path + "/../../../examples/neutron.action" - run = agnostic.Runtime() - run.debug_mode() - # load_file does not exist any longer. 
- permitted, errs = run.load_file(neutron_path, target=run.ACTION_THEORY) - if not permitted: - self.assertTrue(permitted, "Error in Neutron file: {}".format( - "\n".join([str(x) for x in errs]))) - return - - # Ports - query = 'neutron:port(x1, x2, x3, x4, x5, x6, x7, x8, x9)' - acts = 'neutron:create_port("net1", 17), sys:user("tim") :- true' - correct = ('neutron:port(id, "net1", name, mac, "null",' - '"null", z, w, "tim")') - check(query, acts, correct, 'Simple port creation') - - query = 'neutron:port(x1, x2, x3, x4, x5, x6, x7, x8, x9)' - # result(uuid): simulation-specific table that holds the results - # of the last action invocation - acts = ('neutron:create_port("net1", 17), sys:user("tim") :- true ' - 'neutron:update_port(uuid, 18), sys:user("tim"), ' - ' options:value(18, "name", "tims port") :- result(uuid) ') - correct = ('neutron:port(id, "net1", "tims port", mac, "null",' - '"null", z, w, "tim")') - check(query, acts, correct, 'Port create, update') - - query = 'neutron:port(x1, x2, x3, x4, x5, x6, x7, x8, x9)' - # result(uuid): simulation-specific table that holds the results - # of the last action invocation - acts = ('neutron:create_port("net1", 17), sys:user("tim") :- true ' - 'neutron:update_port(uuid, 18), sys:user("tim"), ' - ' options:value(18, "name", "tims port") :- result(uuid) ' - 'neutron:delete_port(uuid), sys:user("tim")' - ' :- result(uuid) ') - correct = '' - check(query, acts, correct, 'Port create, update, delete') - - # Networks - query = ('neutron:network(id, name, status, admin_state, shared,' - 'tenant_id)') - acts = 'neutron:create_network(17), sys:user("tim") :- true' - correct = 'neutron:network(id, "", status, "true", "true", "tim")' - check(query, acts, correct, 'Simple network creation') - - query = ('neutron:network(id, name, status, admin_state, ' - 'shared, tenant_id)') - acts = ('neutron:create_network(17), sys:user("tim") :- true ' - 'neutron:update_network(uuid, 18), sys:user("tim"), ' - ' options:value(18, "admin_state", "false") :- result(uuid)') - correct = 'neutron:network(id, "", status, "false", "true", "tim")' - check(query, acts, correct, 'Network creation, update') - - query = ('neutron:network(id, name, status, admin_state, shared, ' - 'tenant_id)') - acts = ('neutron:create_network(17), sys:user("tim") :- true ' - 'neutron:update_network(uuid, 18), sys:user("tim"), ' - ' options:value(18, "admin_state", "false") :- result(uuid)' - 'neutron:delete_network(uuid) :- result(uuid)') - correct = '' - check(query, acts, correct, 'Network creation, update, delete') - - # Subnets - query = ('neutron:subnet(id, name, network_id, ' - 'gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)') - acts = ('neutron:create_subnet("net1", "10.0.0.1/24", 17), ' - 'sys:user("tim") :- true') - correct = ('neutron:subnet(id, "", "net1", gateway_ip, 4, ' - '"10.0.0.1/24", "true", "tim")') - check(query, acts, correct, 'Simple subnet creation') - - query = ('neutron:subnet(id, name, network_id, ' - 'gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)') - acts = ('neutron:create_subnet("net1", "10.0.0.1/24", 17), ' - 'sys:user("tim") :- true ' - 'neutron:update_subnet(uuid, 17), sys:user("tim"), ' - ' options:value(17, "enable_dhcp", "false") :- result(uuid)') - correct = ('neutron:subnet(id, "", "net1", gateway_ip, 4, ' - '"10.0.0.1/24", "false", "tim")') - check(query, acts, correct, 'Subnet creation, update') - - query = ('neutron:subnet(id, name, network_id, ' - 'gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)') - acts =
('neutron:create_subnet("net1", "10.0.0.1/24", 17), ' - 'sys:user("tim") :- true ' - 'neutron:update_subnet(uuid, 17), sys:user("tim"), ' - ' options:value(17, "enable_dhcp", "false") :- result(uuid)' - 'neutron:delete_subnet(uuid) :- result(uuid)') - correct = '' - check(query, acts, correct, 'Subnet creation, update, delete') - - -def str2form(formula_string): - return compile.parse1(formula_string) - - -def str2pol(policy_string): - return compile.parse(policy_string) - - -def pol2str(policy): - return " ".join(str(x) for x in policy) - - -def form2str(formula): - return str(formula) diff --git a/congress/tests/policy_engines/disabled_test_vmplacement.py b/congress/tests/policy_engines/disabled_test_vmplacement.py deleted file mode 100644 index 52543d1c..00000000 --- a/congress/tests/policy_engines/disabled_test_vmplacement.py +++ /dev/null @@ -1,779 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import eventlet -from oslo_log import log as logging - -from congress.datalog import arithmetic_solvers -from congress.policy_engines import vm_placement -from congress.tests import base -from congress.tests import helper - -LOG = logging.getLogger(__name__) - -NREC_THEORY = 'non-recursive theory' - - -class TestEngine(base.TestCase): - - def test_parse(self): - engine = vm_placement.ComputePlacementEngine() - engine.debug_mode() - f = engine.parse1('nova:q(1)') - self.assertEqual(f.table.table, 'nova:q') - self.assertIsNone(f.table.service) - - f = engine.parse1('p(x) :- q(x)') - self.assertEqual(f.head.table.table, 'p') - self.assertEqual(f.body[0].table.table, 'q') - - def test_select(self): - engine = vm_placement.ComputePlacementEngine() - engine.debug_mode() - engine.insert('p(x) :- q(x)') - engine.insert('q(1)') - ans = engine.select('p(x)') - self.assertTrue(helper.datalog_equal(ans, 'p(1)')) - - def test_theory_in_head(self): - engine = vm_placement.ComputePlacementEngine() - engine.debug_mode() - engine.policy.insert(engine.parse1('p(x) :- nova:q(x)')) - engine.policy.insert(engine.parse1('nova:q(1)')) - ans = engine.policy.select(engine.parse1('p(x)')) - ans = " ".join(str(x) for x in ans) - self.assertTrue(helper.datalog_equal(ans, 'p(1)')) - - -class TestSetPolicy(base.TestCase): - """Tests for setting policy.""" - - def setUp(self): - # create DSE and add vm-placement engine and fake datasource - super(TestSetPolicy, self).setUp() - self.cage = helper.make_dsenode_new_partition("perf") - kwds = {} - kwds['name'] = 'fake' - kwds['args'] = helper.datasource_openstack_args() - self.fake = self.cage.create_service( - "congress.tests.fake_datasource.FakeDataSource", kwds) - self.fake.poll_time = 0 - self.cage.register_service(self.fake) - - kwds = {} - kwds['name'] = 'vmplace' - kwds['args'] = helper.datasource_openstack_args() - self.vmplace = self.cage.create_service( -
"congress.policy_engines.vm_placement.ComputePlacementEngine", - kwds) - self.vmplace.debug_mode() - self.vmplace.poll_time = 0 - self.cage.register_service(self.vmplace) - - def test_set_policy_subscriptions(self): - self.vmplace.set_policy('p(x) :- fake:q(x)') - helper.retry_check_subscriptions( - self.vmplace, [(self.fake.name, 'q')]) - helper.retry_check_subscribers( - self.fake, [(self.vmplace.name, 'q')]) - - def test_set_policy(self): - LOG.info("set_policy") - self.vmplace.set_policy('p(x) :- fake:q(x)') - self.fake.state = {'q': set([tuple([1]), tuple([2])])} - self.fake.poll() - ans = ('p(1) p(2)') - helper.retry_check_db_equal(self.vmplace, 'p(x)', ans) - - # TODO(thinrichs): add tests for data update - # Annoying since poll() saves self.state, invokes - # update_from_datasource (which updates self.state), - # computes deltas, and publishes. No easy way to inject - # a new value for self.state and get it to send non-empty - # deltas over the message bus. Probably want to extend - # fake_datasource to include a client (default to None), make - # update_from_datasource use that client to set self.state, - # and then mock out the client. - - # TODO(thinrichs): add tests for setting policy to something that - # requires tables to be unsubscribed from - - # TODO(thinrichs): test production_mode() - - -class TestLpLang(base.TestCase): - """Test the DatalogLp language.""" - def test_variables(self): - var1 = arithmetic_solvers.LpLang.makeVariable('alice', 1, 3.1) - var2 = arithmetic_solvers.LpLang.makeVariable('alice', 1, 3.1) - var3 = arithmetic_solvers.LpLang.makeVariable('alice', 1, 4.0) - self.assertEqual(var1, var2) - self.assertNotEqual(var1, var3) - - def test_or(self): - var1 = arithmetic_solvers.LpLang.makeVariable('alice', 1) - var2 = arithmetic_solvers.LpLang.makeVariable('bob', 1) - var3 = arithmetic_solvers.LpLang.makeVariable('charlie', 1) - p1 = arithmetic_solvers.LpLang.makeOr(var1, var2) - p2 = arithmetic_solvers.LpLang.makeOr(var1, var2) - p3 = arithmetic_solvers.LpLang.makeOr(var1, var3) - self.assertEqual(p1, p2) - self.assertNotEqual(p1, p3) - p4 = arithmetic_solvers.LpLang.makeOr(var1) - self.assertEqual(p4, var1) - p5 = arithmetic_solvers.LpLang.makeOr(var2, var1) - self.assertEqual(p1, p5) - - def test_and(self): - var1 = arithmetic_solvers.LpLang.makeVariable('alice', 1) - var2 = arithmetic_solvers.LpLang.makeVariable('bob', 1) - var3 = arithmetic_solvers.LpLang.makeVariable('charlie', 1) - p1 = arithmetic_solvers.LpLang.makeAnd(var1, var2) - p2 = arithmetic_solvers.LpLang.makeAnd(var1, var2) - p3 = arithmetic_solvers.LpLang.makeAnd(var1, var3) - self.assertEqual(p1, p2) - self.assertNotEqual(p1, p3) - p4 = arithmetic_solvers.LpLang.makeAnd(var1) - self.assertEqual(p4, var1) - p5 = arithmetic_solvers.LpLang.makeAnd(var2, var1) - self.assertEqual(p1, p5) - - def test_equal(self): - var1 = arithmetic_solvers.LpLang.makeVariable('alice', 1) - var2 = arithmetic_solvers.LpLang.makeVariable('bob', 1) - var3 = arithmetic_solvers.LpLang.makeVariable('charlie', 1) - p1 = arithmetic_solvers.LpLang.makeEqual(var1, var2) - p2 = arithmetic_solvers.LpLang.makeEqual(var1, var2) - p3 = arithmetic_solvers.LpLang.makeEqual(var1, var3) - self.assertEqual(p1, p2) - self.assertNotEqual(p1, p3) - p4 = arithmetic_solvers.LpLang.makeEqual(var2, var1) - self.assertEqual(p1, p4) - - def test_notequal(self): - var1 = arithmetic_solvers.LpLang.makeVariable('alice', 1) - var2 = arithmetic_solvers.LpLang.makeVariable('bob', 1) - var3 = arithmetic_solvers.LpLang.makeVariable('charlie', 1) - 
p1 = arithmetic_solvers.LpLang.makeNotEqual(var1, var2) - p2 = arithmetic_solvers.LpLang.makeNotEqual(var1, var2) - p3 = arithmetic_solvers.LpLang.makeNotEqual(var1, var3) - self.assertEqual(p1, p2) - self.assertNotEqual(p1, p3) - p4 = arithmetic_solvers.LpLang.makeNotEqual(var2, var1) - self.assertEqual(p1, p4) - - def test_arith(self): - var1 = arithmetic_solvers.LpLang.makeVariable('alice', 1) - var2 = arithmetic_solvers.LpLang.makeVariable('bob', 1) - p1 = arithmetic_solvers.LpLang.makeArith('lt', var1, var2) - p2 = arithmetic_solvers.LpLang.makeArith('lt', var1, var2) - p3 = arithmetic_solvers.LpLang.makeArith('gt', var1, var2) - self.assertEqual(p1, p2) - self.assertNotEqual(p1, p3) - - def test_complex(self): - var1 = arithmetic_solvers.LpLang.makeVariable('alice', 1) - var2 = arithmetic_solvers.LpLang.makeVariable('bob', 1) - arith1 = arithmetic_solvers.LpLang.makeArith('lt', var1, var2) - arith2 = arithmetic_solvers.LpLang.makeArith('lt', var1, var2) - arith3 = arithmetic_solvers.LpLang.makeArith('gt', var1, var2) - p1 = arithmetic_solvers.LpLang.makeEqual( - var1, arithmetic_solvers.LpLang.makeOr(arith1, arith2)) - p2 = arithmetic_solvers.LpLang.makeEqual( - var1, arithmetic_solvers.LpLang.makeOr(arith2, arith1)) - p3 = arithmetic_solvers.LpLang.makeEqual( - var1, arithmetic_solvers.LpLang.makeOr(arith1, arith3)) - - # equality - self.assertEqual(p1, p2) - self.assertNotEqual(p1, p3) - - # sets - s1 = set([arith1, p1]) - s2 = set([p1, arith1]) - s3 = set([arith1, arith2]) - self.assertEqual(s1, s2) - self.assertNotEqual(s1, s3) - - -class TestDatalogToLp(base.TestCase): - def check(self, code, data, query, ans, possibility=None): - if possibility is None: - possibility = [] - engine = vm_placement.ComputePlacementEngine( - arithmetic_solvers.LpLang()) - engine.debug_mode() - engine.insert(code) - for d in data: - engine.insert(d) - query = engine.parse1(query) - (rules, variables) = engine.datalog_to_lp(query, possibility) - LOG.info("Checking equality") - if not same_sets(rules, ans): - LOG.info("-- actual --") - for rule in rules: - LOG.info("%s", rule) - LOG.info("-- correct --") - for rule in ans: - LOG.info("%s", rule) - self.fail("actual and correct mismatch") - - def test_basic(self): - code = ('warning(id) :- ' - ' nova:host(id, zone, memory_capacity), ' - ' legacy:special_zone(zone), ' - ' ceilometer:mem_consumption(id, avg), ' - ' mul(0.75, memory_capacity, three_quarters_mem),' - ' lt(avg, three_quarters_mem)') - data = ('nova:host(123, "dmz", 10) ', - 'legacy:special_zone("dmz") ', - 'ceilometer:mem_consumption(123, 15)') # ignored - query = 'warning(x)' - ans = arithmetic_solvers.LpLang.makeExpr(['eq', - ['var', 'warning', 123], - ['lt', - ['var', 'hMemUse', 123], - 7.5]]) - self.check(code, data, query, [ans]) - - def test_multiple_rows(self): - code = ('warning(id) :- ' - ' nova:host(id, zone, memory_capacity), ' - ' legacy:special_zone(zone), ' - ' ceilometer:mem_consumption(id, avg), ' - ' mul(0.75, memory_capacity, three_quarters_mem),' - ' lt(avg, three_quarters_mem)') - data = ('nova:host(123, "dmz", 10) ', - 'nova:host(456, "dmz", 20) ', - 'legacy:special_zone("dmz") ', - 'ceilometer:mem_consumption(123, 15)') # ignored - query = 'warning(x)' - ans1 = arithmetic_solvers.LpLang.makeExpr(['eq', - ['var', 'warning', 123], - ['lt', - ['var', 'hMemUse', 123], - 7.5]]) - ans2 = arithmetic_solvers.LpLang.makeExpr(['eq', - ['var', 'warning', 456], - ['lt', - ['var', 'hMemUse', 456], - 15.0]]) - self.check(code, data, query, [ans1, ans2]) - - # def 
test_disjunction(self): - # code = ('warning(id) :- ' - # ' nova:host(id, zone, memory_capacity), ' - # ' legacy:special_zone(zone), ' - # ' ceilometer:mem_consumption(id, avg), ' - # ' mul(0.75, memory_capacity, three_quarters_mem),' - # ' lt(avg, three_quarters_mem)') - # data = ('nova:host(123, "dmz", 10) ', - # 'nova:host(456, "dmz", 20) ', - # 'nova:host(456, "dmz", 30) ', # doesn't really make sense - # 'legacy:special_zone("dmz") ', - # 'ceilometer:mem_consumption(123, 15)') # ignored - # query = 'warning(x)' - # ans1 = LpLang.makeExpr(['eq', - # ['var', 'warning', 123], - # ['lt', ['var', 'hMemUse', 123], 7.5]]) - # ans2 = LpLang.makeExpr(['eq', - # ['var', 'warning', 456], - # ['or', - # ['lt', ['var', 'hMemUse', 123], 7.5], - # ['lt', ['var', 'hMemUse', 456], 15.0]]]) - # self.check(code, data, query, [ans1, ans2]) - - def test_multiple_constraints(self): - code = ('warning(id) :- ' - ' nova:host(id, zone, memory_capacity), ' - ' legacy:special_zone(zone), ' - ' ceilometer:mem_consumption(id, avg), ' - ' mul(0.75, memory_capacity, three_quarters_mem),' - ' lt(avg, three_quarters_mem),' - ' lt(avg, 100)') - data = ('nova:host(123, "dmz", 10) ', - 'legacy:special_zone("dmz") ', - 'ceilometer:mem_consumption(123, 15)') # ignored - query = 'warning(x)' - ans1 = arithmetic_solvers.LpLang.makeExpr( - ['eq', - ['var', 'warning', 123], - ['and', ['lt', ['var', 'hMemUse', 123], 7.5], - ['lt', ['var', 'hMemUse', 123], 100]]]) - self.check(code, data, query, [ans1]) - - -class TestDatalogPolicyToLp(base.TestCase): - def check(self, actual, correct): - extra = diff(actual, correct) - missing = diff(correct, actual) - if len(extra) or len(missing): - LOG.info("-- missing --") - for rule in missing: - LOG.info("%s", rule) - LOG.info("-- extra --") - for rule in extra: - LOG.info("%s", rule) - self.fail("actual and correct mismatch") - - def test_domain_axioms(self): - engine = vm_placement.ComputePlacementEngine() - engine.debug_mode() - engine.insert('nova:host(123, 1, 10)') - engine.insert('nova:host(456, 1, 10)') - engine.insert('nova:server(789, "alice", 123)') - engine.insert('nova:server(101, "bob", 123)') - engine.insert('ceilometer:mem_consumption(789, 10)') - engine.insert('ceilometer:mem_consumption(101, 20)') - self.assertEqual(set(engine.get_hosts()), set([123, 456])) - self.assertEqual(set(engine.get_guests()), set([789, 101])) - ans1 = arithmetic_solvers.LpLang.makeExpr( - ['eq', - ['var', 'hMemUse', 456], - ['plus', - ['times', ['var', 'assign', 101, 456], 20], - ['times', ['var', 'assign', 789, 456], 10]]]) - ans2 = arithmetic_solvers.LpLang.makeExpr( - ['eq', - ['var', 'hMemUse', 123], - ['plus', - ['times', ['var', 'assign', 101, 123], 20], - ['times', ['var', 'assign', 789, 123], 10]]]) - ans3 = arithmetic_solvers.LpLang.makeExpr( - ['eq', - 1, - ['plus', - ['var', 'assign', 101, 123], - ['var', 'assign', 101, 456]]]) - ans4 = arithmetic_solvers.LpLang.makeExpr( - ['eq', - 1, - ['plus', - ['var', 'assign', 789, 123], - ['var', 'assign', 789, 456]]]) - self.check(engine.domain_axioms(), [ans1, ans2, ans3, ans4]) - - def test_policy_to_lp(self): - engine = vm_placement.ComputePlacementEngine() - engine.debug_mode() - engine.insert('nova:host(123, 1, 10)') - engine.insert('nova:host(456, 1, 10)') - engine.insert('nova:server(789, "alice", 123)') - engine.insert('nova:server(101, "bob", 123)') - code = ('warning(id) :- ' - ' nova:host(id, zone, memory_capacity), ' - ' legacy:special_zone(zone), ' - ' ceilometer:mem_consumption(id, avg), ' - ' mul(0.75, memory_capacity, 
three_quarters_mem),' - ' lt(avg, three_quarters_mem)') - engine.insert(code) - engine.insert('legacy:special_zone(1)') - engine.insert('ceilometer:mem_consumption(123, 15)') - engine.insert('ceilometer:mem_consumption(456, 20)') - engine.insert('ceilometer:mem_consumption(789, 5)') - engine.insert('ceilometer:mem_consumption(101, 10)') - opt, constraints = engine.policy_to_lp() - - optans = arithmetic_solvers.LpLang.makeExpr(['or', - ['var', 'warning', 456], - ['var', 'warning', 123]]) - ans1 = arithmetic_solvers.LpLang.makeExpr(['eq', - ['var', 'warning', 123], - ['lt', - ['var', 'hMemUse', 123], - 7.5]]) - ans2 = arithmetic_solvers.LpLang.makeExpr(['eq', - ['var', 'warning', 456], - ['lt', - ['var', 'hMemUse', 456], - 7.5]]) - ans3 = arithmetic_solvers.LpLang.makeExpr( - ['eq', - ['var', 'hMemUse', 456], - ['plus', - ['times', ['var', 'assign', 101, 456], 10], - ['times', ['var', 'assign', 789, 456], 5]]]) - ans4 = arithmetic_solvers.LpLang.makeExpr( - ['eq', - ['var', 'hMemUse', 123], - ['plus', - ['times', ['var', 'assign', 101, 123], 10], - ['times', ['var', 'assign', 789, 123], 5]]]) - ans5 = arithmetic_solvers.LpLang.makeExpr( - ['eq', - 1, - ['plus', - ['var', 'assign', 101, 123], - ['var', 'assign', 101, 456]]]) - ans6 = arithmetic_solvers.LpLang.makeExpr( - ['eq', - 1, - ['plus', - ['var', 'assign', 789, 123], - ['var', 'assign', 789, 456]]]) - - self.check([opt], [optans]) - self.check(constraints, [ans1, ans2, ans3, ans4, ans5, ans6]) - - -class TestPureLp(base.TestCase): - """Test conversion of Datalog LP to pure LP.""" - def check(self, expr, bounds, correct): - lang = arithmetic_solvers.LpLang() - actual = lang.pure_lp(lang.makeExpr(expr), bounds) - correct = [lang.makeExpr(x) for x in correct] - if not same_sets(actual, correct): - LOG.info("-- actual --") - for rule in actual: - LOG.info("%s", rule) - LOG.info("-- correct --") - for rule in correct: - LOG.info("%s", rule) - self.fail("actual and correct mismatch") - - def test_real_1(self): - """Test a real use case.""" - exp = ['eq', ['var', 'warning', 123], - ['lteq', ['var', 'hMemUse', 123], 7.5]] - bounds = {('VAR', 'hMemUse', 123): 100} - y = ['var', 'warning', 123] - x = ['minus', - ['minus', ['var', 'hMemUse', 123], 7.5], - arithmetic_solvers.LpLang.MIN_THRESHOLD] - upperx = 101 - c1 = ['lteq', ['times', -1, x], ['times', y, upperx]] - c2 = ['lt', x, ['times', ['minus', 1, y], upperx]] - self.check(exp, bounds, [c1, c2]) - - def test_real_2(self): - e = ['eq', - ['var', 'hMemUse', 456], - ['plus', - ['times', ['var', 'assign', 101, 456], ['var', 'gMemUse', 101]], - ['times', - ['var', 'assign', 789, 456], - ['var', 'gMemUse', 789]]]] - ans1 = ['eq', - ['var', 'internal', 0], - ['times', - ['var', 'assign', 101, 456], - ['var', 'gMemUse', 101]]] - ans2 = ['eq', - ['var', 'internal', 1], - ['times', - ['var', 'assign', 789, 456], - ['var', 'gMemUse', 789]]] - ans3 = ['eq', - ['var', 'hMemUse', 456], - ['plus', ['var', 'internal', 0], ['var', 'internal', 1]]] - self.check(e, {}, [ans1, ans2, ans3]) - - -class TestFlatten(base.TestCase): - """Test reformulation of embedded operators into flattened formulas.""" - def check(self, input_expression, correct, correct_support): - lang = arithmetic_solvers.LpLang() - input_expr = lang.makeExpr(input_expression) - LOG.info("input_expression: %s", input_expr) - actual, actual_support = lang.flatten(input_expr, indicator=False) - correct = lang.makeExpr(correct) - correct_support = [lang.makeExpr(x) for x in correct_support] - if (actual != correct or not same_sets( - 
actual_support, correct_support)): - LOG.info("-- actual: %s", actual) - LOG.info("-- actual support --") - for rule in actual_support: - LOG.info("%s", rule) - LOG.info("-- correct: %s", correct) - LOG.info("-- correct support --") - for rule in correct_support: - LOG.info("%s", rule) - self.fail("actual and correct mismatch") - - def test_flat(self): - self.check(['or', 1, 2], ['or', 1, 2], []) - self.check(['and', 1, 2], ['and', 1, 2], []) - - def test_nested(self): - self.check(['or', 1, ['or', 2, 3]], # orig - ['or', 1, ['var', 'internal', 0]], # flat - [['eq', ['var', 'internal', 0], # support - ['or', 2, 3]]]) - self.check(['or', 1, ['and', 2, 3]], # orig - ['or', 1, ['var', 'internal', 0]], # flat - [['eq', ['var', 'internal', 0], # support - ['and', 2, 3]]]) - self.check(['eq', # orig - ['var', 1, 2], - ['or', - ['and', 3, 4], - ['and', 5, 6]]], - ['eq', # flat - ['var', 1, 2], - ['or', ['var', 'internal', 0], ['var', 'internal', 1]]], - [['eq', ['var', 'internal', 0], ['and', 3, 4]], # support - ['eq', ['var', 'internal', 1], ['and', 5, 6]]]) - - def test_real(self): - self.check(['eq', ['var', 'warning', 123], - ['lt', ['var', 'hMemUse', 123], 7.5]], - ['eq', ['var', 'warning', 123], - ['lt', ['var', 'hMemUse', 123], 7.5]], - []) - self.check( - ['eq', # orig - ['var', 'hMemUse', 456], - ['or', - ['and', ['var', 'assign', 101, 456], ['var', 'gMemUse', 101]], - ['and', ['var', 'assign', 789, 456], ['var', 'gMemUse', 789]]]], - ['eq', # flat - ['var', 'hMemUse', 456], - ['or', ['var', 'internal', 0], ['var', 'internal', 1]]], - [['eq', # support - ['var', 'internal', 0], - ['and', ['var', 'assign', 101, 456], ['var', 'gMemUse', 101]]], - ['eq', - ['var', 'internal', 1], - ['and', ['var', 'assign', 789, 456], ['var', 'gMemUse', 789]]]]) - - -class TestIndicatorElim(base.TestCase): - """Test binary indicator variable elimination.""" - def check(self, input_expression, bounds, correct_expressions): - lang = arithmetic_solvers.LpLang() - input_expr = lang.makeExpr(input_expression) - LOG.info("input_expression: %s", input_expr) - actual = lang.indicator_to_pure_lp(input_expr, bounds) - correct = [lang.makeExpr(x) for x in correct_expressions] - extra = diff(actual, correct) - missing = diff(correct, actual) - if len(extra) or len(missing): - LOG.info("-- missing --") - for rule in missing: - LOG.info("%s", rule) - LOG.info("-- extra --") - for rule in extra: - LOG.info("%s", rule) - self.fail("actual and correct mismatch") - - def test_simple(self): - exp = ['eq', ['var', 'warning', 123], - ['lteq', ['var', 'hMemUse', 123], 7.5]] - bounds = {('VAR', 'hMemUse', 123): 100} - y = ['var', 'warning', 123] - x = ['minus', ['minus', ['var', 'hMemUse', 123], 7.5], - arithmetic_solvers.LpLang.MIN_THRESHOLD] - upperx = 101 - c1 = ['lteq', ['times', -1, x], ['times', y, upperx]] - c2 = ['lt', x, ['times', ['minus', 1, y], upperx]] - self.check(exp, bounds, [c1, c2]) - - -class TestToLtZero(base.TestCase): - """Test conversion of inequality to form A < 0.""" - small = arithmetic_solvers.LpLang.MIN_THRESHOLD - - def check(self, expr, correct): - lang = arithmetic_solvers.LpLang() - actual = lang.arith_to_lt_zero(lang.makeExpr(expr)) - self.assertEqual(actual, lang.makeExpr(correct)) - - def test_lt(self): - expr = ['lt', 7, 8] - self.check(expr, ['lt', ['minus', 7, 8], 0]) - - def test_lteq(self): - expr = ['lteq', 10, 11] - self.check(expr, ['lt', ['minus', ['minus', 10, 11], self.small], 0]) - - def test_gt(self): - expr = ['gt', 12, 13] - self.check(expr, ['lt', ['minus', 13, 12], 0]) - - def 
test_gteq(self): - expr = ['gteq', 14, 15] - self.check(expr, ['lt', ['minus', ['minus', 15, 14], self.small], 0]) - - -class TestUpperBound(base.TestCase): - """Test upper bound computation.""" - def check(self, expr, bounds, correct): - lang = arithmetic_solvers.LpLang() - actual = lang.upper_bound(lang.makeExpr(expr), bounds) - self.assertEqual(actual, correct) - - def test_times(self): - exp = ['times', ['VAR', 'a', 1], ['VAR', 'a', 2]] - bounds = {('VAR', 'a', 1): 2, ('VAR', 'a', 2): 3} - self.check(exp, bounds, 6) - - def test_plus(self): - exp = ['plus', ['VAR', 'a', 1], ['VAR', 'a', 2]] - bounds = {('VAR', 'a', 1): 2, ('VAR', 'a', 2): 3} - self.check(exp, bounds, 5) - - def test_minus(self): - exp = ['minus', ['VAR', 'a', 1], ['VAR', 'a', 2]] - bounds = {('VAR', 'a', 1): 2, ('VAR', 'a', 2): 3} - self.check(exp, bounds, 2) - - def test_nested(self): - exp = ['plus', - ['times', ['VAR', 'a', 1], ['VAR', 'a', 2]], - ['minus', ['VAR', 'a', 3], ['VAR', 'a', 4]]] - bounds = {('VAR', 'a', 1): 5, - ('VAR', 'a', 2): 7, - ('VAR', 'a', 3): 2, - ('VAR', 'a', 4): 1} - self.check(exp, bounds, 37) - - def test_nested2(self): - exp = ['times', - ['times', ['VAR', 'a', 1], ['VAR', 'a', 2]], - ['minus', ['VAR', 'a', 3], ['VAR', 'a', 4]]] - bounds = {('VAR', 'a', 1): 5, - ('VAR', 'a', 2): 7, - ('VAR', 'a', 3): 2, - ('VAR', 'a', 4): 1} - self.check(exp, bounds, 70) - - -class TestComputeVmAssignment(base.TestCase): - """Test full computation of VM assignment.""" - def test_two_servers(self): - engine = vm_placement.ComputePlacementEngine() - engine.debug_mode() - engine.insert('nova:host(123, 1, 10)') - engine.insert('nova:host(456, 1, 5)') - engine.insert('nova:server(789, "alice", 123)') - engine.insert('nova:server(101, "bob", 123)') - code = ('warning(id) :- ' - ' nova:host(id, zone, memory_capacity), ' - ' legacy:special_zone(zone), ' - ' ceilometer:mem_consumption(id, avg), ' - ' mul(0.75, memory_capacity, three_quarters_mem),' - ' lteq(avg, three_quarters_mem)') - engine.insert(code) - engine.insert('legacy:special_zone(1)') - engine.insert('ceilometer:mem_consumption(789, 2)') - engine.insert('ceilometer:mem_consumption(101, 2)') - ans = engine.calculate_vm_assignment() - LOG.info("assignment: %s", ans) - self.assertEqual(ans, {101: 456, 789: 456}) - - def test_three_servers(self): - engine = vm_placement.ComputePlacementEngine() - engine.debug_mode() - engine.insert('nova:host(100, 1, 6)') - engine.insert('nova:host(101, 1, 10)') - engine.insert('nova:server(200, "alice", 100)') - engine.insert('nova:server(201, "bob", 100)') - engine.insert('nova:server(202, "bob", 101)') - code = ('warning(id) :- ' - ' nova:host(id, zone, memory_capacity), ' - ' legacy:special_zone(zone), ' - ' ceilometer:mem_consumption(id, avg), ' - ' mul(0.75, memory_capacity, three_quarters_mem),' - ' lteq(avg, three_quarters_mem)') - engine.insert(code) - engine.insert('legacy:special_zone(1)') - engine.insert('ceilometer:mem_consumption(200, 2)') - engine.insert('ceilometer:mem_consumption(201, 2)') - engine.insert('ceilometer:mem_consumption(202, 2)') - ans = engine.calculate_vm_assignment() - LOG.info("assignment: %s", ans) - self.assertEqual({200: 100, 201: 100, 202: 100}, ans) - - def test_set_policy(self): - engine = vm_placement.ComputePlacementEngine(inbox=eventlet.Queue(), - datapath=eventlet.Queue()) - engine.debug_mode() - p = ( - 'nova:host(100, 1, 6)' - 'nova:host(101, 1, 10)' - 'nova:server(200, "alice", 100)' - 'nova:server(201, "bob", 100)' - 'nova:server(202, "bob", 101)' - 'warning(id) :- ' - ' 
nova:host(id, zone, memory_capacity), ' - ' legacy:special_zone(zone), ' - ' ceilometer:mem_consumption(id, avg), ' - ' mul(0.75, memory_capacity, three_quarters_mem),' - ' lteq(avg, three_quarters_mem)' - 'legacy:special_zone(1)' - 'ceilometer:mem_consumption(100, 4)' - 'ceilometer:mem_consumption(101, 2)' - 'ceilometer:mem_consumption(200, 2)' - 'ceilometer:mem_consumption(201, 2)' - 'ceilometer:mem_consumption(202, 2)') - engine.set_policy(p) - LOG.info("assignment: %s", engine.guest_host_assignment) - self.assertEqual({200: 100, 201: 100, 202: 100}, - engine.guest_host_assignment) - - def test_set_policy_with_dashes(self): - engine = vm_placement.ComputePlacementEngine(inbox=eventlet.Queue(), - datapath=eventlet.Queue()) - engine.debug_mode() - p = ( - 'nova:host("Server-100", 1, 6)' - 'nova:host("Server-101", 1, 10)' - 'nova:server(200, "alice", "Server-100")' - 'nova:server(201, "bob", "Server-100")' - 'nova:server(202, "bob", "Server-101")' - 'warning(id) :- ' - ' nova:host(id, zone, memory_capacity), ' - ' legacy:special_zone(zone), ' - ' ceilometer:mem_consumption(id, avg), ' - ' mul(0.75, memory_capacity, three_quarters_mem),' - ' lteq(avg, three_quarters_mem)' - 'legacy:special_zone(1)' - 'ceilometer:mem_consumption("Server-100", 4)' - 'ceilometer:mem_consumption("Server-101", 2)' - 'ceilometer:mem_consumption(200, 2)' - 'ceilometer:mem_consumption(201, 2)' - 'ceilometer:mem_consumption(202, 2)') - engine.set_policy(p) - LOG.info("assignment: %s", engine.guest_host_assignment) - self.assertEqual({200: 'Server-100', 201: 'Server-100', - 202: 'Server-100'}, - engine.guest_host_assignment) - - -def diff(iter1, iter2): - iter1 = list(iter1) - iter2 = list(iter2) - ans = [] - for x in iter1: - if x not in iter2: - ans.append(x) - return ans - - -def same_sets(iter1, iter2): - """Can't use set(iter1) == set(iter2) b/c hash doesn't respect OR.""" - iter1 = list(iter1) # make sure iter1 and iter2 are not Python sets - iter2 = list(iter2) - for x in iter1: - if x not in iter2: - return False - for x in iter2: - if x not in iter1: - return False - return True diff --git a/congress/tests/policy_engines/test_agnostic.py b/congress/tests/policy_engines/test_agnostic.py deleted file mode 100644 index bfacb8c6..00000000 --- a/congress/tests/policy_engines/test_agnostic.py +++ /dev/null @@ -1,1919 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
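# A minimal sketch of why the diff()/same_sets() helpers above compare by
# __eq__ membership: LpLang expression hashing does not respect the operand
# commutativity that makeOr/makeAnd equality does, so set() comparison can
# miss equal expressions. The Or class here is a toy illustration, not the
# real LpLang type.
class Or(object):
    __hash__ = object.__hash__     # identity hash, inconsistent with __eq__

    def __init__(self, *args):
        self.args = list(args)

    def __eq__(self, other):
        return (isinstance(other, Or) and
                sorted(map(repr, self.args)) == sorted(map(repr, other.args)))


a, b = Or(1, 2), Or(2, 1)
assert a == b                  # equality is order-insensitive
assert set([a]) != set([b])    # hash-based set comparison gets it wrong
assert a in [b]                # __eq__ membership, as diff()/same_sets() use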
-# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock -from oslo_config import cfg -from oslo_log import log as logging - -from congress.datalog import base as datalog_base -from congress.datalog import compile -from congress.datalog import database -from congress.datalog import materialized -from congress.datalog import nonrecursive -from congress.datalog import utility -from congress.db import db_policy_rules -from congress import exception -from congress.policy_engines import agnostic -from congress.synchronizer import policy_rule_synchronizer as synchronizer -from congress.tests import base -from congress.tests import helper - - -LOG = logging.getLogger(__name__) - -NREC_THEORY = 'non-recursive theory' - - -class TestRuntime(base.TestCase): - """Tests for Runtime that are not specific to any theory.""" - - def check_equal(self, actual_string, correct_string, msg): - self.assertTrue(helper.datalog_equal( - actual_string, correct_string, msg)) - - def test_theory_inclusion(self): - """Test evaluation routines when one theory includes another.""" - # spread out across inclusions - th1 = nonrecursive.NonrecursiveRuleTheory() - th2 = nonrecursive.NonrecursiveRuleTheory() - th3 = nonrecursive.NonrecursiveRuleTheory() - th1.includes.append(th2) - th2.includes.append(th3) - - th1.insert(helper.str2form('p(x) :- q(x), r(x), s(2)')) - th2.insert(helper.str2form('q(1)')) - th1.insert(helper.str2form('r(1)')) - th3.insert(helper.str2form('s(2)')) - - self.check_equal( - helper.pol2str(th1.select(helper.str2form('p(x)'))), - 'p(1)', 'Data spread across inclusions') - - def test_multi_policy_update(self): - """Test updates that apply to multiple policies.""" - def check_equal(actual, correct): - e = helper.datalog_equal(actual, correct) - self.assertTrue(e) - - run = agnostic.Runtime() - run.create_policy('th1') - run.create_policy('th2') - - events1 = [compile.Event(formula=x, insert=True, target='th1') - for x in helper.str2pol("p(1) p(2) q(1) q(3)")] - events2 = [compile.Event(formula=x, insert=True, target='th2') - for x in helper.str2pol("r(1) r(2) t(1) t(4)")] - run.update(events1 + events2) - - check_equal(run.select('p(x)', 'th1'), 'p(1) p(2)') - check_equal(run.select('q(x)', 'th1'), 'q(1) q(3)') - check_equal(run.select('r(x)', 'th2'), 'r(1) r(2)') - check_equal(run.select('t(x)', 'th2'), 't(1) t(4)') - - def test_initialize_tables(self): - """Test initialize_tables() functionality of agnostic.""" - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(1) p(2)') - facts = [compile.Fact('p', (3,)), compile.Fact('p', (4,))] - run.initialize_tables(['p'], facts) - e = helper.datalog_equal(run.select('p(x)'), 'p(3) p(4)') - self.assertTrue(e) - - def test_single_policy(self): - """Test ability to create/delete single policies.""" - # single policy - run = agnostic.Runtime() - original = run.policy_names() - run.create_policy('test1') - run.insert('p(x) :- q(x)', 'test1') - run.insert('q(1)', 'test1') - self.assertEqual( - run.select('p(x)', 'test1'), 'p(1)', 'Policy creation') - self.assertEqual( - run.select('p(x)', 'test1'), 'p(1)', 'Policy creation') - run.delete_policy('test1') - self.assertEqual( - set(run.policy_names()), set(original), 'Policy deletion') - - def test_multi_policy(self): - """Test ability to create/delete multiple policies.""" - # multiple policies - run = agnostic.Runtime() - original = run.policy_names() - run.create_policy('test2') - run.create_policy('test3') - 
self.assertEqual( - set(run.policy_names()), - set(original + ['test2', 'test3']), - 'Multi policy creation') - run.delete_policy('test2') - run.create_policy('test4') - self.assertEqual( - set(run.policy_names()), - set(original + ['test3', 'test4']), - 'Multiple policy deletion') - run.insert('p(x) :- q(x) q(1)', 'test4') - self.assertEqual( - run.select('p(x)', 'test4'), - 'p(1)', - 'Multipolicy deletion select') - - def test_cross_policy_rule(self): - """Test a rule that refers to a table from another policy.""" - run = agnostic.Runtime() - run.create_policy('test1') - run.create_policy('test2') - run.create_policy('test3') - run.insert( - 'p(x) :- test1:q(x),test2:q(x),test3:q(x),q(x) q(1) q(2) q(3)', - 'test3') - run.insert('q(1)', 'test1') - run.insert('q(1) q(2)', 'test2') - - self.assertEqual( - run.select('p(x)', 'test3'), - 'p(1)', - 'Cross-policy rule select') - - def test_policy_types(self): - """Test types for multiple policies.""" - # policy types - run = agnostic.Runtime() - run.create_policy('test1', kind=datalog_base.NONRECURSIVE_POLICY_TYPE) - self.assertIsInstance(run.policy_object('test1'), - nonrecursive.NonrecursiveRuleTheory, - 'Nonrecursive policy addition') - run.create_policy('test2', kind=datalog_base.ACTION_POLICY_TYPE) - self.assertIsInstance(run.policy_object('test2'), - nonrecursive.ActionTheory, - 'Action policy addition') - run.create_policy('test3', kind=datalog_base.DATABASE_POLICY_TYPE) - self.assertIsInstance(run.policy_object('test3'), - database.Database, - 'Database policy addition') - run.create_policy('test4', kind=datalog_base.MATERIALIZED_POLICY_TYPE) - self.assertIsInstance(run.policy_object('test4'), - materialized.MaterializedViewTheory, - 'Materialized policy addition') - - def test_policy_errors(self): - """Test errors for multiple policies.""" - # errors - run = agnostic.Runtime() - run.create_policy('existent') - self.assertRaises(KeyError, run.create_policy, 'existent') - self.assertRaises(KeyError, run.delete_policy, 'nonexistent') - self.assertRaises(KeyError, run.policy_object, 'nonexistent') - - def test_wrong_arity_index(self): - run = agnostic.Runtime() - run.create_policy('test1') - run.insert('p(x) :- r(x), q(y, x)') - run.insert('r(1)') - run.insert('q(1,1)') - # run query first to build index - self.assertTrue(helper.datalog_equal(run.select('p(x)'), 'p(1)')) - # next insert causes an exception since the thing we indexed on - # doesn't exist - permitted, errs = run.insert('q(5)') - self.assertFalse(permitted) - self.assertEqual(len(errs), 1) - self.assertIsInstance(errs[0], exception.PolicyException) - # double-check that the error didn't result in an inconsistent state - self.assertEqual(run.select('q(5)'), '') - - def test_get_tablename(self): - run = agnostic.DseRuntime('dse') - run.synchronizer = mock.MagicMock() - run.create_policy('test') - run.insert('p(x) :- q(x)') - run.insert('q(x) :- r(x)') - run.insert('execute[nova:disconnect(x, y)] :- s(x, y)') - tables = run.get_tablename('test', 'p') - self.assertEqual({'p'}, set(tables)) - - tables = run.get_tablename('test', 't') - self.assertIsNone(tables) - - tables = run.get_tablenames('test') - self.assertEqual({'p', 'q', 'r', 's'}, set(tables)) - - tables = run.get_tablename('test', 'nova:disconnect') - self.assertIsNone(tables) - - def test_tablenames(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x,y)') - run.insert('q(x,y) :- r(x,y)') - run.insert('t(x) :- q(x,y), r(x,z), equal(y, z)') - run.insert('execute[nova:disconnect(x, y)] :- 
s(x, y)') - tables = run.tablenames() - self.assertEqual({'p', 'q', 'r', 's', 't', 'nova:disconnect'}, - set(tables)) - tables = run.tablenames(include_builtin=True) - self.assertEqual({'p', 'q', 'r', 's', 't', 'nova:disconnect', 'equal'}, - set(tables)) - tables = run.tablenames(body_only=True) - self.assertEqual({'q', 'r', 's'}, set(tables)) - tables = run.tablenames(include_modal=False) - self.assertEqual({'p', 'q', 'r', 's', 't'}, set(tables)) - - @mock.patch.object(db_policy_rules, 'add_policy') - def test_persistent_create_policy(self, mock_add): - run = agnostic.Runtime() - policy_name = 'invalid-table-name' - self.assertRaises(exception.PolicyException, - run.persistent_create_policy, - policy_name) - self.assertNotIn(policy_name, run.policy_names()) - - @mock.patch.object(db_policy_rules, 'add_policy', side_effect=Exception()) - def test_persistent_create_policy_with_db_exception(self, mock_add): - run = agnostic.Runtime() - with mock.patch.object(run, 'delete_policy') as mock_delete: - run.synchronizer = mock.MagicMock() - policy_name = 'test_policy' - self.assertRaises(exception.PolicyException, - run.persistent_create_policy, - policy_name) - mock_add.assert_called_once_with(mock.ANY, - policy_name, - policy_name[:5], - mock.ANY, - 'user', - 'nonrecursive', - session=mock.ANY) - # mock_delete.assert_called_once_with(policy_name) - self.assertFalse(mock_delete.called) - self.assertFalse(run.synchronizer.sync_one_policy.called) - self.assertNotIn('test_policy', run.policy_names()) - - mock_db_policy_obj = lambda: None - setattr(mock_db_policy_obj, 'name', 'test_policy') - - @mock.patch.object(db_policy_rules, 'add_policy_rule') - @mock.patch.object(db_policy_rules, 'policy_name', - side_effect=lambda x, session: x) - @mock.patch.object( - db_policy_rules, 'get_policies', return_value=[mock_db_policy_obj]) - def test_persistent_insert_rules( - self, mock_add, mock_policy_name, mock_get_policies): - run = agnostic.Runtime() - run.synchronizer = mock.MagicMock() - run.create_policy('test_policy') - - # test empty insert - result, _ = run.persistent_insert_rules('test_policy', []) - self.assertEqual(len(result), 0) - self.assertTrue(helper.datalog_equal( - run.select('p(x)'), '')) - - # test duplicated insert, 3 rules, 2 unique - result, _ = run.persistent_insert_rules( - 'test_policy', - [{'rule': 'p(1)', 'name': '', 'comment': ''}, - {'rule': 'p(2)', 'name': '', 'comment': ''}, - {'rule': 'p(1)', 'name': '', 'comment': ''}]) - self.assertEqual(len(result), 2) - self.assertTrue(helper.datalog_equal( - run.select('p(x)'), 'p(1) p(2)')) - - def test_tablenames_theory_name(self): - run = agnostic.Runtime() - run.create_policy('test') - run.create_policy('test2') - run.insert('p(x) :- q(x)', 'test') - run.insert('r(x) :- s(x)', 'test2') - - tables = run.tablenames() - self.assertEqual(set(tables), set(['p', 'q', 'r', 's'])) - - tables = run.tablenames(theory_name='test') - self.assertEqual(set(tables), set(['p', 'q'])) - - -class TestArity(base.TestCase): - def test_same_table_diff_policies(self): - run = agnostic.Runtime() - run.create_policy('alice') - run.create_policy('bob') - run.insert('p(x) :- q(x, y)', 'alice') - run.insert('p(x, y) :- r(x, y, z)', 'bob') - self.assertEqual(1, run.arity('p', 'alice')) - self.assertEqual(2, run.arity('p', 'bob')) - - def test_complex_table(self): - run = agnostic.Runtime() - run.create_policy('alice') - run.create_policy('bob') - run.insert('p(x) :- q(x, y)', 'alice') - run.insert('p(x, y) :- r(x, y, z)', 'bob') - self.assertEqual(1, 
run.arity('alice:p', 'bob')) - self.assertEqual(1, run.arity('alice:p', 'alice')) - - def test_modals(self): - run = agnostic.Runtime() - run.create_policy('alice') - run.insert('execute[nova:p(x)] :- q(x, y)', 'alice') - self.assertEqual(1, run.arity('nova:p', 'alice', 'execute')) - - -class TestTriggerRegistry(base.TestCase): - def setUp(self): - super(TestTriggerRegistry, self).setUp() - self.f = lambda tbl, old, new: old - - def test_trigger(self): - trigger1 = agnostic.Trigger('table', 'policy', self.f) - trigger2 = agnostic.Trigger('table', 'policy', self.f) - trigger3 = agnostic.Trigger('table2', 'policy', self.f) - trigger4 = agnostic.Trigger('table', 'policy', lambda x: x) - - s = set() - s.add(trigger1) - s.add(trigger2) - s.add(trigger3) - s.add(trigger4) - self.assertEqual(4, len(s)) - s.discard(trigger1) - self.assertEqual(3, len(s)) - s.discard(trigger2) - self.assertEqual(2, len(s)) - s.discard(trigger3) - self.assertEqual(1, len(s)) - s.discard(trigger4) - self.assertEqual(0, len(s)) - - def test_register(self): - g = compile.RuleDependencyGraph() - reg = agnostic.TriggerRegistry(g) - - # register - p_trigger = reg.register_table('p', 'alice', self.f) - triggers = reg.relevant_triggers(['alice:p']) - self.assertEqual(triggers, set([p_trigger])) - - # register 2nd table - q_trigger = reg.register_table('q', 'alice', self.f) - p_triggers = reg.relevant_triggers(['alice:p']) - self.assertEqual(p_triggers, set([p_trigger])) - q_triggers = reg.relevant_triggers(['alice:q']) - self.assertEqual(q_triggers, set([q_trigger])) - - # register again with table p - p2_trigger = reg.register_table('p', 'alice', self.f) - p_triggers = reg.relevant_triggers(['alice:p']) - self.assertEqual(p_triggers, set([p_trigger, p2_trigger])) - q_triggers = reg.relevant_triggers(['alice:q']) - self.assertEqual(q_triggers, set([q_trigger])) - - def test_unregister(self): - g = compile.RuleDependencyGraph() - reg = agnostic.TriggerRegistry(g) - p_trigger = reg.register_table('p', 'alice', self.f) - q_trigger = reg.register_table('q', 'alice', self.f) - self.assertEqual(reg.relevant_triggers(['alice:p']), - set([p_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:q']), - set([q_trigger])) - # unregister p - reg.unregister(p_trigger) - self.assertEqual(reg.relevant_triggers(['alice:p']), set()) - self.assertEqual(reg.relevant_triggers(['alice:q']), - set([q_trigger])) - # unregister q - reg.unregister(q_trigger) - self.assertEqual(reg.relevant_triggers(['alice:p']), set()) - self.assertEqual(reg.relevant_triggers(['alice:q']), set()) - # unregister nonexistent trigger - self.assertRaises(KeyError, reg.unregister, p_trigger) - self.assertEqual(reg.relevant_triggers(['alice:p']), set()) - self.assertEqual(reg.relevant_triggers(['alice:q']), set()) - - def test_basic_dependency(self): - g = compile.RuleDependencyGraph() - reg = agnostic.TriggerRegistry(g) - g.formula_insert(compile.parse1('p(x) :- q(x)'), 'alice') - # register p - p_trigger = reg.register_table('p', 'alice', self.f) - self.assertEqual(reg.relevant_triggers(['alice:q']), set([p_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:p']), set([p_trigger])) - - # register q - q_trigger = reg.register_table('q', 'alice', self.f) - self.assertEqual(reg.relevant_triggers(['alice:q']), - set([p_trigger, q_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:p']), - set([p_trigger])) - - def test_complex_dependency(self): - g = compile.RuleDependencyGraph() - reg = agnostic.TriggerRegistry(g) - 
g.formula_insert(compile.parse1('p(x) :- q(x)'), 'alice') - g.formula_insert(compile.parse1('q(x) :- r(x), s(x)'), 'alice') - g.formula_insert(compile.parse1('r(x) :- t(x, y), u(y)'), 'alice') - g.formula_insert(compile.parse1('separate(x) :- separate2(x)'), - 'alice') - g.formula_insert(compile.parse1('notrig(x) :- notrig2(x)'), 'alice') - p_trigger = reg.register_table('p', 'alice', self.f) - sep_trigger = reg.register_table('separate', 'alice', self.f) - - # individual tables - self.assertEqual(reg.relevant_triggers(['alice:p']), set([p_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:q']), set([p_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:r']), set([p_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:s']), set([p_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:t']), set([p_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:u']), set([p_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:notrig']), set()) - self.assertEqual(reg.relevant_triggers(['alice:notrig2']), set([])) - self.assertEqual(reg.relevant_triggers(['alice:separate']), - set([sep_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:separate2']), - set([sep_trigger])) - - # groups of tables - self.assertEqual(reg.relevant_triggers(['alice:p', 'alice:q']), - set([p_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:separate', 'alice:p']), - set([p_trigger, sep_trigger])) - self.assertEqual(reg.relevant_triggers(['alice:notrig', 'alice:p']), - set([p_trigger])) - - # events: data - event = compile.Event(compile.parse1('q(1)'), target='alice') - self.assertEqual(reg.relevant_triggers([event]), set([p_trigger])) - - event = compile.Event(compile.parse1('u(1)'), target='alice') - self.assertEqual(reg.relevant_triggers([event]), set([p_trigger])) - - event = compile.Event(compile.parse1('separate2(1)'), target='alice') - self.assertEqual(reg.relevant_triggers([event]), set([sep_trigger])) - - event = compile.Event(compile.parse1('notrig2(1)'), target='alice') - self.assertEqual(reg.relevant_triggers([event]), set([])) - - # events: rules - event = compile.Event(compile.parse1('separate(x) :- q(x)'), - target='alice') - self.assertEqual(reg.relevant_triggers([event]), set([sep_trigger])) - - event = compile.Event(compile.parse1('notrig(x) :- q(x)'), - target='alice') - self.assertEqual(reg.relevant_triggers([event]), set([])) - - event = compile.Event(compile.parse1('r(x) :- q(x)'), target='alice') - self.assertEqual(reg.relevant_triggers([event]), set([p_trigger])) - - # events: multiple rules and data - event1 = compile.Event(compile.parse1('r(x) :- q(x)'), target='alice') - event2 = compile.Event(compile.parse1('separate2(1)'), target='alice') - self.assertEqual(reg.relevant_triggers([event1, event2]), - set([p_trigger, sep_trigger])) - - event1 = compile.Event(compile.parse1('r(x) :- q(x)'), target='alice') - event2 = compile.Event(compile.parse1('notrigger2(1)'), target='alice') - self.assertEqual(reg.relevant_triggers([event1, event2]), - set([p_trigger])) - - def test_triggers_by_table(self): - t1 = agnostic.Trigger('p', 'alice', lambda x: x) - t2 = agnostic.Trigger('p', 'alice', lambda x, y: x) - t3 = agnostic.Trigger('q', 'alice', lambda x: x) - triggers = [t1, t2, t3] - table_triggers = agnostic.TriggerRegistry.triggers_by_table(triggers) - self.assertEqual(2, len(table_triggers)) - self.assertEqual(set(table_triggers[('p', 'alice', None)]), - set([t1, t2])) - self.assertEqual(set(table_triggers[('q', 'alice', None)]), - set([t3])) - - 
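# The registry cases above cover trigger bookkeeping; the TestTriggers cases
# below pin down the end-to-end callback behavior. A minimal usage sketch,
# assuming a Congress source tree is importable (callback signature and
# method names are taken from the tests themselves):
#
#     from congress.policy_engines import agnostic
#
#     run = agnostic.Runtime()
#     run.create_policy('test')
#     run.insert('p(x) :- q(x)')
#
#     fired = []
#     # callback receives (table, old_tuples, new_tuples)
#     trigger = run.register_trigger(
#         'p', lambda tbl, old, new: fired.append(tbl))
#     run.insert('q(1)')              # derives p(1) -> fires once
#     assert len(fired) == 1
#     run.unregister_trigger(trigger)
#     run.insert('q(2)')              # p changes again, but no trigger now
#     assert len(fired) == 1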
def test_modals(self): - g = compile.RuleDependencyGraph() - reg = agnostic.TriggerRegistry(g) - - # register - p_trigger = reg.register_table('p', 'alice', self.f, modal='exec') - triggers = reg.relevant_triggers(['alice:p']) - self.assertEqual(triggers, set([p_trigger])) - - # register 2nd table - q_trigger = reg.register_table('q', 'alice', self.f) - p_triggers = reg.relevant_triggers(['alice:p']) - self.assertEqual(p_triggers, set([p_trigger])) - q_triggers = reg.relevant_triggers(['alice:q']) - self.assertEqual(q_triggers, set([q_trigger])) - - -class TestTriggers(base.TestCase): - class MyObject(object): - """A class with methods that have side-effects.""" - - def __init__(self): - self.value = 0 - self.equals = False - - def increment(self): - """Used for counting number of times function invoked.""" - self.value += 1 - - def equal(self, realold, realnew, old, new): - """Used for checking if function is invoked with correct args.""" - self.equals = (realold == old and realnew == new) - - def test_empty(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.insert('p(1)') - self.assertEqual(1, obj.value) - - def test_empty2(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(1)') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.delete('p(1)') - self.assertEqual(1, obj.value) - - def test_empty3(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(1)') - run.delete('p(1)') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.delete('p(1)') - self.assertEqual(0, obj.value) - - def test_nochange(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(1)') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.insert('p(1)') - self.assertEqual(0, obj.value) - - def test_batch_change_succeed(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - p1 = compile.parse1('p(1)') - p2 = compile.parse1('p(2)') - p3 = compile.parse1('p(3)') - result = run.update([compile.Event(p1, target='test'), - compile.Event(p2, target='test'), - compile.Event(p3, target='test')]) - self.assertTrue(result[0], ("Update failed with errors: " + - ";".join(str(x) for x in result[1]))) - # IMPORTANT: 3 tuples inserted into p in a single batch triggers once - self.assertEqual(1, obj.value) - - def test_batch_change_fail(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - p1 = compile.parse1('p(1)') - p2 = compile.parse1('p(x) :- q(x)') - p3 = compile.parse1('q(x) :- p(x)') - result = run.update([compile.Event(p1, target='test'), - compile.Event(p2, target='test'), - compile.Event(p3, target='test')]) - self.assertFalse(result[0], - ("Update should have failed with recursion: " + - ";".join(str(x) for x in result[1]))) - # IMPORTANT: trigger not activated even though initial events succeed - self.assertEqual(0, obj.value) - - def test_dependency(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x)') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.insert('q(1)') - self.assertEqual(1, obj.value) - - def 
test_dependency_batch_insert(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.insert('q(1) p(x) :- q(x)') - self.assertEqual(1, obj.value) - - def test_dependency_batch(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x)') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - rule = compile.parse1('q(x) :- r(x)') - data = compile.parse1('r(1)') - run.update([compile.Event(rule, target='test'), - compile.Event(data, target='test')]) - self.assertEqual(1, obj.value) - - def test_dependency_batch_delete(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x)') - run.insert('q(x) :- r(x)') - run.insert('r(1)') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.delete('q(x) :- r(x)') - self.assertEqual(1, obj.value) - - def test_multi_dependency(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x)') - run.insert('q(x) :- r(x), s(x)') - run.insert('s(1)') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.insert('r(1)') - self.assertEqual(1, obj.value) - - def test_negation(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x), not r(x)') - run.insert('q(1)') - run.insert('q(2)') - run.insert('r(2)') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.insert('r(1)') - self.assertEqual(1, obj.value) - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.delete('r(1)') - self.assertEqual(3, obj.value) - - def test_anti_dependency(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x)') - run.insert('r(1)') - run.register_trigger('r', lambda tbl, old, new: obj.increment()) - run.insert('q(1)') - self.assertEqual(0, obj.value) - - def test_old_new_correctness(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x)') - run.insert('q(x) :- r(x), not s(x)') - run.insert('r(1) r(2) r(3)') - run.insert('s(2)') - oldp = set(compile.parse('p(1) p(3)')) - newp = set(compile.parse('p(1) p(2)')) - run.register_trigger('p', - lambda tbl, old, new: - obj.equal(oldp, newp, old, new)) - run.update([compile.Event(compile.parse1('s(3)')), - compile.Event(compile.parse1('s(2)'), insert=False)]) - self.assertTrue(obj.equals) - - def test_unregister(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - trigger = run.register_trigger('p', - lambda tbl, old, new: obj.increment()) - run.insert('p(1)') - self.assertEqual(1, obj.value) - run.unregister_trigger(trigger) - self.assertEqual(1, obj.value) - run.insert('p(2)') - self.assertEqual(1, obj.value) - self.assertRaises(KeyError, run.unregister_trigger, trigger) - self.assertEqual(1, obj.value) - - def test_sequence(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.insert('p(x) :- q(x)') - run.insert('q(1)') - self.assertEqual(1, obj.value) - - def test_delete_data(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.create_policy('test') - run.register_trigger('p', lambda tbl, old, new: obj.increment()) - run.insert('p(x) :- q(x, y), equal(y, 1)') - run.insert('q(1, 
1)') - self.assertEqual(1, obj.value) - run.delete('q(1, 1)') - self.assertEqual(2, obj.value) - - def test_multi_policies(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('alice') - run.create_policy('bob') - run.register_trigger('p', - lambda tbl, old, new: obj.increment(), 'alice') - run.insert('p(x) :- bob:q(x)', target='alice') - run.insert('q(1)', target='bob') - self.assertEqual(1, obj.value) - run.delete('q(1)', target='bob') - self.assertEqual(2, obj.value) - - def test_modal(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('alice') - run.register_trigger('p', lambda tbl, old, new: - obj.increment(), 'alice', 'execute') - run.insert('execute[p(x)] :- q(x)') - self.assertEqual(0, obj.value) - run.insert('q(1)') - self.assertEqual(1, obj.value) - run.insert('q(2)') - self.assertEqual(2, obj.value) - - def test_initialize(self): - obj = self.MyObject() - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('alice') - run.register_trigger('p', lambda tbl, old, new: - obj.increment(), 'alice', 'execute') - run.insert('execute[p(x)] :- q(x)') - self.assertEqual(obj.value, 0) - run.initialize_tables(['q'], [compile.Fact('q', [1])], 'alice') - self.assertEqual(obj.value, 1) - - -class TestMultipolicyRules(base.TestCase): - def test_external(self): - """Test ability to write rules that span multiple policies.""" - # External theory - run = agnostic.Runtime() - run.create_policy('test1') - run.insert('q(1)', target='test1') - run.insert('q(2)', target='test1') - run.create_policy('test2') - run.insert('p(x) :- test1:q(x)', target='test2') - actual = run.select('p(x)', target='test2') - e = helper.db_equal('p(1) p(2)', actual) - self.assertTrue(e, "Basic") - - def test_multi_external(self): - """Test multiple rules that span multiple policies.""" - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('test1') - run.create_policy('test2') - run.create_policy('test3') - run.insert('p(x) :- test2:p(x)', target='test1') - run.insert('p(x) :- test3:p(x)', target='test1') - run.insert('p(1)', target='test2') - run.insert('p(2)', target='test3') - actual = run.select('p(x)', target='test1') - e = helper.db_equal(actual, 'p(1) p(2)') - self.assertTrue(e, "Multiple external rules with multiple policies") - - def test_external_current(self): - """Test ability to write rules that span multiple policies.""" - # External theory plus current theory - run = agnostic.Runtime() - run.create_policy('test1') - run.insert('q(1)', target='test1') - run.insert('q(2)', target='test1') - run.create_policy('test2') - run.insert('p(x) :- test1:q(x), r(x)', target='test2') - run.insert('r(1)', target='test2') - run.insert('r(2)', target='test2') - actual = run.select('p(x)', target='test2') - e = helper.db_equal(actual, 'p(1) p(2)') - self.assertTrue(e, "Mixing external theories with current theory") - - def test_ignore_local(self): - """Test ability to write rules that span multiple policies.""" - # Local table ignored - run = agnostic.Runtime() - run.create_policy('test1') - run.insert('q(1)', target='test1') - run.insert('q(2)', target='test1') - run.create_policy('test2') - run.insert('p(x) :- test1:q(x), r(x)', target='test2') - run.insert('q(3)', 'test2') - run.insert('r(1)', target='test2') - run.insert('r(2)', target='test2') - run.insert('r(3)', target='test2') - actual = run.select('p(x)', target='test2') - e = helper.db_equal(actual, 'p(1) p(2)') - self.assertTrue(e, "Local table ignored") - 
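The multipolicy tests above all drive the same small Runtime surface. A condensed usage pattern, assembled only from calls that appear in these tests (the policy names 'datasource' and 'classification' are illustrative):

    from congress.policy_engines import agnostic

    run = agnostic.Runtime()
    run.create_policy('datasource')        # holds base facts
    run.create_policy('classification')    # holds derived tables
    run.insert('q(1) q(2)', target='datasource')
    # 'datasource:q' names a table in the other policy; a bare 'r'
    # names a table local to the policy the rule is inserted into.
    run.insert('p(x) :- datasource:q(x), r(x)', target='classification')
    run.insert('r(2)', target='classification')
    print(run.select('p(x)', target='classification'))  # expect: p(2)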
- def test_local(self): - """Test ability to write rules that span multiple policies.""" - # Local table used - run = agnostic.Runtime() - run.create_policy('test1') - run.insert('q(1)', target='test1') - run.insert('q(2)', target='test1') - run.create_policy('test2') - run.insert('p(x) :- test1:q(x), q(x)', target='test2') - run.insert('q(2)', 'test2') - actual = run.select('p(x)', target='test2') - e = helper.db_equal(actual, 'p(2)') - self.assertTrue(e, "Local table used") - - def test_multiple_external(self): - """Test ability to write rules that span multiple policies.""" - # Multiple external theories - run = agnostic.Runtime() - run.create_policy('test1') - run.insert('q(1)', target='test1') - run.insert('q(2)', target='test1') - run.insert('q(3)', target='test1') - run.create_policy('test2') - run.insert('q(1)', target='test2') - run.insert('q(2)', target='test2') - run.insert('q(4)', target='test2') - run.create_policy('test3') - run.insert('p(x) :- test1:q(x), test2:q(x)', target='test3') - actual = run.select('p(x)', target='test3') - e = helper.db_equal(actual, 'p(1) p(2)') - self.assertTrue(e, "Multiple external theories") - - def test_multiple_levels_external(self): - """Test ability to write rules that span multiple policies.""" - # Multiple levels of external theories - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('test1') - run.insert('p(x) :- test2:q(x), test3:q(x)', target='test1') - run.insert('s(3) s(1) s(2) s(4)', target='test1') - run.create_policy('test2') - run.insert('q(x) :- test4:r(x)', target='test2') - run.create_policy('test3') - run.insert('q(x) :- test1:s(x)', target='test3') - run.create_policy('test4') - run.insert('r(1)', target='test4') - run.insert('r(2)', target='test4') - run.insert('r(5)', target='test4') - actual = run.select('p(x)', target='test1') - e = helper.db_equal(actual, 'p(1) p(2)') - self.assertTrue(e, "Multiple levels of external theories") - - def test_multipolicy_head(self): - """Test SELECT with different policy in the head.""" - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('test1', kind='action') - run.create_policy('test2', kind='action') - (permitted, errors) = run.insert('test2:p+(x) :- q(x)', 'test1') - self.assertTrue(permitted, "modals with policy names must be allowed") - run.insert('q(1)', 'test1') - run.insert('p(2)', 'test2') - actual = run.select('test2:p+(x)', 'test1') - e = helper.db_equal(actual, 'test2:p+(1)') - self.assertTrue(e, "Policy name in the head") - - def test_multipolicy_normal_errors(self): - """Test errors arising from rules in multiple policies.""" - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('test1') - - # policy in head of rule - (permitted, errors) = run.insert('test2:p(x) :- q(x)', 'test1') - self.assertFalse(permitted) - self.assertIn("should not reference any policy", str(errors[0])) - - # policy in head of rule with update - (permitted, errors) = run.insert('test2:p+(x) :- q(x)', 'test1') - self.assertFalse(permitted) - self.assertIn("should not reference any policy", str(errors[0])) - - # policy in head of rule with update - (permitted, errors) = run.insert('test2:p-(x) :- q(x)', 'test1') - self.assertFalse(permitted) - self.assertIn("should not reference any policy", str(errors[0])) - - # policy in head of fact - (permitted, errors) = run.insert('test2:p(1)', 'test1') - self.assertFalse(permitted) - self.assertIn("should not reference any policy", str(errors[0])) - - # policy in head of fact - (permitted, errors) = 
run.insert('test2:p+(1)', 'test1') - self.assertFalse(permitted) - self.assertIn("should not reference any policy", str(errors[0])) - - # policy in head of fact - (permitted, errors) = run.insert('test2:p-(1)', 'test1') - self.assertFalse(permitted) - self.assertIn("should not reference any policy", str(errors[0])) - - # recursion across policies - run.insert('p(x) :- test2:q(x)', target='test1') - run.create_policy('test2') - (permit, errors) = run.insert('q(x) :- test1:p(x)', target='test2') - self.assertFalse(permit, "Recursion across theories should fail") - self.assertEqual(len(errors), 1) - self.assertIn("Rules are recursive", str(errors[0])) - - def test_multipolicy_action_errors(self): - """Test errors arising from rules in action policies.""" - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('test1', kind='action') - - # policy in head of rule - (permitted, errors) = run.insert('test2:p(x) :- q(x)', 'test1') - self.assertFalse(permitted) - self.assertIn("should not reference any policy", str(errors[0])) - - # policy in head of fact - (permitted, errors) = run.insert('test2:p(1)', 'test1') - self.assertFalse(permitted) - self.assertIn("should not reference any policy", str(errors[0])) - - # recursion across policies - run.insert('p(x) :- test2:q(x)', target='test1') - run.create_policy('test2') - (permit, errors) = run.insert('q(x) :- test1:p(x)', target='test2') - self.assertFalse(permit, "Recursion across theories should fail") - self.assertEqual(len(errors), 1) - self.assertIn("Rules are recursive", str(errors[0])) - - def test_dependency_graph_policy_deletion(self): - run = agnostic.Runtime() - g = run.global_dependency_graph - run.create_policy('test') - rule = 'execute[nova:flavors.delete(id)] :- nova:flavors(id)' - permitted, changes = run.insert(rule, target='test') - self.assertTrue(permitted) - run.create_policy('nova') - run.insert('flavors(1)', target="nova") - run.insert('flavors(2)', target="nova") - run.insert('flavors(3)', target="nova") - run.insert('flavors(4)', target="nova") - - self.assertEqual(g.dependencies('test:nova:flavors.delete'), - set(['nova:flavors', 'test:nova:flavors.delete'])) - run.delete_policy('nova') - self.assertTrue(g.node_in('nova:flavors')) - self.assertEqual(g.dependencies('test:nova:flavors.delete'), - set(['nova:flavors', 'test:nova:flavors.delete'])) - - def test_dependency_graph(self): - """Test that dependency graph gets updated correctly.""" - run = agnostic.Runtime() - run.debug_mode() - g = run.global_dependency_graph - - run.create_policy('test') - - run.insert('p(x) :- q(x), nova:q(x)', target='test') - self.assertTrue(g.edge_in('test:p', 'nova:q', False)) - self.assertTrue(g.edge_in('test:p', 'test:q', False)) - - run.insert('p(x) :- s(x)', target='test') - self.assertTrue(g.edge_in('test:p', 'nova:q', False)) - self.assertTrue(g.edge_in('test:p', 'test:q', False)) - self.assertTrue(g.edge_in('test:p', 'test:s', False)) - - run.insert('q(x) :- nova:r(x)', target='test') - self.assertTrue(g.edge_in('test:p', 'nova:q', False)) - self.assertTrue(g.edge_in('test:p', 'test:q', False)) - self.assertTrue(g.edge_in('test:p', 'test:s', False)) - self.assertTrue(g.edge_in('test:q', 'nova:r', False)) - - run.delete('p(x) :- q(x), nova:q(x)', target='test') - self.assertTrue(g.edge_in('test:p', 'test:s', False)) - self.assertTrue(g.edge_in('test:q', 'nova:r', False)) - - run.update([compile.Event(helper.str2form('p(x) :- q(x), nova:q(x)'), - target='test')]) - self.assertTrue(g.edge_in('test:p', 'nova:q', False)) - 
self.assertTrue(g.edge_in('test:p', 'test:q', False)) - self.assertTrue(g.edge_in('test:p', 'test:s', False)) - self.assertTrue(g.edge_in('test:q', 'nova:r', False)) - - def test_negation(self): - """Test that negation when applied to a different policy works.""" - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('alpha') - run.create_policy('beta') - run.insert('p(x) :- beta:q(x), not beta:q(x)', 'alpha') - run.insert('q(1)', 'beta') - self.assertEqual(run.select('p(x)', 'alpha'), '') - - def test_built_in(self): - """Test that built_in function works.""" - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('alpha') - run.create_policy('beta') - run.create_policy('sigma') - run.insert('p(x1, x2) :- ' - 'beta:q(x1), sigma:r(x2), not equal(x1, x2)', 'alpha') - run.insert('q(1)', 'beta') - run.insert('r(1)', 'sigma') - run.insert('r(3)', 'sigma') - self.assertEqual(run.select('p(x1,x2)', 'alpha'), 'p(1, 3)') - - def test_schema_check(self): - """Test that schema check in multiple policies works.""" - run = agnostic.Runtime() - run.debug_mode() - run.create_policy('alpha') - run.create_policy('beta') - run.insert('p(x,y) :- beta:q(x,y)', 'alpha') - permitted, changes = run.insert('q(x) :- r(x)', 'beta') - self.assertFalse(permitted) - self.assertEqual(len(changes), 1) - - def test_same_rules(self): - """Test that same rule insertion can be correctly dealt with.""" - run = agnostic.Runtime() - run.debug_mode() - policy = 'alpha' - run.create_policy(policy) - rulestr = 'p(x,y) :- q(x,y)' - rule = compile.parse1(rulestr) - run.insert(rulestr, policy) - self.assertIn(rule, run.policy_object(policy)) - self.assertIn( - rule.head.table.table, run.policy_object(policy).schema) - run.insert(rulestr, policy) - run.delete(rulestr, policy) - self.assertNotIn(rule, run.policy_object(policy)) - self.assertNotIn( - rule.head.table.table, run.policy_object(policy).schema) - - -class TestSelect(base.TestCase): - def test_no_dups(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x)') - run.insert('p(x) :- r(x)') - run.insert('q(1)') - run.insert('r(1)') - self.assertEqual(run.select('p(x)'), 'p(1)') - - -class TestPolicyCreationDeletion(base.TestCase): - def test_policy_creation_after_ref(self): - """Test ability to write rules that span multiple policies.""" - # Local table used - run = agnostic.Runtime() - run.create_policy('test1') - run.insert('p(x) :- test2:q(x)', 'test1') - run.create_policy('test2') - run.insert('q(1)', 'test2') - actual = run.select('p(x)', 'test1') - e = helper.db_equal(actual, 'p(1)') - self.assertTrue(e, "Creation after reference") - - def test_policy_deletion_after_ref(self): - """Test ability to write rules that span multiple policies.""" - # Local table used - run = agnostic.Runtime() - run.create_policy('test1') - run.insert('p(x) :- test2:q(x)', 'test1') - # ensuring this code runs, without causing an error - run.create_policy('test2') - run.delete_policy('test2') - # add the policy back, this time checking for dangling refs - run.create_policy('test2') - self.assertRaises(exception.DanglingReference, run.delete_policy, - 'test2', disallow_dangling_refs=True) - - def test_policy_deletion_dependency_graph(self): - """Ensure dependency graph is properly updated when deleting policy.""" - run = agnostic.Runtime() - run.create_policy('alice') - run.insert('p(x) :- q(x)') - LOG.info("graph: \n%s", run.global_dependency_graph) - self.assertTrue(run.global_dependency_graph.edge_in( - 'alice:p', 'alice:q', False)) - # 
don't delete rules first--just delete policy - run.delete_policy('alice') - self.assertEqual(len(run.global_dependency_graph), 0) - - -class TestDependencyGraph(base.TestCase): - def test_fact_insert(self): - run = agnostic.Runtime() - run.create_policy('test') - facts = [compile.Fact('p', [1])] - run.initialize_tables([], facts) - self.assertFalse(run.global_dependency_graph.node_in('test:p')) - - def test_atom_insert(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(1)') - self.assertFalse(run.global_dependency_graph.node_in('test:p')) - - def test_rule_noop(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('q(1) :- p(1)') - run.delete('q(2) :- p(2)') - self.assertTrue(run.global_dependency_graph.node_in('test:p')) - self.assertTrue(run.global_dependency_graph.node_in('test:q')) - self.assertTrue(run.global_dependency_graph.edge_in( - 'test:q', 'test:p', False)) - - def test_atom_deletion(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('q(x) :- p(x)') - run.delete('p(1)') - run.delete('p(1)') - # actually just testing that no error is thrown - self.assertFalse(run.global_dependency_graph.has_cycle()) - - -class TestSimulate(base.TestCase): - DEFAULT_THEORY = 'test_default' - ACTION_THEORY = 'test_action' - - def prep_runtime(self, code=None, msg=None, target=None, theories=None): - if code is None: - code = "" - if target is None: - target = self.DEFAULT_THEORY - run = agnostic.Runtime() - run.create_policy(self.DEFAULT_THEORY, abbr='default') - run.create_policy(self.ACTION_THEORY, abbr='action', kind='action') - if theories: - for theory in theories: - run.create_policy(theory) - run.debug_mode() - run.insert(code, target=target) - return run - - def create(self, action_code, class_code, theories=None): - run = self.prep_runtime(theories=theories) - - actth = self.ACTION_THEORY - permitted, errors = run.insert(action_code, target=actth) - self.assertTrue(permitted, "Error in action policy: {}".format( - utility.iterstr(errors))) - - defth = self.DEFAULT_THEORY - permitted, errors = run.insert(class_code, target=defth) - self.assertTrue(permitted, "Error in classifier policy: {}".format( - utility.iterstr(errors))) - - return run - - def check(self, run, action_sequence, query, correct, msg, delta=False): - original_db = str(run.theory[self.DEFAULT_THEORY]) - actual = run.simulate( - query, self.DEFAULT_THEORY, action_sequence, - self.ACTION_THEORY, delta=delta) - e = helper.datalog_equal(actual, correct) - self.assertTrue(e, msg + " (Query results not correct)") - e = helper.db_equal( - str(run.theory[self.DEFAULT_THEORY]), original_db) - self.assertTrue(e, msg + " (Rollback failed)") - - def test_multipolicy_state_1(self): - """Test update sequence affecting datasources.""" - run = self.prep_runtime(theories=['nova', 'neutron']) - run.insert('p(x) :- nova:p(x)', self.DEFAULT_THEORY) - sequence = 'nova:p+(1) neutron:p+(2)' - self.check(run, sequence, 'p(x)', 'p(1)', 'Separate theories') - - def test_multipolicy_state_2(self): - """Test update sequence affecting datasources.""" - run = self.prep_runtime(theories=['nova', 'neutron']) - run.insert('p(x) :- neutron:p(x)', self.DEFAULT_THEORY) - run.insert('p(x) :- nova:p(x)', self.DEFAULT_THEORY) - sequence = 'nova:p+(1) neutron:p+(2)' - self.check(run, sequence, 'p(x)', 'p(1) p(2)', 'Separate theories 2') - - def test_multipolicy_state_3(self): - """Test update sequence affecting datasources.""" - run = self.prep_runtime(theories=['nova', 'neutron']) - 
run.insert('p(x) :- neutron:p(x)', self.DEFAULT_THEORY) - run.insert('p(x) :- nova:p(x)', self.DEFAULT_THEORY) - run.insert('p(1)', 'nova') - sequence = 'nova:p+(1) neutron:p+(2)' - self.check(run, sequence, 'p(x)', 'p(1) p(2)', 'Separate theories 3') - self.check(run, '', 'p(x)', 'p(1)', 'Existing data separate theories') - - def test_multipolicy_action_sequence(self): - """Test sequence updates with actions that impact multiple policies.""" - action_code = ('nova:p+(x) :- q(x)' - 'neutron:p+(y) :- q(x), plus(x, 1, y)' - 'ceilometer:p+(y) :- q(x), plus(x, 5, y)' - 'action("q")') - classify_code = 'p(x) :- nova:p(x) p(x) :- neutron:p(x) p(3)' - run = self.create(action_code, classify_code, - theories=['nova', 'neutron', 'ceilometer']) - action_sequence = 'q(1)' - self.check(run, action_sequence, 'p(x)', 'p(1) p(2) p(3)', - 'Multi-policy actions') - - def test_action_sequence(self): - """Test sequence updates with actions.""" - - # Simple - action_code = ('p+(x) :- q(x) action("q")') - classify_code = 'p(2)' # just some other data present - run = self.create(action_code, classify_code) - action_sequence = 'q(1)' - self.check(run, action_sequence, 'p(x)', 'p(1) p(2)', 'Simple') - - # Noop does not break rollback - action_code = ('p-(x) :- q(x)' - 'action("q")') - classify_code = ('') - run = self.create(action_code, classify_code) - action_sequence = 'q(1)' - self.check(run, action_sequence, 'p(x)', '', - "Rollback handles Noop") - - # Add and delete - action_code = ('action("act") ' - 'p+(x) :- act(x) ' - 'p-(y) :- act(x), r(x, y) ') - classify_code = 'p(2) r(1, 2)' - run = self.create(action_code, classify_code) - action_sequence = 'act(1)' - self.check(run, action_sequence, 'p(x)', 'p(1)', 'Add and delete') - - # insertion takes precedence over deletion - action_code = ('p+(x) :- q(x)' - 'p-(x) :- r(x)' - 'action("q")') - classify_code = ('') - run = self.create(action_code, classify_code) - # ordered so that consequences will be p+(1) p-(1) - action_sequence = 'q(1), r(1) :- true' - self.check(run, action_sequence, 'p(x)', 'p(1)', - "Deletion before insertion") - - # multiple action sequences 1 - action_code = ('p+(x) :- q(x)' - 'p-(x) :- r(x)' - 'action("q")' - 'action("r")') - classify_code = ('') - run = self.create(action_code, classify_code) - action_sequence = 'q(1) r(1)' - self.check(run, action_sequence, 'p(x)', '', - "Multiple actions: inversion from {}") - - # multiple action sequences 2 - action_code = ('p+(x) :- q(x)' - 'p-(x) :- r(x)' - 'action("q")' - 'action("r")') - classify_code = ('p(1)') - run = self.create(action_code, classify_code) - action_sequence = 'q(1) r(1)' - self.check(run, action_sequence, 'p(x)', '', - "Multiple actions: inversion from p(1), first is noop") - - # multiple action sequences 3 - action_code = ('p+(x) :- q(x)' - 'p-(x) :- r(x)' - 'action("q")' - 'action("r")') - classify_code = ('p(1)') - run = self.create(action_code, classify_code) - action_sequence = 'r(1) q(1)' - self.check(run, action_sequence, 'p(x)', 'p(1)', - "Multiple actions: inversion from p(1), first is not noop") - - # multiple action sequences 4 - action_code = ('p+(x) :- q(x)' - 'p-(x) :- r(x)' - 'action("q")' - 'action("r")') - classify_code = ('') - run = self.create(action_code, classify_code) - action_sequence = 'r(1) q(1)' - self.check(run, action_sequence, 'p(x)', 'p(1)', - "Multiple actions: inversion from {}, first is not noop") - - # Action with additional info - action_code = ('p+(x,z) :- q(x,y), r(y,z)' - 'action("q") action("r")') - classify_code = 'p(1,2)' - run = 
self.create(action_code, classify_code) - action_sequence = 'q(1,2), r(2,3) :- true' - self.check(run, action_sequence, 'p(x,y)', 'p(1,2) p(1,3)', - 'Action with additional info') - - def test_state_rule_sequence(self): - """Test state and rule update sequences.""" - # State update - action_code = '' - classify_code = 'p(1)' - run = self.create(action_code, classify_code) - action_sequence = 'p+(2)' - self.check(run, action_sequence, 'p(x)', 'p(1) p(2)', - 'State update') - - # Rule update - action_code = '' - classify_code = 'q(1)' - run = self.create(action_code, classify_code) - action_sequence = 'p+(x) :- q(x)' - self.check(run, action_sequence, 'p(x)', 'p(1)', - 'Rule update') - - def test_complex_sequence(self): - """Test more complex sequences of updates.""" - # action with query - action_code = ('p+(x, y) :- q(x, y)' - 'action("q")') - classify_code = ('r(1)') - run = self.create(action_code, classify_code) - action_sequence = 'q(x, 0) :- r(x)' - self.check(run, action_sequence, 'p(x,y)', 'p(1,0)', - 'Action with query') - - # action sequence with results - action_code = ('p+(id, val) :- create(val)' - 'p+(id, val) :- update(id, val)' - 'p-(id, val) :- update(id, newval), p(id, val)' - 'action("create")' - 'action("update")' - 'result(x) :- create(val), p+(x,val)') - classify_code = 'hasval(val) :- p(x, val)' - run = self.create(action_code, classify_code) - action_sequence = 'create(0) update(x,1) :- result(x)' - self.check(run, action_sequence, 'hasval(x)', 'hasval(1)', - 'Action sequence with results') - - def test_delta_add(self): - """Test when asking for changes in query.""" - action_code = ('action("q") ' - 'p+(x) :- q(x) ') - classify_code = 'p(2)' # just some other data present - run = self.create(action_code, classify_code) - action_sequence = 'q(1)' - self.check(run, action_sequence, 'p(x)', 'p+(1)', 'Add', - delta=True) - - def test_delta_delete(self): - """Test when asking for changes in query.""" - action_code = ('action("q") ' - 'p-(x) :- q(x) ') - classify_code = 'p(1) p(2)' # p(2): just some other data present - run = self.create(action_code, classify_code) - action_sequence = 'q(1)' - self.check(run, action_sequence, 'p(x)', 'p-(1)', 'Delete', - delta=True) - - def test_delta_add_delete(self): - """Test when asking for changes in query.""" - action_code = ('action("act") ' - 'p+(x) :- act(x) ' - 'p-(y) :- act(x), r(x, y) ') - classify_code = 'p(2) r(1, 2) p(3)' # p(3): just other data present - run = self.create(action_code, classify_code) - action_sequence = 'act(1)' - self.check(run, action_sequence, 'p(x)', 'p+(1) p-(2)', - 'Add and delete', delta=True) - - def test_key_value_schema(self): - """Test action of key/value updates.""" - action_code = ( - 'action("changeAttribute")' - 'server_attributes+(uid, name, newvalue) :- ' - 'changeAttribute(uid, name, newvalue) ' - 'server_attributes-(uid, name, oldvalue) :- ' - ' changeAttribute(uid, name, newvalue), ' - ' server_attributes(uid, name, oldvalue)') - policy = 'error(uid) :- server_attributes(uid, name, 0)' - - run = self.create(action_code, policy) - seq = 'changeAttribute(101, "cpu", 0)' - self.check(run, seq, 'error(x)', 'error(101)', - 'Basic error') - - run = self.create(action_code, policy) - seq = 'changeAttribute(101, "cpu", 1)' - self.check(run, seq, 'error(x)', '', - 'Basic non-error') - - data = ('server_attributes(101, "cpu", 1)') - run = self.create(action_code, policy + data) - seq = 'changeAttribute(101, "cpu", 0)' - self.check(run, seq, 'error(x)', 'error(101)', - 'Overwrite existing to cause 
error') - - data = ('server_attributes(101, "cpu", 0)') - run = self.create(action_code, policy + data) - seq = 'changeAttribute(101, "cpu", 1)' - self.check(run, seq, 'error(x)', '', - 'Overwrite existing to eliminate error') - - data = ('server_attributes(101, "cpu", 0)' - 'server_attributes(101, "disk", 0)') - run = self.create(action_code, policy + data) - seq = 'changeAttribute(101, "cpu", 1)' - self.check(run, seq, 'error(x)', 'error(101)', - 'Overwrite existing but still error') - - def test_duplicates(self): - run = agnostic.Runtime() - run.create_policy('test') - run.insert('p(x) :- q(x)') - run.insert('p(x) :- r(x)') - run.insert('q(1)') - run.insert('r(1)') - self.assertEqual(run.simulate('p(x)', 'test', '', 'test'), 'p(1)') - - -class TestActionExecution(base.TestCase): - - def setUp(self): - super(TestActionExecution, self).setUp() - self.run = agnostic.DseRuntime('test') - self.run.service_exists = mock.MagicMock() - self.run.service_exists.return_value = True - self.run._rpc = mock.MagicMock() - - def test_insert_rule_insert_data(self): - self.run.create_policy('test') - self.run.debug_mode() - self.run.insert('execute[p(x)] :- q(x)') - self.assertEqual(len(self.run.logger.messages), 0, - "Improper action logged") - self.run.insert('q(1)') - self.assertEqual(len(self.run.logger.messages), 1, - "No action logged") - self.assertEqual(self.run.logger.messages[0], 'Executing test:p(1)') - - expected_args = ('test', 'p') - expected_kwargs = {'args': {'positional': [1]}} - args, kwargs = self.run._rpc.call_args_list[0] - - self.assertEqual(expected_args, args) - self.assertEqual(expected_kwargs, kwargs) - - def test_insert_data_insert_rule(self): - run = self.run - run.create_policy('test') - run.debug_mode() - run.insert('q(1)') - self.assertEqual(len(run.logger.messages), 0, "Improper action logged") - run.insert('execute[p(x)] :- q(x)') - self.assertEqual(len(run.logger.messages), 1, "No action logged") - self.assertEqual(run.logger.messages[0], 'Executing test:p(1)') - - expected_args = ('test', 'p') - expected_kwargs = {'args': {'positional': [1]}} - args, kwargs = run._rpc.call_args_list[0] - self.assertEqual(expected_args, args) - self.assertEqual(expected_kwargs, kwargs) - - def test_insert_data_insert_rule_delete_data(self): - run = self.run - run.create_policy('test') - run.debug_mode() - run.insert('q(1)') - self.assertEqual(len(run.logger.messages), 0, "Improper action logged") - run.insert('execute[p(x)] :- q(x)') - self.assertEqual(len(run.logger.messages), 1, "No action logged") - self.assertEqual(run.logger.messages[0], 'Executing test:p(1)') - run.delete('q(1)') - self.assertEqual(len(run.logger.messages), 1, "Delete failure") - self.assertEqual(run.logger.messages[0], 'Executing test:p(1)') - - expected_args = ('test', 'p') - expected_kwargs = {'args': {'positional': [1]}} - args, kwargs = run._rpc.call_args_list[0] - - self.assertEqual(expected_args, args) - self.assertEqual(expected_kwargs, kwargs) - - def test_insert_data_insert_rule_delete_rule(self): - run = self.run - run.create_policy('test') - run.debug_mode() - run.insert('q(1)') - self.assertEqual(len(run.logger.messages), 0, "Improper action logged") - run.insert('execute[p(x)] :- q(x)') - self.assertEqual(len(run.logger.messages), 1, "No action logged") - self.assertEqual(run.logger.messages[0], 'Executing test:p(1)') - run.delete('execute[p(x)] :- q(x)') - self.assertEqual(len(run.logger.messages), 1, "Delete failure") - self.assertEqual(run.logger.messages[0], 'Executing test:p(1)') - - 
expected_args = ('test', 'p') - expected_kwargs = {'args': {'positional': [1]}} - args, kwargs = run._rpc.call_args_list[0] - - self.assertEqual(expected_args, args) - self.assertEqual(expected_kwargs, kwargs) - - def test_insert_data_insert_rule_noop_insert(self): - run = self.run - run.create_policy('test') - run.debug_mode() - run.insert('q(1)') - self.assertEqual(len(run.logger.messages), 0, "Improper action logged") - run.insert('execute[p(x)] :- q(x)') - self.assertEqual(len(run.logger.messages), 1, "No action logged") - self.assertEqual(run.logger.messages[0], 'Executing test:p(1)') - run.insert('q(1)') - self.assertEqual(len(run.logger.messages), 1, "Improper action logged") - self.assertEqual(run.logger.messages[0], 'Executing test:p(1)') - - expected_args = ('test', 'p') - expected_kwargs = {'args': {'positional': [1]}} - args, kwargs = run._rpc.call_args_list[0] - - self.assertEqual(expected_args, args) - self.assertEqual(expected_kwargs, kwargs) - - def test_insert_multiple_rules(self): - # basic test - - # test recursion caused at nth rule - - # test transactional trigger activation: - # e.g. - # a(1) - # trigger(x) :- a(x), not b(x) - # b(x) :- a(x) - # trigger should not be activated - pass - - def test_disjunction(self): - run = self.run - run.create_policy('test') - run.debug_mode() - run.insert('execute[p(x)] :- q(x)') - run.insert('execute[p(x)] :- r(x)') - self.assertEqual(len(run.logger.messages), 0, "Improper action logged") - run.insert('q(1)') - self.assertEqual(len(run.logger.messages), 1, "No action logged") - self.assertEqual(run.logger.messages[0], 'Executing test:p(1)') - run.insert('r(1)') - self.assertEqual(len(run.logger.messages), 1, "Improper action logged") - self.assertEqual(run.logger.messages[0], 'Executing test:p(1)') - - expected_args = ('test', 'p') - expected_kwargs = {'args': {'positional': [1]}} - args, kwargs = run._rpc.call_args_list[0] - - self.assertEqual(expected_args, args) - self.assertEqual(expected_kwargs, kwargs) - - def test_multiple_instances(self): - run = self.run - run.create_policy('test') - run.debug_mode() - run.insert('q(1)') - run.insert('q(2)') - self.assertEqual(len(run.logger.messages), 0, "Improper action logged") - run.insert('execute[p(x)] :- q(x)') - self.assertEqual(len(run.logger.messages), 2, "No action logged") - actualset = set([u'Executing test:p(1)', u'Executing test:p(2)']) - self.assertEqual(actualset, set(run.logger.messages)) - - expected_args_list = [ - [('test', 'p'), {'args': {'positional': [1]}}], - [('test', 'p'), {'args': {'positional': [2]}}], - ] - - for args, kwargs in run._rpc.call_args_list: - self.assertIn([args, kwargs], expected_args_list) - expected_args_list.remove([args, kwargs]) - - def test_disabled_execute_action(self): - cfg.CONF.set_override('enable_execute_action', False) - run = agnostic.DseRuntime('test') - run._rpc = mock.MagicMock() - run.service_exists = mock.MagicMock() - service_name = 'test-service' - action = 'non_executable_action' - action_args = {'positional': ['p_arg1'], - 'named': {'key1': 'value1'}} - - run.execute_action(service_name, action, action_args) - self.assertFalse(run._rpc.called) - - -class TestDisabledRules(base.SqlTestCase): - """Tests for Runtime's ability to enable/disable rules.""" - # insertions - def test_insert_enabled(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - schema = compile.Schema({'q': ('id', 'name', 'status')}) - run.set_schema('test', schema) - obj = run.policy_object('test') - 
run.insert('p(x) :- q(id=x)') - self.assertEqual(len(run.error_events), 0) - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 1) - - def test_insert_disabled(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - obj = run.policy_object('test') - run.insert('p(x) :- q(id=x)') - self.assertEqual(len(run.disabled_events), 1) - self.assertEqual(len(obj.content()), 0) - - def test_persistent_insert_disabled(self): - """Test that persistent_insert_rule errors on IncompleteSchemaException - - When a table schema is not available, named column references are - permitted but disabled in non-persistent rule insert to allow for - late-arriving schema when importing rules already in DB. - This behavior is not necessary in persistent_insert. - """ - run = agnostic.DseRuntime('dse') - run.synchronizer = synchronizer.PolicyRuleSynchronizer( - run, run.node) - run.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE) - run.persistent_create_policy('policy') - obj = run.policy_object('policy') - run.insert('p(x) :- data:q(id=x)') - try: - run.persistent_insert_rule('policy', 'p(x) :- data:q(id=x)', - '', '') - except exception.PolicyException as e: - self.assertTrue( - 'Literal data:q(id=x) uses unknown table q' - in str(e), - 'Named column reference on unknown table ' - 'should be disallowed in persistent insert') - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 0) - - try: - run.persistent_insert_rule('policy', 'p(x) :- unknown:q(id=x)', - '', '') - except exception.PolicyException as e: - self.assertTrue( - 'Literal unknown:q(id=x) uses named arguments, but the ' - 'schema is unknown.' - in str(e), - 'Named column reference on unknown table ' - 'should be disallowed in persistent insert') - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 0) - - def test_insert_errors(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - schema = compile.Schema({'q': ('name', 'status')}) - run.set_schema('test', schema) - obj = run.policy_object('test') - permitted, errors = run.insert('p(x) :- q(id=x)') - self.assertFalse(permitted) - errstring = " ".join(str(x) for x in errors) - self.assertIn("column name id does not exist", errstring) - self.assertEqual(len(run.error_events), 0) - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 0) - - def test_insert_set_schema_disabled(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - obj = run.policy_object('test') - run.insert('p(x) :- q(id=x)') # rule is disabled - self.assertEqual(len(run.disabled_events), 1) - schema = compile.Schema({'q': ('id', 'name', 'status')}) - run.set_schema('test', schema) - self.assertEqual(len(run.error_events), 0) - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 1) - - def test_insert_set_schema_disabled_multiple(self): - # insert rule that gets disabled - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - run.create_policy('nova', kind=datalog_base.DATASOURCE_POLICY_TYPE) - obj = run.policy_object('test') - run.insert('p(x) :- q(id=x), nova:r(id=x)', 'test') - self.assertEqual(len(run.disabled_events), 1) - # set first schema - schema = compile.Schema({'q': ('id', 'name', 'status')}) - run.set_schema('test', schema) - 
self.assertEqual(len(run.error_events), 0) - self.assertEqual(len(run.disabled_events), 1) - self.assertEqual(len(obj.content()), 0) - # set second schema - schema = compile.Schema({'r': ('id', 'name', 'status')}) - run.set_schema('nova', schema) - self.assertEqual(len(run.error_events), 0) - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 1) - - def test_insert_set_schema_errors(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - obj = run.policy_object('test') - run.insert('p(x) :- q(id=x)') # rule is disabled - self.assertEqual(len(run.disabled_events), 1) - schema = compile.Schema({'q': ('name', 'status')},) - run.set_schema('test', schema) - self.assertEqual(len(run.error_events), 1) - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 0) - - def test_insert_inferred_schema_errors(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - run.insert('p(x) :- q(x)') - permitted, errs = run.insert('q(1,2)') - self.assertFalse(permitted) - - # deletions - def test_delete_enabled(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - schema = compile.Schema({'q': ('id', 'name', 'status')}) - run.set_schema('test', schema) - obj = run.policy_object('test') - run.insert('p(x) :- q(id=x)') - self.assertEqual(len(obj.content()), 1) - run.delete('p(x) :- q(id=x)') - self.assertEqual(len(run.error_events), 0) - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 0) - - def test_delete_set_schema_disabled(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - obj = run.policy_object('test') - run.insert('p(x) :- q(id=x)') - run.delete('p(x) :- q(id=x)') - self.assertEqual(len(run.disabled_events), 2) - self.assertEqual(len(obj.content()), 0) - schema = compile.Schema({'q': ('id', 'name', 'status')}) - run.set_schema('test', schema) - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 0) - - def test_delete_errors(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - schema = compile.Schema({'q': ('name', 'status')}) - run.set_schema('test', schema) - obj = run.policy_object('test') - permitted, errors = run.delete('p(x) :- q(id=x)') - self.assertFalse(permitted) - errstring = " ".join(str(x) for x in errors) - self.assertIn("column name id does not exist", errstring) - self.assertEqual(len(run.error_events), 0) - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 0) - - def test_delete_set_schema_errors(self): - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - obj = run.policy_object('test') - run.delete('p(x) :- q(id=x)') # rule is disabled - self.assertEqual(len(run.disabled_events), 1) - schema = compile.Schema({'q': ('name', 'status')}) - run.set_schema('test', schema) - self.assertEqual(len(run.error_events), 1) - self.assertEqual(len(run.disabled_events), 0) - self.assertEqual(len(obj.content()), 0) - - # errors in set_schema - def test_set_schema_unknown_policy(self): - run = agnostic.Runtime() - schema = compile.Schema({'q': ('name', 'status')}) - try: - run.set_schema('test', schema) - self.fail("Error not thrown on unknown policy") - except exception.CongressException as e: - self.assertIn("not been created", str(e)) - 
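A compact model of the enable/disable lifecycle this class tests, assuming only what the assertions show: a rule using a named-column reference such as q(id=x) is parked while the table's schema is unknown, then either activated or converted into an error once set_schema supplies one. The class and field names below are invented for illustration, not Runtime internals.

    class SchemaGatedPolicy(object):
        def __init__(self):
            self.schema = None    # {table: (column, ...)} once known
            self.active = []      # rules in force
            self.disabled = []    # (rule, table, column) awaiting schema
            self.errors = []      # rules rejected against the schema

        def insert(self, rule, table, column):
            if self.schema is None:
                self.disabled.append((rule, table, column))
            elif column in self.schema.get(table, ()):
                self.active.append(rule)
            else:
                self.errors.append(
                    (rule, 'column name %s does not exist' % column))

        def set_schema(self, schema):
            # Re-evaluate everything that was waiting on the schema.
            self.schema = schema
            pending, self.disabled = self.disabled, []
            for rule, table, column in pending:
                self.insert(rule, table, column)

    pol = SchemaGatedPolicy()
    pol.insert('p(x) :- q(id=x)', 'q', 'id')   # no schema yet: parked
    assert len(pol.disabled) == 1 and not pol.active
    pol.set_schema({'q': ('id', 'name', 'status')})
    assert pol.active and not pol.disabled and not pol.errors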
- def test_disallow_schema_change(self): - # Ensures that cannot change schema once it is set. - # Can be removed once we support schema changes (e.g. for upgrade). - run = agnostic.Runtime() - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - schema = compile.Schema({'q': ('name', 'status')}) - run.set_schema('test', schema) - schema = compile.Schema({'q': ('id', 'name', 'status')}) - try: - run.set_schema('test', schema) - self.fail("Error not thrown on schema change") - except exception.CongressException as e: - self.assertIn("Schema for test already set", str(e)) - - def test_insert_without_datasource_policy(self): - run = agnostic.Runtime() - run.create_policy('beta') # not datasource policy - # exception because col refs over non-datasource policy - permitted, errors = run.insert('p(x) :- beta:q(name=x)') - self.assertFalse(permitted) - self.assertTrue( - any("does not reference a datasource policy" in str(e) - for e in errors)) - - def test_delete_policy_while_disabled_events_outstanding(self): - """Test deleting policy while there are disabled_events outstanding.""" - run = agnostic.Runtime() - - # generate disabled event - run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE) - obj = run.policy_object('test') - run.insert('p(x) :- q(id=x)') - self.assertEqual(len(run.disabled_events), 1) - self.assertEqual(len(obj.content()), 0) - - # create and delete another policy - run.create_policy('to_delete') - run.delete_policy('to_delete') - - -class TestDelegation(base.TestCase): - """Tests for Runtime's delegation functionality.""" - - def test_subpolicy(self): - run = agnostic.Runtime() - run.create_policy('test') - policy = 'error(x) :- q(x), r(x)' - run.insert(policy) - subpolicy = run.find_subpolicy( - set(['q']), set(), set(['error', 'warning'])) - e = helper.datalog_equal(subpolicy, policy) - self.assertTrue(e) - - def test_subpolicy_multiple(self): - run = agnostic.Runtime() - run.create_policy('test') - policy = ('error(x) :- q(x), r(x) ' - 'error(x) :- q(x), s(x) ' - 'warning(x) :- t(x), q(x)') - run.insert(policy) - subpolicy = run.find_subpolicy( - set(['q']), set(), set(['error', 'warning'])) - e = helper.datalog_equal(subpolicy, policy) - self.assertTrue(e) - - def test_subpolicy_prohibited(self): - run = agnostic.Runtime() - run.create_policy('test') - policy1 = 'error(x) :- q(x), r(x) ' - policy2 = 'error(x) :- q(x), s(x) ' - policy3 = 'error(x) :- q(x), prohibit(x, y) ' - policy4 = 'warning(x) :- t(x), q(x)' - run.insert(policy1 + policy2 + policy3 + policy4) - subpolicy = run.find_subpolicy( - set(['q']), set(['prohibit']), set(['error', 'warning'])) - e = helper.datalog_equal(subpolicy, policy1 + policy2 + policy4) - self.assertTrue(e) - - def test_subpolicy_layers(self): - run = agnostic.Runtime() - run.create_policy('test') - policy1 = 'error(x) :- t(x), u(x) ' - policy2 = ' t(x) :- q(x), s(x) ' - policy3 = 'error(x) :- p(x) ' - policy4 = ' p(x) :- prohibit(x, y)' - policy5 = 'warning(x) :- t(x), q(x)' - run.insert(policy1 + policy2 + policy3 + policy4 + policy5) - subpolicy = run.find_subpolicy( - set(['q']), set(['prohibit']), set(['error', 'warning'])) - e = helper.datalog_equal(subpolicy, policy1 + policy2 + policy5) - self.assertTrue(e) diff --git a/congress/tests/policy_engines/test_agnostic_dse2.py b/congress/tests/policy_engines/test_agnostic_dse2.py deleted file mode 100644 index 3a5c070d..00000000 --- a/congress/tests/policy_engines/test_agnostic_dse2.py +++ /dev/null @@ -1,350 +0,0 @@ -# Copyright (c) 2014 Styra, Inc. 
All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import sys - -import mock - -from congress.api import base as api_base -from congress.policy_engines import agnostic -from congress.tests.api import base as tests_api_base -from congress.tests import base -from congress.tests import helper - - -class TestDseRuntime(base.SqlTestCase): - @mock.patch('congress.db.db_policy_rules.get_policy_rules') - def test_enable_schema(self, patched_persisted_rules): - class TestRule(object): - def __init__(self, id, name, rule_str, - policy_name, comment=None): - self.id = id - self.name = name - self.rule = rule_str - self.policy_name = policy_name - self.comment = comment - - persisted_rule = [ - TestRule('rule-id', 'rule-name', - "p(x):-nova:services(id=x)", 'classification'), - ] - patched_persisted_rules.return_value = persisted_rule - - services = tests_api_base.setup_config() - engine2 = services['engine'] - node = services['node'] - - node.invoke_service_rpc = mock.MagicMock() - node.invoke_service_rpc.return_value = [ - ['id1', 'name1', 'status1'], - ['id2', 'name2', 'status2'], - ] - - # loaded rule is disabled - subscriptions = engine2.subscription_list() - self.assertEqual([], subscriptions) - - nova_schema = { - 'services': [ - {'name': 'id', 'desc': 'id of the service'}, - {'name': 'name', 'desc': 'name of the service'}, - {'name': 'status', 'desc': 'status of the service'}]} - - engine2.initialize_datasource('nova', nova_schema) - # loaded rule is enabled and subscribes the table - subscriptions = engine2.subscription_list() - self.assertEqual([('nova', 'services')], subscriptions) - - -class TestAgnostic(base.TestCase): - def test_receive_data_no_sequence_num(self): - '''Test receiving data without sequence numbers''' - run = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - run.always_snapshot = False - run.create_policy('datasource1') - - # initialize with full table - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[1], [2]], seqnum=None, is_snapshot=True) - actual = run.select('p(x)') - correct = 'p(1) p(2)' - self.assertTrue(helper.db_equal(actual, correct)) - - # add data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[3], [4]], []], seqnum=None, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1) p(2) p(3) p(4)' - self.assertTrue(helper.db_equal(actual, correct)) - - # remove data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[], [[2], [4]]], seqnum=None, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1) p(3)' - self.assertTrue(helper.db_equal(actual, correct)) - - # add & remove data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[4]], [[3]]], seqnum=None, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1) p(4)' - self.assertTrue(helper.db_equal(actual, correct)) - - # re-initialize with full table - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[1], [2]], seqnum=None, 
is_snapshot=True) - actual = run.select('p(x)') - correct = 'p(1) p(2)' - self.assertTrue(helper.db_equal(actual, correct)) - - def test_receive_data_in_order(self): - '''Test receiving data with sequence numbers, in order''' - run = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - run.always_snapshot = False - run.create_policy('datasource1') - - # initialize with full table - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[1], [2]], seqnum=0, is_snapshot=True) - actual = run.select('p(x)') - correct = 'p(1) p(2)' - self.assertTrue(helper.db_equal(actual, correct)) - - # add data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[3], [4]], []], seqnum=1, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1) p(2) p(3) p(4)' - self.assertTrue(helper.db_equal(actual, correct)) - - # remove data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[], [[2], [4]]], seqnum=2, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1) p(3)' - self.assertTrue(helper.db_equal(actual, correct)) - - # add & remove data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[4]], [[3]]], seqnum=3, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1) p(4)' - self.assertTrue(helper.db_equal(actual, correct)) - - # re-initialize with full table - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[1], [2]], seqnum=4, is_snapshot=True) - actual = run.select('p(x)') - correct = 'p(1) p(2)' - self.assertTrue(helper.db_equal(actual, correct)) - - def test_receive_data_out_of_order(self): - '''Test receiving data with sequence numbers, out of order''' - run = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - run.always_snapshot = False - run.create_policy('datasource1') - - # update with lower seqnum than init snapshot is ignored - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[10]], []], seqnum=3, is_snapshot=False) - - # add & remove data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[4]], [[3]]], seqnum=7, is_snapshot=False) - actual = run.select('p(x)') - correct = '' - self.assertTrue(helper.db_equal(actual, correct)) - - # remove data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[], [[2], [4]]], seqnum=6, is_snapshot=False) - actual = run.select('p(x)') - correct = '' - self.assertTrue(helper.db_equal(actual, correct)) - - # add data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[3], [4]], []], seqnum=5, is_snapshot=False) - actual = run.select('p(x)') - correct = '' - self.assertTrue(helper.db_equal(actual, correct)) - - # initialize with full table - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[1], [2]], seqnum=4, is_snapshot=True) - actual = run.select('p(x)') - correct = 'p(1) p(4)' - self.assertTrue(helper.db_equal(actual, correct)) - - def test_receive_data_arbitrary_start(self): - '''Test receiving data with arbitrary starting sequence number''' - run = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - run.always_snapshot = False - run.create_policy('datasource1') - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[1], [2]], seqnum=1234, is_snapshot=True) - actual = run.select('p(x)') - correct = 'p(1) p(2)' - self.assertTrue(helper.db_equal(actual, correct)) - - def test_receive_data_duplicate_sequence_number(self): - '''Test receiving data with duplicate sequence 
number - - Only one message (arbitrary) should be processed. - ''' - run = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - run.always_snapshot = False - run.create_policy('datasource1') - - # send three updates with the same seqnum - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[1]], []], seqnum=1, is_snapshot=False) - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[2]], []], seqnum=1, is_snapshot=False) - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[3]], []], seqnum=1, is_snapshot=False) - - # start with empty data - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[], seqnum=0, is_snapshot=True) - - # exactly one of the three updates should be applied - actual = run.select('p(x)') - correct1 = 'p(1)' - correct2 = 'p(2)' - correct3 = 'p(3)' - self.assertTrue( - helper.db_equal(actual, correct1) or - helper.db_equal(actual, correct2) or - helper.db_equal(actual, correct3)) - - def test_receive_data_sequence_number_max_int(self): - '''Test receiving data when sequence number goes over max int''' - run = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - run.always_snapshot = False - run.create_policy('datasource1') - - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[1], [2]], seqnum=sys.maxsize, is_snapshot=True) - actual = run.select('p(x)') - correct = 'p(1) p(2)' - self.assertTrue(helper.db_equal(actual, correct)) - - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[], [[2]]], seqnum=sys.maxsize + 1, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1)' - self.assertTrue(helper.db_equal(actual, correct)) - - # test out-of-sequence update ignored - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[2]], []], seqnum=sys.maxsize, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1)' - self.assertTrue(helper.db_equal(actual, correct)) - - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[4]], []], seqnum=sys.maxsize + 3, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1)' - self.assertTrue(helper.db_equal(actual, correct)) - - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[3]], []], seqnum=sys.maxsize + 2, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1) p(3) p(4)' - self.assertTrue(helper.db_equal(actual, correct)) - - def test_receive_data_multiple_tables(self): - '''Test receiving data with sequence numbers, multiple tables''' - run = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID) - run.always_snapshot = False - run.create_policy('datasource1') - - # initialize p with full table - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[1]], seqnum=0, is_snapshot=True) - actual = run.select('p(x)') - correct = 'p(1)' - self.assertTrue(helper.db_equal(actual, correct)) - - # add data to p - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[2]], []], seqnum=1, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1) p(2)' - self.assertTrue(helper.db_equal(actual, correct)) - - # add data to q - run.receive_data_sequenced( - publisher='datasource1', table='q', - data=[[[2]], []], seqnum=1, is_snapshot=False) - actual = run.select('q(x)') - correct = '' # does not apply until initialize - self.assertTrue(helper.db_equal(actual, correct)) - - # initialize q with full table - run.receive_data_sequenced( - publisher='datasource1', 
table='q', - data=[[1]], seqnum=0, is_snapshot=True) - actual = run.select('q(x)') - correct = 'q(1) q(2)' # both initial data and preceding update applied - self.assertTrue(helper.db_equal(actual, correct)) - - # add data to q - run.receive_data_sequenced( - publisher='datasource1', table='q', - data=[[[3]], []], seqnum=2, is_snapshot=False) - actual = run.select('q(x)') - correct = 'q(1) q(2) q(3)' - self.assertTrue(helper.db_equal(actual, correct)) - - # add data to p - run.receive_data_sequenced( - publisher='datasource1', table='p', - data=[[[3]], []], seqnum=2, is_snapshot=False) - actual = run.select('p(x)') - correct = 'p(1) p(2) p(3)' - self.assertTrue(helper.db_equal(actual, correct)) - - # TODO(ekcs): receive data multiple publishers diff --git a/congress/tests/policy_engines/test_agnostic_performance.py b/congress/tests/policy_engines/test_agnostic_performance.py deleted file mode 100644 index d82a8ba1..00000000 --- a/congress/tests/policy_engines/test_agnostic_performance.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import tenacity - -from oslo_config import cfg -from oslo_log import log as logging - -from congress.datalog import base -from congress.datalog import compile -from congress.policy_engines import agnostic -from congress.tests.api import base as api_base -from congress.tests import base as testbase -from congress.tests.datasources import performance_datasource_driver -from congress.tests import helper - -LOG = logging.getLogger(__name__) - -NREC_THEORY = 'non-recursive theory' -DB_THEORY = 'database' -ACTION_THEORY = 'action' - - -class TestRuntimePerformance(testbase.TestCase): - """Tests for Runtime performance that are not specific to any theory. 
- - To run one test: - nosetests -v \ - congress/tests/policy_engines/test_agnostic_performance.py:TestRuntimePerformance.test_foo - - To collect profiling data: - python -m cProfile -o profile.out `which nosetests` -v \ - congress/tests/policy_engines/test_agnostic_performance.py:TestRuntimePerformance.test_foo - - To parse and sort profiling data in different ways: - import pstats - pstats.Stats('profile.out').strip_dirs().sort_stats("cum").print_stats() - pstats.Stats('profile.out').strip_dirs().sort_stats("time").print_stats() - pstats.Stats('profile.out').strip_dirs().sort_stats("calls").print_stats() - - """ - - def setUp(self): - super(TestRuntimePerformance, self).setUp() - - self._agnostic = agnostic.Runtime() - self._agnostic.create_policy(NREC_THEORY, - kind=base.NONRECURSIVE_POLICY_TYPE) - self._agnostic.create_policy(DB_THEORY, kind=base.DATABASE_POLICY_TYPE) - self._agnostic.debug_mode() - self._agnostic.insert('', target=NREC_THEORY) - - def _create_event(self, table, tuple_, insert, target): - return compile.Event(compile.Literal.create_from_table_tuple(table, - tuple_), - insert=insert, target=target) - - def _create_large_tables(self, n, theory): - facts = [compile.Fact('p', (i, j, k)) - for i in range(n) for k in range(n) for j in range(n)] - - facts.extend(compile.Fact('q', (i,)) for i in range(n)) - self._agnostic.initialize_tables(['p', 'q'], facts, theory) - - def test_insert_nonrecursive(self): - MAX = 100 - th = NREC_THEORY - for i in range(MAX): - self._agnostic.insert('r(%d)' % i, th) - - def test_insert_database(self): - MAX = 100 - th = DB_THEORY - for i in range(MAX): - self._agnostic.insert('r(%d)' % i, th) - - def test_update_nonrecursive(self): - MAX = 10000 - th = NREC_THEORY - updates = [self._create_event('r', (i,), True, th) - for i in range(MAX)] - self._agnostic.update(updates) - - def test_update_database(self): - MAX = 1000 - th = DB_THEORY - updates = [self._create_event('r', (i,), True, th) - for i in range(MAX)] - self._agnostic.update(updates) - - def test_indexing(self): - MAX = 100 - th = NREC_THEORY - - for table in ('a', 'b', 'c'): - updates = [self._create_event(table, (i,), True, th) - for i in range(MAX)] - self._agnostic.update(updates) - - # With indexing, this query should take O(n) time where n is MAX. - # Without indexing, this query will take O(n^3). 
- self._agnostic.insert('d(x) :- a(x), b(x), c(x)', th) - ans = ' '.join(['d(%d)' % i for i in range(MAX)]) - self.assertTrue(helper.datalog_equal(self._agnostic.select('d(x)', - th), ans)) - - def test_runtime_initialize_tables(self): - MAX = 700 - longstring = 'a' * 100 - facts = (compile.Fact('p', - (1, 2, 'foo', 'bar', i, longstring + str(i))) - for i in range(MAX)) - - th = NREC_THEORY - self._agnostic.initialize_tables(['p'], facts, th) - - def test_select_1match(self): - # with different types of policies (exercise indexing, large sets, - # many joins, etc) - MAX = 10 - th = NREC_THEORY - - self._create_large_tables(MAX, th) - self._agnostic.insert('r(x,y) :- p(x,x,y), q(x)', th) - - for i in range(100): - # This select returns 1 result - self._agnostic.select('r(1, 1)', th) - - def test_select_100matches(self): - # with different types of policies (exercise indexing, large sets, - # many joins, etc) - MAX = 10 - th = NREC_THEORY - - self._create_large_tables(MAX, th) - self._agnostic.insert('r(x,y) :- p(x,x,y), q(x)', th) - - # This takes about 60ms per select - for i in range(10): - # This select returns 100 results - self._agnostic.select('r(x, y)', th) - - def test_simulate_latency(self): - # We think the cost will be the sum of the simulate call plus the cost - # to do and undo the evaluation, so this test focuses on the cost - # specific to the simulate call and does a minimal amount of - # evaluation. - - MAX = 10 - th = NREC_THEORY - - self._create_large_tables(MAX, th) - self._agnostic.create_policy(ACTION_THEORY, - kind=base.ACTION_POLICY_TYPE) - - self._agnostic.insert('q(0)', th) - self._agnostic.insert('s(x) :- q(x), p(x,0,0)', th) - - # This takes about 13ms per simulate. The query for s(x) can use - # indexing, so it should be efficient. - for _ in range(100): - self._agnostic.simulate('s(x)', th, 'p-(0,0,0)', - ACTION_THEORY, delta=True) - - def test_simulate_throughput(self): - # up to 250 requests per second - pass - - def test_update_rate(self): - pass - - def test_concurrency(self): - pass - - -class TestDsePerformance(testbase.SqlTestCase): - - def setUp(self): - super(TestDsePerformance, self).setUp() - self.services = api_base.setup_config(with_fake_datasource=False, - node_id="perf") - cfg.CONF.set_override( - 'drivers', - [('congress.tests.datasources.performance_datasource_driver' - '.PerformanceTestDriver')]) - - self.node = self.services['node'] - self.engine = self.services['engine'] - - def tearDown(self): - self.node.stop() - super(TestDsePerformance, self).tearDown() - - @tenacity.retry(wait=tenacity.wait_fixed(0.1)) - def wait_til_query_nonempty(self, query, policy): - if len(self.engine.select(query, target=policy)) == 0: - raise Exception("Query %s is empty" % query) - - def test_initialize_tables_dse(self): - """Test performance of initializing data with DSE and Engine. - - This test populates the tables exported by a datasource driver, - and then invokes the poll() method to send that data to the - policy engine. It tests the amount of time to send tables - across the DSE and load them into the policy engine.
- """ - MAX_TUPLES = 700 - # install datasource driver we can control - kwds = helper.datasource_openstack_args() - kwds['poll_time'] = 0 - driver = performance_datasource_driver.PerformanceTestDriver('data', - args=kwds) - self.node.register_service(driver) - self.engine.create_policy('data') - - # set return value for datasource driver - facts = [(1, 2.3, 'foo', 'bar', i, 'a'*100 + str(i)) - for i in range(MAX_TUPLES)] - driver.state = {'p': facts} - - # Send formula to engine (so engine subscribes to data:p) - policy = self.engine.DEFAULT_THEORY - formula = compile.parse1( - 'q(1) :- data:p(1, 2.3, "foo", "bar", 1, %s)' % ('a'*100 + '1')) - self.engine.process_policy_update( - [compile.Event(formula, target=policy)]) - - # Poll data and wait til it arrives at engine - driver.poll() - self.wait_til_query_nonempty('q(1)', policy) - - def test_initialize_tables_full(self): - """Test performance of initializing data with Datasource, DSE, Engine. - - This test gives a datasource driver the Python data that would - have resulted from making an API call and parsing it into Python - and then polls that datasource, waiting until the data arrives - in the policy engine. It tests the amount of time required to - translate Python data into tables, send those tables over the DSE, - and load them into the policy engine. - """ - MAX_TUPLES = 700 - # install datasource driver we can control - kwds = helper.datasource_openstack_args() - kwds['poll_time'] = 0 - driver = performance_datasource_driver.PerformanceTestDriver('data', - args=kwds) - self.node.register_service(driver) - self.engine.create_policy('data') - - # set return value for datasource driver - facts = [{'field1': 1, - 'field2': 2.3, - 'field3': 'foo', - 'field4': 'bar', - 'field5': i, - 'field6': 'a'*100 + str(i)} - for i in range(MAX_TUPLES)] - driver.client_data = facts - - # Send formula to engine (so engine subscribes to data:p) - policy = self.engine.DEFAULT_THEORY - formula = compile.parse1( - 'q(1) :- data:p(1, 2.3, "foo", "bar", 1, %s)' % ('a'*100 + '1')) - LOG.info("publishing rule") - self.engine.process_policy_update( - [compile.Event(formula, target=policy)]) - - # Poll data and wait til it arrives at engine - driver.poll() - self.wait_til_query_nonempty('q(1)', policy) diff --git a/congress/tests/policy_fixture.py b/congress/tests/policy_fixture.py deleted file mode 100644 index 2b3456cd..00000000 --- a/congress/tests/policy_fixture.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json -import os - -import fixtures -from oslo_config import cfg - -import congress.common.policy -from congress.tests import fake_policy - -CONF = cfg.CONF - - -class PolicyFixture(fixtures.Fixture): - - def setUp(self): - super(PolicyFixture, self).setUp() - self.policy_dir = self.useFixture(fixtures.TempDir()) - self.policy_file_name = os.path.join(self.policy_dir.path, - 'policy.json') - with open(self.policy_file_name, 'w') as policy_file: - policy_file.write(fake_policy.policy_data) - CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy') - congress.common.policy.reset() - congress.common.policy.init() - self.addCleanup(congress.common.policy.reset) - - -class RoleBasedPolicyFixture(fixtures.Fixture): - - def __init__(self, role="admin", *args, **kwargs): - super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs) - self.role = role - - def setUp(self): - """Set up the env for test. - - Copy live policy.json file and convert all actions to - allow users of the specified role only. - """ - super(RoleBasedPolicyFixture, self).setUp() - policy = json.load(open(CONF.oslo_policy.policy_file)) - - # Convert all actions to require specified role - for action, rule in policy.items(): - policy[action] = 'role:%s' % self.role - - self.policy_dir = self.useFixture(fixtures.TempDir()) - self.policy_file_name = os.path.join(self.policy_dir.path, - 'policy.json') - with open(self.policy_file_name, 'w') as policy_file: - json.dump(policy, policy_file) - CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy') - congress.common.policy.reset() - congress.common.policy.init() - self.addCleanup(congress.common.policy.reset) diff --git a/congress/tests/test_auth.py b/congress/tests/test_auth.py deleted file mode 100644 index 8295595d..00000000 --- a/congress/tests/test_auth.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
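An aside on the policy fixtures removed above: they are consumed through the standard fixtures/useFixture mechanism, so a test that wants role-restricted policy enforcement needs only one line in its setUp. A minimal sketch, assuming a testtools-based test case (the test class and role name are illustrative):

    from congress.tests import base
    from congress.tests import policy_fixture

    class MyRolePolicyTest(base.TestCase):  # illustrative test class
        def setUp(self):
            super(MyRolePolicyTest, self).setUp()
            # useFixture() calls the fixture's setUp() and registers its
            # cleanups; every policy action now requires the 'member' role.
            self.policy = self.useFixture(
                policy_fixture.RoleBasedPolicyFixture(role='member'))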
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_middleware import request_id -import webob - -from congress import auth -from congress.tests import base - - -class CongressKeystoneContextTestCase(base.TestCase): - def setUp(self): - super(CongressKeystoneContextTestCase, self).setUp() - - @webob.dec.wsgify - def fake_app(req): - self.context = req.environ['congress.context'] - return webob.Response() - - self.context = None - self.middleware = auth.CongressKeystoneContext(fake_app) - self.request = webob.Request.blank('/') - self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' - - def test_no_user_id(self): - self.request.headers['X_PROJECT_ID'] = 'testtenantid' - response = self.request.get_response(self.middleware) - self.assertEqual('401 Unauthorized', response.status) - - def test_with_user_id(self): - self.request.headers['X_PROJECT_ID'] = 'testtenantid' - self.request.headers['X_USER_ID'] = 'testuserid' - response = self.request.get_response(self.middleware) - self.assertEqual('200 OK', response.status) - self.assertEqual('testuserid', self.context.user_id) - self.assertEqual('testuserid', self.context.user) - - def test_with_tenant_id(self): - self.request.headers['X_PROJECT_ID'] = 'testtenantid' - self.request.headers['X_USER_ID'] = 'test_user_id' - response = self.request.get_response(self.middleware) - self.assertEqual('200 OK', response.status) - self.assertEqual('testtenantid', self.context.tenant_id) - self.assertEqual('testtenantid', self.context.tenant) - - def test_roles_no_admin(self): - self.request.headers['X_PROJECT_ID'] = 'testtenantid' - self.request.headers['X_USER_ID'] = 'testuserid' - self.request.headers['X_ROLES'] = 'role1, role2 , role3,role4,role5' - response = self.request.get_response(self.middleware) - self.assertEqual('200 OK', response.status) - self.assertEqual(['role1', 'role2', 'role3', 'role4', 'role5'], - self.context.roles) - self.assertFalse(self.context.is_admin) - - def test_roles_with_admin(self): - self.request.headers['X_PROJECT_ID'] = 'testtenantid' - self.request.headers['X_USER_ID'] = 'testuserid' - self.request.headers['X_ROLES'] = ('role1, role2 , role3,role4,role5,' - 'AdMiN') - response = self.request.get_response(self.middleware) - self.assertEqual('200 OK', response.status) - self.assertEqual(['role1', 'role2', 'role3', 'role4', 'role5', - 'AdMiN'], self.context.roles) - self.assertTrue(self.context.is_admin) - - def test_with_user_tenant_name(self): - self.request.headers['X_PROJECT_ID'] = 'testtenantid' - self.request.headers['X_USER_ID'] = 'testuserid' - self.request.headers['X_PROJECT_NAME'] = 'testtenantname' - self.request.headers['X_USER_NAME'] = 'testusername' - response = self.request.get_response(self.middleware) - self.assertEqual('200 OK', response.status) - self.assertEqual('testuserid', self.context.user_id) - self.assertEqual('testusername', self.context.user_name) - self.assertEqual('testtenantid', self.context.tenant_id) - self.assertEqual('testtenantname', self.context.tenant_name) - - def test_request_id_extracted_from_env(self): - req_id = 'dummy-request-id' - self.request.headers['X_PROJECT_ID'] = 'testtenantid' - self.request.headers['X_USER_ID'] = 'testuserid' - self.request.environ[request_id.ENV_REQUEST_ID] = req_id - self.request.get_response(self.middleware) - self.assertEqual(req_id, self.context.request_id) diff --git a/congress/tests/test_benchmark_updates.py b/congress/tests/test_benchmark_updates.py deleted file mode 100644 
index 7a512e18..00000000 --- a/congress/tests/test_benchmark_updates.py +++ /dev/null @@ -1,169 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import functools -import logging - -import eventlet -from mox3 import mox -from six.moves import range - -from congress.api import base as api_base -from congress.datalog import compile -from congress import harness -from congress.policy_engines import agnostic -from congress.tests import base -from congress.tests import helper - - -LOG = logging.getLogger(__name__) - - -class BenchmarkDatasource(base.Benchmark): - - def setUp(self): - super(BenchmarkDatasource, self).setUp() - config = {'benchmark': { - 'module': helper.data_module_path('benchmark_driver.py'), - 'poll_time': 0}} - cage = harness.create(helper.root_path(), None, config) - engine = cage.service_object(api_base.ENGINE_SERVICE_ID) - api = {'policy': cage.service_object('api-policy'), - 'rule': cage.service_object('api-rule'), - 'table': cage.service_object('api-table'), - 'row': cage.service_object('api-row'), - 'datasource': cage.service_object('api-datasource'), - 'status': cage.service_object('api-status'), - 'schema': cage.service_object('api-schema')} - helper.retry_check_subscriptions(engine, [(api['rule'].name, - 'policy-update')]) - helper.retry_check_subscribers(api['rule'], [(engine.name, - 'policy-update')]) - self.assertIn('benchmark', cage.services) - datasource = cage.service_object('benchmark') - table_name = datasource.BENCHTABLE - - self.assertEqual(datasource.state, {}) - - # add a subscriber to ensure the updates end up in datasource.dataPath - # pubdata = datasource.pubdata.setdefault(table_name, - # dataobj.pubData(table_name)) - # pubdata.addsubscriber(self.__class__.__name__, "push", "") - # self.assertTrue(datasource.pubdata[table_name]) - - self.cage = cage - self.engine = engine - self.api = api - self.table_name = table_name - self.datasource = datasource - - def benchmark_datasource_update(self, size): - """Benchmark a datasource update. - - Time the propagation of a datasource update from datasource.poll() to - ending up in the datasource.dataPath queue. 
- """ - - LOG.info("%s:: benchmarking datasource update of %d rows", size) - self.datasource.datarows = size - - # intercept the queue addition so it doesn't immediately get pulled off - # by the d6cage - received = eventlet.Queue() - self.mox.StubOutWithMock(self.datasource.dataPath, "put_nowait") - self.datasource.dataPath.put_nowait(mox.IgnoreArg()).WithSideEffects( - received.put_nowait) - self.mox.ReplayAll() - - # poll and then wait until we've got an item from our queue - LOG.info("%s:: polling datasource", self.__class__.__name__) - self.datasource.poll() - result = received.get(timeout=30) - self.assertTrue(result.body) - self.assertEqual(len(result.body.data), size) - self.mox.VerifyAll() - - def benchmark_datasource_to_policy_update(self, size): - """Benchmark small datsource update to policy propagation. - - Time the propagation of a datasource update from datasource.poll() to - completion of a simple policy update. - """ - LOG.info("%s:: benchmarking datasource update of %d rows", size) - self.datasource.datarows = size - table_name = self.table_name - - # dummy policy only intended to produce a subscriber for the table - key_to_index = self.datasource.get_column_map(table_name) - id_index = 'x%d' % list(key_to_index.items())[0][1] - max_index = max(key_to_index.values()) - args = ['x%d' % i for i in range(max_index + 1)] - formula = compile.parse1('p(%s) :- benchmark:%s(%s)' % (id_index, - table_name, ','.join(args))) - - # publish the formula and verify we see a subscription - LOG.debug('%s:: sending formula: %s', self.__class__.__name__, formula) - self.api['rule'].publish('policy-update', [agnostic.Event(formula)]) - helper.retry_check_subscriptions( - self.engine, [('benchmark', table_name)]) - helper.retry_check_subscribers( - self.datasource, [(self.engine.name, table_name)]) - - # intercept inbox.task_done() so we know when it's finished. Sadly, - # eventlet doesn't have a condition-like object. - fake_condition = eventlet.Queue() - fake_notify = functools.partial(fake_condition.put_nowait, True) - self.mox.StubOutWithMock(self.engine.inbox, "task_done") - self.engine.inbox.task_done().WithSideEffects(fake_notify) - self.mox.ReplayAll() - - LOG.info("%s:: polling datasource", self.__class__.__name__) - self.datasource.poll() - fake_condition.get(timeout=30) - self.mox.VerifyAll() - - def test_benchmark_datasource_update_small(self): - """Benchmark a small datasource update. - - Time the propagation of a small (10 row) datasource update from - datasource.poll() to ending up in the datasource.dataPath queue. - """ - self.benchmark_datasource_update(10) - - def test_benchmark_datasource_update_large(self): - """Benchmark a large datasource update. - - Time the propagation of a large (100k row) datasource update from - datasource.poll() to ending up in the datasource.dataPath queue. - """ - self.benchmark_datasource_update(100000) - - def test_benchmark_datasource_to_policy_update_small(self): - """Benchmark small datsource update to policy propagation. - - Time the propagation of a small (10 row) datasource update from - datasource.poll() to a simple policy update. - """ - self.benchmark_datasource_to_policy_update(10) - - def test_benchmark_datasource_to_policy_update_large(self): - """Benchmark small datsource update to policy propagation. - - Time the propagation of a large (100k row) datasource update from - datasource.poll() to a simple policy update. 
- """ - self.benchmark_datasource_to_policy_update(100000) diff --git a/congress/tests/test_config.py b/congress/tests/test_config.py deleted file mode 100644 index 4dec24cf..00000000 --- a/congress/tests/test_config.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2014 VMware -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from oslo_config import cfg -import testtools - - -class ConfigurationTest(testtools.TestCase): - - def test_defaults(self): - self.assertEqual('0.0.0.0', cfg.CONF.bind_host) - self.assertEqual(1789, cfg.CONF.bind_port) - self.assertFalse(cfg.CONF.tcp_keepalive) - self.assertEqual(600, cfg.CONF.tcp_keepidle) - self.assertEqual(1, cfg.CONF.api_workers) - self.assertEqual('api-paste.ini', cfg.CONF.api_paste_config) - self.assertEqual('keystone', cfg.CONF.auth_strategy) - self.assertEqual(False, cfg.CONF.datasources) - self.assertEqual(False, cfg.CONF.api) - self.assertEqual(False, cfg.CONF.policy_engine) - self.assertTrue(hasattr(cfg.CONF, 'node_id')) # default varies - self.assertEqual(False, cfg.CONF.delete_missing_driver_datasources) diff --git a/congress/tests/test_congress.py b/congress/tests/test_congress.py deleted file mode 100644 index ecec8ba5..00000000 --- a/congress/tests/test_congress.py +++ /dev/null @@ -1,583 +0,0 @@ -# -# Copyright (c) 2014 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_congress ----------------------------------- - -Tests for `congress` module. 
-""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock -import neutronclient.v2_0 -from oslo_log import log as logging - -from congress.api import base as api_base -from congress.common import config -from congress.datalog import compile -from congress.datasources import neutronv2_driver -from congress.datasources import nova_driver -from congress.db import db_library_policies -from congress.tests.api import base as tests_api_base -from congress.tests import base -from congress.tests.datasources import test_neutron_driver as test_neutron -from congress.tests import helper - - -LOG = logging.getLogger(__name__) - - -class BaseTestPolicyCongress(base.SqlTestCase): - - def setUp(self): - super(BaseTestPolicyCongress, self).setUp() - self.services = tests_api_base.setup_config(with_fake_datasource=False) - self.api = self.services['api'] - self.node = self.services['node'] - self.engine = self.services['engine'] - self.library = self.services['library'] - - self.neutronv2 = self._create_neutron_mock('neutron') - - def tearDown(self): - self.node.stop() - super(BaseTestPolicyCongress, self).tearDown() - - def _create_neutron_mock(self, name): - # Register Neutron service - args = helper.datasource_openstack_args() - neutronv2 = neutronv2_driver.NeutronV2Driver(name, args=args) - # FIXME(ekcs): this is a hack to prevent the synchronizer from - # attempting to delete this DSD because it's not in DB - neutronv2.type = 'no_sync_datasource_driver' - neutron_mock = mock.MagicMock(spec=neutronclient.v2_0.client.Client) - neutronv2.neutron = neutron_mock - - # initialize neutron_mocks - network1 = test_neutron.network_response - port_response = test_neutron.port_response - router_response = test_neutron.router_response - sg_group_response = test_neutron.security_group_response - neutron_mock.list_networks.return_value = network1 - neutron_mock.list_ports.return_value = port_response - neutron_mock.list_routers.return_value = router_response - neutron_mock.list_security_groups.return_value = sg_group_response - self.node.register_service(neutronv2) - return neutronv2 - - -class TestCongress(BaseTestPolicyCongress): - - def setUp(self): - """Setup tests that use multiple mock neutron instances.""" - super(TestCongress, self).setUp() - - # clear the library policies loaded on startup - db_library_policies.delete_policies() - - def tearDown(self): - super(TestCongress, self).tearDown() - - def setup_config(self): - args = ['--config-file', helper.etcdir('congress.conf.test')] - config.init(args) - - def test_startup(self): - self.assertIsNotNone(self.services['api']) - self.assertIsNotNone(self.services['engine']) - self.assertIsNotNone(self.services['library']) - self.assertIsNotNone(self.services['engine'].node) - - def test_policy(self): - self.create_policy('alpha') - self.insert_rule('q(1, 2) :- true', 'alpha') - self.insert_rule('q(2, 3) :- true', 'alpha') - helper.retry_check_function_return_value( - lambda: sorted(self.query('q', 'alpha')['results'], - key=lambda x: x['data']), - sorted([{'data': (1, 2)}, {'data': (2, 3)}], - key=lambda x: x['data'])) - helper.retry_check_function_return_value( - lambda: list(self.query('q', 'alpha').keys()), - ['results']) - - def test_policy_datasource(self): - self.create_policy('alpha') - self.create_fake_datasource('fake') - data = self.node.service_object('fake') - data.state = {'fake_table': set([(1, 2)])} - - data.poll() - self.insert_rule('q(x) :- fake:fake_table(x,y)', 'alpha') - 
helper.retry_check_function_return_value( - lambda: self.query('q', 'alpha'), {'results': [{'data': (1,)}]}) - - # TODO(dse2): enable rules to be inserted before data is created. - # Maybe just have subscription handle errors gracefully when - # asking for a snapshot and return []. - # self.insert_rule('p(x) :- fake:fake_table(x)', 'alpha') - - def test_policy_create_from_library(self): - def adjust_for_comparison(rules): - # compile rule string into rule object - # replace dict with tuple for sorting - # 'id' field implicitly dropped if present - rules = [(compile.parse1(rule['rule']), rule['name'], - rule['comment']) for rule in rules] - - # sort lists for comparison - return sorted(rules) - - test_policy = { - "name": "test_policy", - "description": "test policy description", - "kind": "nonrecursive", - "abbreviation": "abbr", - "rules": [{"rule": "p(x) :- q(x)", "comment": "test comment", - "name": "test name"}, - {"rule": "p(x) :- q2(x)", "comment": "test comment2", - "name": "test name2"}] - } - test_policy_id, test_policy_obj = self.api[ - 'api-library-policy'].add_item(test_policy, {}) - - add_policy_id, add_policy_obj = self.api['api-policy'].add_item( - None, {'library_policy': test_policy_id}) - - test_policy['id'] = add_policy_id - - # adjust for comparison - test_policy['owner_id'] = 'user' - test_policy['rules'] = adjust_for_comparison(test_policy['rules']) - - add_policy_obj['rules'] = adjust_for_comparison( - add_policy_obj['rules']) - - self.assertEqual(add_policy_obj, test_policy) - - context = {'policy_id': test_policy['name']} - rules = self.api['api-rule'].get_items({}, context)['results'] - rules = adjust_for_comparison(rules) - self.assertEqual(rules, test_policy['rules']) - - res = self.api['api-policy'].get_items({})['results'] - del test_policy['rules'] - self.assertIn(test_policy, res) - - def test_policy_create_with_rules(self): - def adjust_for_comparison(rules): - # compile rule string into rule object - # replace dict with tuple for sorting - # 'id' field implicitly dropped if present - rules = [(compile.parse1(rule['rule']), rule['name'], - rule['comment']) for rule in rules] - - # sort lists for comparison - return sorted(rules) - - test_policy = { - "name": "test_policy", - "description": "test policy description", - "kind": "nonrecursive", - "abbreviation": "abbr", - "rules": [{"rule": "p(x) :- q(x)", "comment": "test comment", - "name": "test name"}, - {"rule": "p(x) :- q2(x)", "comment": "test comment2", - "name": "test name2"}] - } - - add_policy_id, add_policy_obj = self.api['api-policy'].add_item( - test_policy, {}) - - test_policy['id'] = add_policy_id - - # adjust for comparison - test_policy['owner_id'] = 'user' - test_policy['rules'] = adjust_for_comparison(test_policy['rules']) - - add_policy_obj['rules'] = adjust_for_comparison( - add_policy_obj['rules']) - - self.assertEqual(add_policy_obj, test_policy) - - context = {'policy_id': test_policy['name']} - rules = self.api['api-rule'].get_items({}, context)['results'] - rules = adjust_for_comparison(rules) - self.assertEqual(rules, test_policy['rules']) - - res = self.api['api-policy'].get_items({})['results'] - del test_policy['rules'] - self.assertIn(test_policy, res) - - def create_policy(self, name): - return self.api['api-policy'].add_item({'name': name}, {}) - - def create_policy_from_obj(self, policy_obj): - return self.api['api-policy'].add_item(policy_obj, {}) - - def insert_rule(self, rule, policy): - context = {'policy_id': policy} - return self.api['api-rule'].add_item( - {'rule': rule},
{}, context=context) - - def create_fake_datasource(self, name): - item = {'name': name, - 'driver': 'fake_datasource', - 'description': 'hello world!', - 'enabled': True, - 'type': None, - 'config': {'auth_url': 'foo', - 'username': 'armax', - 'password': '', - 'tenant_name': 'armax'}} - - return self.api['api-datasource'].add_item(item, params={}) - - def query(self, tablename, policyname): - context = {'policy_id': policyname, - 'table_id': tablename} - return self.api['api-row'].get_items({}, context) - - def test_rule_insert_delete(self): - self.api['api-policy'].add_item({'name': 'alice'}, {}) - context = {'policy_id': 'alice'} - (id1, _) = self.api['api-rule'].add_item( - {'rule': 'p(x) :- plus(y, 1, x), q(y)'}, {}, context=context) - ds = self.api['api-rule'].get_items({}, context)['results'] - self.assertEqual(len(ds), 1) - self.api['api-rule'].delete_item(id1, {}, context) - ds = self.engine.policy_object('alice').content() - self.assertEqual(len(ds), 0) - - def test_datasource_request_refresh(self): - # neutron polls automatically here, which is why register_service - # starts its service. - neutron = self.neutronv2 - neutron.stop() - - self.assertEqual(neutron.refresh_request_queue.qsize(), 0) - neutron.request_refresh() - self.assertEqual(neutron.refresh_request_queue.qsize(), 1) - neutron.start() - - neutron.request_refresh() - f = lambda: neutron.refresh_request_queue.qsize() - helper.retry_check_function_return_value(f, 0) - - def test_datasource_poll(self): - neutron = self.neutronv2 - neutron.stop() - neutron._translate_ports({'ports': []}) - self.assertEqual(len(neutron.state['ports']), 0) - neutron.start() - f = lambda: len(neutron.state['ports']) - helper.retry_check_function_return_value_not_eq(f, 0) - - def test_library_service(self): - # NOTE(ekcs): only the most basic test right now, more detailed testing - # done in test_library_service.py - res = self.library.get_policies() - self.assertEqual(res, []) - - -class APILocalRouting(BaseTestPolicyCongress): - - def setUp(self): - super(APILocalRouting, self).setUp() - - # set up second API+PE node - self.services = tests_api_base.setup_config( - with_fake_datasource=False, node_id='testnode2', - same_partition_as_node=self.node) - self.api2 = self.services['api'] - self.node2 = self.services['node'] - self.engine2 = self.services['engine'] - self.data = self.services['data'] - - # add different data to two PE instances - # going directly to agnostic not via API to make sure faulty API - # routing (subject of the test) would not affect test accuracy - self.engine.create_policy('policy') - self.engine2.create_policy('policy') - self.engine.insert('p(1) :- NOT q()', 'policy') - # self.engine1.insert('p(1)', 'policy') - self.engine2.insert('p(2) :- NOT q()', 'policy') - self.engine2.insert('p(3) :- NOT q()', 'policy') - - def test_intranode_pe_routing(self): - for i in range(0, 5): # run multiple times (non-determinism) - result = self.api['api-row'].get_items( - {}, {'policy_id': 'policy', 'table_id': 'p'}) - self.assertEqual(len(result['results']), 1) - result = self.api2['api-row'].get_items( - {}, {'policy_id': 'policy', 'table_id': 'p'}) - self.assertEqual(len(result['results']), 2) - - def test_non_PE_service_reachable(self): - # intranode - result = self.api['api-row'].get_items( - {}, {'ds_id': 'neutron', 'table_id': 'ports'}) - self.assertEqual(len(result['results']), 1) - - # internode - result = self.api2['api-row'].get_items( - {}, {'ds_id': 'neutron', 'table_id': 'ports'}) - 
self.assertEqual(len(result['results']), 1) - - def test_internode_pe_routing(self): - '''Test reaching the internode PE when the intranode PE is unavailable.''' - self.node.unregister_service(api_base.ENGINE_SERVICE_ID) - result = self.api['api-row'].get_items( - {}, {'policy_id': 'policy', 'table_id': 'p'}) - self.assertEqual(len(result['results']), 2) - result = self.api2['api-row'].get_items( - {}, {'policy_id': 'policy', 'table_id': 'p'}) - self.assertEqual(len(result['results']), 2) - - -class TestPolicyExecute(BaseTestPolicyCongress): - - def setUp(self): - super(TestPolicyExecute, self).setUp() - self.nova = self._register_test_datasource('nova') - - def _register_test_datasource(self, name): - args = helper.datasource_openstack_args() - if name == 'nova': - ds = nova_driver.NovaDriver('nova', args=args) - if name == 'neutron': - ds = neutronv2_driver.NeutronV2Driver('neutron', args=args) - ds.update_from_datasource = mock.MagicMock() - return ds - - def test_policy_execute(self): - class NovaClient(object): - def __init__(self, testkey): - self.testkey = testkey - - def disconnectNetwork(self, arg1): - LOG.info("disconnectNetwork called on %s", arg1) - self.testkey = "arg1=%s" % arg1 - - nova_client = NovaClient("testing") - nova = self.nova - nova.nova_client = nova_client - self.node.register_service(nova) - - # insert rule and data - self.api['api-policy'].add_item({'name': 'alice'}, {}) - (id1, _) = self.api['api-rule'].add_item( - {'rule': 'execute[nova:disconnectNetwork(x)] :- q(x)'}, {}, - context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 0) - (id2, _) = self.api['api-rule'].add_item( - {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 1) - ans = "arg1=1" - f = lambda: nova.nova_client.testkey - helper.retry_check_function_return_value(f, ans) - - # insert more data - self.api['api-rule'].add_item( - {'rule': 'q(2)'}, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 2) - ans = "arg1=2" - f = lambda: nova.nova_client.testkey - helper.retry_check_function_return_value(f, ans) - - # insert irrelevant data - self.api['api-rule'].add_item( - {'rule': 'r(3)'}, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 2) - - # delete relevant data - self.api['api-rule'].delete_item( - id2, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 2) - - # delete policy rule - self.api['api-rule'].delete_item( - id1, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 2) - - def test_policy_execute_data_first(self): - class NovaClient(object): - def __init__(self, testkey): - self.testkey = testkey - - def disconnectNetwork(self, arg1): - LOG.info("disconnectNetwork called on %s", arg1) - self.testkey = "arg1=%s" % arg1 - - nova_client = NovaClient(None) - nova = self.nova - nova.nova_client = nova_client - self.node.register_service(nova) - - # insert rule and data - self.api['api-policy'].add_item({'name': 'alice'}, {}) - self.api['api-rule'].add_item( - {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 0) - self.api['api-rule'].add_item( - {'rule': 'execute[nova:disconnectNetwork(x)] :- q(x)'}, {}, - context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 1) - ans = "arg1=1" - f = lambda: nova.nova_client.testkey - helper.retry_check_function_return_value(f, ans) - - def
test_policy_execute_dotted(self): - class NovaClient(object): - def __init__(self, testkey): - self.testkey = testkey - self.servers = ServersClass() - - class ServersClass(object): - def __init__(self): - self.ServerManager = ServerManagerClass() - - class ServerManagerClass(object): - def __init__(self): - self.testkey = None - - def pause(self, id_): - self.testkey = "arg1=%s" % id_ - - nova_client = NovaClient(None) - nova = self.nova - nova.nova_client = nova_client - self.node.register_service(nova) - - self.api['api-policy'].add_item({'name': 'alice'}, {}) - self.api['api-rule'].add_item( - {'rule': 'execute[nova:servers.ServerManager.pause(x)] :- q(x)'}, - {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 0) - self.api['api-rule'].add_item( - {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 1) - ans = "arg1=1" - f = lambda: nova.nova_client.servers.ServerManager.testkey - helper.retry_check_function_return_value(f, ans) - - def test_policy_execute_no_args(self): - class NovaClient(object): - def __init__(self, testkey): - self.testkey = testkey - - def disconnectNetwork(self): - LOG.info("disconnectNetwork called") - self.testkey = "noargs" - - nova_client = NovaClient(None) - nova = self.nova - nova.nova_client = nova_client - self.node.register_service(nova) - - # Note: this probably isn't the behavior we really want. - # But at least we have a test documenting that behavior. - - # insert rule and data - self.api['api-policy'].add_item({'name': 'alice'}, {}) - (id1, rule1) = self.api['api-rule'].add_item( - {'rule': 'execute[nova:disconnectNetwork()] :- q(x)'}, {}, - context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 0) - (id2, rule2) = self.api['api-rule'].add_item( - {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 1) - ans = "noargs" - f = lambda: nova.nova_client.testkey - helper.retry_check_function_return_value(f, ans) - - # insert more data (which DOES NOT cause an execution) - (id3, rule3) = self.api['api-rule'].add_item( - {'rule': 'q(2)'}, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 1) - - # delete all data - self.api['api-rule'].delete_item( - id2, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 1) - - self.api['api-rule'].delete_item( - id3, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 1) - - # insert data (which now DOES cause an execution) - (id4, rule3) = self.api['api-rule'].add_item( - {'rule': 'q(3)'}, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 2) - ans = "noargs" - f = lambda: nova.nova_client.testkey - helper.retry_check_function_return_value(f, ans) - - # delete policy rule - self.api['api-rule'].delete_item( - id1, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 2) - - def test_neutron_policy_execute(self): - class NeutronClient(object): - def __init__(self, testkey): - self.testkey = testkey - - def disconnectNetwork(self, arg1): - LOG.info("disconnectNetwork called on %s", arg1) - self.testkey = "arg1=%s" % arg1 - - neutron_client = NeutronClient(None) - neutron = self.neutronv2 - neutron.neutron = neutron_client - - # insert rule and data - self.api['api-policy'].add_item({'name': 'alice'}, {}) - (id1, _) = self.api['api-rule'].add_item( - {'rule': 
'execute[neutron:disconnectNetwork(x)] :- q(x)'}, {}, - context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 0) - (id2, _) = self.api['api-rule'].add_item( - {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'}) - self.assertEqual(len(self.engine.logger.messages), 1) - ans = "arg1=1" - f = lambda: neutron.neutron.testkey - helper.retry_check_function_return_value(f, ans) - - def test_neutron_policy_poll_and_subscriptions(self): - """Test polling and publishing of neutron updates.""" - policy = self.engine.DEFAULT_THEORY - neutron2 = self._create_neutron_mock('neutron2') - self.engine.initialize_datasource('neutron', - self.neutronv2.get_schema()) - self.engine.initialize_datasource('neutron2', - self.neutronv2.get_schema()) - str_rule = ('p(x0, y0) :- neutron:networks(x0, x1, x2, x3, x4, x5), ' - 'neutron2:networks(y0, y1, y2, y3, y4, y5)') - rule = {'rule': str_rule, 'name': 'testrule1', 'comment': 'test'} - self.api['api-rule'].add_item(rule, {}, context={'policy_id': policy}) - # Test policy subscriptions - subscriptions = self.engine.subscription_list() - self.assertEqual(sorted([('neutron', 'networks'), - ('neutron2', 'networks')]), sorted(subscriptions)) - # Test multiple instances - self.neutronv2.poll() - neutron2.poll() - ans = ('p("240ff9df-df35-43ae-9df5-27fae87f2492", ' - ' "240ff9df-df35-43ae-9df5-27fae87f2492") ') - helper.retry_check_db_equal(self.engine, 'p(x, y)', ans, target=policy) diff --git a/congress/tests/test_server.py b/congress/tests/test_server.py deleted file mode 100644 index b2c690b5..00000000 --- a/congress/tests/test_server.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) 2014 VMware -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import socket - -import mock -from oslo_config import cfg -import testtools - -from congress.common import eventlet_server - - -class APIServerTest(testtools.TestCase): - - @mock.patch('paste.deploy.loadapp') - @mock.patch('eventlet.listen') - @mock.patch('socket.getaddrinfo') - def test_keepalive_unset(self, mock_getaddrinfo, mock_listen, mock_app): - mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)] - mock_sock = mock.Mock() - mock_sock.setsockopt = mock.Mock() - mock_app.return_value = mock.MagicMock() - - mock_listen.return_value = mock_sock - server = eventlet_server.APIServer('/path/to/paste', 'api-server', - host=cfg.CONF.bind_host, - port=cfg.CONF.bind_port) - server.start() - self.assertTrue(mock_listen.called) - self.assertTrue(mock_app.called) - self.assertFalse(mock_sock.setsockopt.called) - - @mock.patch('paste.deploy.loadapp') - @mock.patch('eventlet.listen') - @mock.patch('socket.getaddrinfo') - def test_keepalive_set(self, mock_getaddrinfo, mock_listen, mock_app): - mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)] - mock_sock = mock.Mock() - mock_sock.setsockopt = mock.Mock() - mock_app.return_value = mock.MagicMock() - - mock_listen.return_value = mock_sock - server = eventlet_server.APIServer('/path/to/paste', 'api-server', - host=cfg.CONF.bind_host, - port=cfg.CONF.bind_port, - keepalive=True) - server.start() - mock_sock.setsockopt.assert_called_once_with(socket.SOL_SOCKET, - socket.SO_KEEPALIVE, - 1) - self.assertTrue(mock_listen.called) - self.assertTrue(mock_app.called) - - @mock.patch('paste.deploy.loadapp') - @mock.patch('eventlet.listen') - @mock.patch('socket.getaddrinfo') - def test_keepalive_and_keepidle_set(self, mock_getaddrinfo, mock_listen, - mock_app): - mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)] - mock_sock = mock.Mock() - mock_sock.setsockopt = mock.Mock() - mock_app.return_value = mock.MagicMock() - - mock_listen.return_value = mock_sock - server = eventlet_server.APIServer('/path/to/paste', 'api-server', - host=cfg.CONF.bind_host, - port=cfg.CONF.bind_port, - keepalive=True, - keepidle=1) - server.start() - - # keepidle isn't available in the OS X version of eventlet - if hasattr(socket, 'TCP_KEEPIDLE'): - self.assertEqual(mock_sock.setsockopt.call_count, 2) - - # Test the last set of call args i.e. for the keepidle - mock_sock.setsockopt.assert_called_with(socket.IPPROTO_TCP, - socket.TCP_KEEPIDLE, - 1) - else: - self.assertEqual(mock_sock.setsockopt.call_count, 1) - - self.assertTrue(mock_listen.called) - self.assertTrue(mock_app.called) diff --git a/congress/tests/test_utils.py b/congress/tests/test_utils.py deleted file mode 100644 index 584d3311..00000000 --- a/congress/tests/test_utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2014 VMware -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
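For reference, the keepalive behavior these server tests assert on reduces to two setsockopt calls. A sketch of the expected configuration (the helper name is illustrative), with the same hasattr guard the test uses because TCP_KEEPIDLE is missing on some platforms such as OS X:

    import socket

    def configure_keepalive(sock, keepidle=600):  # illustrative helper
        # enable TCP keepalive probes on the listening socket
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        # where the platform exposes it, shorten the idle time before
        # the first keepalive probe is sent
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                            keepidle)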
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import sys - -import testtools - -import congress.utils as utils - - -class UtilsTest(testtools.TestCase): - - def test_value_to_congress(self): - self.assertEqual("abc", utils.value_to_congress("abc")) - self.assertEqual("True", utils.value_to_congress(True)) - self.assertEqual("False", utils.value_to_congress(False)) - self.assertEqual(0, utils.value_to_congress(0)) - self.assertEqual(1, utils.value_to_congress(1)) - self.assertEqual(123, utils.value_to_congress(123)) - if sys.version < '3': - self.assertEqual(456.0, utils.value_to_congress(456.0)) - - def test_pretty_rule(self): - test_rule = "\t \n head(1, 2)\t \n " - expected = "head(1, 2)" - self.assertEqual(utils.pretty_rule(test_rule), expected) - - test_rule = "\t \n head(1, 2)\t \n :- \t \n" - expected = "head(1, 2)" - self.assertEqual(utils.pretty_rule(test_rule), expected) - - test_rule = ("\t \n server_with_bad_flavor(id)\t \n :- \t \n " - "nova:servers(id=id,flavor_id=flavor_id), \t \n " - "nova:flavors(id=flavor_id, name=flavor), " - "not permitted_flavor(flavor)\t \n ") - expected = ("server_with_bad_flavor(id) :-\n" - " nova:servers(id=id,flavor_id=flavor_id),\n" - " nova:flavors(id=flavor_id, name=flavor),\n" - " not permitted_flavor(flavor)") - self.assertEqual(utils.pretty_rule(test_rule), expected) diff --git a/congress/utils.py b/congress/utils.py deleted file mode 100644 index f6540585..00000000 --- a/congress/utils.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utilities and helper functions.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import contextlib -import json -import os -import shutil -import tempfile - -from oslo_config import cfg -from oslo_log import log as logging -import six - -LOG = logging.getLogger(__name__) - -utils_opts = [ - cfg.StrOpt('tempdir', - help='Explicitly specify the temporary working directory'), -] -CONF = cfg.CONF -CONF.register_opts(utils_opts) - - -# Note(thread-safety): blocking function -@contextlib.contextmanager -def tempdir(**kwargs): - argdict = kwargs.copy() - if 'dir' not in argdict: - argdict['dir'] = CONF.tempdir - tmpdir = tempfile.mkdtemp(**argdict) - try: - yield tmpdir - finally: - try: - shutil.rmtree(tmpdir) - except OSError as e: - LOG.error(('Could not remove tmpdir: %s'), e) - - -def value_to_congress(value): - if isinstance(value, six.string_types): - # TODO(ayip): This throws away high unicode data because congress does - # not have full support for unicode yet. We'll need to fix this to - # handle unicode coming from datasources. 
- try: - six.text_type(value).encode('ascii') - except UnicodeEncodeError: - LOG.warning('Ignoring non-ascii characters') - # Py3: decode back into str for compat (bytes != str) - return six.text_type(value).encode('ascii', 'ignore').decode('ascii') - # Check for bool before int, because True and False are also ints. - elif isinstance(value, bool): - return str(value) - elif (isinstance(value, six.integer_types) or - isinstance(value, float)): - return value - return str(value) - - -# Note(thread-safety): blocking function -def create_datasource_policy(bus, datasource, engine): - # Get the schema for the datasource over RPC - # Note(thread-safety): blocking call - schema = bus.rpc(datasource, 'get_datasource_schema', - {'source_id': datasource}) - # Create the policy and set the schema once the datasource is created. - args = {'name': datasource, 'schema': schema} - # Note(thread-safety): blocking call - bus.rpc(engine, 'initialize_datasource', args) - - -def get_root_path(): - return os.path.dirname(os.path.dirname(__file__)) - - -class Location(object): - """A location in the program source code.""" - - __slots__ = ['line', 'col'] - - def __init__(self, line=None, col=None, obj=None): - # take the position from obj if provided; explicit args override it - try: - self.line = obj.location.line - self.col = obj.location.col - except AttributeError: - self.line = None - self.col = None - if line is not None: - self.line = line - if col is not None: - self.col = col - - def __str__(self): - s = "" - if self.line is not None: - s += " line: {}".format(self.line) - if self.col is not None: - s += " col: {}".format(self.col) - return s - - def __repr__(self): - return "Location(line={}, col={})".format( - repr(self.line), repr(self.col)) - - def __hash__(self): - return hash(('Location', hash(self.line), hash(self.col))) - - -def pretty_json(data): - print(json.dumps(data, sort_keys=True, - indent=4, separators=(',', ': '))) - - -def pretty_rule(rule_str): - # remove line breaks - rule_str = ''.join( - [line.strip() for line in rule_str.strip().splitlines()]) - - head_and_body = rule_str.split(':-') - - # drop empty body - head_and_body = [item.strip() - for item in head_and_body if len(item.strip()) > 0] - - head = head_and_body[0] - if len(head_and_body) == 1: - return head - else: - body = head_and_body[1] - # split the body into literals by splitting on ')' - body_list = body.split(')') - body_list = body_list[:-1] # drop the part after the final ')' - - new_body_list = [] - for literal in body_list: - # remove commas between literals - if literal[0] == ',': - literal = literal[1:] - # add back the ')', also add an indent - new_body_list.append(' ' + literal.strip() + ')') - - pretty_rule_str = head + " :-\n" + ",\n".join(new_body_list) - return pretty_rule_str diff --git a/congress/version.py b/congress/version.py deleted file mode 100644 index 4d3ebb35..00000000 --- a/congress/version.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
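The coercion rules in value_to_congress and the layout produced by pretty_rule are easiest to see by example; the expected values below match the assertions in test_utils.py earlier in this diff:

    from congress import utils

    # Booleans become strings, numbers pass through unchanged, and other
    # values are stringified (non-ascii characters are dropped).
    assert utils.value_to_congress(True) == "True"
    assert utils.value_to_congress(123) == 123
    assert utils.value_to_congress("abc") == "abc"

    # pretty_rule() strips stray whitespace and puts each body literal
    # on its own indented line:
    print(utils.pretty_rule("head(x) :-  p(x),  not q(x)"))
    # head(x) :-
    #     p(x),
    #     not q(x)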
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import pbr.version - -version_info = pbr.version.VersionInfo('congress') diff --git a/congress_dashboard/README.md b/congress_dashboard/README.md deleted file mode 100644 index 069dce95..00000000 --- a/congress_dashboard/README.md +++ /dev/null @@ -1,25 +0,0 @@ -Congress Dashboard ------------------- - -Congress Dashboard is an extension for OpenStack Dashboard that provides a UI -for Congress. With congress-dashboard, a user can easily write the -policies and rules for governing the cloud. - -Setup Instructions ------------------- - -These instructions assume that Horizon is already installed and that its -installation folder is <horizon>. Detailed information on how to install -Horizon can be found at -http://docs.openstack.org/developer/horizon/quickstart.html#setup. - -To integrate congress with horizon, copy the files in -<congress_dashboard>/enabled to <horizon>/openstack_dashboard/local/enabled/: - -$ cp -b <congress_dashboard>/enabled/_50_policy.py <horizon>/openstack_dashboard/local/enabled/ -$ cp -b <congress_dashboard>/enabled/_60_policies.py <horizon>/openstack_dashboard/local/enabled/ -$ cp -b <congress_dashboard>/enabled/_70_datasources.py <horizon>/openstack_dashboard/local/enabled/ - -Restart the Apache server: - -$ sudo service apache2 restart - diff --git a/congress_dashboard/api/__init__.py b/congress_dashboard/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_dashboard/api/congress.py b/congress_dashboard/api/congress.py deleted file mode 100644 index 4768e1a8..00000000 --- a/congress_dashboard/api/congress.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright 2014 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -from congressclient.v1 import client as congress_client -from django.conf import settings -import keystoneauth1.identity.v2 as v2 -import keystoneauth1.identity.v3 as v3 -import keystoneauth1.session as kssession -from openstack_dashboard.api import base -from oslo_log import log as logging - - -LITERALS_SEPARATOR = '),' -RULE_SEPARATOR = ':-' -TABLE_SEPARATOR = ':' - -LOG = logging.getLogger(__name__) - - -def _set_id_as_name_if_empty(apidict, length=0): - try: - if not apidict._apidict.get('name'): - id = apidict._apidict['id'] - if length: - id = id[:length] - apidict._apidict['name'] = '(%s)' % id - else: - apidict._apidict['name'] = id - except KeyError: - pass - - -class PolicyAPIDictWrapper(base.APIDictWrapper): - def set_id_as_name_if_empty(self): - _set_id_as_name_if_empty(self) - - def set_id_if_empty(self, id): - apidict_id = self._apidict.get('id') - if not apidict_id or apidict_id == "None": - self._apidict['id'] = id - - def set_value(self, key, value): - self._apidict[key] = value - - def delete_by_key(self, key): - del self._apidict[key] - - -class PolicyRule(PolicyAPIDictWrapper): - """Wrapper for a Congress policy's rule.""" - def set_id_as_name_if_empty(self): - pass - - -class PolicyTable(PolicyAPIDictWrapper): - """Wrapper for a Congress policy's data table.""" - def set_policy_details(self, policy): - self._apidict['policy_name'] = policy['name'] - self._apidict['policy_owner_id'] = policy['owner_id'] - - -def congressclient(request): - """Instantiate Congress client.""" - auth_url = getattr(settings, 'OPENSTACK_KEYSTONE_URL') - user = request.user - session = get_keystone_session(auth_url, user) - region_name = user.services_region - - kwargs = { - 'session': session, - 'auth': None, - 'interface': 'publicURL', - 'service_type': 'policy', - 'region_name': region_name - } - return congress_client.Client(**kwargs) - - -def get_keystone_session(auth_url, user): - if auth_url[-3:] == '/v3': - auth = v3.Token(auth_url, user.token.id, project_id=user.tenant_id) - else: - auth = v2.Token(auth_url, user.token.id, tenant_id=user.tenant_id, - tenant_name=user.tenant_name) - - session = kssession.Session(auth=auth) - return session - - -def policies_list(request): - """List all policies.""" - client = congressclient(request) - policies_list = client.list_policy() - results = policies_list['results'] - policies = [] - for p in results: - policy = PolicyAPIDictWrapper(p) - # Policies currently have a name but not necessarily a non-"None" id. - # Use the name to identify the policy, needed to differentiate them in - # DataTables. - policy.set_id_if_empty(policy.get('name')) - policies.append(policy) - return policies - - -def policy_create(request, args): - """Create a policy with the given properties.""" - client = congressclient(request) - policy = client.create_policy(args) - return policy - - -def policy_delete(request, policy_id): - """Delete a policy by id.""" - client = congressclient(request) - policy = client.delete_policy(policy_id) - return policy - - -def policy_get(request, policy_name): - """Get a policy by name.""" - # TODO(jwy): Use congress.show_policy() once system policies have unique - # IDs. 
- policies = policies_list(request) - for p in policies: - if p['name'] == policy_name: - return p - - -def policy_rule_create(request, policy_name, body=None): - """Create a rule in the given policy, with the given properties.""" - client = congressclient(request) - rule = client.create_policy_rule(policy_name, body=body) - return rule - - -def policy_rule_delete(request, policy_name, rule_id): - """Delete a rule by id, from the given policy.""" - client = congressclient(request) - rule = client.delete_policy_rule(policy_name, rule_id) - return rule - - -def policy_rules_list(request, policy_name): - """List all rules in a policy, given by name.""" - client = congressclient(request) - policy_rules_list = client.list_policy_rules(policy_name) - results = policy_rules_list['results'] - return [PolicyRule(r) for r in results] - - -def policy_tables_list(request, policy_name): - """List all data tables in a policy, given by name.""" - client = congressclient(request) - policy_tables_list = client.list_policy_tables(policy_name) - results = policy_tables_list['results'] - return [PolicyTable(t) for t in results] - - -def policy_table_get(request, policy_name, table_name): - """Get a policy table in a policy, given by name.""" - client = congressclient(request) - return client.show_policy_table(policy_name, table_name) - - -def policy_rows_list(request, policy_name, table_name): - """List all rows in a policy's data table, given by name.""" - client = congressclient(request) - policy_rows_list = client.list_policy_rows(policy_name, table_name) - results = policy_rows_list['results'] - - policy_rows = [] - # Policy table rows currently don't have ids. However, the DataTable object - # requires an id for the table to get rendered properly. Otherwise, the - # same contents are displayed for every row in the table. Assign the rows - # ids here. - id = 0 - for row in results: - new_row = PolicyAPIDictWrapper(row) - new_row.set_id_if_empty(id) - id += 1 - policy_rows.append(new_row) - return policy_rows - - -def policy_table_schema_get(request, policy_name, table_name): - """Get the schema for a policy table, based on the first matching rule.""" - column_names = [] - rules = policy_rules_list(request, policy_name) - # There might be multiple rules that use the same name in the head. Pick - # the first matching one, which is what the policy engine currently does. - for rule in rules: - rule_def = rule['rule'] - head, _ = rule_def.split(RULE_SEPARATOR) - if head.strip().startswith('%s(' % table_name): - start = head.index('(') + 1 - end = head.index(')') - column_names = head[start:end].split(',') - break - - schema = {'table_id': table_name} - schema['columns'] = [{'name': name.strip(), 'description': None} - for name in column_names] - return schema - - -def datasources_list(request): - """List all the data sources.""" - client = congressclient(request) - datasources_list = client.list_datasources() - datasources = datasources_list['results'] - return [PolicyAPIDictWrapper(d) for d in datasources] - - -def datasource_get(request, datasource_id): - """Get a data source by id.""" - # TODO(jwy): Need API in congress_client to retrieve data source by id. 
- datasources = datasources_list(request) - for d in datasources: - if d['id'] == datasource_id: - return d - - -def datasource_get_by_name(request, datasource_name): - """Get a data source by name.""" - datasources = datasources_list(request) - for d in datasources: - if d['name'] == datasource_name: - return d - - -def datasource_tables_list(request, datasource_id): - """List all data tables in a data source, given by id.""" - client = congressclient(request) - datasource_tables_list = client.list_datasource_tables(datasource_id) - results = datasource_tables_list['results'] - return [PolicyAPIDictWrapper(t) for t in results] - - -def datasource_rows_list(request, datasource_id, table_name): - """List all rows in a data source's data table, given by id.""" - client = congressclient(request) - datasource_rows_list = client.list_datasource_rows(datasource_id, - table_name) - results = datasource_rows_list['results'] - datasource_rows = [] - id = 0 - for row in results: - new_row = PolicyAPIDictWrapper(row) - new_row.set_id_if_empty(id) - id += 1 - datasource_rows.append(new_row) - return datasource_rows - - -def datasource_schema_get(request, datasource_id): - """Get the schema for all tables in the given data source.""" - client = congressclient(request) - return client.show_datasource_schema(datasource_id) - - -def datasource_table_schema_get(request, datasource_id, table_name): - """Get the schema for a data source table.""" - client = congressclient(request) - return client.show_datasource_table_schema(datasource_id, table_name) - - -def datasource_table_schema_get_by_name(request, datasource_name, table_name): - """Get the schema for a data source table.""" - datasource = datasource_get_by_name(request, datasource_name) - client = congressclient(request) - return client.show_datasource_table_schema(datasource['id'], table_name) - - -def datasource_statuses_list(request): - client = congressclient(request) - datasources_list = client.list_datasources() - datasources = datasources_list['results'] - ds_status = [] - - for ds in datasources: - try: - status = client.list_datasource_status(ds['id']) - except Exception: - LOG.exception("Exception while getting the status") - raise - wrapper = PolicyAPIDictWrapper(ds) - wrapper.set_value('service', ds['name']) - for key in status: - value = status[key] - wrapper.set_value(key, value) - ds_status.append(wrapper) - return ds_status diff --git a/congress_dashboard/datasources/__init__.py b/congress_dashboard/datasources/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_dashboard/datasources/panel.py b/congress_dashboard/datasources/panel.py deleted file mode 100644 index 8f3e06cd..00000000 --- a/congress_dashboard/datasources/panel.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2014 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
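Taken together, the helpers in api/congress.py form a thin facade over python-congressclient. A minimal sketch of how a caller might compose them, assuming a Horizon-authenticated Django request object; the function, policy name, and table name here are illustrative, not part of the deleted code.

from congress_dashboard.api import congress

def classification_errors(request):
    # policies_list() wraps client.list_policy() and back-fills missing ids
    # with the policy name, so 'name' is a safe key to match on.
    for policy in congress.policies_list(request):
        if policy['name'] == 'classification':
            # Rows come back as PolicyAPIDictWrapper objects with synthetic
            # ids assigned, ready to feed a Horizon DataTable.
            return congress.policy_rows_list(request, policy['name'],
                                             'error')
    return []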
- -from django.utils.translation import ugettext_lazy as _ -import horizon -from openstack_dashboard.dashboards.admin import dashboard - - -class DataSources(horizon.Panel): - name = _("Data Sources") - slug = "datasources" - permissions = ('openstack.roles.admin',) - - -dashboard.Admin.register(DataSources) diff --git a/congress_dashboard/datasources/tables.py b/congress_dashboard/datasources/tables.py deleted file mode 100644 index 50d09862..00000000 --- a/congress_dashboard/datasources/tables.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2014 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from django.core.urlresolvers import reverse -from django.template.defaultfilters import unordered_list -from django.utils.translation import ugettext_lazy as _ -from horizon import tables - - -def get_resource_url(obj): - return reverse('horizon:admin:datasources:datasource_table_detail', - args=(obj['datasource_id'], obj['table_id'])) - - -class DataSourcesTablesTable(tables.DataTable): - name = tables.Column("name", verbose_name=_("Table Name"), - link=get_resource_url) - datasource_name = tables.Column("datasource_name", - verbose_name=_("Service")) - datasource_driver = tables.Column("datasource_driver", - verbose_name=_("Driver")) - - class Meta(object): - name = "datasources_tables" - verbose_name = _("Service Data") - hidden_title = False - - -def get_policy_link(datum): - return reverse('horizon:admin:policies:detail', - args=(datum['policy_name'],)) - - -def get_policy_table_link(datum): - return reverse('horizon:admin:datasources:policy_table_detail', - args=(datum['policy_name'], datum['name'])) - - -class DataSourceRowsTable(tables.DataTable): - class Meta(object): - name = "datasource_rows" - verbose_name = _("Rows") - hidden_title = False - - -class DataSourceStatusesTable(tables.DataTable): - datasource_name = tables.Column("service", - verbose_name=_("Service")) - last_updated = tables.Column("last_updated", - verbose_name=_("Last Updated")) - subscriptions = tables.Column("subscriptions", - verbose_name=_("Subscriptions"), - wrap_list=True, filters=(unordered_list,)) - last_error = tables.Column("last_error", verbose_name=_("Last Error")) - subscribers = tables.Column("subscribers", verbose_name=_("Subscribers"), - wrap_list=True, filters=(unordered_list,)) - initialized = tables.Column("initialized", verbose_name=_("Initialized")) - number_of_updates = tables.Column("number_of_updates", - verbose_name=_("Number of Updates")) - - class Meta(object): - name = "service_status" - verbose_name = _("Service Status") - hidden_title = False diff --git a/congress_dashboard/datasources/templates/datasources/_detail_overview.html b/congress_dashboard/datasources/templates/datasources/_detail_overview.html deleted file mode 100644 index 70cdfa71..00000000 --- a/congress_dashboard/datasources/templates/datasources/_detail_overview.html +++ /dev/null @@ -1,14 +0,0 @@ -{% load i18n %} - -

{% trans "Table Overview" %}

- -
-
-
{{ datasource_type }} {% trans "Data Source" %}
-
{{ datasource_name }}
-
{% trans "Name" %}
-
{{ table_name }}
-
{% trans "ID" %}
-
{{ id|default:table_name }}
-
-
diff --git a/congress_dashboard/datasources/templates/datasources/detail.html b/congress_dashboard/datasources/templates/datasources/detail.html deleted file mode 100644 index d0196adf..00000000 --- a/congress_dashboard/datasources/templates/datasources/detail.html +++ /dev/null @@ -1,14 +0,0 @@ -{% extends 'base.html' %} -{% load i18n %} -{% block title %}{% trans "Data Source Table Details" %}{% endblock %} - -{% block page_header %} - {% include "horizon/common/_page_header.html" with title=_("Data Source Table Details: ")|add:table_name %} -{% endblock page_header %} - -{% block main %} - {% include "admin/datasources/_detail_overview.html" %} -
- {{ datasource_rows_table.render }} -
-{% endblock %} diff --git a/congress_dashboard/datasources/templates/datasources/index.html b/congress_dashboard/datasources/templates/datasources/index.html deleted file mode 100644 index ad153b16..00000000 --- a/congress_dashboard/datasources/templates/datasources/index.html +++ /dev/null @@ -1,19 +0,0 @@ -{% extends 'base.html' %} -{% load i18n %} -{% block title %}{% trans "Data Sources" %}{% endblock %} - -{% block page_header %} - {% include "horizon/common/_page_header.html" with title=_("Data Sources") %} -{% endblock page_header %} - -{% block main %} -
- {{ policies_tables_table.render }} -
-
- {{ service_status_table.render }} -
-
- {{ datasources_tables_table.render }} -
-{% endblock %}
diff --git a/congress_dashboard/datasources/urls.py b/congress_dashboard/datasources/urls.py
deleted file mode 100644
index 045cb34c..00000000
--- a/congress_dashboard/datasources/urls.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2014 VMware.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from django.conf.urls import patterns
-from django.conf.urls import url
-
-from congress_dashboard.datasources import views
-
-
-SERVICES = (
-    r'^services/(?P<datasource_id>[^/]+)/(?P<service_table_name>[^/]+)/%s$')
-
-
-urlpatterns = patterns(
-    '',
-    url(r'^$', views.IndexView.as_view(), name='index'),
-    url(SERVICES % 'detail', views.DetailView.as_view(),
-        name='datasource_table_detail'),
-)
diff --git a/congress_dashboard/datasources/utils.py b/congress_dashboard/datasources/utils.py
deleted file mode 100644
index 23915c15..00000000
--- a/congress_dashboard/datasources/utils.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Copyright 2015 VMware.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from congress_dashboard.api import congress
-
-
-LOG = logging.getLogger(__name__)
-
-
-def _get_policy_tables(request):
-    # Return all policy tables.
-    all_tables = []
-    try:
-        # Get all the policies.
-        policies = congress.policies_list(request)
-    except Exception as e:
-        LOG.error('Unable to get list of policies: %s', str(e))
-    else:
-        try:
-            for policy in policies:
-                # Get all the tables in this policy.
-                policy_name = policy['name']
-                policy_tables = congress.policy_tables_list(request,
-                                                            policy_name)
-                # Get the names of the tables.
-                datasource_tables = []
-                for table in policy_tables:
-                    table.set_id_as_name_if_empty()
-                    table_name = table['name']
-                    # Exclude service-derived tables.
-                    if congress.TABLE_SEPARATOR not in table_name:
-                        datasource_tables.append(table['name'])
-
-                all_tables.append({'datasource': policy_name,
-                                   'tables': datasource_tables})
-        except Exception as e:
-            LOG.error('Unable to get tables for policy "%s": %s',
-                      policy_name, str(e))
-    return all_tables
-
-
-def _get_service_tables(request):
-    # Return all service tables.
-    all_tables = []
-    try:
-        # Get all the services.
-        services = congress.datasources_list(request)
-    except Exception as e:
-        LOG.error('Unable to get list of data sources: %s', str(e))
-    else:
-        try:
-            for service in services:
-                # Get all the tables in this service.
-                service_id = service['id']
-                service_tables = congress.datasource_tables_list(request,
-                                                                 service_id)
-                # Get the names of the tables.
-                datasource_tables = []
-                for table in service_tables:
-                    table.set_id_as_name_if_empty()
-                    datasource_tables.append(table['name'])
-
-                all_tables.append({'datasource': service['name'],
-                                   'tables': datasource_tables})
-        except Exception as e:
-            LOG.error('Unable to get tables for data source "%s": %s',
-                      service_id, str(e))
-    return all_tables
-
-
-def get_datasource_tables(request):
-    """Get names of all data source tables.
-
-    Example:
-    [
-        {
-            'datasource': 'classification',
-            'tables': ['error']
-        },
-        {
-            'datasource': 'neutronv2',
-            'tables': ['networks', 'ports', ...]
-        },
-        ...
-    ]
-    """
-    tables = _get_policy_tables(request)
-    tables.extend(_get_service_tables(request))
-    return tables
-
-
-def get_datasource_columns(request):
-    """Get the names of columns from all data sources.
-
-    Example:
-    [
-        {
-            'datasource': 'classification',
-            'tables': [
-                {
-                    'table': 'error',
-                    'columns': ['name']
-                }
-            ]
-        },
-        {
-            'datasource': 'neutronv2',
-            'tables': [
-                {
-                    'table': 'networks',
-                    'columns': ['id', 'tenant_id', ...],
-                },
-                ...
-            ],
-            ...
-        },
-        ...
-    ]
-    """
-    all_columns = []
-
-    # Get all the policy tables.
-    policy_tables = _get_policy_tables(request)
-    try:
-        for policy in policy_tables:
-            # Get all the columns in this policy. Unlike for the services,
-            # there's currently no congress client API to get the schema for
-            # all tables in a policy in a single call.
-            policy_name = policy['datasource']
-            tables = policy['tables']
-
-            datasource_tables = []
-            for table_name in tables:
-                # Get all the columns in this policy table.
-                schema = congress.policy_table_schema_get(request, policy_name,
                                                          table_name)
-                columns = [c['name'] for c in schema['columns']]
-                datasource_tables.append({'table': table_name,
-                                          'columns': columns})
-
-            all_columns.append({'datasource': policy_name,
-                                'tables': datasource_tables})
-    except Exception as e:
-        LOG.error('Unable to get schema for policy "%s" table "%s": %s',
-                  policy_name, table_name, str(e))
-
-    try:
-        # Get all the services.
-        services = congress.datasources_list(request)
-    except Exception as e:
-        LOG.error('Unable to get list of data sources: %s', str(e))
-    else:
-        try:
-            for service in services:
-                # Get the schema for this service.
-                service_id = service['id']
-                service_name = service['name']
-                schema = congress.datasource_schema_get(request, service_id)
-
-                datasource_tables = []
-                for table in schema['tables']:
-                    # Get the columns for this table.
-                    columns = [c['name'] for c in table['columns']]
-                    datasource_table = {'table': table['table_id'],
-                                        'columns': columns}
-                    datasource_tables.append(datasource_table)
-
-                all_columns.append({'datasource': service_name,
-                                    'tables': datasource_tables})
-        except Exception as e:
-            LOG.error('Unable to get schema for data source "%s": %s',
-                      service_id, str(e))
-
-    return all_columns
diff --git a/congress_dashboard/datasources/views.py b/congress_dashboard/datasources/views.py
deleted file mode 100644
index 7074a8dd..00000000
--- a/congress_dashboard/datasources/views.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# Copyright 2014 VMware.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
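The SERVICES pattern in urls.py above is what delivers the two path segments into the view's kwargs. A small sketch of the round trip, assuming the group names as reconstructed above; the datasource id is made up.

from django.core.urlresolvers import reverse

# reverse() fills the two named groups of the SERVICES pattern.
url = reverse('horizon:admin:datasources:datasource_table_detail',
              args=('3cc9c1de', 'ports'))
# A GET on this URL invokes DetailView (defined below) with
# kwargs == {'datasource_id': '3cc9c1de', 'service_table_name': 'ports'}.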
-
-import copy
-import logging
-
-from django.core.urlresolvers import reverse
-from django.template.defaultfilters import slugify
-from django.utils.translation import ugettext_lazy as _
-from horizon import exceptions
-from horizon import messages
-from horizon import tables
-
-from congress_dashboard.api import congress
-from congress_dashboard.datasources import tables as datasources_tables
-
-
-logger = logging.getLogger(__name__)
-
-
-class IndexView(tables.MultiTableView):
-    """List service and policy defined data."""
-    table_classes = (datasources_tables.DataSourcesTablesTable,
-                     datasources_tables.DataSourceStatusesTable,)
-    template_name = 'admin/datasources/index.html'
-
-    def get_datasources_tables_data(self):
-        try:
-            datasources = congress.datasources_list(self.request)
-        except Exception as e:
-            msg = _('Unable to get services list: %s') % str(e)
-            messages.error(self.request, msg)
-            return []
-
-        ds_temp = []
-        for ds in datasources:
-            ds_id = ds['id']
-            try:
-                ds_tables = congress.datasource_tables_list(self.request,
-                                                            ds_id)
-            except Exception as e:
-                msg_args = {'ds_id': ds_id, 'error': str(e)}
-                msg = _('Unable to get tables list for service "%(ds_id)s": '
-                        '%(error)s') % msg_args
-                messages.error(self.request, msg)
-                return []
-
-            for table in ds_tables:
-                table.set_value('datasource_id', ds_id)
-                table.set_value('datasource_name', ds['name'])
-                table.set_value('datasource_driver', ds['driver'])
-                table.set_id_as_name_if_empty()
-                # Object ids within a Horizon table must be unique. Otherwise,
-                # Horizon will cache the column values for the object by id and
-                # use the same column values for all rows with the same id.
-                table.set_value('table_id', table['id'])
-                table.set_value('id', '%s-%s' % (ds_id, table['table_id']))
-                ds_temp.append(table)
-
-        logger.debug("ds_temp %s" % ds_temp)
-        return ds_temp
-
-    def get_service_status_data(self):
-        ds = []
-        try:
-            ds = congress.datasource_statuses_list(self.request)
-            logger.debug("ds status : %s " % ds)
-        except Exception as e:
-            msg = _('Unable to get datasource status list: %s') % str(e)
-            messages.error(self.request, msg)
-        return ds
-
-
-class DetailView(tables.DataTableView):
-    """List details about and rows from a data source (service or policy)."""
-    table_class = datasources_tables.DataSourceRowsTable
-    template_name = 'admin/datasources/detail.html'
-
-    def get_data(self):
-        datasource_id = self.kwargs['datasource_id']
-        table_name = self.kwargs.get('policy_table_name')
-        is_service = False
-
-        try:
-            if table_name:
-                # Policy data table.
-                rows = congress.policy_rows_list(self.request, datasource_id,
-                                                 table_name)
-                if congress.TABLE_SEPARATOR in table_name:
-                    table_name_parts = table_name.split(
-                        congress.TABLE_SEPARATOR)
-                    maybe_datasource_name = table_name_parts[0]
-                    datasources = congress.datasources_list(self.request)
-                    for datasource in datasources:
-                        if datasource['name'] == maybe_datasource_name:
-                            # Service-derived policy data table.
-                            is_service = True
-                            datasource_id = datasource['id']
-                            table_name = table_name_parts[1]
-                            break
-            else:
-                # Service data table.
-                is_service = True
-                datasource = congress.datasource_get_by_name(
-                    self.request, datasource_id)
-                table_name = self.kwargs['service_table_name']
-                rows = congress.datasource_rows_list(
-                    self.request, datasource_id, table_name)
-        except Exception as e:
-            msg_args = {
-                'table_name': table_name,
-                'ds_id': datasource_id,
-                'error': str(e)
-            }
-            msg = _('Unable to get rows in table "%(table_name)s", data '
-                    'source "%(ds_id)s": %(error)s') % msg_args
-            messages.error(self.request, msg)
-            redirect = reverse('horizon:admin:datasources:index')
-            raise exceptions.Http302(redirect)
-
-        # Normally, in Horizon, the columns for a table are defined as
-        # attributes of the Table class. When the class is instantiated,
-        # the columns are processed during the metaclass initialization. To
-        # add columns dynamically, re-create the class from the metaclass
-        # with the added columns, re-create the Table from the new class,
-        # then reassign the Table stored in this View.
-        column_names = []
-        table_class_attrs = copy.deepcopy(dict(self.table_class.__dict__))
-        # Get schema from the server.
-        try:
-            if is_service:
-                schema = congress.datasource_table_schema_get(
-                    self.request, datasource_id, table_name)
-            else:
-                schema = congress.policy_table_schema_get(
-                    self.request, datasource_id, table_name)
-        except Exception as e:
-            msg_args = {
-                'table_name': table_name,
-                'ds_id': datasource_id,
-                'error': str(e)
-            }
-            msg = _('Unable to get schema for table "%(table_name)s", '
-                    'data source "%(ds_id)s": %(error)s') % msg_args
-            messages.error(self.request, msg)
-            redirect = reverse('horizon:admin:datasources:index')
-            raise exceptions.Http302(redirect)
-
-        columns = schema['columns']
-        row_len = 0
-        if len(rows):
-            row_len = len(rows[0].get('data', []))
-
-        if not row_len or row_len == len(columns):
-            for col in columns:
-                col_name = col['name']
-                # Attribute name for column in the class must be a valid
-                # identifier. Slugify it.
-                col_slug = slugify(col_name)
-                column_names.append(col_slug)
-                table_class_attrs[col_slug] = tables.Column(
-                    col_slug, verbose_name=col_name)
-        else:
-            # There could be another table with the same name and different
-            # arity. Divide the rows into unnamed columns. Number them for
-            # internal reference.
-            for i in xrange(0, row_len):
-                col_name = str(i)
-                column_names.append(col_name)
-                table_class_attrs[col_name] = tables.Column(
-                    col_name, verbose_name='')
-
-        # Class and object re-creation, using a new class name, the same base
-        # classes, and the new class attributes, which now include the columns.
-        columnized_table_class_name = '%s%sRows' % (
-            slugify(datasource_id).title(), slugify(table_name).title())
-        columnized_table_class = tables.base.DataTableMetaclass(
-            str(columnized_table_class_name), self.table_class.__bases__,
-            table_class_attrs)
-
-        self.table_class = columnized_table_class
-        columnized_table = columnized_table_class(self.request, **self.kwargs)
-        self._tables[columnized_table_class._meta.name] = columnized_table
-
-        # Map column names to row values.
- num_cols = len(column_names) - for row in rows: - try: - row_data = row['data'] - row.delete_by_key('data') - for i in xrange(0, num_cols): - row.set_value(column_names[i], row_data[i]) - except Exception as e: - msg_args = { - 'table_name': table_name, - 'ds_id': datasource_id, - 'error': str(e) - } - msg = _('Unable to get data for table "%(table_name)s", data ' - 'source "%(ds_id)s": %(error)s') % msg_args - messages.error(self.request, msg) - redirect = reverse('horizon:admin:datasources:index') - raise exceptions.Http302(redirect) - - return rows - - def get_context_data(self, **kwargs): - context = super(DetailView, self).get_context_data(**kwargs) - if 'policy_table_name' in kwargs: - table_name = kwargs.get('policy_table_name') - context['datasource_type'] = _('Policy') - datasource_name = kwargs['datasource_id'] - else: - table_name = kwargs['service_table_name'] - context['datasource_type'] = _('Service') - try: - datasource_id = kwargs['datasource_id'] - datasource = congress.datasource_get(self.request, - datasource_id) - datasource_name = datasource['name'] - except Exception as e: - datasource_name = datasource_id - logger.info('Failed to get data source "%s": %s' % - (datasource_id, str(e))) - context['datasource_name'] = datasource_name - context['table_name'] = table_name - return context diff --git a/congress_dashboard/enabled/_50_policy.py b/congress_dashboard/enabled/_50_policy.py deleted file mode 100644 index 57243af3..00000000 --- a/congress_dashboard/enabled/_50_policy.py +++ /dev/null @@ -1,3 +0,0 @@ -PANEL_GROUP = 'policy' -PANEL_GROUP_NAME = 'Policy' -PANEL_GROUP_DASHBOARD = 'admin' diff --git a/congress_dashboard/enabled/_60_policies.py b/congress_dashboard/enabled/_60_policies.py deleted file mode 100644 index dad327a2..00000000 --- a/congress_dashboard/enabled/_60_policies.py +++ /dev/null @@ -1,9 +0,0 @@ -PANEL = 'policies' -PANEL_DASHBOARD = 'admin' -PANEL_GROUP = 'policy' -ADD_PANEL = 'congress_dashboard.policies.panel.Policies' -ADD_INSTALLED_APPS = [ - 'congress_dashboard', -] -AUTO_DISCOVER_STATIC_FILES = True -ADD_SCSS_FILES = ['congress_dashboard/static/admin/css/policies.css'] diff --git a/congress_dashboard/enabled/_70_datasources.py b/congress_dashboard/enabled/_70_datasources.py deleted file mode 100644 index 6c92105e..00000000 --- a/congress_dashboard/enabled/_70_datasources.py +++ /dev/null @@ -1,5 +0,0 @@ -PANEL = 'datasources' -PANEL_DASHBOARD = 'admin' -PANEL_GROUP = 'policy' -ADD_PANEL = 'congress_dashboard.datasources.panel.DataSources' -AUTO_DISCOVER_STATIC_FILES = True diff --git a/congress_dashboard/policies/__init__.py b/congress_dashboard/policies/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_dashboard/policies/forms.py b/congress_dashboard/policies/forms.py deleted file mode 100644 index a3e8a73d..00000000 --- a/congress_dashboard/policies/forms.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2015 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
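DetailView above rebuilds its table class at runtime through tables.base.DataTableMetaclass so that the columns can follow whatever schema the server reports. A condensed sketch of that technique with illustrative names; this mirrors the deleted view's approach but is not the deleted code itself.

from horizon import tables

def make_rows_table(table_name, column_names):
    # Build class attributes the way DetailView does: one tables.Column per
    # schema column, plus a Meta inner class carrying the table's name.
    attrs = dict((c, tables.Column(c, verbose_name=c)) for c in column_names)
    attrs['Meta'] = type('Meta', (object,), {'name': table_name})
    # Re-run Horizon's metaclass so the new columns are processed exactly as
    # if they had been declared on a DataTable subclass.
    return tables.base.DataTableMetaclass(
        str('%sRows' % table_name.title()), (tables.DataTable,), attrs)

# e.g. PortsRows = make_rows_table('ports', ['id', 'network_id', 'status'])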
- -import logging - -from django.core.urlresolvers import reverse -from django.utils.translation import ugettext_lazy as _ -from horizon import exceptions -from horizon import forms -from horizon import messages - -from congress_dashboard.api import congress - - -LOG = logging.getLogger(__name__) - -POLICY_KIND_CHOICES = ( - ('nonrecursive', _('Nonrecursive')), - ('action', _('Action')), - ('database', _('Database')), - ('materialized', _('Materialized')), -) - - -class CreatePolicy(forms.SelfHandlingForm): - name = forms.CharField(max_length=255, label=_("Policy Name")) - kind = forms.ChoiceField(choices=POLICY_KIND_CHOICES, label=_("Kind"), - initial='nonrecursive') - description = forms.CharField(label=_("Description"), required=False, - widget=forms.Textarea(attrs={'rows': 4})) - failure_url = 'horizon:admin:policies:index' - - def handle(self, request, data): - policy_name = data['name'] - policy_description = data.get('description') - policy_kind = data.pop('kind') - LOG.info('User %s creating policy "%s" of type %s in tenant %s', - request.user.username, policy_name, policy_kind, - request.user.tenant_name) - try: - params = { - 'name': policy_name, - 'description': policy_description, - 'kind': policy_kind, - } - policy = congress.policy_create(request, params) - msg = _('Created policy "%s"') % policy_name - LOG.info(msg) - messages.success(request, msg) - except Exception as e: - msg_args = {'policy_name': policy_name, 'error': str(e)} - msg = _('Failed to create policy "%(policy_name)s": ' - '%(error)s') % msg_args - LOG.error(msg) - messages.error(self.request, msg) - redirect = reverse(self.failure_url) - raise exceptions.Http302(redirect) - return policy diff --git a/congress_dashboard/policies/panel.py b/congress_dashboard/policies/panel.py deleted file mode 100644 index f95a61ec..00000000 --- a/congress_dashboard/policies/panel.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2014 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from django.utils.translation import ugettext_lazy as _ -import horizon -from openstack_dashboard.dashboards.admin import dashboard - - -class Policies(horizon.Panel): - name = _("Policies") - slug = "policies" - permissions = ('openstack.roles.admin',) - - -dashboard.Admin.register(Policies) diff --git a/congress_dashboard/policies/rules/__init__.py b/congress_dashboard/policies/rules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_dashboard/policies/rules/tables.py b/congress_dashboard/policies/rules/tables.py deleted file mode 100644 index ebae2bdd..00000000 --- a/congress_dashboard/policies/rules/tables.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2015 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from django.core.urlresolvers import reverse -from django.template.defaultfilters import linebreaksbr -from django.utils.translation import ugettext_lazy as _ -from django.utils.translation import ungettext_lazy -from horizon import exceptions -from horizon import messages -from horizon import tables -from openstack_dashboard import policy - -from congress_dashboard.api import congress - - -LOG = logging.getLogger(__name__) - - -class CreateRule(tables.LinkAction): - name = 'create_rule' - verbose_name = _('Create Rule') - url = 'horizon:admin:policies:create_rule' - classes = ('ajax-modal',) - icon = 'plus' - policy_rules = (('policy', 'create_rule'),) - - def get_link_url(self, datum=None): - policy_name = self.table.kwargs['policy_name'] - return reverse(self.url, args=(policy_name,)) - - -class DeleteRule(policy.PolicyTargetMixin, tables.DeleteAction): - @staticmethod - def action_present(count): - return ungettext_lazy( - u'Delete Rule', - u'Delete Rules', - count - ) - - @staticmethod - def action_past(count): - return ungettext_lazy( - u'Deleted rule', - u'Deleted rules', - count - ) - - redirect_url = 'horizon:admin:policies:detail' - - def delete(self, request, obj_id): - policy_name = self.table.kwargs['policy_name'] - LOG.info('User %s deleting policy "%s" rule "%s" in tenant %s', - request.user.username, policy_name, obj_id, - request.user.tenant_name) - try: - congress.policy_rule_delete(request, policy_name, obj_id) - LOG.info('Deleted policy rule "%s"', obj_id) - except Exception as e: - msg_args = {'rule_id': obj_id, 'error': str(e)} - msg = _('Failed to delete policy rule "%(rule_id)s": ' - '%(error)s') % msg_args - LOG.error(msg) - messages.error(request, msg) - redirect = reverse(self.redirect_url, args=(policy_name,)) - raise exceptions.Http302(redirect) - - -def _format_rule(rule): - """Make rule's text more human readable.""" - head_body = rule.split(congress.RULE_SEPARATOR) - if len(head_body) < 2: - return rule - head = head_body[0] - body = head_body[1] - - # Add newline after each literal in the body. - body_literals = body.split(congress.LITERALS_SEPARATOR) - literals_break = congress.LITERALS_SEPARATOR + '\n' - new_body = literals_break.join(body_literals) - - # Add newline after the head. 
-    rules_break = congress.RULE_SEPARATOR + '\n'
-    return rules_break.join([head, new_body])
-
-
-class PolicyRulesTable(tables.DataTable):
-    id = tables.Column("id", verbose_name=_("Rule ID"))
-    name = tables.Column("name", verbose_name=_("Name"))
-    comment = tables.Column("comment", verbose_name=_("Comment"))
-    rule = tables.Column("rule", verbose_name=_("Rule"),
-                         filters=(_format_rule, linebreaksbr,))
-
-    class Meta(object):
-        name = "policy_rules"
-        verbose_name = _("Rules")
-        table_actions = (CreateRule, DeleteRule,)
-        row_actions = (DeleteRule,)
-        hidden_title = False
-
-
-def get_policy_table_link(datum):
-    return reverse('horizon:admin:policies:policy_table_detail',
-                   args=(datum['policy_name'], datum['name']))
-
-
-class PoliciesTablesTable(tables.DataTable):
-    name = tables.Column("name", verbose_name=_("Table Name"),
-                         link=get_policy_table_link)
-
-    class Meta(object):
-        name = "policies_tables"
-        verbose_name = _("Policy Table Data")
-        hidden_title = False
diff --git a/congress_dashboard/policies/rules/views.py b/congress_dashboard/policies/rules/views.py
deleted file mode 100644
index f3be6d29..00000000
--- a/congress_dashboard/policies/rules/views.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2015 VMware.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from django.core.urlresolvers import reverse
-from horizon import workflows
-
-from congress_dashboard.policies.rules import workflows as rule_workflows
-
-
-class CreateView(workflows.WorkflowView):
-    workflow_class = rule_workflows.CreateRule
-    ajax_template_name = 'admin/policies/rules/create.html'
-    success_url = 'horizon:admin:policies:detail'
-
-    def get_success_url(self):
-        return reverse(self.success_url,
-                       args=(self.kwargs['policy_name'],))
-
-    def get_initial(self):
-        return {'policy_name': self.kwargs['policy_name']}
diff --git a/congress_dashboard/policies/rules/workflows.py b/congress_dashboard/policies/rules/workflows.py
deleted file mode 100644
index bcc4f4ee..00000000
--- a/congress_dashboard/policies/rules/workflows.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# Copyright 2015 VMware.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import re
-
-from django.core.urlresolvers import reverse
-from django import template
-from django.utils.text import slugify
-from django.utils.translation import ugettext_lazy as _
-from horizon import forms
-from horizon import workflows
-import six
-
-from congress_dashboard.api import congress
-
-
-COLUMN_FORMAT = '<datasource>%s<table> <column>' % congress.TABLE_SEPARATOR
-COLUMN_PATTERN = r'\s*[\w.]+%s[\w.]+\s+[\w.]+\s*$' % congress.TABLE_SEPARATOR
-COLUMN_PATTERN_ERROR = 'Column name must be in "%s" format' % COLUMN_FORMAT
-
-TABLE_FORMAT = '<datasource>%s<table>
' % congress.TABLE_SEPARATOR -TABLE_PATTERN = r'\s*[\w.]+%s[\w.]+\s*$' % congress.TABLE_SEPARATOR -TABLE_PATTERN_ERROR = 'Table name must be in "%s" format' % TABLE_FORMAT - -LOG = logging.getLogger(__name__) - - -class CreateOutputAction(workflows.Action): - policy_name = forms.CharField(widget=forms.HiddenInput(), required=False) - rule_name = forms.CharField(label=_('Rule Name'), max_length=255, - initial='', required=False) - comment = forms.CharField(label=_('Rule Comment'), initial='', - required=False) - policy_table = forms.CharField(label=_("Policy Table Name"), initial='', - max_length=255) - policy_columns = forms.CharField( - label=_('Policy Table Columns'), initial='', - help_text=_('Name the columns in the output table, one per textbox.')) - failure_url = 'horizon:admin:policies:detail' - - def __init__(self, request, context, *args, **kwargs): - super(CreateOutputAction, self).__init__(request, context, *args, - **kwargs) - self.fields['policy_name'].initial = context['policy_name'] - - class Meta(object): - name = _('Output') - - -class CreateOutput(workflows.Step): - action_class = CreateOutputAction - contributes = ('policy_name', 'rule_name', 'comment', 'policy_table', - 'policy_columns') - template_name = 'admin/policies/rules/_create_output.html' - help_text = _('Information about the rule and the policy table ' - 'being created.') - - def render(self): - # Overriding parent method to add extra template context variables. - step_template = template.loader.get_template(self.template_name) - extra_context = {"form": self.action, - "step": self} - context = template.RequestContext(self.workflow.request, extra_context) - - # Data needed to re-create policy column inputs after an error occurs. - policy_columns = self.workflow.request.POST.get('policy_columns', '') - columns_list = policy_columns.split(', ') - context['policy_columns_list'] = columns_list - context['policy_columns_count'] = len(columns_list) - return step_template.render(context) - - -class CreateConditionsAction(workflows.Action): - mappings = forms.CharField(label=_('Policy table columns:'), initial='') - - class Meta(object): - name = _('Conditions') - - -class CreateConditions(workflows.Step): - action_class = CreateConditionsAction - contributes = ('mappings',) - template_name = 'admin/policies/rules/_create_conditions.html' - help_text = _('Sources from which the output policy table will get its ' - 'data, plus any constraints.') - - def _compare_mapping_columns(self, x, y): - # x = "mapping_column_", y = "mapping_column_" - return cmp(int(x.split('_')[-1]), int(y.split('_')[-1])) - - def render(self): - # Overriding parent method to add extra template context variables. - step_template = template.loader.get_template(self.template_name) - extra_context = {"form": self.action, - "step": self} - context = template.RequestContext(self.workflow.request, extra_context) - - # Data needed to re-create mapping column inputs after an error occurs. - post = self.workflow.request.POST - mappings = [] - policy_columns = post.get('policy_columns') - policy_columns_list = [] - # Policy column to data source mappings. - if policy_columns: - policy_columns_list = policy_columns.split(', ') - mapping_columns = [] - for param, value in post.items(): - if (param.startswith('mapping_column_') and - param != 'mapping_column_0'): - mapping_columns.append(param) - - # Mapping columns should be in the same order as the policy columns - # above to which they match. 
-        sorted_mapping_columns = sorted(mapping_columns,
-                                        cmp=self._compare_mapping_columns)
-        mapping_columns_list = [post.get(c)
-                                for c in sorted_mapping_columns]
-        mappings = zip(policy_columns_list, mapping_columns_list)
-        context['mappings'] = mappings
-        # Add one for the hidden template row.
-        context['mappings_count'] = len(mappings) + 1
-
-        # Data needed to re-create join, negation, and alias inputs.
-        joins = []
-        negations = []
-        aliases = []
-        for param, value in post.items():
-            if param.startswith('join_left_') and value:
-                join_num = param.split('_')[-1]
-                other_value = post.get('join_right_%s' % join_num)
-                join_op = post.get('join_op_%s' % join_num)
-                if other_value and join_op is not None:
-                    joins.append((value, join_op, other_value))
-            elif param.startswith('negation_value_') and value:
-                negation_num = param.split('_')[-1]
-                negation_column = post.get('negation_column_%s' %
-                                           negation_num)
-                if negation_column:
-                    negations.append((value, negation_column))
-            elif param.startswith('alias_column_') and value:
-                alias_num = param.split('_')[-1]
-                alias_name = post.get('alias_name_%s' % alias_num)
-                if alias_name:
-                    aliases.append((value, alias_name))
-
-        # Make sure there's at least one empty row.
-        context['joins'] = joins or [('', '')]
-        context['joins_count'] = len(joins) or 1
-        context['negations'] = negations or [('', '')]
-        context['negations_count'] = len(negations) or 1
-        context['aliases'] = aliases or [('', '')]
-        context['aliases_count'] = len(aliases) or 1
-
-        # Input validation attributes.
-        context['column_pattern'] = COLUMN_PATTERN
-        context['column_pattern_error'] = COLUMN_PATTERN_ERROR
-        context['table_pattern'] = TABLE_PATTERN
-        context['table_pattern_error'] = TABLE_PATTERN_ERROR
-        return step_template.render(context)
-
-
-def _underscore_slugify(name):
-    # Slugify the given string, except using underscores instead of hyphens.
-    return slugify(name).replace('-', '_')
-
-
-class CreateRule(workflows.Workflow):
-    slug = 'create_rule'
-    name = _('Create Rule')
-    finalize_button_name = _('Create')
-    success_message = _('Created rule%(rule_name)s.%(error)s')
-    failure_message = _('Unable to create rule%(rule_name)s: %(error)s')
-    default_steps = (CreateOutput, CreateConditions)
-    wizard = True
-
-    def get_success_url(self):
-        policy_name = self.context.get('policy_name')
-        return reverse('horizon:admin:policies:detail', args=(policy_name,))
-
-    def get_failure_url(self):
-        policy_name = self.context.get('policy_name')
-        return reverse('horizon:admin:policies:detail', args=(policy_name,))
-
-    def format_status_message(self, message):
-        rule_name = self.context.get('rule_name')
-        name_str = ''
-        if rule_name:
-            name_str = ' "%s"' % rule_name
-        else:
-            rule_id = self.context.get('rule_id')
-            if rule_id:
-                name_str = ' %s' % rule_id
-        return message % {'rule_name': name_str,
-                          'error': self.context.get('error', '')}
-
-    def _get_schema_columns(self, request, table):
-        table_parts = table.split(congress.TABLE_SEPARATOR)
-        datasource = table_parts[0]
-        table_name = table_parts[1]
-        try:
-            schema = congress.datasource_table_schema_get_by_name(
-                request, datasource, table_name)
-        except Exception:
-            # Maybe it's a policy table, not a service.
-            try:
-                schema = congress.policy_table_schema_get(
-                    request, datasource, table_name)
-            except Exception as e:
-                # Nope.
- LOG.error('Unable to get schema for table "%s", ' - 'datasource "%s": %s', - table_name, datasource, str(e)) - return str(e) - return schema['columns'] - - def handle(self, request, data): - policy_name = data['policy_name'] - username = request.user.username - project_name = request.user.tenant_name - - # Output data. - rule_name = data.get('rule_name') - comment = data.get('comment') - policy_table = _underscore_slugify(data['policy_table']) - if not data['policy_columns']: - self.context['error'] = 'Missing policy table columns' - return False - policy_columns = data['policy_columns'].split(', ') - - # Conditions data. - if not data['mappings']: - self.context['error'] = ('Missing data source column mappings for ' - 'policy table columns') - return False - mapping_columns = [c.strip() for c in data['mappings'].split(', ')] - if len(policy_columns) != len(mapping_columns): - self.context['error'] = ('Missing data source column mappings for ' - 'some policy table columns') - return False - # Map columns used in rule's head. Every column in the head must also - # appear in the body. - head_columns = [_underscore_slugify(c).strip() for c in policy_columns] - column_variables = dict(zip(mapping_columns, head_columns)) - - # All tables needed in the body. - body_tables = set() - negation_tables = set() - - # Keep track of the tables from the head that need to be in the body. - for column in mapping_columns: - if re.match(COLUMN_PATTERN, column) is None: - self.context['error'] = '%s: %s' % (COLUMN_PATTERN_ERROR, - column) - return False - table = column.split()[0] - body_tables.add(table) - - # Make sure columns that are given a significant variable name are - # unique names by adding name_count as a suffix. - name_count = 0 - for param, value in request.POST.items(): - if param.startswith('join_left_') and value: - if re.match(COLUMN_PATTERN, value) is None: - self.context['error'] = '%s: %s' % (COLUMN_PATTERN_ERROR, - value) - return False - value = value.strip() - - # Get operator and other column used in join. - join_num = param.split('_')[-1] - join_op = request.POST.get('join_op_%s' % join_num) - other_value = request.POST.get('join_right_%s' % join_num) - other_value = other_value.strip() - - if join_op == '=': - try: - # Check if static value is a number, but keep it as a - # string, to be used later. - int(other_value) - column_variables[value] = other_value - except ValueError: - # Pass it along as a quoted string. - column_variables[value] = '"%s"' % other_value - else: - # Join between two columns. - if not other_value: - # Ignore incomplete pairing. - continue - if re.match(COLUMN_PATTERN, other_value) is None: - self.context['error'] = ('%s: %s' % - (COLUMN_PATTERN_ERROR, - other_value)) - return False - - # Tables used in the join need to be in the body. - value_parts = value.split() - body_tables.add(value_parts[0]) - body_tables.add(other_value.split()[0]) - - # Arbitrarily name the right column the same as the left. - column_name = value_parts[1] - # Use existing variable name if there is already one for - # either column in this join. 
- if other_value in column_variables: - column_variables[value] = column_variables[other_value] - elif value in column_variables: - column_variables[other_value] = column_variables[value] - else: - variable = '%s_%s' % (column_name, name_count) - name_count += 1 - column_variables[value] = variable - column_variables[other_value] = variable - - elif param.startswith('negation_value_') and value: - if re.match(COLUMN_PATTERN, value) is None: - self.context['error'] = '%s: %s' % (COLUMN_PATTERN_ERROR, - value) - return False - value = value.strip() - - # Get operator and other column used in negation. - negation_num = param.split('_')[-1] - negation_column = request.POST.get('negation_column_%s' % - negation_num) - if not negation_column: - # Ignore incomplete pairing. - continue - if re.match(COLUMN_PATTERN, negation_column) is None: - self.context['error'] = '%s: %s' % (COLUMN_PATTERN_ERROR, - negation_column) - return False - negation_column = negation_column.strip() - - # Tables for columns referenced by the negation table must - # appear in the body. - value_parts = value.split() - body_tables.add(value_parts[0]) - - negation_tables.add(negation_column.split()[0]) - # Use existing variable name if there is already one for either - # column in this negation. - if negation_column in column_variables: - column_variables[value] = column_variables[negation_column] - elif value in column_variables: - column_variables[negation_column] = column_variables[value] - else: - # Arbitrarily name the negated table's column the same as - # the value column. - column_name = value_parts[1] - variable = '%s_%s' % (column_name, name_count) - name_count += 1 - column_variables[value] = variable - column_variables[negation_column] = variable - - LOG.debug('column_variables for rule: %s', column_variables) - - # Form the literals for all the tables needed in the body. Make sure - # column that have no relation to any other columns are given a unique - # variable name, using column_count. - column_count = 0 - literals = [] - for table in body_tables: - # Replace column names with variable names that join related - # columns together. - columns = self._get_schema_columns(request, table) - if isinstance(columns, six.string_types): - self.context['error'] = columns - return False - - literal_columns = [] - if columns: - for column in columns: - table_column = '%s %s' % (table, column['name']) - literal_columns.append( - column_variables.get(table_column, 'col_%s' % - column_count)) - column_count += 1 - literals.append('%s(%s)' % (table, ', '.join(literal_columns))) - else: - # Just the table name, such as for classification:true. - literals.append(table) - - # Form the negated tables. - for table in negation_tables: - columns = self._get_schema_columns(request, table) - if isinstance(columns, six.string_types): - self.context['error'] = columns - return False - - literal_columns = [] - num_variables = 0 - for column in columns: - table_column = '%s %s' % (table, column['name']) - if table_column in column_variables: - literal_columns.append(column_variables[table_column]) - num_variables += 1 - else: - literal_columns.append('col_%s' % column_count) - column_count += 1 - literal = 'not %s(%s)' % (table, ', '.join(literal_columns)) - literals.append(literal) - - # Every column in the negated table must appear in a non-negated - # literal in the body. If there are some variables that have not - # been used elsewhere, repeat the literal in its non-negated form. 
- if num_variables != len(columns) and table not in body_tables: - literals.append(literal.replace('not ', '')) - - # All together now. - rule = '%s(%s) %s %s' % (policy_table, ', '.join(head_columns), - congress.RULE_SEPARATOR, ', '.join(literals)) - LOG.info('User %s creating policy "%s" rule "%s" in tenant %s: %s', - username, policy_name, rule_name, project_name, rule) - try: - params = { - 'name': rule_name, - 'comment': comment, - 'rule': rule, - } - rule = congress.policy_rule_create(request, policy_name, - body=params) - LOG.info('Created rule %s', rule['id']) - self.context['rule_id'] = rule['id'] - except Exception as e: - LOG.error('Error creating policy "%s" rule "%s": %s', - policy_name, rule_name, str(e)) - self.context['error'] = str(e) - return False - return True diff --git a/congress_dashboard/policies/tables.py b/congress_dashboard/policies/tables.py deleted file mode 100644 index 3df38ba7..00000000 --- a/congress_dashboard/policies/tables.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2014 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from django.core.urlresolvers import reverse -from django.utils.translation import ugettext_lazy as _ -from django.utils.translation import ungettext_lazy -from horizon import exceptions -from horizon import messages -from horizon import tables -from openstack_dashboard import policy - -from congress_dashboard.api import congress - - -LOG = logging.getLogger(__name__) - - -def get_policy_link(datum): - return reverse('horizon:admin:policies:detail', args=(datum['name'],)) - - -class CreatePolicy(tables.LinkAction): - name = 'create_policy' - verbose_name = _('Create Policy') - url = 'horizon:admin:policies:create' - classes = ('ajax-modal',) - icon = 'plus' - - -class DeletePolicy(policy.PolicyTargetMixin, tables.DeleteAction): - @staticmethod - def action_present(count): - return ungettext_lazy( - u'Delete Policy', - u'Delete Policies', - count - ) - - @staticmethod - def action_past(count): - return ungettext_lazy( - u'Deleted policy', - u'Deleted policies', - count - ) - - redirect_url = 'horizon:admin:policies:index' - - def delete(self, request, obj_id): - LOG.info('User %s deleting policy "%s" in tenant %s', - request.user.username, obj_id, request.user.tenant_name) - try: - congress.policy_delete(request, obj_id) - LOG.info('Deleted policy "%s"', obj_id) - except Exception as e: - msg_args = {'policy_id': obj_id, 'error': str(e)} - msg = _('Failed to delete policy "%(policy_id)s": ' - '%(error)s') % msg_args - LOG.error(msg) - messages.error(request, msg) - redirect = reverse(self.redirect_url) - raise exceptions.Http302(redirect) - - def allowed(self, request, policy=None): - # Only user policies can be deleted. 
- if policy: - return policy['owner_id'] == 'user' - return True - - -class PoliciesTable(tables.DataTable): - name = tables.Column("name", verbose_name=_("Name"), link=get_policy_link) - description = tables.Column("description", verbose_name=_("Description")) - kind = tables.Column("kind", verbose_name=_("Kind")) - owner_id = tables.Column("owner_id", verbose_name=_("Owner ID")) - - class Meta(object): - name = "policies" - verbose_name = _("Policies") - table_actions = (CreatePolicy, DeletePolicy) - row_actions = (DeletePolicy,) diff --git a/congress_dashboard/policies/templates/policies/_create.html b/congress_dashboard/policies/templates/policies/_create.html deleted file mode 100644 index 2c2380f3..00000000 --- a/congress_dashboard/policies/templates/policies/_create.html +++ /dev/null @@ -1,22 +0,0 @@ -{% extends "horizon/common/_modal_form.html" %} -{% load i18n %} -{% load url from future %} - -{% block form_id %}create_policy_form{% endblock %} -{% block form_action %}{% url 'horizon:admin:policies:create' %}{% endblock %} - -{% block modal_id %}create_policy_modal{% endblock %} -{% block modal-header %}{% trans "Create Policy" %}{% endblock %} - -{% block modal-body %} -
-<fieldset>
-  {% include "horizon/common/_form_fields.html" %}
-</fieldset>
-{% endblock %}
-
-{% block modal-footer %}
-  <input class="btn btn-primary pull-right" type="submit" value="{% trans 'Create Policy' %}" />
-  <a href="{% url 'horizon:admin:policies:index' %}" class="btn btn-default secondary cancel close">{% trans "Cancel" %}</a>
-{% endblock %}
diff --git a/congress_dashboard/policies/templates/policies/_detail_overview.html b/congress_dashboard/policies/templates/policies/_detail_overview.html
deleted file mode 100644
index 1c3fb3f3..00000000
--- a/congress_dashboard/policies/templates/policies/_detail_overview.html
+++ /dev/null
@@ -1,18 +0,0 @@
-{% load i18n %}
-

{% trans "Policy Overview" %}

- -
-
-
{% trans "Policy Name" %}
-
{{ policy.name|default:policy.id }}
-
{% trans "Policy ID" %}
-
{{ policy.id }}
-
{% trans "Description" %}
-
{{ policy.description }}
-
{% trans "Kind" %}
-
{{ policy.kind }}
-
{% trans "Owner ID" %}
-
{{ policy.owner_id|default:"-" }}
-
-
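The overview partial above expects a policy object exposing name, id, description, kind, and owner_id. A hypothetical sketch of how a detail view could assemble that context with the policy_get helper from api/congress.py; the class name and method shown here are illustrative, not the deleted implementation.

from horizon import tables

from congress_dashboard.api import congress

class PolicyDetailView(tables.DataTableView):  # illustrative only
    # table_class, template_name, etc. omitted for brevity.
    def get_context_data(self, **kwargs):
        context = super(PolicyDetailView, self).get_context_data(**kwargs)
        # policy_get() scans policies_list() because system policies do not
        # yet have unique ids (see the TODO in api/congress.py above).
        context['policy'] = congress.policy_get(self.request,
                                                kwargs['policy_name'])
        return context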
diff --git a/congress_dashboard/policies/templates/policies/create.html b/congress_dashboard/policies/templates/policies/create.html deleted file mode 100644 index 53cd6227..00000000 --- a/congress_dashboard/policies/templates/policies/create.html +++ /dev/null @@ -1,11 +0,0 @@ -{% extends 'base.html' %} -{% load i18n %} -{% block title %}{% trans "Create Policy" %}{% endblock %} - -{% block page_header %} - {% include "horizon/common/_page_header.html" with title=_("Create Policy") %} -{% endblock page_header %} - -{% block main %} - {% include "admin/policies/_create.html" %} -{% endblock %} diff --git a/congress_dashboard/policies/templates/policies/detail.html b/congress_dashboard/policies/templates/policies/detail.html deleted file mode 100644 index 095bd7fe..00000000 --- a/congress_dashboard/policies/templates/policies/detail.html +++ /dev/null @@ -1,20 +0,0 @@ -{% extends 'base.html' %} -{% load i18n %} -{% block title %}{% trans "Policy Details" %}{% endblock %} - -{% block page_header %} - {% include "horizon/common/_page_header.html" with title=_("Policy Details: ")|add:policy.name %} -{% endblock page_header %} - -{% block main %} - {% include "admin/policies/_detail_overview.html" %} -
-<div class="row">
-  <div class="col-md-12">
-    {{ policy_rules_table.render }}
-  </div>
-  <div class="col-md-12">
-    {{ policies_tables_table.render }}
-  </div>
- - -{% endblock %} diff --git a/congress_dashboard/policies/templates/policies/index.html b/congress_dashboard/policies/templates/policies/index.html deleted file mode 100644 index b4b6be80..00000000 --- a/congress_dashboard/policies/templates/policies/index.html +++ /dev/null @@ -1,13 +0,0 @@ -{% extends 'base.html' %} -{% load i18n %} -{% block title %}{% trans "Policies" %}{% endblock %} - -{% block page_header %} - {% include "horizon/common/_page_header.html" with title=_("Policies") %} -{% endblock page_header %} - -{% block main %} -
- {{ policies_table.render }} -
-{% endblock %} diff --git a/congress_dashboard/policies/templates/policies/rules/_create_conditions.html b/congress_dashboard/policies/templates/policies/rules/_create_conditions.html deleted file mode 100644 index 997b5561..00000000 --- a/congress_dashboard/policies/templates/policies/rules/_create_conditions.html +++ /dev/null @@ -1,174 +0,0 @@ - -{{ step.get_help_text }} -{% include 'horizon/common/_form_errors.html' with form=form %} -
-
-
- - - - {% include 'admin/policies/rules/_mapping_row.html' with form=form count=0 column='' value='' %} - {% for column, value in mappings %}{% include 'admin/policies/rules/_mapping_row.html' with form=form count=forloop.counter column=column value=value %}{% endfor %} - - - -
 
- {% for error in form.mappings.errors %} - {{ error }} - {% endfor %} -
- - - - - {% for left, op, right in joins %} - - - - {% endfor %} -
-
- -
-
-
- - -
-
- - -
- & - - - - - {% for value, column in negations %} - - - - - {% endfor %} -
-
- - -
- -
-
- - -
-
value is in -
- - -
-
- -
- & -{% comment %} - - - - - {% for column, name in aliases %} - - - - - - {% endfor %} -
-
- -
-
- {% if forloop.first %}
- - -
{% endif %} -
-
- - -
-
as - - - -
- + -{% endcomment %} - - - - diff --git a/congress_dashboard/policies/templates/policies/rules/_create_output.html b/congress_dashboard/policies/templates/policies/rules/_create_output.html deleted file mode 100644 index d4914c99..00000000 --- a/congress_dashboard/policies/templates/policies/rules/_create_output.html +++ /dev/null @@ -1,65 +0,0 @@ - -{{ step.get_help_text }} -{% include 'horizon/common/_form_errors.html' with form=form %} -
-
-
- - {% if form.rule_name.help_text %} - - {% endif %} - - {% for error in form.rule_name.errors %} - {{ error }} - {% endfor %} -
- -
- - {% if form.comment.help_text %} - - {% endif %} - - {% for error in form.comment.errors %} - {{ error }} - {% endfor %} -
- -
- - {% if form.policy_table.help_text %} - - {% endif %} - - {% for error in form.policy_table.errors %} - {{ error }} - {% endfor %} -
- -
- - {% if form.policy_columns.help_text %} - - {% endif %} - - - {% for column in policy_columns_list %} - - - {% endfor %} - - - -
- - - -
- {% for error in form.policy_columns.errors %} - {{ error }} - {% endfor %} -
- + -
-
-
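A non-obvious detail of the output step above: the rule head's columns are collected into a single ``policy_columns`` value, which ``policies.js`` later in this diff assembles with ``columns.join(', ')`` ("Workflow expects one policy_columns value"). The server side only needs to split that string back apart; a sketch, with a hypothetical helper name not taken from the deleted code::

    def split_policy_columns(raw):
        """Undo the client-side columns.join(', ') for a posted rule head."""
        return [name.strip() for name in raw.split(',') if name.strip()]

    # The wizard would post 'src_ip, dst_ip' for a two-column rule head.
    assert split_policy_columns('src_ip, dst_ip') == ['src_ip', 'dst_ip']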
diff --git a/congress_dashboard/policies/templates/policies/rules/_mapping_row.html b/congress_dashboard/policies/templates/policies/rules/_mapping_row.html deleted file mode 100644 index ed7bea9f..00000000 --- a/congress_dashboard/policies/templates/policies/rules/_mapping_row.html +++ /dev/null @@ -1,21 +0,0 @@ - - - {% if count <= 1 %}
- - {% if form.mappings.help_text %} - - {% endif %} -
{% endif %} - - {{ column }} - maps to - -
- - -
- - - diff --git a/congress_dashboard/policies/templates/policies/rules/create.html b/congress_dashboard/policies/templates/policies/rules/create.html deleted file mode 100644 index 46be86d5..00000000 --- a/congress_dashboard/policies/templates/policies/rules/create.html +++ /dev/null @@ -1,24 +0,0 @@ -{% extends 'horizon/common/_workflow.html' %} -{% load i18n %} - -{% block modal-footer %} - {% if workflow.wizard %} -
- - -
- -
- - -
{% else %} - - {% if modal %}{% trans "Cancel" %}{% endif %} - {% endif %} -{% endblock %} diff --git a/congress_dashboard/policies/urls.py b/congress_dashboard/policies/urls.py deleted file mode 100644 index 05ff51c9..00000000 --- a/congress_dashboard/policies/urls.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2014 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from django.conf.urls import patterns -from django.conf.urls import url - -from congress_dashboard.datasources import views as data_views -from congress_dashboard.policies.rules import views as rule_views -from congress_dashboard.policies import views - - -POLICY = r'^(?P<policy_name>[^/]+)/%s$' -POLICYTABLE = r'^(?P<policy_name>[^/]+)/(?P<policy_table_name>[^/]+)/%s$' - - -urlpatterns = patterns( - '', - url(r'^$', views.IndexView.as_view(), name='index'), - url(r'^create/$', views.CreateView.as_view(), name='create'), - url(POLICY % 'detail', views.DetailView.as_view(), name='detail'), - url(POLICYTABLE % 'detail', data_views.DetailView.as_view(), - name='policy_table_detail'), - url(POLICY % 'rules/create', - rule_views.CreateView.as_view(), name='create_rule'), -) diff --git a/congress_dashboard/policies/views.py b/congress_dashboard/policies/views.py deleted file mode 100644 index ce385fd8..00000000 --- a/congress_dashboard/policies/views.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2014 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
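One note on the ``POLICY``/``POLICYTABLE`` patterns in ``urls.py`` above: they are ``%``-templates, so each route is produced by substituting an action suffix, and the named groups become the view kwargs (``policy_name``, ``policy_table_name``) that the views read. A standalone illustration, not taken from the deleted code::

    import re

    POLICY = r'^(?P<policy_name>[^/]+)/%s$'

    # POLICY % 'detail' yields '^(?P<policy_name>[^/]+)/detail$'
    match = re.match(POLICY % 'detail', 'classification/detail')
    assert match and match.group('policy_name') == 'classification'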
- -import json -import logging - -from django.core.urlresolvers import reverse -from django.core.urlresolvers import reverse_lazy -from django.template.defaultfilters import dictsort -from django.utils.translation import ugettext_lazy as _ -from horizon import exceptions -from horizon import forms -from horizon import messages -from horizon import tables - -from congress_dashboard.api import congress -import congress_dashboard.datasources.utils as ds_utils -from congress_dashboard.policies import forms as policies_forms -from congress_dashboard.policies.rules import tables as rules_tables -from congress_dashboard.policies import tables as policies_tables - - -LOG = logging.getLogger(__name__) - - -class IndexView(tables.DataTableView): - """List policies.""" - table_class = policies_tables.PoliciesTable - template_name = 'admin/policies/index.html' - - def get_data(self): - try: - policies = congress.policies_list(self.request) - except Exception as e: - msg = _('Unable to get policies list: %s') % str(e) - LOG.error(msg) - messages.error(self.request, msg) - return [] - return policies - - -class CreateView(forms.ModalFormView): - form_class = policies_forms.CreatePolicy - template_name = 'admin/policies/create.html' - success_url = reverse_lazy('horizon:admin:policies:index') - - -class DetailView(tables.MultiTableView): - """List details about and rules in a policy.""" - table_classes = (rules_tables.PolicyRulesTable, - rules_tables.PoliciesTablesTable,) - template_name = 'admin/policies/detail.html' - - def get_policies_tables_data(self): - policy_name = self.kwargs['policy_name'] - try: - policy_tables = congress.policy_tables_list(self.request, - policy_name) - except Exception as e: - msg_args = {'policy_name': policy_name, 'error': str(e)} - msg = _('Unable to get tables list for policy ' - '"%(policy_name)s": %(error)s') % msg_args - messages.error(self.request, msg) - return [] - - for pt in policy_tables: - pt.set_id_as_name_if_empty() - pt.set_value('policy_name', policy_name) - # Object ids within a Horizon table must be unique. Otherwise, - # Horizon will cache the column values for the object by id and - # use the same column values for all rows with the same id. 
- pt.set_value('table_id', pt['id']) - pt.set_value('id', '%s-%s' % (policy_name, pt['table_id'])) - - return policy_tables - - def get_policy_rules_data(self): - policy_name = self.kwargs['policy_name'] - try: - policy_rules = congress.policy_rules_list(self.request, - policy_name) - except Exception as e: - msg_args = {'policy_name': policy_name, 'error': str(e)} - msg = _('Unable to get rules in policy "%(policy_name)s": ' - '%(error)s') % msg_args - LOG.error(msg) - messages.error(self.request, msg) - redirect = reverse('horizon:admin:policies:index') - raise exceptions.Http302(redirect) - - for r in policy_rules: - r.set_id_as_name_if_empty() - return policy_rules - - def get_context_data(self, **kwargs): - context = super(DetailView, self).get_context_data(**kwargs) - policy_name = kwargs['policy_name'] - try: - policy = congress.policy_get(self.request, policy_name) - except Exception as e: - msg_args = {'policy_name': policy_name, 'error': str(e)} - msg = _('Unable to get policy "%(policy_name)s": ' - '%(error)s') % msg_args - LOG.error(msg) - messages.error(self.request, msg) - redirect = reverse('horizon:admin:policies:index') - raise exceptions.Http302(redirect) - context['policy'] = policy - - # Alphabetize and convert list of data source tables and columns into - # JSON formatted string consumable by JavaScript. Do this here instead - # of in the Create Rule form so that the tables and columns lists - # appear in the HTML document before the JavaScript that uses them. - all_tables = ds_utils.get_datasource_tables(self.request) - sorted_datasources = dictsort(all_tables, 'datasource') - tables = [] - for ds in sorted_datasources: - datasource_tables = ds['tables'] - datasource_tables.sort() - for table in ds['tables']: - tables.append('%s%s%s' % (ds['datasource'], - congress.TABLE_SEPARATOR, table)) - context['tables'] = json.dumps(tables) - - datasource_columns = ds_utils.get_datasource_columns(self.request) - sorted_datasources = dictsort(datasource_columns, 'datasource') - columns = [] - for ds in sorted_datasources: - sorted_tables = dictsort(ds['tables'], 'table') - for tbl in sorted_tables: - # Ignore service-derived tables, which are already included. - if congress.TABLE_SEPARATOR in tbl['table']: - continue - table_columns = tbl['columns'] - if table_columns: - table_columns.sort() - else: - # Placeholder name for column when the table has none. 
- table_columns = ['_'] - - for column in table_columns: - columns.append('%s%s%s %s' % (ds['datasource'], - congress.TABLE_SEPARATOR, - tbl['table'], column)) - context['columns'] = json.dumps(columns) - return context diff --git a/congress_dashboard/static/admin/css/policies.css b/congress_dashboard/static/admin/css/policies.css deleted file mode 100644 index 3c2ec64c..00000000 --- a/congress_dashboard/static/admin/css/policies.css +++ /dev/null @@ -1,134 +0,0 @@ -/* tables */ -#policy_columns_table { - margin-bottom: 5px; -} -#policy_columns_table td.input-cell { - width: 94%; - padding-left: 0; -} -#policy_columns_table td.button-cell { - padding: 0; - text-align: right; -} - -#policy_columns_table td.input-errors, -#mappings_table td.input-errors { - padding: 0; -} -#policy_columns_table td.borderless, -#mappings_table td.borderless, -#joins_table td.borderless, -#negations_table td.borderless, -#aliases_table td.borderless { - border: none; -} -#mappings_table td.input-cell, -#joins_table td.input-cell, -#negations_table td.input-cell, -#aliases_table td.input-cell { - width: 36%; -} - -#mappings_table td.label-cell { - width: 22%; -} -#mappings_table td.policy-column-name { - width: 28%; - text-align: right; - font-style: italic; -} -#mappings_table td.mapping-text { - width: 10%; - text-align: center; -} - -#joins_table, -#negations_table, -#aliases_table { - margin-top: 30px; - margin-bottom: 5px; -} -#joins_table td.operator-cell, -#negations_table td.operator-cell { - width: 24%; - text-align: center; -} - -#aliases_table td.label-cell { - width: 19%; -} -#aliases_table td.alias-text { - width: 5%; - text-align: center; -} - -/* forms */ -#mappings_table div.form-group, -#joins_table div.form-group, -#negations_table div.form-group, -#aliases_table div.form-group { - margin-bottom: 0; -} -#mappings_table input.form-control, -#joins_table input.form-control, -#negations_table input.form-control, -#aliases_table input.form-control { - padding-right: 36px; -} -#mappings_table div.form-control-feedback, -#joins_table div.form-control-feedback, -#negations_table div.form-control-feedback, -#aliases_table div.form-control-feedback { - background-color: #DDDDDD; - width: 20px; - height: 24px; - top: 5px; - right: 5px; -} -#mappings_table span.caret, -#joins_table span.caret, -#negations_table span.caret, -#aliases_table span.caret { - border-width: 5px; - color: #333333; - margin-bottom: 10px; -} -#add_join_button, -#add_negation_button, -#add_alias_button { - margin-left: 5px; -} - - -/* autocompletion */ -.ui-autocomplete { - max-height: 200px; - overflow-y: auto; - /* prevent horizontal scrollbar */ - overflow-x: hidden; -} -/* IE 6 doesn't support max-height - * we use height instead, but this forces the menu to always be this tall - */ -* html .ui-autocomplete { - height: 200px; -} -.ui-widget { - font-family: inherit; - font-size: inherit; -} -.ui-state-hover, -.ui-widget-content .ui-state-hover, -.ui-widget-header .ui-state-hover, -.ui-state-focus, -.ui-widget-content .ui-state-focus, -.ui-widget-header .ui-state-focus, -.ui-state-active, -.ui-widget-content .ui-state-active, -.ui-widget-header .ui-state-active { - border: 1px solid #285e8e; - background: none; - background-color: #3276b1; - font-weight: normal; - color: #ffffff; -} diff --git a/congress_dashboard/static/admin/js/policies.js b/congress_dashboard/static/admin/js/policies.js deleted file mode 100644 index c20807d1..00000000 --- a/congress_dashboard/static/admin/js/policies.js +++ /dev/null @@ -1,288 +0,0 @@ 
-horizon.policies = { - /* Update input attributes for column name autocompletion. */ - updateColumnAcInput: function($input) { - $input.attr({ - 'placeholder': $input.attr('data-column-example'), - 'pattern': $input.attr('data-pattern'), - 'title': $input.attr('data-pattern-error') - }); - /* form-control-feedback only hidden, so it still has autocompletion. */ - $input.closest('td').find('.form-control-feedback') - .removeClass('hidden'); - }, - - /* Get column names from conditions mappings. */ - getMappedColumns: function() { - var mappings = []; - $('#mappings_table').find('.policy-column-name').each(function() { - var $td = $(this); - var column = $td.text(); - if (column) { - mappings.push(column); - } - }); - return mappings; - }, - - /* Check if any columns need to be removed from conditions mappings. */ - scrubMappedColumns: function(columns) { - mappings = horizon.policies.getMappedColumns(); - if (!columns) { - columns = []; - var $inputs = $('#policy_columns_table').find('.policy-column-input'); - $inputs.each(function() { - var $input = $(this); - var name = $input.val(); - if (name) { - columns.push(name); - } - }); - } - - for (var i = 0; i < mappings.length; i++) { - var name = mappings[i]; - if ($.inArray(name, columns) == -1) { - $('#mappings_table').find('.policy-column-name:contains(' + - name + ')').closest('tr').remove(); - } - } - /* Put label back if there's only one row left without it. */ - var $rows = $('#mappings_table').find('.mapping-row'); - if ($rows.length == 1 && !$rows.find('.label-cell').text()) { - var label = $('#mapping_0').find('.label-cell').html(); - $rows.find('.label-cell').html(label); - } - }, -} - -horizon.addInitFunction(horizon.policies.init = function() { - /* Add another policy table column name. */ - $(document).on('click', '#add_policy_column_button', function(evt) { - evt.preventDefault(); - var $button = $(this); - var $tr = $('#policy_column_0').clone(); - - var count = $button.attr('data-count'); - var cid = parseInt(count); - $button.attr('data-count', cid + 1); - - /* Change ids and reset inputs. */ - $tr.attr('id', 'policy_column_' + cid); - $tr.find('input[name]').val('').each(function() { - this.name = this.name.replace(/^(.+_)\d+$/, '$1' + cid); - }); - $tr.find('.remove-policy-column-button').removeClass('hidden'); - /* Add row before the one reserved for errors. */ - $('#policy_columns_table').find('tr:last').before($tr); - }); - - /* Remove policy table column name input. */ - $(document).on('click', - '#policy_columns_table a.remove-policy-column-button', - function(evt) { - evt.preventDefault(); - var $a = $(this); - var $tr = $a.closest('tr'); - $tr.remove(); - horizon.policies.scrubMappedColumns(); - }); - - /* Add policy table columns to conditions and combine into single param. */ - $(document).on('change', - '#policy_columns_table input.policy-column-input', - function() { - var mappings = horizon.policies.getMappedColumns(); - var columns = []; - - var $inputs = $('#policy_columns_table').find('.policy-column-input'); - $inputs.each(function() { - var $input = $(this); - var name = $input.val(); - /* Does not make sense to have multiple of the same column. */ - if (name && $.inArray(name, columns) == -1) { - columns.push(name); - - if ($.inArray(name, mappings) == -1) { - /* Add mapping inputs for new policy column. */ - var $tr = $('#mapping_0').clone(); - var count = $('#mappings_table').attr('data-count'); - var cid = parseInt(count); - $('#mappings_table').attr('data-count', cid + 1); - - /* Change ids. 
*/ - $tr.attr('id', 'mapping_' + cid).toggleClass('hidden mapping-row'); - $tr.find('.policy-column-name').text(name); - $tr.find('input[id]').each(function() { - this.id = this.id.replace(/^(.+_)\d+$/, '$1' + cid); - this.name = this.id; - }); - /* Remove label if there's already a row with it. */ - if ($('#mappings_table').find('.mapping-row').length) { - $tr.find('.label-cell').empty(); - } - $('#mappings_table').find('tr:last').before($tr); - - /* Add autocompletion. */ - $('#mapping_column_' + cid).autocomplete({ - minLength: 0, - source: JSON.parse($('#ds_columns').text()) - }); - $('#mapping_' + cid).find('.ac div.form-control-feedback') - .click(function() { - /* Focus on list now so that clicking outside of it closes it. */ - $('#mapping_column_' + cid).autocomplete('search', '').focus(); - }); - } - } - }); - - /* Workflow expects one policy_columns value. */ - $('#policy_columns').val(columns.join(', ')); - horizon.policies.scrubMappedColumns(columns); - }); - - /* Add another join. */ - $(document).on('click', '#add_join_button', function(evt) { - evt.preventDefault(); - var $button = $(this); - var $tr = $('#join_0').clone(); - - var count = $button.attr('data-count'); - var cid = parseInt(count); - $button.attr('data-count', cid + 1); - - /* Change ids and reset inputs. */ - $tr.attr('id', 'join_' + cid); - $tr.find('input[id], select[id]').val('').each(function() { - this.id = this.id.replace(/^(.+_)\d+$/, '$1' + cid); - this.name = this.id; - }); - $tr.find('select').val($tr.find('option:first').val()); - $tr.find('.remove-join-button').removeClass('hidden'); - $('#joins_table').append($tr); - - /* Add autocompletion. */ - $('#join_left_' + cid + ', #join_right_' + cid).autocomplete({ - minLength: 0, - source: JSON.parse($('#ds_columns').text()) - }); - horizon.policies.updateColumnAcInput($('#join_right_' + cid)); - $('#join_' + cid).find('.ac div.form-control-feedback').click(function() { - var $div = $(this); - /* Focus on list now so that clicking outside of it closes it. */ - $div.siblings('.ac-columns').autocomplete('search', '').focus(); - }); - }); - - /* Remove join input. */ - $(document).on('click', '#joins_table a.remove-join-button', - function(evt) { - evt.preventDefault(); - var $a = $(this); - var $tr = $a.closest('tr'); - $tr.remove(); - }); - - /* Update input attributes based on type selected. */ - $(document).on('change', '#joins_table select.join-op', function() { - var $select = $(this); - var $input = $select.closest('tr').find('.join-right').val(''); - - if (!$select.val()) { - $input.autocomplete({ - minLength: 0, - source: JSON.parse($('#ds_columns').text()) - }); - horizon.policies.updateColumnAcInput($input); - } else { - $input.closest('td').find('.form-control-feedback').addClass('hidden'); - $input.autocomplete('destroy'); - $input.attr('placeholder', $input.attr('data-static-example')); - $input.removeAttr('pattern').removeAttr('title'); - } - }); - - /* Add another negation. */ - $(document).on('click', '#add_negation_button', function(evt) { - evt.preventDefault(); - var $button = $(this); - var $tr = $('#negation_0').clone(); - - var count = $button.attr('data-count'); - var cid = parseInt(count); - $button.attr('data-count', cid + 1); - - /* Change ids and reset inputs. 
*/ - $tr.attr('id', 'negation_' + cid); - $tr.find('input[id], select[id]').val('').each(function() { - this.id = this.id.replace(/^(.+_)\d+$/, '$1' + cid); - this.name = this.id; - }); - $tr.find('select').val($tr.find('option:first').val()); - $tr.find('.remove-negation-button').removeClass('hidden'); - $('#negations_table').append($tr); - - /* Add autocompletion. */ - $('#negation_value_' + cid + ', #negation_column_' + cid).autocomplete({ - minLength: 0, - source: JSON.parse($('#ds_columns').text()) - }); - $('#negation_' + cid).find('.ac div.form-control-feedback') - .click(function() { - var $div = $(this); - /* Focus on list now so that clicking outside of it closes it. */ - $div.siblings('.ac-columns').autocomplete('search', '').focus(); - }); - }); - - /* Remove negation input. */ - $(document).on('click', '#negations_table a.remove-negation-button', - function(evt) { - evt.preventDefault(); - var $a = $(this); - var $tr = $a.closest('tr'); - $tr.remove(); - }); - - /* Add another alias. */ - $(document).on('click', '#add_alias_button', function(evt) { - evt.preventDefault(); - var $button = $(this); - var $tr = $('#alias_0').clone(); - - var count = $button.attr('data-count'); - var cid = parseInt(count); - $button.attr('data-count', cid + 1); - - /* Change ids and reset inputs. */ - $tr.attr('id', 'alias_' + cid); - $tr.find('td:first').empty(); - $tr.find('input[id]').val('').each(function() { - this.id = this.id.replace(/^(.+_)\d+$/, '$1' + cid); - this.name = this.id; - }); - $tr.find('.remove-alias-button').removeClass('hidden'); - $('#aliases_table').append($tr); - - /* Add autocompletion. */ - $('#alias_column_' + cid).autocomplete({ - minLength: 0, - source: JSON.parse($('#ds_tables').text()) - }); - $('#alias_' + cid).find('.ac div.form-control-feedback') - .click(function() { - var $div = $(this); - /* Focus on list now so that clicking outside of it closes it. */ - $div.siblings('.ac-tables').autocomplete('search', '').focus(); - }); - }); - - /* Remove alias input. */ - $(document).on('click', '#aliases_table a.remove-alias-button', - function(evt) { - evt.preventDefault(); - var $a = $(this); - var $tr = $a.closest('tr'); - $tr.remove(); - }); -}); diff --git a/congress_dashboard/templates/admin/_scripts.html b/congress_dashboard/templates/admin/_scripts.html deleted file mode 100644 index 49e294ba..00000000 --- a/congress_dashboard/templates/admin/_scripts.html +++ /dev/null @@ -1,5 +0,0 @@ -{% extends 'horizon/_scripts.html' %} - -{% block custom_js_files %} - -{% endblock %} diff --git a/congress_dashboard/templates/admin/base.html b/congress_dashboard/templates/admin/base.html deleted file mode 100644 index 09777566..00000000 --- a/congress_dashboard/templates/admin/base.html +++ /dev/null @@ -1,14 +0,0 @@ -{% extends 'base.html' %} - -{% block css %} - {% include "_stylesheets.html" %} - - {% load compress %} - {% compress css %} - - {% endcompress %} -{% endblock %} - -{% block js %} - {% include "admin/_scripts.html" %} -{% endblock %} diff --git a/congress_tempest_tests/__init__.py b/congress_tempest_tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_tempest_tests/config.py b/congress_tempest_tests/config.py deleted file mode 100644 index 5bdc08bb..00000000 --- a/congress_tempest_tests/config.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2015 Intel Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from tempest import config # noqa - - -service_available_group = cfg.OptGroup(name="service_available", - title="Available OpenStack Services") -ServiceAvailableGroup = [ - cfg.BoolOpt('congress', - default=True, - help="Whether or not Congress is expected to be available"), -] -congressha_group = cfg.OptGroup(name="congressha", title="Congress HA Options") - -CongressHAGroup = [ - cfg.StrOpt("replica_type", - default="policyha", - help="service type used to create a replica congress server."), - cfg.IntOpt("replica_port", - default=4001, - help="The listening port for a replica congress server. "), -] diff --git a/congress_tempest_tests/plugin.py b/congress_tempest_tests/plugin.py deleted file mode 100644 index 68ab4f83..00000000 --- a/congress_tempest_tests/plugin.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2015 Intel -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import os - -from tempest import config -from tempest.test_discover import plugins - -from congress_tempest_tests import config as config_congress - - -class CongressTempestPlugin(plugins.TempestPlugin): - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(__file__)))[0] - test_dir = "congress_tempest_tests/tests" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - config.register_opt_group(conf, - config_congress.service_available_group, - config_congress.ServiceAvailableGroup) - config.register_opt_group(conf, config_congress.congressha_group, - config_congress.CongressHAGroup) - - def get_opt_lists(self): - return [ - (config_congress.congressha_group.name, - config_congress.CongressHAGroup), - (config_congress.service_available_group.name, - config_congress.ServiceAvailableGroup) - ] diff --git a/congress_tempest_tests/services/__init__.py b/congress_tempest_tests/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_tempest_tests/services/policy/__init__.py b/congress_tempest_tests/services/policy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_tempest_tests/services/policy/policy_client.py b/congress_tempest_tests/services/policy/policy_client.py deleted file mode 100644 index 4921d082..00000000 --- a/congress_tempest_tests/services/policy/policy_client.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from tempest.lib.common import rest_client - - -class PolicyClient(rest_client.RestClient): - - policy = '/v1/policies' - policy_path = '/v1/policies/%s' - policy_rules = '/v1/policies/%s/rules' - policy_rules_path = '/v1/policies/%s/rules/%s' - policy_tables = '/v1/policies/%s/tables' - policy_table_path = '/v1/policies/%s/tables/%s' - policy_rows = '/v1/policies/%s/tables/%s/rows' - policy_rows_trace = '/v1/policies/%s/tables/%s/rows?trace=True' - policies = '/v1/policies' - policies_status = '/v1/policies/%s/status' - policy_action = '/v1/policies/%s?%s' - library_policy = '/v1/librarypolicies' - library_policy_path = '/v1/librarypolicies/%s' - library_policies = '/v1/librarypolicies' - datasources = '/v1/data-sources' - datasource_path = '/v1/data-sources/%s' - datasource_tables = '/v1/data-sources/%s/tables' - datasource_table_path = '/v1/data-sources/%s/tables/%s' - datasource_status = '/v1/data-sources/%s/status' - datasource_schema = '/v1/data-sources/%s/schema' - datasource_table_schema = '/v1/data-sources/%s/tables/%s/spec' - datasource_rows = '/v1/data-sources/%s/tables/%s/rows' - driver = '/v1/system/drivers' - driver_path = '/v1/system/drivers/%s' - - def _resp_helper(self, resp, body=None): - if body: - body = json.loads(body) - return rest_client.ResponseBody(resp, body) - - def create_policy(self, body): - body = json.dumps(body) - resp, body = self.post( - self.policy, body=body) - return self._resp_helper(resp, body) - - def delete_policy(self, policy): - resp, body = self.delete( - self.policy_path % policy) - return self._resp_helper(resp, body) - - def show_policy(self, policy): - resp, body = self.get( - self.policy_path % policy) - return self._resp_helper(resp, body) - - def create_library_policy(self, body): - body = json.dumps(body) - resp, body = self.post( - self.library_policy, body=body) - return self._resp_helper(resp, body) - - def delete_library_policy(self, policy): - resp, body = self.delete( - self.library_policy_path % policy) - return self._resp_helper(resp, body) - - def show_library_policy(self, policy): - resp, body = self.get( - self.library_policy_path % policy) - return self._resp_helper(resp, body) - - def create_policy_rule(self, policy_name, body=None): - body = json.dumps(body) - resp, body = self.post( - self.policy_rules % policy_name, body=body) - return self._resp_helper(resp, body) - - def delete_policy_rule(self, policy_name, rule_id): - resp, body = self.delete( - self.policy_rules_path % (policy_name, rule_id)) - return self._resp_helper(resp, body) - - def show_policy_rule(self, policy_name, rule_id): - resp, body = self.get( - self.policy_rules_path % (policy_name, rule_id)) - return self._resp_helper(resp, body) - - def list_policy_rows(self, policy_name, table, trace=None): - if trace: - query = self.policy_rows_trace - else: - query = self.policy_rows - resp, body = self.get(query % (policy_name, table)) - return self._resp_helper(resp, body) - - def 
list_policy_rules(self, policy_name): - resp, body = self.get(self.policy_rules % (policy_name)) - return self._resp_helper(resp, body) - - def list_policy(self): - resp, body = self.get(self.policies) - return self._resp_helper(resp, body) - - def list_library_policy(self): - resp, body = self.get(self.library_policies) - return self._resp_helper(resp, body) - - def list_policy_tables(self, policy_name): - resp, body = self.get(self.policy_tables % (policy_name)) - return self._resp_helper(resp, body) - - def list_policy_status(self, policy_name): - resp, body = self.get(self.policies_status % (policy_name)) - return self._resp_helper(resp, body) - - def execute_policy_action(self, policy_name, action, trace, delta, body): - body = json.dumps(body) - uri = "?action=%s&trace=%s&delta=%s" % (action, trace, delta) - resp, body = self.post( - (self.policy_path % policy_name) + str(uri), body=body) - return self._resp_helper(resp, body) - - def show_policy_table(self, policy_name, table_id): - resp, body = self.get(self.policy_table_path % (policy_name, table_id)) - return self._resp_helper(resp, body) - - def list_datasources(self): - resp, body = self.get(self.datasources) - return self._resp_helper(resp, body) - - def list_datasource_tables(self, datasource_name): - resp, body = self.get(self.datasource_tables % (datasource_name)) - return self._resp_helper(resp, body) - - def list_datasource_rows(self, datasource_name, table_name): - resp, body = self.get(self.datasource_rows % - (datasource_name, table_name)) - return self._resp_helper(resp, body) - - def list_datasource_status(self, datasource_name): - resp, body = self.get(self.datasource_status % datasource_name) - return self._resp_helper(resp, body) - - def show_datasource_schema(self, datasource_name): - resp, body = self.get(self.datasource_schema % datasource_name) - return self._resp_helper(resp, body) - - def show_datasource_table_schema(self, datasource_name, table_name): - resp, body = self.get(self.datasource_table_schema % - (datasource_name, table_name)) - return self._resp_helper(resp, body) - - def show_datasource_table(self, datasource_name, table_id): - resp, body = self.get(self.datasource_table_path % - (datasource_name, table_id)) - return self._resp_helper(resp, body) - - def create_datasource(self, body=None): - body = json.dumps(body) - resp, body = self.post( - self.datasources, body=body) - return self._resp_helper(resp, body) - - def delete_datasource(self, datasource): - resp, body = self.delete( - self.datasource_path % datasource) - return self._resp_helper(resp, body) - - def update_datasource_row(self, datasource_name, table_id, rows): - body = json.dumps(rows) - resp, body = self.put( - self.datasource_rows % (datasource_name, table_id), body) - return self._resp_helper(resp) - - def execute_datasource_action(self, service_name, action, body): - body = json.dumps(body) - uri = "?action=%s" % (action) - resp, body = self.post( - (self.datasource_path % service_name) + str(uri), body=body) - return self._resp_helper(resp, body) - - def list_drivers(self): - resp, body = self.get(self.driver) - return self._resp_helper(resp, body) - - def show_driver(self, driver): - resp, body = self.get(self.driver_path % (driver)) - return self._resp_helper(resp, body) - - def request_refresh(self, driver, body=None): - body = json.dumps(body) - resp, body = self.post(self.datasource_path % - (driver) + "?action=request-refresh", - body=body) - return self._resp_helper(resp, body) diff --git 
a/congress_tempest_tests/tests/README.rst b/congress_tempest_tests/tests/README.rst deleted file mode 100644 index 8b5f3faa..00000000 --- a/congress_tempest_tests/tests/README.rst +++ /dev/null @@ -1,25 +0,0 @@ -==================== -Tempest Integration -==================== - -This directory contains Tempest tests to cover the Congress project. - -To list all Congress tempest cases, go to the tempest directory, then run:: - - $ testr list-tests congress - -To run only these tests in tempest, go to the tempest directory, then run:: - - $ ./run_tempest.sh -N -- congress - -To run a single test case, go to the tempest directory, then run it with the test case name, e.g.:: - - $ ./run_tempest.sh -N -- congress_tempest_tests.tests.scenario.test_congress_basic_ops.TestPolicyBasicOps.test_policy_basic_op - -Alternatively, to run the Congress tempest plugin tests using tox, go to the tempest directory, then run:: - - $ tox -eall-plugin congress - -And, to run a specific test:: - - $ tox -eall-plugin congress_tempest_tests.tests.scenario.test_congress_basic_ops.TestPolicyBasicOps.test_policy_basic_op diff --git a/congress_tempest_tests/tests/__init__.py b/congress_tempest_tests/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_tempest_tests/tests/api/__init__.py b/congress_tempest_tests/tests/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_tempest_tests/tests/scenario/__init__.py b/congress_tempest_tests/tests/scenario/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/__init__.py b/congress_tempest_tests/tests/scenario/congress_datasources/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/_test_keystonev2.py b/congress_tempest_tests/tests/scenario/congress_datasources/_test_keystonev2.py deleted file mode 100644 index 6c9a3a05..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/_test_keystonev2.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
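The datasource scenario tests that follow all share one idiom: poll until the rows Congress serves converge to what the source service reports, and raise ``TimeoutException`` otherwise. The tempest helper they lean on behaves roughly like this re-statement (a sketch of ``test_utils.call_until_true`` semantics, not the deleted code)::

    import time

    def call_until_true(func, duration, sleep_for):
        """Retry func until it returns True or duration seconds elapse."""
        deadline = time.time() + duration
        while time.time() < deadline:
            if func():
                return True
            time.sleep(sleep_for)
        return False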
- -from tempest import clients -from tempest import config -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from congress_tempest_tests.tests.scenario import manager_congress - -CONF = config.CONF - - -class TestKeystoneV2Driver(manager_congress.ScenarioPolicyBase): - - @classmethod - def skip_checks(cls): - super(TestKeystoneV2Driver, cls).skip_checks() - if not (CONF.network.project_networks_reachable or - CONF.network.public_network_id): - msg = ('Either project_networks_reachable must be "true", or ' - 'public_network_id must be defined.') - cls.enabled = False - raise cls.skipException(msg) - - def setUp(cls): - super(TestKeystoneV2Driver, cls).setUp() - cls.os_primary = clients.Manager( - cls.os_admin.auth_provider.credentials) - cls.keystone = cls.os_primary.identity_client - cls.tenants_client = cls.os_primary.tenants_client - cls.roles_client = cls.os_primary.roles_client - cls.users_client = cls.os_primary.users_client - cls.datasource_id = manager_congress.get_datasource_id( - cls.os_admin.congress_client, 'keystone') - - @decorators.attr(type='smoke') - def test_keystone_users_table(self): - user_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'users')['columns']) - user_id_col = next(i for i, c in enumerate(user_schema) - if c['name'] == 'id') - - def _check_data_table_keystone_users(): - # Fetch data from keystone each time, because this test may start - # before keystone has all the users. - users = self.users_client.list_users()['users'] - user_map = {} - for user in users: - user_map[user['id']] = user - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'users')) - for row in results['results']: - try: - user_row = user_map[row['data'][user_id_col]] - except KeyError: - return False - for index in range(len(user_schema)): - if ((user_schema[index]['name'] == 'tenantId' and - 'tenantId' not in user_row) or - (user_schema[index]['name'] == 'email' and - 'email' not in user_row)): - # Keystone does not return the tenantId or email column - # if not present. - pass - elif (str(row['data'][index]) != - str(user_row[user_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_keystone_users, - duration=100, sleep_for=4): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - def test_keystone_roles_table(self): - role_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'roles')['columns']) - role_id_col = next(i for i, c in enumerate(role_schema) - if c['name'] == 'id') - - def _check_data_table_keystone_roles(): - # Fetch data from keystone each time, because this test may start - # before keystone has all the users. 
- roles = self.roles_client.list_roles()['roles'] - roles_map = {} - for role in roles: - roles_map[role['id']] = role - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'roles')) - for row in results['results']: - try: - role_row = roles_map[row['data'][role_id_col]] - except KeyError: - return False - for index in range(len(role_schema)): - if (str(row['data'][index]) != - str(role_row[role_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_keystone_roles, - duration=100, sleep_for=4): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - def test_keystone_tenants_table(self): - tenant_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'tenants')['columns']) - tenant_id_col = next(i for i, c in enumerate(tenant_schema) - if c['name'] == 'id') - - def _check_data_table_keystone_tenants(): - # Fetch data from keystone each time, because this test may start - # before keystone has all the users. - tenants = self.tenants_client.list_tenants()['tenants'] - tenants_map = {} - for tenant in tenants: - tenants_map[tenant['id']] = tenant - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'tenants')) - for row in results['results']: - try: - tenant_row = tenants_map[row['data'][tenant_id_col]] - except KeyError: - return False - for index in range(len(tenant_schema)): - if (str(row['data'][index]) != - str(tenant_row[tenant_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_keystone_tenants, - duration=100, sleep_for=5): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('keystone'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_aodh.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_aodh.py deleted file mode 100644 index 442a6c1a..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/test_aodh.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2016 NEC Corporation. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
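Each keystone check above follows the same comparison pattern: index the service's records by id, then require every Congress row to match field by field after string conversion. Distilled into one function (a simplified sketch, not the deleted code)::

    def rows_match(schema, rows, records_by_id, id_col):
        """Compare Congress rows against service records keyed by id."""
        names = [column['name'] for column in schema]
        for row in rows:
            record = records_by_id.get(row['data'][id_col])
            if record is None:
                return False  # Congress has a row the service lacks
            for index, name in enumerate(names):
                # Skip fields keystone omits, e.g. tenantId or email.
                if name not in record:
                    continue
                if str(row['data'][index]) != str(record[name]):
                    return False
        return True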
- -from tempest import config -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from congress_tempest_tests.tests.scenario import manager_congress - - -CONF = config.CONF - - -class TestAodhDriver(manager_congress.ScenarioPolicyBase): - - @classmethod - def skip_checks(cls): - super(TestAodhDriver, cls).skip_checks() - if not getattr(CONF.service_available, 'aodh_plugin', False): - msg = ("%s skipped as aodh is not available" % - cls.__class__.__name__) - raise cls.skipException(msg) - - def setUp(cls): - super(TestAodhDriver, cls).setUp() - cls.alarms_client = cls.os_admin.alarms_client - cls.datasource_id = manager_congress.get_datasource_id( - cls.os_admin.congress_client, 'aodh') - - @decorators.attr(type='smoke') - def test_aodh_alarms_table(self): - # Add test alarm - rule = {'meter_name': 'cpu_util', - 'comparison_operator': 'gt', - 'threshold': 80.0, - 'period': 70} - self.alarms_client.create_alarm(name='test-alarm', - type='threshold', - enabled=False, - threshold_rule=rule) - alarms_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'alarms')['columns']) - alarms_id_col = next(i for i, c in enumerate(alarms_schema) - if c['name'] == 'alarm_id') - - def _check_data_table_aodh_alarms(): - # Fetch data from aodh each time, because this test may start - # before aodh has all the users. - alarms = self.alarms_client.list_alarms() - alarm_map = {} - for alarm in alarms: - alarm_map[alarm['alarm_id']] = alarm - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'alarms')) - rule_data = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'alarms.threshold_rule'))['results'] - - for row in results['results']: - try: - alarm_row = alarm_map[row['data'][alarms_id_col]] - except KeyError: - return False - for index in range(len(alarms_schema)): - if alarms_schema[index]['name'] == 'threshold_rule_id': - threshold_rule = alarm_row['threshold_rule'] - data = [r['data'] for r in rule_data - if r['data'][0] == row['data'][index]] - for l in data: - if str(threshold_rule[l[1]]) != str(l[2]): - return False - continue - - if (str(row['data'][index]) != - str(alarm_row[alarms_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true(func=_check_data_table_aodh_alarms, - duration=100, sleep_for=5): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('aodh'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_ceilometer.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_ceilometer.py deleted file mode 100644 index bb61dd38..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/test_ceilometer.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest import config -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from congress_tempest_tests.tests.scenario import manager_congress - - -CONF = config.CONF - - -class TestCeilometerDriver(manager_congress.ScenarioPolicyBase): - - @classmethod - def skip_checks(cls): - super(TestCeilometerDriver, cls).skip_checks() - if not getattr(CONF.service_available, 'ceilometer', False): - msg = ("%s skipped as ceilometer is not available" % - cls.__class__.__name__) - raise cls.skipException(msg) - - def setUp(cls): - super(TestCeilometerDriver, cls).setUp() - cls.telemetry_client = cls.os_admin.telemetry_client - cls.datasource_id = manager_congress.get_datasource_id( - cls.os_admin.congress_client, 'ceilometer') - - @decorators.attr(type='smoke') - def test_ceilometer_meters_table(self): - meter_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'meters')['columns']) - meter_id_col = next(i for i, c in enumerate(meter_schema) - if c['name'] == 'meter_id') - - def _check_data_table_ceilometer_meters(): - # Fetch data from ceilometer each time, because this test may start - # before ceilometer has all the users. - meters = self.telemetry_client.list_meters() - meter_map = {} - for meter in meters: - meter_map[meter['meter_id']] = meter - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'meters')) - for row in results['results']: - try: - meter_row = meter_map[row['data'][meter_id_col]] - except KeyError: - return False - for index in range(len(meter_schema)): - if (str(row['data'][index]) != - str(meter_row[meter_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_ceilometer_meters, - duration=100, sleep_for=5): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('ceilometer'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_cinder.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_cinder.py deleted file mode 100644 index 7f5806b3..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/test_cinder.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from tempest import clients -from tempest import config -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from congress_tempest_tests.tests.scenario import manager_congress - - -CONF = config.CONF -LOG = logging.getLogger(__name__) - - -class TestCinderDriver(manager_congress.ScenarioPolicyBase): - - @classmethod - def skip_checks(cls): - super(TestCinderDriver, cls).skip_checks() - if not (CONF.network.project_networks_reachable or - CONF.network.public_network_id): - msg = ('Either project_networks_reachable must be "true", or ' - 'public_network_id must be defined.') - cls.enabled = False - raise cls.skipException(msg) - - if not CONF.service_available.cinder: - skip_msg = ("%s skipped as cinder is not available" % cls.__name__) - raise cls.skipException(skip_msg) - - def setUp(cls): - super(TestCinderDriver, cls).setUp() - cls.os_primary = clients.Manager( - cls.os_admin.auth_provider.credentials) - cls.cinder = cls.os_primary.volumes_v2_client - cls.datasource_id = manager_congress.get_datasource_id( - cls.os_admin.congress_client, 'cinder') - res = cls.cinder.create_volume(size=1, description=None, name='v0', - consistencygroup_id=None, metadata={}) - LOG.debug('result of creating new volume: %s', res) - - @decorators.attr(type='smoke') - def test_cinder_volumes_table(self): - volume_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'volumes')['columns']) - volume_id_col = next(i for i, c in enumerate(volume_schema) - if c['name'] == 'id') - - def _check_data_table_cinder_volumes(): - # Fetch data from cinder each time, because this test may start - # before cinder has all the users. - volumes = self.cinder.list_volumes()['volumes'] - LOG.debug('cinder volume list: %s', volumes) - volumes_map = {} - for volume in volumes: - volumes_map[volume['id']] = volume - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'volumes')) - LOG.debug('congress cinder volumes table: %s', results) - # check that congress and cinder return the same volume IDs - rows_volume_id_set = set() - for row in results['results']: - rows_volume_id_set.add(row['data'][volume_id_col]) - if rows_volume_id_set != frozenset(volumes_map.keys()): - LOG.debug('volumes IDs mismatch') - return False - # FIXME(ekcs): the following code is broken because 'user_id' - # and 'description' fields do not appear in results provided by - # [tempest].os.volumes_client.list_volumes(). - # Detailed checking disabled for now. Re-enable when fixed. - # It appears the code was written for v1 volumes client but never - # worked. The problem was not evident because the list of volumes - # was empty. - # Additional adaptation is needed for v2 volumes client. 
- # for row in results['results']: - # try: - # volume_row = volumes_map[row['data'][volume_id_col]] - # except KeyError: - # return False - # for index in range(len(volume_schema)): - # if (str(row['data'][index]) != - # str(volume_row[volume_schema[index]['name']])): - # return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_cinder_volumes, - duration=100, sleep_for=5): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('cinder'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_doctor.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_doctor.py deleted file mode 100644 index 5c6c5671..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/test_doctor.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2016 NTT All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from congress_tempest_tests.tests.scenario import helper -from congress_tempest_tests.tests.scenario import manager_congress - - -class TestDoctorDriver(manager_congress.ScenarioPolicyBase): - def setUp(self): - super(TestDoctorDriver, self).setUp() - doctor_setting = { - 'name': 'doctor', - 'driver': 'doctor', - 'config': None, - } - self.client = self.os_admin.congress_client - - response = self.client.create_datasource(doctor_setting) - self.datasource_id = response['id'] - - def tearDown(self): - super(TestDoctorDriver, self).tearDown() - self.client.delete_datasource(self.datasource_id) - - def _list_datasource_rows(self, datasource, table): - return self.client.list_datasource_rows(datasource, table) - - @decorators.attr(type='smoke') - def test_doctor_event_tables(self): - rows = [ - { - "time": "2016-02-22T11:48:55Z", - "type": "compute.host.down", - "details": { - "hostname": "compute1", - "status": "down", - "monitor": "zabbix1", - "monitor_event_id": "111" - } - } - ] - - expected_row = [ - "2016-02-22T11:48:55Z", - "compute.host.down", - "compute1", - "down", - "zabbix1", - "111" - ] - - # Check if service is up - @helper.retry_on_exception - def _check_service(): - self.client.list_datasource_status(self.datasource_id) - return True - - if not test_utils.call_until_true(func=_check_service, - duration=60, sleep_for=1): - raise exceptions.TimeoutException("Doctor dataservice is not up") - - self.client.update_datasource_row(self.datasource_id, 'events', rows) - results = self._list_datasource_rows(self.datasource_id, 'events') - if len(results['results']) != 1: - error_msg = ('Unexpected additional rows are ' - 'inserted. 
row details: %s' % results['results'])
-            raise exceptions.InvalidStructure(error_msg)
-
-        if results['results'][0]['data'] != expected_row:
-            msg = ('inserted row %s is not expected row %s'
-                   % (results['results'][0]['data'], expected_row))
-            raise exceptions.InvalidStructure(msg)
diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_glancev2.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_glancev2.py
deleted file mode 100644
index 40b57e98..00000000
--- a/congress_tempest_tests/tests/scenario/congress_datasources/test_glancev2.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest import clients
-from tempest import config
-from tempest.lib.common.utils import test_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-from tempest import test
-
-from congress_tempest_tests.tests.scenario import manager_congress
-
-CONF = config.CONF
-
-
-class TestGlanceV2Driver(manager_congress.ScenarioPolicyBase):
-
-    @classmethod
-    def skip_checks(cls):
-        super(TestGlanceV2Driver, cls).skip_checks()
-        if not (CONF.network.project_networks_reachable
-                or CONF.network.public_network_id):
-            msg = ('Either project_networks_reachable must be "true", or '
-                   'public_network_id must be defined.')
-            cls.enabled = False
-            raise cls.skipException(msg)
-
-        if not CONF.service_available.glance:
-            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
-            raise cls.skipException(skip_msg)
-
-    def setUp(self):
-        super(TestGlanceV2Driver, self).setUp()
-        self.os_primary = clients.Manager(
-            self.os_admin.auth_provider.credentials)
-        self.glancev2 = self.os_primary.image_client_v2
-        self.datasource_id = manager_congress.get_datasource_id(
-            self.os_admin.congress_client, 'glancev2')
-
-    @decorators.attr(type='smoke')
-    @test.services('image')
-    def test_glancev2_images_table(self):
-        image_schema = (
-            self.os_admin.congress_client.show_datasource_table_schema(
-                self.datasource_id, 'images')['columns'])
-        image_id_col = next(i for i, c in enumerate(image_schema)
-                            if c['name'] == 'id')
-
-        def _check_data_table_glancev2_images():
-            # Fetch data from glance each time, because this test may start
-            # before glance has all the images.
-            images = self.glancev2.list_images()['images']
-            image_map = {}
-            for image in images:
-                image_map[image['id']] = image
-
-            results = (
-                self.os_admin.congress_client.list_datasource_rows(
-                    self.datasource_id, 'images'))
-            for row in results['results']:
-                try:
-                    image_row = image_map[row['data'][image_id_col]]
-                except KeyError:
-                    return False
-                for index in range(len(image_schema)):
-                    # glancev2 doesn't return kernel_id/ramdisk_id if
-                    # it isn't present, so check the glance image dict
-                    if ((image_schema[index]['name'] == 'kernel_id' and
-                            'kernel_id' not in image_row) or
-                            (image_schema[index]['name'] == 'ramdisk_id' and
-                             'ramdisk_id' not in image_row)):
-                        continue
-
-                    # FIXME(arosen): congress-server should retain the type
-                    # but doesn't today.
- if (str(row['data'][index]) != - str(image_row[image_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_glancev2_images, - duration=100, sleep_for=4): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - @test.services('image') - def test_glancev2_tags_table(self): - def _check_data_table_glance_images(): - # Fetch data from glance each time, because this test may start - # before glance has all the users. - images = self.glancev2.list_images()['images'] - image_tag_map = {} - for image in images: - image_tag_map[image['id']] = image['tags'] - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'tags')) - for row in results['results']: - image_id, tag = row['data'][0], row['data'][1] - glance_image_tags = image_tag_map.get(image_id) - if not glance_image_tags: - # congress had image that glance doesn't know about. - return False - if tag not in glance_image_tags: - # congress had a tag that wasn't on the image. - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_glance_images, - duration=100, sleep_for=5): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('glancev2'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_heat.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_heat.py deleted file mode 100644 index e335ae52..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/test_heat.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2017 VMware Corporation. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
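The cinder and glance table tests above share one skeleton: snapshot the backing service, fetch the congress rows, compare, and let test_utils.call_until_true poll until the datasource catches up or the deadline passes. Factored out, that skeleton might look like the following sketch; fetch_source_map and row_matches are hypothetical callables standing in for the per-driver logic, not helpers that exist in this tree.

    from tempest.lib.common.utils import test_utils
    from tempest.lib import exceptions


    def wait_for_table_convergence(client, datasource_id, table,
                                   fetch_source_map, row_matches,
                                   duration=100, sleep_for=5):
        """Poll until every congress row matches the backing service."""
        def _converged():
            # re-fetch on every attempt; the backing service keeps changing
            source_map = fetch_source_map()
            rows = client.list_datasource_rows(
                datasource_id, table)['results']
            return all(row_matches(row, source_map) for row in rows)

        if not test_utils.call_until_true(func=_converged, duration=duration,
                                          sleep_for=sleep_for):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")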
-
-from tempest import config
-from tempest.lib.common.utils import test_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-from congress_tempest_tests.tests.scenario import manager_congress
-
-
-CONF = config.CONF
-
-
-class TestHeatDriver(manager_congress.ScenarioPolicyBase):
-
-    @classmethod
-    def skip_checks(cls):
-        super(TestHeatDriver, cls).skip_checks()
-        if not getattr(CONF.service_available, 'heat_plugin', False):
-            msg = ("%s skipped because heat service is not configured" %
-                   cls.__name__)
-            raise cls.skipException(msg)
-
-    # TODO(testing): checks on correctness of data in updates
-
-    @decorators.attr(type='smoke')
-    def test_update_no_error(self):
-        if not test_utils.call_until_true(
-                func=lambda: self.check_datasource_no_error('heat'),
-                duration=30, sleep_for=5):
-            raise exceptions.TimeoutException('Datasource could not poll '
-                                              'without error.')
diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_ironic.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_ironic.py
deleted file mode 100644
index 0c0a9fd0..00000000
--- a/congress_tempest_tests/tests/scenario/congress_datasources/test_ironic.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2017 VMware Corporation. All rights reserved.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest import config
-from tempest.lib.common.utils import test_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-from congress_tempest_tests.tests.scenario import manager_congress
-
-
-CONF = config.CONF
-
-
-class TestIronicDriver(manager_congress.ScenarioPolicyBase):
-
-    @classmethod
-    def skip_checks(cls):
-        super(TestIronicDriver, cls).skip_checks()
-        if not getattr(CONF.service_available, 'ironic', False):
-            msg = ("%s skipped because ironic service is not configured" %
-                   cls.__name__)
-            raise cls.skipException(msg)
-
-    # TODO(testing): checks on correctness of data in updates
-
-    @decorators.attr(type='smoke')
-    def test_update_no_error(self):
-        if not test_utils.call_until_true(
-                func=lambda: self.check_datasource_no_error('ironic'),
-                duration=30, sleep_for=5):
-            raise exceptions.TimeoutException('Datasource could not poll '
-                                              'without error.')
diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_keystonev3.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_keystonev3.py
deleted file mode 100644
index 55cc11d2..00000000
--- a/congress_tempest_tests/tests/scenario/congress_datasources/test_keystonev3.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest import clients -from tempest import config -from tempest.lib.common.utils import test_utils -from tempest.lib import exceptions -from tempest import test - -from congress_tempest_tests.tests.scenario import manager_congress - -CONF = config.CONF - - -class TestKeystoneV3Driver(manager_congress.ScenarioPolicyBase): - - @classmethod - def skip_checks(cls): - super(TestKeystoneV3Driver, cls).skip_checks() - if not (CONF.network.project_networks_reachable or - CONF.network.public_network_id): - msg = ('Either project_networks_reachable must be "true", or ' - 'public_network_id must be defined.') - cls.enabled = False - raise cls.skipException(msg) - - def setUp(cls): - super(TestKeystoneV3Driver, cls).setUp() - cls.os_primary = clients.Manager( - cls.os_admin.auth_provider.credentials) - cls.keystone = cls.os_primary.identity_v3_client - cls.projects_client = cls.os_primary.projects_client - cls.domains_client = cls.os_primary.domains_client - cls.roles_client = cls.os_primary.roles_v3_client - cls.users_client = cls.os_primary.users_v3_client - cls.datasource_id = manager_congress.get_datasource_id( - cls.os_admin.congress_client, 'keystonev3') - - @test.attr(type='smoke') - def test_keystone_users_table(self): - user_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'users')['columns']) - user_id_col = next(i for i, c in enumerate(user_schema) - if c['name'] == 'id') - - def _check_data_table_keystone_users(): - # Fetch data from keystone each time, because this test may start - # before keystone has all the users. - users = self.users_client.list_users()['users'] - user_map = {} - for user in users: - user_map[user['id']] = user - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'users')) - for row in results['results']: - try: - user_row = user_map[row['data'][user_id_col]] - except KeyError: - return False - for index in range(len(user_schema)): - if ((user_schema[index]['name'] == 'default_project_id' and - 'default_project_id' not in user_row)): - # Keystone does not return the tenantId or email column - # if not present. - pass - elif (str(row['data'][index]) != - str(user_row[user_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_keystone_users, - duration=100, sleep_for=4): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @test.attr(type='smoke') - def test_keystone_roles_table(self): - role_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'roles')['columns']) - role_id_col = next(i for i, c in enumerate(role_schema) - if c['name'] == 'id') - - def _check_data_table_keystone_roles(): - # Fetch data from keystone each time, because this test may start - # before keystone has all the users. 
- roles = self.roles_client.list_roles()['roles'] - roles_map = {} - for role in roles: - roles_map[role['id']] = role - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'roles')) - for row in results['results']: - try: - role_row = roles_map[row['data'][role_id_col]] - except KeyError: - return False - for index in range(len(role_schema)): - if (str(row['data'][index]) != - str(role_row[role_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_keystone_roles, - duration=100, sleep_for=4): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @test.attr(type='smoke') - def test_keystone_domains_table(self): - domains_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'domains')['columns']) - domain_id_col = next(i for i, c in enumerate(domains_schema) - if c['name'] == 'id') - - def _check_data_table_keystone_domains(): - # Fetch data from keystone each time, because this test may start - # before keystone has all the users. - domains = self.domains_client.list_domains()['domains'] - domains_map = {} - for domain in domains: - domains_map[domain['id']] = domain - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'domains')) - for row in results['results']: - try: - domain_row = domains_map[row['data'][domain_id_col]] - except KeyError: - return False - for index in range(len(domains_schema)): - if (str(row['data'][index]) != - str(domain_row[domains_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_keystone_domains, - duration=100, sleep_for=4): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @test.attr(type='smoke') - def test_keystone_projects_table(self): - projects_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'projects')['columns']) - project_id_col = next(i for i, c in enumerate(projects_schema) - if c['name'] == 'id') - - def _check_data_table_keystone_projects(): - # Fetch data from keystone each time, because this test may start - # before keystone has all the users. 
- projects = self.projects_client.list_projects()['projects'] - projects_map = {} - for project in projects: - projects_map[project['id']] = project - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'projects')) - for row in results['results']: - try: - project_row = projects_map[row['data'][project_id_col]] - except KeyError: - return False - for index in range(len(projects_schema)): - if (str(row['data'][index]) != - str(project_row[projects_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true( - func=_check_data_table_keystone_projects, - duration=100, sleep_for=5): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @test.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('keystonev3'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_murano.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_murano.py deleted file mode 100644 index b64b5717..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/test_murano.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright (c) 2015 Hewlett-Packard. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
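All four keystone checks above locate the 'id' column by scanning the schema with next(...). When a test needs several columns, building a name-to-index map once is simpler; a small sketch over the same structure that show_datasource_table_schema returns:

    def column_index_map(schema_columns):
        # e.g. {'id': 0, 'name': 1, ...} for a congress table schema
        return {column['name']: index
                for index, column in enumerate(schema_columns)}

    # usage with the schema fetched above:
    #   cols = column_index_map(projects_schema)
    #   project_id = row['data'][cols['id']]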
-import random
-import string
-
-from tempest import config
-from tempest.lib.common.utils import test_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-from tempest import test
-
-from congress_tempest_tests.tests.scenario import manager_congress
-
-
-CONF = config.CONF
-
-
-class TestMuranoDriver(manager_congress.ScenarioPolicyBase):
-
-    @classmethod
-    def skip_checks(cls):
-        super(TestMuranoDriver, cls).skip_checks()
-        if not getattr(CONF.service_available, 'murano', False):
-            msg = ("%s skipped as murano is not available" %
-                   cls.__name__)
-            raise cls.skipException(msg)
-
-        if not (CONF.network.project_networks_reachable
-                or CONF.network.public_network_id):
-            msg = ('Either project_networks_reachable must be "true", or '
-                   'public_network_id must be defined.')
-            cls.enabled = False
-            raise cls.skipException(msg)
-
-    def setUp(self):
-        super(TestMuranoDriver, self).setUp()
-        self.congress_client = (
-            self.os_admin.congress_client)
-
-    @decorators.attr(type='smoke')
-    @test.services('compute')
-    def test_murano_predeployment(self):
-
-        def _delete_policy_rules(policy_name):
-            result = self.congress_client.list_policy_rules(
-                policy_name)['results']
-            for rule in result:
-                self.congress_client.delete_policy_rule(
-                    policy_name,
-                    rule['id'])
-
-        def _create_random_policy():
-            policy_name = "murano_%s" % ''.join(
-                random.choice(string.ascii_lowercase) for x in range(10))
-            body = {"name": policy_name}
-            resp = self.congress_client.create_policy(body)
-            self.addCleanup(_delete_policy_rules, resp['name'])
-            return resp['name']
-
-        def _create_datasource():
-            body = {"config": {"username": CONF.auth.admin_username,
-                               "tenant_name": CONF.auth.admin_project_name,
-                               "password": CONF.auth.admin_password,
-                               "auth_url": CONF.identity.uri},
-                    "driver": "murano",
-                    "name": "murano"}
-            try:
-                datasource = self.congress_client.create_datasource(body)['id']
-                self.addCleanup(self.congress_client.delete_datasource,
-                                datasource)
-            except exceptions.Conflict:
-                pass
-
-        def _create_rule(policy_name, rule):
-            self.congress_client.create_policy_rule(policy_name, rule)
-
-        def _simulate_policy(policy_name, query):
-            resp = self.congress_client.execute_policy_action(
-                policy_name,
-                "simulate",
-                False,
-                False,
-                query)
-            return resp['result']
-
-        rule1 = {
-            "rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
-            "flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
-            "equal(flavor, \"m1.medium\")"
-        }
-
-        rule2 = {
-            "rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
-            "flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
-            "equal(flavor, \"m1.small\")"
-        }
-
-        rule3 = {
-            "rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
-            "flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
-            "equal(flavor, \"m1.tiny\")"
-        }
-
-        rule4 = {
-            "rule": "murano_pending_envs(env_id) :- "
-            "murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
-            "murano:states(env_id, env_state),"
-            "equal(env_state, \"pending\")"
-        }
-
-        rule5 = {
-            "rule": "murano_instances(env_id, instance_id) :- "
-            "murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
-            "murano:objects(service_id, env_id, service_type),"
-            "murano:parent_types(service_id, \"io.murano.Object\"),"
-            "murano:parent_types(service_id, \"io.murano.Application\"),"
-            "murano:parent_types(service_id, service_type),"
-            "murano:objects(instance_id, service_id, instance_type),"
-            "murano:parent_types(instance_id,"
-            "\"io.murano.resources.Instance\"),"
-            "murano:parent_types(instance_id,
\"io.murano.Object\")," - "murano:parent_types(instance_id, instance_type)" - } - - rule6 = { - "rule": "murano_instance_flavors(instance_id, flavor) :- " - "murano:properties(instance_id, \"flavor\", flavor)" - } - - rule7 = { - "rule": "predeploy_error(env_id) :- " - "murano_pending_envs(env_id)," - "murano_instances(env_id, instance_id)," - "murano_instance_flavors(instance_id, flavor)," - "not allowed_flavors(flavor)" - } - - sim_query1 = { - "query": "predeploy_error(env_id)", - "action_policy": "action", - "sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\"," - "\"io.murano.Environment\") murano:states+(\"env_uuid\", " - "\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", " - "\"service_type\") murano:parent_types+(\"service_uuid\", " - "\"io.murano.Object\") murano:parent_types+(\"service_uuid\", " - "\"io.murano.Application\") murano:parent_types+(\"service_uuid\"," - "\"service_type\") murano:objects+(\"instance_uuid\", " - "\"service_uuid\", \"service_type\") murano:objects+(\"" - "instance_uuid\", \"service_uuid\", \"instance_type\") " - "murano:parent_types+(\"instance_uuid\", " - "\"io.murano.resources.Instance\") murano:parent_types+(\"" - "instance_uuid\", \"io.murano.Object\") murano:parent_types+(\"" - "instance_uuid\", \"instance_type\") murano:properties+(\"" - "instance_uuid\", \"flavor\", \"m1.small\")" - } - - sim_query2 = { - "query": "predeploy_error(env_id)", - "action_policy": "action", - "sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\"," - "\"io.murano.Environment\") murano:states+(\"env_uuid\", " - "\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", " - "\"service_type\") murano:parent_types+(\"service_uuid\", " - "\"io.murano.Object\") murano:parent_types+(\"service_uuid\", " - "\"io.murano.Application\") murano:parent_types+(\"service_uuid\"," - "\"service_type\") murano:objects+(\"instance_uuid\", " - "\"service_uuid\", \"service_type\") murano:objects+(\"" - "instance_uuid\", \"service_uuid\", \"instance_type\") " - "murano:parent_types+(\"instance_uuid\", " - "\"io.murano.resources.Instance\") murano:parent_types+(\"" - "instance_uuid\", \"io.murano.Object\") murano:parent_types+(\"" - "instance_uuid\", \"instance_type\") murano:properties+(\"" - "instance_uuid\", \"flavor\", \"m1.large\")" - } - - _create_datasource() - policy_name = _create_random_policy() - _create_rule(policy_name, rule1) - - _create_rule(policy_name, rule2) - _create_rule(policy_name, rule3) - _create_rule(policy_name, rule4) - _create_rule(policy_name, rule5) - _create_rule(policy_name, rule6) - _create_rule(policy_name, rule7) - result = _simulate_policy(policy_name, sim_query1) - self.assertEmpty(result) - result = _simulate_policy(policy_name, sim_query2) - self.assertEqual('predeploy_error("env_uuid")', result[0]) - - @decorators.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('murano'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_neutronv2.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_neutronv2.py deleted file mode 100644 index 5eda4216..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/test_neutronv2.py +++ /dev/null @@ -1,399 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import time - -from tempest import clients -from tempest import config -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions -from tempest import test - -from congress_tempest_tests.tests.scenario import helper -from congress_tempest_tests.tests.scenario import manager_congress - -CONF = config.CONF - - -class TestNeutronV2Driver(manager_congress.ScenarioPolicyBase): - - @classmethod - def skip_checks(cls): - super(TestNeutronV2Driver, cls).skip_checks() - if not (CONF.network.project_networks_reachable - or CONF.network.public_network_id): - msg = ('Either project_networks_reachable must be "true", or ' - 'public_network_id must be defined.') - cls.enabled = False - raise cls.skipException(msg) - - if not CONF.service_available.neutron: - skip_msg = ("%s skipped as neutron is not available" - % cls.__name__) - raise cls.skipException(skip_msg) - - def setUp(cls): - super(TestNeutronV2Driver, cls).setUp() - cls.os_primary = clients.Manager( - cls.os_admin.auth_provider.credentials) - cls.networks_client = cls.os_primary.networks_client - cls.subnets_client = cls.os_primary.subnets_client - cls.ports_client = cls.os_primary.ports_client - cls.security_groups_client = cls.os_primary.security_groups_client - cls.routers_client = cls.os_primary.routers_client - cls.datasource_id = manager_congress.get_datasource_id( - cls.os_admin.congress_client, 'neutronv2') - - @decorators.attr(type='smoke') - @test.services('network') - def test_neutronv2_networks_table(self): - - @helper.retry_on_exception - def _check_data(): - networks = self.networks_client.list_networks() - network_map = {} - for network in networks['networks']: - network_map[network['id']] = network - - client = self.os_admin.congress_client - client.request_refresh(self.datasource_id) - time.sleep(1) - - network_schema = (client.show_datasource_table_schema( - self.datasource_id, 'networks')['columns']) - - results = (client.list_datasource_rows( - self.datasource_id, 'networks')) - for row in results['results']: - network_row = network_map[row['data'][0]] - for index in range(len(network_schema)): - if (str(row['data'][index]) != - str(network_row[network_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true(func=_check_data, - duration=200, sleep_for=10): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - @test.services('network') - def test_neutronv2_ports_tables(self): - port_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'ports')['columns']) - - port_sec_binding_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'security_group_port_bindings')['columns']) - - fixed_ips_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'fixed_ips')['columns']) - - 
@helper.retry_on_exception - def _check_data(): - ports_from_neutron = self.ports_client.list_ports() - port_map = {} - for port in ports_from_neutron['ports']: - port_map[port['id']] = port - - client = self.os_admin.congress_client - client.request_refresh(self.datasource_id) - time.sleep(1) - - ports = (client.list_datasource_rows(self.datasource_id, 'ports')) - security_group_port_bindings = ( - client.list_datasource_rows( - self.datasource_id, 'security_group_port_bindings')) - fixed_ips = ( - client.list_datasource_rows(self.datasource_id, 'fixed_ips')) - - # Validate ports table - for row in ports['results']: - port_row = port_map[row['data'][0]] - for index in range(len(port_schema)): - if (str(row['data'][index]) != - str(port_row[port_schema[index]['name']])): - return False - - # validate security_group_port_bindings table - for row in security_group_port_bindings['results']: - port_row = port_map[row['data'][0]] - for index in range(len(port_sec_binding_schema)): - row_index = port_sec_binding_schema[index]['name'] - # Translate port_id -> id - if row_index == 'port_id': - if (str(row['data'][index]) != - str(port_row['id'])): - return False - elif row_index == 'security_group_id': - if (str(row['data'][index]) not in - port_row['security_groups']): - return False - - # validate fixed_ips - for row in fixed_ips['results']: - port_row = port_map[row['data'][0]] - for index in range(len(fixed_ips_schema)): - row_index = fixed_ips_schema[index]['name'] - if row_index in ['subnet_id', 'ip_address']: - if not port_row['fixed_ips']: - continue - for fixed_ip in port_row['fixed_ips']: - if row['data'][index] == fixed_ip[row_index]: - break - else: - # no subnet_id/ip_address match found - return False - return True - - if not test_utils.call_until_true(func=_check_data, - duration=200, sleep_for=10): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - @test.services('network') - def test_neutronv2_subnets_tables(self): - subnet_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'subnets')['columns']) - - host_routes_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'host_routes')['columns']) - - dns_nameservers_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'dns_nameservers')['columns']) - - allocation_pools_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'allocation_pools')['columns']) - - @helper.retry_on_exception - def _check_data(): - subnets_from_neutron = self.subnets_client.list_subnets() - subnet_map = {} - for subnet in subnets_from_neutron['subnets']: - subnet_map[subnet['id']] = subnet - - client = self.os_admin.congress_client - client.request_refresh(self.datasource_id) - time.sleep(1) - - subnets = ( - client.list_datasource_rows(self.datasource_id, 'subnets')) - host_routes = ( - client.list_datasource_rows(self.datasource_id, 'host_routes')) - dns_nameservers = ( - client.list_datasource_rows( - self.datasource_id, 'dns_nameservers')) - allocation_pools = ( - client.list_datasource_rows( - self.datasource_id, 'allocation_pools')) - # Validate subnets table - for row in subnets['results']: - subnet_row = subnet_map[row['data'][0]] - for index in range(len(subnet_schema)): - if (str(row['data'][index]) != - str(subnet_row[subnet_schema[index]['name']])): - return False - - # validate dns_nameservers - for row 
in dns_nameservers['results']: - subnet_row = subnet_map[row['data'][0]] - for index in range(len(dns_nameservers_schema)): - row_index = dns_nameservers_schema[index]['name'] - if row_index in ['dns_nameserver']: - if (row['data'][index] - not in subnet_row['dns_nameservers']): - return False - - # validate host_routes - for row in host_routes['results']: - subnet_row = subnet_map[row['data'][0]] - for index in range(len(host_routes_schema)): - row_index = host_routes_schema[index]['name'] - if row_index in ['destination', 'nexthop']: - if not subnet_row['host_routes']: - continue - for host_route in subnet_row['host_routes']: - if row['data'][index] == host_route[row_index]: - break - else: - # no destination/nexthop match found - return False - - # validate allocation_pools - for row in allocation_pools['results']: - subnet_row = subnet_map[row['data'][0]] - for index in range(len(allocation_pools_schema)): - row_index = allocation_pools_schema[index]['name'] - if row_index in ['start', 'end']: - if not subnet_row['allocation_pools']: - continue - for allocation_pool in subnet_row['allocation_pools']: - if (row['data'][index] == - allocation_pool[row_index]): - break - else: - # no destination/nexthop match found - return False - return True - - if not test_utils.call_until_true(func=_check_data, - duration=200, sleep_for=10): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - @test.services('network') - def test_neutronv2_routers_tables(self): - router_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'routers')['columns']) - - ext_gw_info_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'external_gateway_infos')['columns']) - - @helper.retry_on_exception - def _check_data(): - routers_from_neutron = self.routers_client.list_routers() - router_map = {} - for router in routers_from_neutron['routers']: - router_map[router['id']] = router - - client = self.os_admin.congress_client - client.request_refresh(self.datasource_id) - time.sleep(1) - - routers = ( - client.list_datasource_rows(self.datasource_id, 'routers')) - - ext_gw_info = ( - client.list_datasource_rows( - self.datasource_id, 'external_gateway_infos')) - - # Validate routers table - for row in routers['results']: - router_row = router_map[row['data'][0]] - for index in range(len(router_schema)): - if (str(row['data'][index]) != - str(router_row[router_schema[index]['name']])): - return False - - # validate external_gateway_infos - for row in ext_gw_info['results']: - router_ext_gw_info = ( - router_map[row['data'][0]]['external_gateway_info']) - # populate router_id - router_ext_gw_info['router_id'] = row['data'][0] - for index in range(len(ext_gw_info_schema)): - val = router_ext_gw_info[ext_gw_info_schema[index]['name']] - if (str(row['data'][index]) != str(val)): - return False - return True - - if not test_utils.call_until_true(func=_check_data, - duration=200, sleep_for=10): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - @test.services('network') - def test_neutronv2_security_groups_table(self): - sg_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'security_groups')['columns']) - - @helper.retry_on_exception - def _check_data(): - client = self.security_groups_client - security_groups_neutron = 
client.list_security_groups() - security_groups_map = {} - for security_group in security_groups_neutron['security_groups']: - security_groups_map[security_group['id']] = security_group - - client = self.os_admin.congress_client - client.request_refresh(self.datasource_id) - time.sleep(1) - - security_groups = ( - client.list_datasource_rows( - self.datasource_id, 'security_groups')) - - # Validate security_group table - for row in security_groups['results']: - sg_row = security_groups_map[row['data'][0]] - for index in range(len(sg_schema)): - if (str(row['data'][index]) != - str(sg_row[sg_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true(func=_check_data, - duration=200, sleep_for=10): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - @test.services('network') - def test_neutronv2_security_group_rules_table(self): - sgrs_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'security_group_rules')['columns']) - - @helper.retry_on_exception - def _check_data(): - client = self.security_groups_client - security_groups_neutron = client.list_security_groups() - sgrs_map = {} # security_group_rules - for sg in security_groups_neutron['security_groups']: - for sgr in sg['security_group_rules']: - sgrs_map[sgr['id']] = sgr - - client = self.os_admin.congress_client - client.request_refresh(self.datasource_id) - time.sleep(1) - - security_group_rules = ( - client.list_datasource_rows( - self.datasource_id, 'security_group_rules')) - - # Validate security_group_rules table - for row in security_group_rules['results']: - sg_rule_row = sgrs_map[row['data'][1]] - for index in range(len(sgrs_schema)): - if (str(row['data'][index]) != - str(sg_rule_row[sgrs_schema[index]['name']])): - return False - return True - - if not test_utils.call_until_true(func=_check_data, - duration=200, sleep_for=10): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('neutronv2'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_nova.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_nova.py deleted file mode 100644 index aca212c6..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/test_nova.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
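The subsidiary-table checks above (fixed_ips, host_routes, allocation_pools) lean on Python's for/else: the else branch runs only when the loop finishes without hitting break, which is how the tests detect that no candidate entry matched a congress row. The same idiom in isolation:

    def value_in_any(entries, key, wanted):
        # True if any dict in entries maps key to wanted
        for entry in entries:
            if entry[key] == wanted:
                break
        else:
            # reached only when the loop exhausted without break
            return False
        return True

    # e.g. value_in_any(port['fixed_ips'], 'ip_address', row_value)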
- -from tempest import config -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions -from tempest import test - -from congress_tempest_tests.tests.scenario import helper -from congress_tempest_tests.tests.scenario import manager_congress - -CONF = config.CONF - - -class TestNovaDriver(manager_congress.ScenarioPolicyBase): - - @classmethod - def skip_checks(cls): - super(TestNovaDriver, cls).skip_checks() - if not CONF.service_available.nova: - skip_msg = ("%s skipped as nova is not available" % cls.__name__) - raise cls.skipException(skip_msg) - - if not (CONF.network.project_networks_reachable - or CONF.network.public_network_id): - msg = ('Either project_networks_reachable must be "true", or ' - 'public_network_id must be defined.') - cls.enabled = False - raise cls.skipException(msg) - - def setUp(self): - super(TestNovaDriver, self).setUp() - self.keypairs = {} - self.servers = [] - self.datasource_id = manager_congress.get_datasource_id( - self.os_admin.congress_client, 'nova') - - @decorators.attr(type='smoke') - @test.services('compute', 'network') - def test_nova_datasource_driver_servers(self): - self._setup_network_and_servers() - - server_schema = ( - self.os_admin.congress_client.show_datasource_table_schema( - self.datasource_id, 'servers')['columns']) - # Convert some of the column names. - - def convert_col(col): - if col == 'host_id': - return 'hostId' - elif col == 'image_id': - return 'image' - elif col == 'flavor_id': - return 'flavor' - elif col == 'zone': - return 'OS-EXT-AZ:availability_zone' - elif col == 'host_name': - return 'OS-EXT-SRV-ATTR:hypervisor_hostname' - else: - return col - - keys = [convert_col(c['name']) for c in server_schema] - - @helper.retry_on_exception - def _check_data_table_nova_servers(): - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'servers')) - for row in results['results']: - match = True - for index in range(len(keys)): - if keys[index] in ['image', 'flavor']: - val = self.servers[0][keys[index]]['id'] - # Test servers created doesn't have this attribute, - # so ignoring the same in tempest tests. - elif keys[index] in \ - ['OS-EXT-SRV-ATTR:hypervisor_hostname']: - continue - else: - val = self.servers[0][keys[index]] - - if row['data'][index] != val: - match = False - break - if match: - return True - return False - - if not test_utils.call_until_true(func=_check_data_table_nova_servers, - duration=100, sleep_for=5): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - @test.services('compute', 'network') - def test_nova_datasource_driver_flavors(self): - - @helper.retry_on_exception - def _check_data_table_nova_flavors(): - # Fetch data from nova each time, because this test may start - # before nova has all the users. - flavors = self.flavors_client.list_flavors(detail=True) - flavor_id_map = {} - for flavor in flavors['flavors']: - flavor_id_map[flavor['id']] = flavor - - results = ( - self.os_admin.congress_client.list_datasource_rows( - self.datasource_id, 'flavors')) - # TODO(alexsyip): Not sure what the following OS-FLV-EXT-DATA: - # prefix is for. 
- keys = ['id', 'name', 'vcpus', 'ram', 'disk', - 'OS-FLV-EXT-DATA:ephemeral', 'rxtx_factor'] - for row in results['results']: - match = True - try: - flavor_row = flavor_id_map[row['data'][0]] - except KeyError: - return False - for index in range(len(keys)): - if row['data'][index] != flavor_row[keys[index]]: - match = False - break - if match: - return True - return False - - if not test_utils.call_until_true(func=_check_data_table_nova_flavors, - duration=100, sleep_for=5): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - @decorators.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('nova'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_datasources/test_swift.py b/congress_tempest_tests/tests/scenario/congress_datasources/test_swift.py deleted file mode 100644 index d2aaa1ad..00000000 --- a/congress_tempest_tests/tests/scenario/congress_datasources/test_swift.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2017 VMware Inc. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest import config -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from congress_tempest_tests.tests.scenario import manager_congress - - -CONF = config.CONF - - -class TestSwiftDriver(manager_congress.ScenarioPolicyBase): - - @classmethod - def skip_checks(cls): - super(TestSwiftDriver, cls).skip_checks() - if not CONF.service_available.swift: - msg = ("%s skipped because swift service is not configured" % - cls.__class__.__name__) - raise cls.skipException(msg) - - # TODO(testing): checks on correctness of data in updates - - # swift driver experiences auth error in test - @decorators.skip_because(bug="980688") - @decorators.attr(type='smoke') - def test_update_no_error(self): - if not test_utils.call_until_true( - func=lambda: self.check_datasource_no_error('swift'), - duration=30, sleep_for=5): - raise exceptions.TimeoutException('Datasource could not poll ' - 'without error.') diff --git a/congress_tempest_tests/tests/scenario/congress_ha/__init__.py b/congress_tempest_tests/tests/scenario/congress_ha/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/congress_tempest_tests/tests/scenario/congress_ha/test_ha.py b/congress_tempest_tests/tests/scenario/congress_ha/test_ha.py deleted file mode 100644 index 9fd321ed..00000000 --- a/congress_tempest_tests/tests/scenario/congress_ha/test_ha.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import socket -import subprocess -import tempfile - -from oslo_log import log as logging -from tempest.common import credentials_factory as credentials -from tempest import config -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions -from tempest import manager as tempestmanager -from urllib3 import exceptions as urllib3_exceptions - -from congress_tempest_tests.services.policy import policy_client -from congress_tempest_tests.tests.scenario import helper -from congress_tempest_tests.tests.scenario import manager_congress - -CONF = config.CONF -LOG = logging.getLogger(__name__) - - -class TestHA(manager_congress.ScenarioPolicyBase): - - def setUp(self): - super(TestHA, self).setUp() - self.keypairs = {} - self.servers = [] - self.replicas = {} - self.services_client = self.os_admin.identity_services_v3_client - self.endpoints_client = self.os_admin.endpoints_v3_client - self.client = self.admin_manager.congress_client - - def _prepare_replica(self, port_num): - replica_url = "http://127.0.0.1:%d" % port_num - resp = self.services_client.create_service( - name='congressha', - type=CONF.congressha.replica_type, - description='policy ha service') - self.replica_service_id = resp['service']['id'] - resp = self.endpoints_client.create_endpoint( - service_id=self.replica_service_id, - region=CONF.identity.region, - interface='public', - url=replica_url) - self.replica_endpoint_id = resp['endpoint']['id'] - - def _cleanup_replica(self): - self.endpoints_client.delete_endpoint(self.replica_endpoint_id) - self.services_client.delete_service(self.replica_service_id) - - def start_replica(self, port_num): - self._prepare_replica(port_num) - f = tempfile.NamedTemporaryFile(mode='w', suffix='.conf', - prefix='congress%d-' % port_num, - dir='/tmp', delete=False) - conf_file = f.name - template = open('/etc/congress/congress.conf') - conf = template.read() - - # Add 'bind_port' and 'datasource_sync_period' to conf file. 
- index = conf.find('[DEFAULT]') + len('[DEFAULT]\n') - conf = (conf[:index] + - 'bind_port = %d\n' % port_num + - conf[index:]) - # set datasource sync period interval to 5 - conf = conf.replace('datasource_sync_period = 30', - 'datasource_sync_period = 5') - sindex = conf.find('signing_dir') - conf = conf[:sindex] + '#' + conf[sindex:] - conf = conf + '\n[dse]\nbus_id = replica-node\n' - LOG.debug("Configuration file for replica: %s\n", conf) - f.write(conf) - f.close() - - # start all services on replica node - api = self.start_service('api', conf_file) - pe = self.start_service('policy-engine', conf_file) - data = self.start_service('datasources', conf_file) - - assert port_num not in self.replicas - LOG.debug("successfully started replica services\n") - self.replicas[port_num] = ([api, pe, data], conf_file) - - def start_service(self, name, conf_file): - port_num = CONF.congressha.replica_port - out = tempfile.NamedTemporaryFile( - mode='w', suffix='.out', - prefix='congress-%s-%d-' % (name, port_num), - dir='/tmp', delete=False) - - err = tempfile.NamedTemporaryFile( - mode='w', suffix='.err', - prefix='congress-%s-%d-' % (name, port_num), - dir='/tmp', delete=False) - - service = '--' + name - node = name + '-replica-node' - args = ['/usr/bin/python', 'bin/congress-server', service, - '--node-id', node, '--config-file', conf_file] - - p = subprocess.Popen(args, stdout=out, stderr=err, - cwd=helper.root_path()) - return p - - def stop_replica(self, port_num): - procs, conf_file = self.replicas[port_num] - # Using proc.terminate() will block at proc.wait(), no idea why yet - # kill all processes - for p in procs: - p.kill() - p.wait() - - os.unlink(conf_file) - self.replicas[port_num] = (None, conf_file) - self._cleanup_replica() - - def create_client(self, client_type): - creds = credentials.get_configured_admin_credentials('identity_admin') - auth_prov = tempestmanager.get_auth_provider(creds) - return policy_client.PolicyClient( - auth_prov, client_type, - CONF.identity.region) - - def _check_replica_server_status(self, client): - try: - LOG.debug("Check replica server status") - client.list_policy() - LOG.debug("replica server ready") - return True - except exceptions.Unauthorized: - LOG.debug("connection refused") - return False - except (socket.error, urllib3_exceptions.MaxRetryError): - LOG.debug("Replica server not ready") - return False - except Exception: - raise - return False - - def find_fake(self, client): - datasources = client.list_datasources() - for r in datasources['results']: - if r['name'] == 'fake': - LOG.debug('existing fake driver: %s', str(r['id'])) - return r['id'] - return None - - def _check_resource_exists(self, client, resource): - try: - body = None - if resource == 'datasource': - LOG.debug("Check datasource exists") - body = self.client.list_datasource_status('fake') - else: - LOG.debug("Check policy exists") - body = self.client.list_policy_status('fake') - - LOG.debug("resource status: %s", str(body)) - - except exceptions.NotFound: - LOG.debug("resource 'fake' not found") - return False - return True - - def _check_resource_missing(self, client, resource): - return not self._check_resource_exists(client, resource) - - def create_fake(self, client): - # Create fake datasource if it does not exist. Returns the - # fake datasource id. 
- fake_id = self.find_fake(client) - if fake_id: - return fake_id - - item = {'id': None, - 'name': 'fake', - 'driver': 'fake_datasource', - 'config': {"username": "fakeu", - "tenant_name": "faket", - "password": "fakep", - "auth_url": "http://127.0.0.1:5000/v2"}, - 'description': 'bar', - 'enabled': True} - ret = client.create_datasource(item) - LOG.debug('created fake driver: %s', str(ret['id'])) - return ret['id'] - - @decorators.attr(type='smoke') - def test_datasource_db_sync_add_remove(self): - # Verify that a replica adds a datasource when a datasource - # appears in the database. - replica_server = False - try: - # Check fake if exists. else create - fake_id = self.create_fake(self.client) - - # Start replica - self.start_replica(CONF.congressha.replica_port) - - replica_client = self.create_client(CONF.congressha.replica_type) - - # Check replica server status - if not test_utils.call_until_true( - func=lambda: self._check_replica_server_status( - replica_client), - duration=60, sleep_for=1): - raise exceptions.TimeoutException("Replica Server not ready") - # Relica server is up - replica_server = True - - # primary server might sync later than replica server due to - # diff in datasource sync interval(P-30, replica-5). So checking - # replica first - - # Verify that replica server synced fake dataservice and policy - if not test_utils.call_until_true( - func=lambda: self._check_resource_exists( - replica_client, 'datasource'), - duration=60, sleep_for=1): - raise exceptions.TimeoutException( - "replica doesn't have fake dataservice, data sync failed") - if not test_utils.call_until_true( - func=lambda: self._check_resource_exists( - replica_client, 'policy'), - duration=60, sleep_for=1): - raise exceptions.TimeoutException( - "replica doesn't have fake policy, policy sync failed") - - # Verify that primary server synced fake dataservice and policy - if not test_utils.call_until_true( - func=lambda: self._check_resource_exists( - self.client, 'datasource'), - duration=90, sleep_for=1): - raise exceptions.TimeoutException( - "primary doesn't have fake dataservice, data sync failed") - if not test_utils.call_until_true( - func=lambda: self._check_resource_exists( - self.client, 'policy'), - duration=90, sleep_for=1): - raise exceptions.TimeoutException( - "primary doesn't have fake policy, policy sync failed") - - # Remove fake from primary server instance. 
-            LOG.debug("removing fake datasource %s", str(fake_id))
-            self.client.delete_datasource(fake_id)
-
-            # Verify that replica server has no fake datasource and fake policy
-            if not test_utils.call_until_true(
-                    func=lambda: self._check_resource_missing(
-                        replica_client, 'datasource'),
-                    duration=60, sleep_for=1):
-                raise exceptions.TimeoutException(
-                    "replica still has fake dataservice, sync failed")
-            if not test_utils.call_until_true(
-                    func=lambda: self._check_resource_missing(
-                        replica_client, 'policy'),
-                    duration=60, sleep_for=1):
-                raise exceptions.TimeoutException(
-                    "replica still has fake policy, policy synchronizer "
-                    "failed")
-
-            LOG.debug("removed fake datasource from replica instance")
-
-            # Verify that primary server has no fake datasource and fake policy
-            if not test_utils.call_until_true(
-                    func=lambda: self._check_resource_missing(
-                        self.client, 'datasource'),
-                    duration=90, sleep_for=1):
-                raise exceptions.TimeoutException(
-                    "primary still has fake dataservice, sync failed")
-            if not test_utils.call_until_true(
-                    func=lambda: self._check_resource_missing(
-                        self.client, 'policy'),
-                    duration=90, sleep_for=1):
-                raise exceptions.TimeoutException(
-                    "primary still has fake policy, policy synchronizer "
-                    "failed")
-
-            LOG.debug("removed fake datasource from primary instance")
-
-        finally:
-            if replica_server:
-                self.stop_replica(CONF.congressha.replica_port)
diff --git a/congress_tempest_tests/tests/scenario/helper.py b/congress_tempest_tests/tests/scenario/helper.py
deleted file mode 100644
index d99a9fb0..00000000
--- a/congress_tempest_tests/tests/scenario/helper.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) 2015 Hewlett Packard. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import os
-
-import tenacity
-
-
-@tenacity.retry(stop=tenacity.stop_after_attempt(20),
-                wait=tenacity.wait_fixed(1))
-def retry_check_function_return_value(f, expected_value, error_msg=None):
-    """Check if function f returns expected value."""
-    if not error_msg:
-        error_msg = 'Expected value "%s" not found' % expected_value
-    r = f()
-    if r != expected_value:
-        raise Exception(error_msg)
-
-
-def retry_on_exception(f):
-    """Decorator that turns any exception raised by f into a False return,
-    so that test_utils.call_until_true keeps polling instead of aborting."""
-    def wrapper(*args, **kwargs):
-        try:
-            return f(*args, **kwargs)
-        except Exception:
-            return False
-    return wrapper
-
-
-def root_path():
-    """Return path to root of source code."""
-    x = os.path.realpath(__file__)
-    x, y = os.path.split(x)  # drop "helper.py"
-    x, y = os.path.split(x)  # drop "scenario"
-    x, y = os.path.split(x)  # drop "tests"
-    x, y = os.path.split(x)  # drop "congress_tempest_tests"
-    return x
diff --git a/congress_tempest_tests/tests/scenario/manager.py b/congress_tempest_tests/tests/scenario/manager.py
deleted file mode 100644
index 9b033534..00000000
--- a/congress_tempest_tests/tests/scenario/manager.py
+++ /dev/null
@@ -1,968 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
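helper.py above gives the tests two retry flavors: retry_check_function_return_value retries hard via tenacity and ultimately raises, while retry_on_exception converts exceptions into a False return, matching the boolean protocol that test_utils.call_until_true expects from its func. A usage sketch; the checker body and the client variable are stand-ins, not code from this tree:

    from tempest.lib.common.utils import test_utils

    from congress_tempest_tests.tests.scenario import helper


    @helper.retry_on_exception
    def _fake_datasource_listed():
        # raises while the server is still starting; the decorator turns
        # that into False, so call_until_true simply polls again
        return any(r['name'] == 'fake'
                   for r in client.list_datasources()['results'])

    test_utils.call_until_true(func=_fake_datasource_listed,
                               duration=60, sleep_for=1)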
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# local congress copy of upstream file. Maintained temporarily while -# file undergoes refactoring upstream. - -import subprocess - -import netaddr -from oslo_log import log -from oslo_utils import netutils - -from tempest.common import compute -from tempest.common import image as common_image -from tempest.common.utils.linux import remote_client -from tempest.common.utils import net_utils -from tempest.common import waiters -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib.common.utils import test_utils -from tempest.lib import exceptions as lib_exc -import tempest.test - -CONF = config.CONF - -LOG = log.getLogger(__name__) - - -class ScenarioTest(tempest.test.BaseTestCase): - """Base class for scenario tests. Uses tempest own clients. """ - - credentials = ['primary'] - - @classmethod - def setup_clients(cls): - super(ScenarioTest, cls).setup_clients() - # Clients (in alphabetical order) - cls.flavors_client = cls.os_primary.flavors_client - cls.compute_floating_ips_client = ( - cls.os_primary.compute_floating_ips_client) - if CONF.service_available.glance: - # Check if glance v1 is available to determine which client to use. - if CONF.image_feature_enabled.api_v1: - cls.image_client = cls.os_primary.image_client - elif CONF.image_feature_enabled.api_v2: - cls.image_client = cls.os_primary.image_client_v2 - else: - raise lib_exc.InvalidConfiguration( - 'Either api_v1 or api_v2 must be True in ' - '[image-feature-enabled].') - # Compute image client - cls.compute_images_client = cls.os_primary.compute_images_client - cls.keypairs_client = cls.os_primary.keypairs_client - # Nova security groups client - cls.compute_security_groups_client = ( - cls.os_primary.compute_security_groups_client) - cls.compute_security_group_rules_client = ( - cls.os_primary.compute_security_group_rules_client) - cls.servers_client = cls.os_primary.servers_client - cls.interface_client = cls.os_primary.interfaces_client - # Neutron network client - cls.networks_client = cls.os_primary.networks_client - cls.ports_client = cls.os_primary.ports_client - cls.routers_client = cls.os_primary.routers_client - cls.subnets_client = cls.os_primary.subnets_client - cls.floating_ips_client = cls.os_primary.floating_ips_client - cls.security_groups_client = cls.os_primary.security_groups_client - cls.security_group_rules_client = ( - cls.os_primary.security_group_rules_client) - - if CONF.volume_feature_enabled.api_v2: - cls.volumes_client = cls.os_primary.volumes_v2_client - cls.snapshots_client = cls.os_primary.snapshots_v2_client - else: - cls.volumes_client = cls.os_primary.volumes_client - cls.snapshots_client = cls.os_primary.snapshots_client - - # ## Test functions library - # - # The create_[resource] functions only return body and discard the - # resp part which is not used in scenario tests - - def _create_port(self, network_id, client=None, namestart='port-quotatest', - **kwargs): - if not client: - client = 
self.ports_client - name = data_utils.rand_name(namestart) - result = client.create_port( - name=name, - network_id=network_id, - **kwargs) - self.assertIsNotNone(result, 'Unable to allocate port') - port = result['port'] - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - client.delete_port, port['id']) - return port - - def create_keypair(self, client=None): - if not client: - client = self.keypairs_client - name = data_utils.rand_name(self.__class__.__name__) - # We don't need to create a keypair by pubkey in scenario - body = client.create_keypair(name=name) - self.addCleanup(client.delete_keypair, name) - return body['keypair'] - - def create_server(self, name=None, image_id=None, flavor=None, - validatable=False, wait_until='ACTIVE', - clients=None, **kwargs): - """Wrapper utility that returns a test server. - - This wrapper utility calls the common create test server and - returns a test server. The purpose of this wrapper is to minimize - the impact on the code of the tests already using this - function. - """ - - # NOTE(jlanoux): As a first step, ssh checks in the scenario - # tests need to be run regardless of the run_validation and - # validatable parameters and thus until the ssh validation job - # becomes voting in CI. The test resources management and IP - # association are taken care of in the scenario tests. - # Therefore, the validatable parameter is set to false in all - # those tests. In this way create_server just return a standard - # server and the scenario tests always perform ssh checks. - - # Needed for the cross_tenant_traffic test: - if clients is None: - clients = self.os_primary - - if name is None: - name = data_utils.rand_name(self.__class__.__name__ + "-server") - - vnic_type = CONF.network.port_vnic_type - - # If vnic_type is configured create port for - # every network - if vnic_type: - ports = [] - - create_port_body = {'binding:vnic_type': vnic_type, - 'namestart': 'port-smoke'} - if kwargs: - # Convert security group names to security group ids - # to pass to create_port - if 'security_groups' in kwargs: - security_groups = \ - clients.security_groups_client.list_security_groups( - ).get('security_groups') - sec_dict = dict([(s['name'], s['id']) - for s in security_groups]) - - sec_groups_names = [s['name'] for s in kwargs.pop( - 'security_groups')] - security_groups_ids = [sec_dict[s] - for s in sec_groups_names] - - if security_groups_ids: - create_port_body[ - 'security_groups'] = security_groups_ids - networks = kwargs.pop('networks', []) - else: - networks = [] - - # If there are no networks passed to us we look up - # for the project's private networks and create a port. 
- # The same behaviour as we would expect when passing - # the call to the clients with no networks - if not networks: - networks = clients.networks_client.list_networks( - **{'router:external': False, 'fields': 'id'})['networks'] - - # It's net['uuid'] if networks come from kwargs - # and net['id'] if they come from - # clients.networks_client.list_networks - for net in networks: - net_id = net.get('uuid', net.get('id')) - if 'port' not in net: - port = self._create_port(network_id=net_id, - client=clients.ports_client, - **create_port_body) - ports.append({'port': port['id']}) - else: - ports.append({'port': net['port']}) - if ports: - kwargs['networks'] = ports - self.ports = ports - - tenant_network = self.get_tenant_network() - - body, servers = compute.create_test_server( - clients, - tenant_network=tenant_network, - wait_until=wait_until, - name=name, flavor=flavor, - image_id=image_id, **kwargs) - - self.addCleanup(waiters.wait_for_server_termination, - clients.servers_client, body['id']) - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - clients.servers_client.delete_server, body['id']) - server = clients.servers_client.show_server(body['id'])['server'] - return server - - def create_volume(self, size=None, name=None, snapshot_id=None, - imageRef=None, volume_type=None): - if size is None: - size = CONF.volume.volume_size - if imageRef: - image = self.compute_images_client.show_image(imageRef)['image'] - min_disk = image.get('minDisk') - size = max(size, min_disk) - if name is None: - name = data_utils.rand_name(self.__class__.__name__ + "-volume") - kwargs = {'display_name': name, - 'snapshot_id': snapshot_id, - 'imageRef': imageRef, - 'volume_type': volume_type, - 'size': size} - volume = self.volumes_client.create_volume(**kwargs)['volume'] - - self.addCleanup(self.volumes_client.wait_for_resource_deletion, - volume['id']) - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - self.volumes_client.delete_volume, volume['id']) - - # NOTE(e0ne): Cinder API v2 uses name instead of display_name - if 'display_name' in volume: - self.assertEqual(name, volume['display_name']) - else: - self.assertEqual(name, volume['name']) - waiters.wait_for_volume_resource_status(self.volumes_client, - volume['id'], 'available') - # The volume retrieved on creation has a non-up-to-date status. - # Retrieval after it becomes active ensures correct details. - volume = self.volumes_client.show_volume(volume['id'])['volume'] - return volume - - def _create_loginable_secgroup_rule(self, secgroup_id=None): - _client = self.compute_security_groups_client - _client_rules = self.compute_security_group_rules_client - if secgroup_id is None: - sgs = _client.list_security_groups()['security_groups'] - for sg in sgs: - if sg['name'] == 'default': - secgroup_id = sg['id'] - - # These rules are intended to permit inbound ssh and icmp - # traffic from all sources, so no group_id is provided. - # Setting a group_id would only permit traffic from ports - # belonging to the same security group. 
- rulesets = [ - { - # ssh - 'ip_protocol': 'tcp', - 'from_port': 22, - 'to_port': 22, - 'cidr': '0.0.0.0/0', - }, - { - # ping - 'ip_protocol': 'icmp', - 'from_port': -1, - 'to_port': -1, - 'cidr': '0.0.0.0/0', - } - ] - rules = list() - for ruleset in rulesets: - sg_rule = _client_rules.create_security_group_rule( - parent_group_id=secgroup_id, **ruleset)['security_group_rule'] - rules.append(sg_rule) - return rules - - def _create_security_group(self): - # Create security group - sg_name = data_utils.rand_name(self.__class__.__name__) - sg_desc = sg_name + " description" - secgroup = self.compute_security_groups_client.create_security_group( - name=sg_name, description=sg_desc)['security_group'] - self.assertEqual(secgroup['name'], sg_name) - self.assertEqual(secgroup['description'], sg_desc) - self.addCleanup( - test_utils.call_and_ignore_notfound_exc, - self.compute_security_groups_client.delete_security_group, - secgroup['id']) - - # Add rules to the security group - self._create_loginable_secgroup_rule(secgroup['id']) - - return secgroup - - def get_remote_client(self, ip_address, username=None, private_key=None): - """Get a SSH client to a remote server - - @param ip_address the server floating or fixed IP address to use - for ssh validation - @param username name of the Linux account on the remote server - @param private_key the SSH private key to use - @return a RemoteClient object - """ - - if username is None: - username = CONF.validation.image_ssh_user - # Set this with 'keypair' or others to log in with keypair or - # username/password. - if CONF.validation.auth_method == 'keypair': - password = None - if private_key is None: - private_key = self.keypair['private_key'] - else: - password = CONF.validation.image_ssh_password - private_key = None - linux_client = remote_client.RemoteClient(ip_address, username, - pkey=private_key, - password=password) - try: - linux_client.validate_authentication() - except Exception as e: - message = ('Initializing SSH connection to %(ip)s failed. ' - 'Error: %(error)s' % {'ip': ip_address, - 'error': e}) - caller = test_utils.find_test_caller() - if caller: - message = '(%s) %s' % (caller, message) - LOG.exception(message) - self._log_console_output() - raise - - return linux_client - - def _image_create(self, name, fmt, path, - disk_format=None, properties=None): - if properties is None: - properties = {} - name = data_utils.rand_name('%s-' % name) - params = { - 'name': name, - 'container_format': fmt, - 'disk_format': disk_format or fmt, - } - if CONF.image_feature_enabled.api_v1: - params['is_public'] = 'False' - params['properties'] = properties - params = {'headers': common_image.image_meta_to_headers(**params)} - else: - params['visibility'] = 'private' - # Additional properties are flattened out in the v2 API. 
- params.update(properties) - body = self.image_client.create_image(**params) - image = body['image'] if 'image' in body else body - self.addCleanup(self.image_client.delete_image, image['id']) - self.assertEqual("queued", image['status']) - with open(path, 'rb') as image_file: - if CONF.image_feature_enabled.api_v1: - self.image_client.update_image(image['id'], data=image_file) - else: - self.image_client.store_image_file(image['id'], image_file) - return image['id'] - - def _log_console_output(self, servers=None): - if not CONF.compute_feature_enabled.console_output: - LOG.debug('Console output not supported, cannot log') - return - if not servers: - servers = self.servers_client.list_servers() - servers = servers['servers'] - for server in servers: - try: - console_output = self.servers_client.get_console_output( - server['id'])['output'] - LOG.debug('Console output for %s\nbody=\n%s', - server['id'], console_output) - except lib_exc.NotFound: - LOG.debug("Server %s disappeared(deleted) while looking " - "for the console log", server['id']) - - def _log_net_info(self, exc): - # network debug is called as part of ssh init - if not isinstance(exc, lib_exc.SSHTimeout): - LOG.debug('Network information on a devstack host') - - def ping_ip_address(self, ip_address, should_succeed=True, - ping_timeout=None, mtu=None): - timeout = ping_timeout or CONF.validation.ping_timeout - cmd = ['ping', '-c1', '-w1'] - - if mtu: - cmd += [ - # don't fragment - '-M', 'do', - # ping receives just the size of ICMP payload - '-s', str(net_utils.get_ping_payload_size(mtu, 4)) - ] - cmd.append(ip_address) - - def ping(): - proc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - proc.communicate() - - return (proc.returncode == 0) == should_succeed - - caller = test_utils.find_test_caller() - LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the' - ' expected result is %(should_succeed)s', { - 'caller': caller, 'ip': ip_address, 'timeout': timeout, - 'should_succeed': - 'reachable' if should_succeed else 'unreachable' - }) - result = test_utils.call_until_true(ping, timeout, 1) - LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the ' - 'ping result is %(result)s', { - 'caller': caller, 'ip': ip_address, 'timeout': timeout, - 'result': 'expected' if result else 'unexpected' - }) - return result - - def check_vm_connectivity(self, ip_address, - username=None, - private_key=None, - should_connect=True, - mtu=None): - """Check server connectivity - - :param ip_address: server to test against - :param username: server's ssh username - :param private_key: server's ssh private key to be used - :param should_connect: True/False indicates positive/negative test - positive - attempt ping and ssh - negative - attempt ping and fail if succeed - :param mtu: network MTU to use for connectivity validation - - :raises: AssertError if the result of the connectivity check does - not match the value of the should_connect param - """ - if should_connect: - msg = "Timed out waiting for %s to become reachable" % ip_address - else: - msg = "ip address %s is reachable" % ip_address - self.assertTrue(self.ping_ip_address(ip_address, - should_succeed=should_connect, - mtu=mtu), - msg=msg) - if should_connect: - # no need to check ssh for negative connectivity - self.get_remote_client(ip_address, username, private_key) - - -class NetworkScenarioTest(ScenarioTest): - """Base class for network scenario tests. 
- - This class provide helpers for network scenario tests, using the neutron - API. Helpers from ancestor which use the nova network API are overridden - with the neutron API. - - This Class also enforces using Neutron instead of novanetwork. - Subclassed tests will be skipped if Neutron is not enabled - - """ - - credentials = ['primary', 'admin'] - - @classmethod - def skip_checks(cls): - super(NetworkScenarioTest, cls).skip_checks() - if not CONF.service_available.neutron: - raise cls.skipException('Neutron not available') - - def _create_network(self, networks_client=None, - tenant_id=None, - namestart='network-smoke-', - port_security_enabled=True): - if not networks_client: - networks_client = self.networks_client - if not tenant_id: - tenant_id = networks_client.tenant_id - name = data_utils.rand_name(namestart) - network_kwargs = dict(name=name, tenant_id=tenant_id) - # Neutron disables port security by default so we have to check the - # config before trying to create the network with port_security_enabled - if CONF.network_feature_enabled.port_security: - network_kwargs['port_security_enabled'] = port_security_enabled - result = networks_client.create_network(**network_kwargs) - network = result['network'] - - self.assertEqual(network['name'], name) - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - networks_client.delete_network, - network['id']) - return network - - def _create_subnet(self, network, subnets_client=None, - routers_client=None, namestart='subnet-smoke', - **kwargs): - """Create a subnet for the given network - - within the cidr block configured for tenant networks. - """ - if not subnets_client: - subnets_client = self.subnets_client - if not routers_client: - routers_client = self.routers_client - - def cidr_in_use(cidr, tenant_id): - """Check cidr existence - - :returns: True if subnet with cidr already exist in tenant - False else - """ - cidr_in_use = self.os_admin.subnets_client.list_subnets( - tenant_id=tenant_id, cidr=cidr)['subnets'] - return len(cidr_in_use) != 0 - - ip_version = kwargs.pop('ip_version', 4) - - if ip_version == 6: - tenant_cidr = netaddr.IPNetwork( - CONF.network.project_network_v6_cidr) - num_bits = CONF.network.project_network_v6_mask_bits - else: - tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr) - num_bits = CONF.network.project_network_mask_bits - - result = None - str_cidr = None - # Repeatedly attempt subnet creation with sequential cidr - # blocks until an unallocated block is found. - for subnet_cidr in tenant_cidr.subnet(num_bits): - str_cidr = str(subnet_cidr) - if cidr_in_use(str_cidr, tenant_id=network['tenant_id']): - continue - - subnet = dict( - name=data_utils.rand_name(namestart), - network_id=network['id'], - tenant_id=network['tenant_id'], - cidr=str_cidr, - ip_version=ip_version, - **kwargs - ) - try: - result = subnets_client.create_subnet(**subnet) - break - except lib_exc.Conflict as e: - is_overlapping_cidr = 'overlaps with another subnet' in str(e) - if not is_overlapping_cidr: - raise - self.assertIsNotNone(result, 'Unable to allocate tenant network') - - subnet = result['subnet'] - self.assertEqual(subnet['cidr'], str_cidr) - - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - subnets_client.delete_subnet, subnet['id']) - - return subnet - - def _get_server_port_id_and_ip4(self, server, ip_addr=None): - ports = self.os_admin.ports_client.list_ports( - device_id=server['id'], fixed_ip=ip_addr)['ports'] - # A port can have more than one IP address in some cases. 
- # If the network is dual-stack (IPv4 + IPv6), this port is associated - # with 2 subnets - p_status = ['ACTIVE'] - # NOTE(vsaienko) With Ironic, instances live on separate hardware - # servers. Neutron does not bind ports for Ironic instances, as a - # result the port remains in the DOWN state. - # TODO(vsaienko) remove once bug: #1599836 is resolved. - if getattr(CONF.service_available, 'ironic', False): - p_status.append('DOWN') - port_map = [(p["id"], fxip["ip_address"]) - for p in ports - for fxip in p["fixed_ips"] - if netutils.is_valid_ipv4(fxip["ip_address"]) - and p['status'] in p_status] - inactive = [p for p in ports if p['status'] != 'ACTIVE'] - if inactive: - LOG.warning("Instance has ports that are not ACTIVE: %s", inactive) - - self.assertNotEqual(0, len(port_map), - "No IPv4 addresses found in: %s" % ports) - self.assertEqual(len(port_map), 1, - "Found multiple IPv4 addresses: %s. " - "Unable to determine which port to target." - % port_map) - return port_map[0] - - def _get_network_by_name(self, network_name): - net = self.os_admin.networks_client.list_networks( - name=network_name)['networks'] - self.assertNotEqual(len(net), 0, - "Unable to get network by name: %s" % network_name) - return net[0] - - def create_floating_ip(self, thing, external_network_id=None, - port_id=None, client=None): - """Create a floating IP and associates to a resource/port on Neutron""" - if not external_network_id: - external_network_id = CONF.network.public_network_id - if not client: - client = self.floating_ips_client - if not port_id: - port_id, ip4 = self._get_server_port_id_and_ip4(thing) - else: - ip4 = None - result = client.create_floatingip( - floating_network_id=external_network_id, - port_id=port_id, - tenant_id=thing['tenant_id'], - fixed_ip_address=ip4 - ) - floating_ip = result['floatingip'] - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - client.delete_floatingip, - floating_ip['id']) - return floating_ip - - def _associate_floating_ip(self, floating_ip, server): - port_id, _ = self._get_server_port_id_and_ip4(server) - kwargs = dict(port_id=port_id) - floating_ip = self.floating_ips_client.update_floatingip( - floating_ip['id'], **kwargs)['floatingip'] - self.assertEqual(port_id, floating_ip['port_id']) - return floating_ip - - def _disassociate_floating_ip(self, floating_ip): - """:param floating_ip: floating_ips_client.create_floatingip""" - kwargs = dict(port_id=None) - floating_ip = self.floating_ips_client.update_floatingip( - floating_ip['id'], **kwargs)['floatingip'] - self.assertIsNone(floating_ip['port_id']) - return floating_ip - - def _check_tenant_network_connectivity(self, server, - username, - private_key, - should_connect=True, - servers_for_debug=None): - if not CONF.network.project_networks_reachable: - msg = 'Tenant networks not configured to be reachable.' - LOG.info(msg) - return - # The target login is assumed to have been configured for - # key-based authentication by cloud-init. 
- try: - for net_name, ip_addresses in server['addresses'].items(): - for ip_address in ip_addresses: - self.check_vm_connectivity(ip_address['addr'], - username, - private_key, - should_connect=should_connect) - except Exception as e: - LOG.exception('Tenant network connectivity check failed') - self._log_console_output(servers_for_debug) - self._log_net_info(e) - raise - - def _check_remote_connectivity(self, source, dest, should_succeed=True, - nic=None): - """check ping server via source ssh connection - - :param source: RemoteClient: an ssh connection from which to ping - :param dest: and IP to ping against - :param should_succeed: boolean should ping succeed or not - :param nic: specific network interface to ping from - :returns: boolean -- should_succeed == ping - :returns: ping is false if ping failed - """ - def ping_remote(): - try: - source.ping_host(dest, nic=nic) - except lib_exc.SSHExecCommandFailed: - LOG.warning('Failed to ping IP: %s via a ssh connection ' - 'from: %s.', dest, source.ssh_client.host) - return not should_succeed - return should_succeed - - return test_utils.call_until_true(ping_remote, - CONF.validation.ping_timeout, - 1) - - def _create_security_group(self, security_group_rules_client=None, - tenant_id=None, - namestart='secgroup-smoke', - security_groups_client=None): - if security_group_rules_client is None: - security_group_rules_client = self.security_group_rules_client - if security_groups_client is None: - security_groups_client = self.security_groups_client - if tenant_id is None: - tenant_id = security_groups_client.tenant_id - secgroup = self._create_empty_security_group( - namestart=namestart, client=security_groups_client, - tenant_id=tenant_id) - - # Add rules to the security group - rules = self._create_loginable_secgroup_rule( - security_group_rules_client=security_group_rules_client, - secgroup=secgroup, - security_groups_client=security_groups_client) - for rule in rules: - self.assertEqual(tenant_id, rule['tenant_id']) - self.assertEqual(secgroup['id'], rule['security_group_id']) - return secgroup - - def _create_empty_security_group(self, client=None, tenant_id=None, - namestart='secgroup-smoke'): - """Create a security group without rules. - - Default rules will be created: - - IPv4 egress to any - - IPv6 egress to any - - :param tenant_id: secgroup will be created in this tenant - :returns: the created security group - """ - if client is None: - client = self.security_groups_client - if not tenant_id: - tenant_id = client.tenant_id - sg_name = data_utils.rand_name(namestart) - sg_desc = sg_name + " description" - sg_dict = dict(name=sg_name, - description=sg_desc) - sg_dict['tenant_id'] = tenant_id - result = client.create_security_group(**sg_dict) - - secgroup = result['security_group'] - self.assertEqual(secgroup['name'], sg_name) - self.assertEqual(tenant_id, secgroup['tenant_id']) - self.assertEqual(secgroup['description'], sg_desc) - - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - client.delete_security_group, secgroup['id']) - return secgroup - - def _default_security_group(self, client=None, tenant_id=None): - """Get default secgroup for given tenant_id. - - :returns: default secgroup for given tenant - """ - if client is None: - client = self.security_groups_client - if not tenant_id: - tenant_id = client.tenant_id - sgs = [ - sg for sg in list(client.list_security_groups().values())[0] - if sg['tenant_id'] == tenant_id and sg['name'] == 'default' - ] - msg = "No default security group for tenant %s." 
% (tenant_id) - self.assertGreater(len(sgs), 0, msg) - return sgs[0] - - def _create_security_group_rule(self, secgroup=None, - sec_group_rules_client=None, - tenant_id=None, - security_groups_client=None, **kwargs): - """Create a rule from a dictionary of rule parameters. - - Create a rule in a secgroup. if secgroup not defined will search for - default secgroup in tenant_id. - - :param secgroup: the security group. - :param tenant_id: if secgroup not passed -- the tenant in which to - search for default secgroup - :param kwargs: a dictionary containing rule parameters: - for example, to allow incoming ssh: - rule = { - direction: 'ingress' - protocol:'tcp', - port_range_min: 22, - port_range_max: 22 - } - """ - if sec_group_rules_client is None: - sec_group_rules_client = self.security_group_rules_client - if security_groups_client is None: - security_groups_client = self.security_groups_client - if not tenant_id: - tenant_id = security_groups_client.tenant_id - if secgroup is None: - secgroup = self._default_security_group( - client=security_groups_client, tenant_id=tenant_id) - - ruleset = dict(security_group_id=secgroup['id'], - tenant_id=secgroup['tenant_id']) - ruleset.update(kwargs) - - sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset) - sg_rule = sg_rule['security_group_rule'] - - self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id']) - self.assertEqual(secgroup['id'], sg_rule['security_group_id']) - - return sg_rule - - def _create_loginable_secgroup_rule(self, security_group_rules_client=None, - secgroup=None, - security_groups_client=None): - """Create loginable security group rule - - This function will create: - 1. egress and ingress tcp port 22 allow rule in order to allow ssh - access for ipv4. - 2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6. - 3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4. - """ - - if security_group_rules_client is None: - security_group_rules_client = self.security_group_rules_client - if security_groups_client is None: - security_groups_client = self.security_groups_client - rules = [] - rulesets = [ - dict( - # ssh - protocol='tcp', - port_range_min=22, - port_range_max=22, - ), - dict( - # ping - protocol='icmp', - ), - dict( - # ipv6-icmp for ping6 - protocol='icmp', - ethertype='IPv6', - ) - ] - sec_group_rules_client = security_group_rules_client - for ruleset in rulesets: - for r_direction in ['ingress', 'egress']: - ruleset['direction'] = r_direction - try: - sg_rule = self._create_security_group_rule( - sec_group_rules_client=sec_group_rules_client, - secgroup=secgroup, - security_groups_client=security_groups_client, - **ruleset) - except lib_exc.Conflict as ex: - # if rule already exist - skip rule and continue - msg = 'Security group rule already exists' - if msg not in ex._error_string: - raise ex - else: - self.assertEqual(r_direction, sg_rule['direction']) - rules.append(sg_rule) - - return rules - - def _get_router(self, client=None, tenant_id=None): - """Retrieve a router for the given tenant id. - - If a public router has been configured, it will be returned. - - If a public router has not been configured, but a public - network has, a tenant router will be created and returned that - routes traffic to the public network. 
- """ - if not client: - client = self.routers_client - if not tenant_id: - tenant_id = client.tenant_id - router_id = CONF.network.public_router_id - network_id = CONF.network.public_network_id - if router_id: - body = client.show_router(router_id) - return body['router'] - elif network_id: - router = self._create_router(client, tenant_id) - kwargs = {'external_gateway_info': dict(network_id=network_id)} - router = client.update_router(router['id'], **kwargs)['router'] - return router - else: - raise Exception("Neither of 'public_router_id' or " - "'public_network_id' has been defined.") - - def _create_router(self, client=None, tenant_id=None, - namestart='router-smoke'): - if not client: - client = self.routers_client - if not tenant_id: - tenant_id = client.tenant_id - name = data_utils.rand_name(namestart) - result = client.create_router(name=name, - admin_state_up=True, - tenant_id=tenant_id) - router = result['router'] - self.assertEqual(router['name'], name) - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - client.delete_router, - router['id']) - return router - - # def _update_router_admin_state(self, router, admin_state_up): - # kwargs = dict(admin_state_up=admin_state_up) - # router = self.routers_client.update_router( - # router['id'], **kwargs)['router'] - # self.assertEqual(admin_state_up, router['admin_state_up']) - - def create_networks(self, networks_client=None, - routers_client=None, subnets_client=None, - tenant_id=None, dns_nameservers=None, - port_security_enabled=True): - """Create a network with a subnet connected to a router. - - The baremetal driver is a special case since all nodes are - on the same shared network. - - :param tenant_id: id of tenant to create resources in. - :param dns_nameservers: list of dns servers to send to subnet. - :returns: network, subnet, router - """ - if CONF.network.shared_physical_network: - # NOTE(Shrews): This exception is for environments where tenant - # credential isolation is available, but network separation is - # not (the current baremetal case). 
Likely can be removed when - # test account mgmt is reworked: - # https://blueprints.launchpad.net/tempest/+spec/test-accounts - if not CONF.compute.fixed_network_name: - m = 'fixed_network_name must be specified in config' - raise lib_exc.InvalidConfiguration(m) - network = self._get_network_by_name( - CONF.compute.fixed_network_name) - router = None - subnet = None - else: - network = self._create_network( - networks_client=networks_client, - tenant_id=tenant_id, - port_security_enabled=port_security_enabled) - router = self._get_router(client=routers_client, - tenant_id=tenant_id) - subnet_kwargs = dict(network=network, - subnets_client=subnets_client, - routers_client=routers_client) - # use explicit check because empty list is a valid option - if dns_nameservers is not None: - subnet_kwargs['dns_nameservers'] = dns_nameservers - subnet = self._create_subnet(**subnet_kwargs) - if not routers_client: - routers_client = self.routers_client - router_id = router['id'] - routers_client.add_router_interface(router_id, - subnet_id=subnet['id']) - - # save a cleanup job to remove this association between - # router and subnet - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - routers_client.remove_router_interface, router_id, - subnet_id=subnet['id']) - return network, subnet, router diff --git a/congress_tempest_tests/tests/scenario/manager_congress.py b/congress_tempest_tests/tests/scenario/manager_congress.py deleted file mode 100644 index ad9bc678..00000000 --- a/congress_tempest_tests/tests/scenario/manager_congress.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import collections -import re - -from oslo_log import log as logging -from tempest.common import credentials_factory as credentials -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest import manager as tempestmanager - -from congress_tempest_tests.services.policy import policy_client -# use local copy of tempest scenario manager during upstream refactoring -from congress_tempest_tests.tests.scenario import manager - -CONF = config.CONF -LOG = logging.getLogger(__name__) - -Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple', - ['floating_ip', 'server']) - - -def get_datasource_id(client, name): - datasources = client.list_datasources() - for datasource in datasources['results']: - if datasource['name'] == name: - return datasource['id'] - raise Exception("Datasource %s not found." % name) - - -# Note: these tests all use neutron today so we mix with that. 
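For orientation, a short sketch of how get_datasource_id (defined above) composes with the policy client; `client` is assumed to be a configured policy_client.PolicyClient such as the os_admin.congress_client used throughout these tests:

    # Resolve a datasource id by name, then inspect its sync status.
    # list_datasources() returns {'results': [{'id': ..., 'name': ...}, ...]}.
    ds_id = get_datasource_id(client, 'nova')
    status = client.list_datasource_status(ds_id)
    if status['initialized'] != 'True':
        raise Exception('nova datasource not yet initialized')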
-class ScenarioPolicyBase(manager.NetworkScenarioTest): - @classmethod - def setUpClass(cls): - super(ScenarioPolicyBase, cls).setUpClass() - # auth provider for admin credentials - creds = credentials.get_configured_admin_credentials('identity_admin') - auth_prov = tempestmanager.get_auth_provider(creds) - cls.setup_required_clients(auth_prov) - - @classmethod - def setup_required_clients(cls, auth_prov): - # Get congress client - cls.os_admin.congress_client = policy_client.PolicyClient( - auth_prov, "policy", CONF.identity.region) - - # Get telemtery_client - if getattr(CONF.service_available, 'ceilometer', False): - import ceilometer.tests.tempest.service.client as telemetry_client - cls.os_admin.telemetry_client = ( - telemetry_client.TelemetryClient( - auth_prov, - CONF.telemetry.catalog_type, CONF.identity.region, - endpoint_type=CONF.telemetry.endpoint_type)) - - # Get alarms client - if getattr(CONF.service_available, 'aodh_plugin', False): - import aodh.tests.tempest.service.client as alarms_client - cls.os_admin.alarms_client = ( - alarms_client.AlarmingClient( - auth_prov, - CONF.alarming_plugin.catalog_type, CONF.identity.region, - CONF.alarming_plugin.endpoint_type)) - - def _setup_network_and_servers(self): - self.security_group = self._create_security_group() - self.network, self.subnet, self.router = self.create_networks() - self.check_networks() - - name = data_utils.rand_name('server-smoke') - server = self._create_server(name, self.network) - self._check_tenant_network_connectivity() - - floating_ip = self.create_floating_ip(server) - self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server) - - def check_networks(self): - """Check for newly created network/subnet/router. - - Checks that we see the newly created network/subnet/router via - checking the result of list_[networks,routers,subnets]. - """ - - seen_nets = self.os_admin.networks_client.list_networks() - seen_names = [n['name'] for n in seen_nets['networks']] - seen_ids = [n['id'] for n in seen_nets['networks']] - self.assertIn(self.network['name'], seen_names) - self.assertIn(self.network['id'], seen_ids) - - if self.subnet: - seen_subnets = self.os_admin.subnets_client.list_subnets() - seen_net_ids = [n['network_id'] for n in seen_subnets['subnets']] - seen_subnet_ids = [n['id'] for n in seen_subnets['subnets']] - self.assertIn(self.network['id'], seen_net_ids) - self.assertIn(self.subnet['id'], seen_subnet_ids) - - if self.router: - seen_routers = self.os_admin.routers_client.list_routers() - seen_router_ids = [n['id'] for n in seen_routers['routers']] - seen_router_names = [n['name'] for n in seen_routers['routers']] - self.assertIn(self.router['name'], - seen_router_names) - self.assertIn(self.router['id'], - seen_router_ids) - - def check_datasource_no_error(self, datasource_name): - """Check that datasource has no error on latest update""" - ds_status = self.os_admin.congress_client.list_datasource_status( - datasource_name) - if (ds_status['initialized'] == 'True' and - ds_status['number_of_updates'] != '0' and - ds_status['last_error'] == 'None'): - return True - else: - LOG.debug('datasource %s not initialized, not polled, or shows ' - 'error. 
Full status: %s', datasource_name, ds_status) - return False - - def _create_server(self, name, network): - keypair = self.create_keypair() - self.keypairs[keypair['name']] = keypair - security_groups = [{'name': self.security_group['name']}] - create_kwargs = { - 'networks': [ - {'uuid': network['id']}, - ], - 'key_name': keypair['name'], - 'security_groups': security_groups, - } - server = self.create_server(name=name, wait_until='ACTIVE', - **create_kwargs) - self.servers.append(server) - return server - - def _get_server_key(self, server): - return self.keypairs[server['key_name']]['private_key'] - - def _check_tenant_network_connectivity(self): - ssh_login = CONF.validation.image_ssh_user - for server in self.servers: - # call the common method in the parent class - super(ScenarioPolicyBase, self)._check_tenant_network_connectivity( - server, ssh_login, self._get_server_key(server), - servers_for_debug=self.servers) - - def _create_and_associate_floating_ips(self, server): - public_network_id = CONF.network.public_network_id - floating_ip = self._create_floating_ip(server, public_network_id) - self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server) - - def _check_public_network_connectivity(self, should_connect=True, - msg=None): - ssh_login = CONF.compute.image_ssh_user - floating_ip, server = self.floating_ip_tuple - ip_address = floating_ip.floating_ip_address - private_key = None - if should_connect: - private_key = self._get_server_key(server) - # call the common method in the parent class - super(ScenarioPolicyBase, self)._check_public_network_connectivity( - ip_address, ssh_login, private_key, should_connect, msg, - self.servers) - - def _disassociate_floating_ips(self): - floating_ip, server = self.floating_ip_tuple - self._disassociate_floating_ip(floating_ip) - self.floating_ip_tuple = Floating_IP_tuple( - floating_ip, None) - - def _reassociate_floating_ips(self): - floating_ip, server = self.floating_ip_tuple - name = data_utils.rand_name('new_server-smoke-') - # create a new server for the floating ip - server = self._create_server(name, self.network) - self._associate_floating_ip(floating_ip, server) - self.floating_ip_tuple = Floating_IP_tuple( - floating_ip, server) - - def _create_new_network(self): - self.new_net = self._create_network(tenant_id=self.tenant_id) - self.new_subnet = self._create_subnet( - network=self.new_net, - gateway_ip=None) - - def _get_server_nics(self, ssh_client): - reg = re.compile(r'(?P\d+): (?P\w+):') - ipatxt = ssh_client.exec_command("ip address") - return reg.findall(ipatxt) - - def _check_network_internal_connectivity(self, network): - """via ssh check VM internal connectivity: - - - ping internal gateway and DHCP port, implying in-tenant connectivity - pinging both, because L3 and DHCP agents might be on different nodes. - """ - floating_ip, server = self.floating_ip_tuple - # get internal ports' ips: - # get all network ports in the new network - ports = self.os_admin.ports_client.list_ports( - tenant_id=server['tenant_id'], network_id=network.id)['ports'] - - internal_ips = (p['fixed_ips'][0]['ip_address'] for p in ports - if p['device_owner'].startswith('network')) - - self._check_server_connectivity(floating_ip, internal_ips) - - def _check_network_external_connectivity(self): - """ping public network default gateway to imply external conn.""" - if not CONF.network.public_network_id: - msg = 'public network not defined.' 
- LOG.info(msg) - return - - subnet = self.os_admin.subnets_client.list_subnets( - network_id=CONF.network.public_network_id)['subnets'] - self.assertEqual(1, len(subnet), "Found %d subnets" % len(subnet)) - - external_ips = [subnet[0]['gateway_ip']] - self._check_server_connectivity(self.floating_ip_tuple.floating_ip, - external_ips) - - def _check_server_connectivity(self, floating_ip, address_list): - ip_address = floating_ip.floating_ip_address - private_key = self._get_server_key(self.floating_ip_tuple.server) - ssh_source = self._ssh_to_server(ip_address, private_key) - - for remote_ip in address_list: - try: - self.assertTrue(self._check_remote_connectivity(ssh_source, - remote_ip), - "Timed out waiting for %s to become " - "reachable" % remote_ip) - except Exception: - LOG.exception("Unable to access {dest} via ssh to " - "floating-ip {src}".format(dest=remote_ip, - src=floating_ip)) - raise diff --git a/congress_tempest_tests/tests/scenario/test_congress_basic_ops.py b/congress_tempest_tests/tests/scenario/test_congress_basic_ops.py deleted file mode 100644 index 3fbaf3c7..00000000 --- a/congress_tempest_tests/tests/scenario/test_congress_basic_ops.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
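The tests below drive Congress end to end: creating policies, inserting Datalog rules, and executing actions through the datasource and policy APIs. As a rough sketch, the action payload built in test_execution_action has this shape (values here are illustrative only):

    # Illustrative payload for executing a nova action through Congress.
    body = {
        'name': 'servers.set_meta',
        'args': {'positional': [],
                 'named': {'server': 'SERVER_ID',
                           'metadata': {'testkey1': 'value3'}}},
    }
    # congress_client.execute_datasource_action('nova', 'execute', body)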
-
-from tempest import config
-from tempest.lib.common.utils import test_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-from tempest import test
-
-from congress_tempest_tests.tests.scenario import helper
-from congress_tempest_tests.tests.scenario import manager_congress
-
-import random
-import string
-import time
-
-
-CONF = config.CONF
-
-
-class TestPolicyBasicOps(manager_congress.ScenarioPolicyBase):
-
-    @classmethod
-    def skip_checks(cls):
-        super(TestPolicyBasicOps, cls).skip_checks()
-        if not (CONF.network.project_networks_reachable
-                or CONF.network.public_network_id):
-            msg = ('Either project_networks_reachable must be "true", or '
-                   'public_network_id must be defined.')
-            cls.enabled = False
-            raise cls.skipException(msg)
-
-    def setUp(self):
-        super(TestPolicyBasicOps, self).setUp()
-        self.keypairs = {}
-        self.servers = []
-
-    def _create_random_policy(self):
-        policy_name = "nova_%s" % ''.join(
-            random.choice(string.ascii_lowercase) for x in range(10))
-        body = {"name": policy_name}
-        resp = self.os_admin.congress_client.create_policy(body)
-        self.addCleanup(self.os_admin.congress_client.delete_policy,
-                        resp['id'])
-        return resp['name']
-
-    def _create_policy_rule(self, policy_name, rule, rule_name=None,
-                            comment=None):
-        body = {'rule': rule}
-        if rule_name:
-            body['name'] = rule_name
-        if comment:
-            body['comment'] = comment
-        client = self.os_admin.congress_client
-        response = client.create_policy_rule(policy_name, body)
-        if response:
-            self.addCleanup(client.delete_policy_rule, policy_name,
-                            response['id'])
-            return response
-        else:
-            raise Exception('Failed to create policy rule (%s, %s)'
-                            % (policy_name, rule))
-
-    def _create_test_server(self, name=None):
-        image_ref = CONF.compute.image_ref
-        flavor_ref = CONF.compute.flavor_ref
-        keypair = self.create_keypair()
-        security_group = self._create_security_group()
-        security_groups = [{'name': security_group['name']}]
-        create_kwargs = {'key_name': keypair['name'],
-                         'security_groups': security_groups}
-        instance = self.create_server(name=name,
-                                      image_id=image_ref,
-                                      flavor=flavor_ref,
-                                      wait_until='ACTIVE',
-                                      **create_kwargs)
-        return instance
-
-    @decorators.attr(type='smoke')
-    @test.services('compute', 'network')
-    def test_execution_action(self):
-        metadata = {'testkey1': 'value3'}
-        res = {'meta': {'testkey1': 'value3'}}
-        server = self._create_test_server()
-        congress_client = self.os_admin.congress_client
-        servers_client = self.os_admin.servers_client
-        policy = self._create_random_policy()
-        service = 'nova'
-        action = 'servers.set_meta'
-        action_args = {'args': {'positional': [],
-                                'named': {'server': server['id'],
-                                          'metadata': metadata}}}
-        body = action_args
-
-        f = lambda: servers_client.show_server_metadata_item(server['id'],
-                                                             'testkey1')
-        # execute via datasource api
-        body.update({'name': action})
-        congress_client.execute_datasource_action(service, "execute", body)
-        helper.retry_check_function_return_value(f, res)
-
-        # execute via policy api
-        body.update({'name': service + ':' + action})
-        congress_client.execute_policy_action(policy, "execute", False,
-                                              False, body)
-        helper.retry_check_function_return_value(f, res)
-
-    @decorators.attr(type='smoke')
-    @test.services('compute', 'network')
-    def test_policy_basic_op(self):
-        self._setup_network_and_servers()
-        body = {"rule": "port_security_group(id, security_group_name) "
-                        ":-neutronv2:ports(id, tenant_id, name, network_id,"
-                        "mac_address, admin_state_up, status, device_id, "
-                        "device_owner),"
"neutronv2:security_group_port_bindings(id, " - "security_group_id), neutronv2:security_groups(" - "security_group_id, tenant_id1, security_group_name," - "description)"} - results = self.os_admin.congress_client.create_policy_rule( - 'classification', body) - rule_id = results['id'] - self.addCleanup( - self.os_admin.congress_client.delete_policy_rule, - 'classification', rule_id) - - # Find the ports of on this server - ports = self.os_admin.ports_client.list_ports( - device_id=self.servers[0]['id'])['ports'] - - def check_data(): - results = self.os_admin.congress_client.list_policy_rows( - 'classification', 'port_security_group') - for row in results['results']: - if (row['data'][0] == ports[0]['id'] and - row['data'][1] == - self.servers[0]['security_groups'][0]['name']): - return True - else: - return False - - time.sleep(65) # sleep for replicated PE sync - # Note(ekcs): do not use retry because we want to make sure the call - # succeeds on the first try after adequate time. - # If retry used, it may pass based on succeding on one replica but - # failing on all others. - self.assertTrue(check_data(), - "Data did not converge in time or failure in server") - - @decorators.attr(type='smoke') - @test.services('compute', 'network') - def test_reactive_enforcement(self): - servers_client = self.os_admin.servers_client - server_name = 'server_under_test' - server = self._create_test_server(name=server_name) - policy_name = self._create_random_policy() - meta_key = 'meta_test_key1' - meta_val = 'value1' - meta_data = {'meta': {meta_key: meta_val}} - rules = [ - 'execute[nova:servers_set_meta(id, "%s", "%s")] :- ' - 'test_servers(id)' % (meta_key, meta_val), - 'test_servers(id) :- ' - 'nova:servers(id, name, host_id, status, tenant_id,' - 'user_id, image_id, flavor_id, zone, host_name),' - 'equal(name, "%s")' % server_name] - - for rule in rules: - self._create_policy_rule(policy_name, rule) - f = lambda: servers_client.show_server_metadata_item(server['id'], - meta_key) - time.sleep(80) # sleep for replicated PE sync - # Note: seems reactive enforcement takes a bit longer - # succeeds on the first try after adequate time. - # If retry used, it may pass based on succeding on one replica but - # failing on all others. 
- self.assertEqual(f(), meta_data) - - -class TestPolicyLibraryBasicOps(manager_congress.ScenarioPolicyBase): - @decorators.attr(type='smoke') - def test_policy_library_basic_op(self): - response = self.admin_manager.congress_client.list_library_policy() - initial_state = response['results'] - - self.assertGreater( - len(initial_state), 0, 'library policy shows no policies, ' - 'indicating failed load-on-startup.') - - test_policy = { - "name": "test_policy", - "description": "test policy description", - "kind": "nonrecursive", - "abbreviation": "abbr", - "rules": [{"rule": "p(x) :- q(x)", "comment": "test comment", - "name": "test name"}, - {"rule": "p(x) :- q2(x)", "comment": "test comment2", - "name": "test name2"}] - } - response = self.admin_manager.congress_client.create_library_policy( - test_policy) - policy_id = response['id'] - test_policy['id'] = policy_id - - def delete_if_found(id_): - try: - self.admin_manager.congress_client.delete_library_policy(id_) - except exceptions.NotFound: - pass - - self.addCleanup(delete_if_found, policy_id) - - response = self.admin_manager.congress_client.list_library_policy() - new_state = response['results'] - - self.assertEqual(len(initial_state) + 1, len(new_state), - 'new library policy not reflected in list results') - self.assertIn(test_policy, new_state, - 'new library policy not reflected in list results') - - self.admin_manager.congress_client.delete_library_policy(policy_id) - - response = self.admin_manager.congress_client.list_library_policy() - new_state = response['results'] - - self.assertEqual(len(initial_state), len(new_state), - 'library policy delete not reflected in list results') - self.assertNotIn(test_policy, new_state, - 'library policy delete not reflected in list results') - - -class TestCongressDataSources(manager_congress.ScenarioPolicyBase): - - @classmethod - def skip_checks(cls): - super(TestCongressDataSources, cls).skip_checks() - if not (CONF.network.project_networks_reachable - or CONF.network.public_network_id): - msg = ('Either project_networks_reachable must be "true", or ' - 'public_network_id must be defined.') - cls.enabled = False - raise cls.skipException(msg) - - def test_all_loaded_datasources_are_initialized(self): - - @helper.retry_on_exception - def _check_all_datasources_are_initialized(): - datasources = self.os_admin.congress_client.list_datasources() - for datasource in datasources['results']: - results = ( - self.os_admin.congress_client.list_datasource_status( - datasource['id'])) - if results['initialized'] != 'True': - return False - return True - - if not test_utils.call_until_true( - func=_check_all_datasources_are_initialized, - duration=100, sleep_for=5): - raise exceptions.TimeoutException("Data did not converge in time " - "or failure in server") - - def test_all_datasources_have_tables(self): - - @helper.retry_on_exception - def check_data(): - datasources = self.os_admin.congress_client.list_datasources() - for datasource in datasources['results']: - results = ( - self.os_admin.congress_client.list_datasource_tables( - datasource['id'])) - # NOTE(arosen): if there are no results here we return false as - # there is something wrong with a driver as it doesn't expose - # any tables. 
-                if not results['results']:
-                    return False
-            return True
-
-        if not test_utils.call_until_true(func=check_data,
-                                          duration=100, sleep_for=5):
-            raise exceptions.TimeoutException("Data did not converge in time "
-                                              "or failure in server")
diff --git a/contrib/nova/README b/contrib/nova/README
deleted file mode 100644
index f99a294e..00000000
--- a/contrib/nova/README
+++ /dev/null
@@ -1,22 +0,0 @@
-
-Steps to install this policy extension:
-1. Install the congress client on the nova controllers
-2. Install (copy) the policy module (congress.py) under nova/api
-3. In api-paste.ini, declare the congress filter and insert it into the execution flow.
-   The congress filter usually sits between the final application 'osapi_compute_app_v2' and the keystone context.
-   For example, the congress filter can be declared as
-   "
-   [filter:congress]
-   paste.filter_factory = nova.api.congress:Congress.factory
-   "
-   And insert congress into the execution flow like
-   "
-   keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext congress osapi_compute_app_v2
-   "
-4. Push policies into the congress server as indicated in "sample_policies"
-
-P.S. This policy enforcement is supposed to run with admin credentials in order to have a complete picture at the domain level.
-     Therefore we turn on the "all_tenants" option while looking up resource usage across the entire domain.
-
-
-More info can be found in these public slides: https://drive.google.com/file/d/0B5VvD3PSoDPaLTVIWG1NNDhQRFE/view?usp=sharing
\ No newline at end of file
diff --git a/contrib/nova/congress.py b/contrib/nova/congress.py
deleted file mode 100644
index 67e83014..00000000
--- a/contrib/nova/congress.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2015 Symantec.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Congress Policy Middleware.
-
-"""
-import json
-
-from nova.i18n import _
-from nova import wsgi
-from oslo_config import cfg
-from oslo_log import log as logging
-import webob.dec
-import webob.exc
-
-# policy enforcement flow
-from congressclient.v1 import client
-import keystoneclient
-from keystoneclient.v3 import client as ksv3client
-from novaclient import client as nova
-
-LOG = logging.getLogger(__name__)
-
-
-class Congress(wsgi.Middleware):
-    """Enforce Congress policy on incoming nova boot requests."""
-
-    @webob.dec.wsgify(RequestClass=wsgi.Request)
-    def __call__(self, req):
-
-        if req.environ['REQUEST_METHOD'] != 'POST':
-            return self.application
-
-        raw_path = req.environ['RAW_PATH_INFO']
-
-        if "metadata" in raw_path:
-            return self.application
-
-        if "servers/action" in raw_path:
-            return self.application
-
-        flavor_ref = json.loads(req.body)['server']['flavorRef']
-
-        token = req.environ['HTTP_X_AUTH_TOKEN']
-
-        tenant_name = req.environ['HTTP_X_TENANT_NAME']
-
-        CONF = cfg.CONF
-
-        # obtain identity endpoint url
-        url = CONF.keystone_authtoken.auth_url
-
-        # obtain one of the supported keystone api versions
-        raw_versions = keystoneclient.discover.available_versions(url,
-                                                                  session=None)
-        version = raw_versions[-1]['id']
-
-        # assemble auth_url
-        auth_url = url + '/' + version
-
-        auth = keystoneclient.auth.identity.v2.Token(
-            auth_url=auth_url,
-            token=token, tenant_name=tenant_name)
-
-        session = keystoneclient.session.Session(auth=auth)
-        congress = client.Client(session=session,
-                                 auth=None,
-                                 interface='publicURL',
-                                 service_type='policy')
-
-        # Aggregate resource usage at the domain level
-        domain = req.environ['HTTP_X_PROJECT_DOMAIN_NAME']
-
-        # obtain list of projects under this domain
-        k3_client = ksv3client.Client(session=session)
-
-        projects = k3_client.projects.list(domain=domain)
-        # obtain list of hosts under each of these projects
-
-        nova_c = nova.Client("2", session=session)
-        ram_p = 0
-        disk_p = 0
-        cpus_p = 0
-        for project in projects:
-
-            search_opts = {
-                'all_tenants': 1,
-                'tenant_id': project.id,
-            }
-
-            servers_p = nova_c.servers.list(search_opts=search_opts)
-
-            # locate flavor of each host
-            for server in servers_p:
-
-                info = nova_c.servers.get(server=server)
-                flavor_id = info._info['flavor']['id']
-                fd = nova_c.flavors.get(flavor=flavor_id)
-                ram_p += fd.ram
-                disk_p += fd.disk
-                disk_p += fd.ephemeral
-                cpus_p += fd.vcpus
-
-        # incrementally add each type of resource
-        # assemble query policy based on the data-usage
-        # with memory_p, disk_p and cpus_p
-
-        fd = nova_c.flavors.get(flavor=flavor_ref)
-        ram_p += fd.ram
-        disk_p += fd.disk
-        disk_p += fd.ephemeral
-        cpus_p += fd.vcpus
-        domain_resource = ("(" + domain + "," + str(ram_p) + "," +
-                           str(disk_p) + "," + str(cpus_p) + ")")
-
-        validation_result = congress.execute_policy_action(
-            "classification",
-            "simulate",
-            False,
-            True,
-            {'query': 'domain_resource_usage_exceeded (domain)',
-             # this needs to be defined in congress server
-             'action_policy': 'nova_quota_action',
-             'sequence': 'domain_resource+'+domain_resource})
-
-        if validation_result["result"]:
-
-            messages = validation_result["result"]
-
-            if messages:
-                result_str = "\n ".join(map(str, messages))
-                msg = _("quota is not sufficient for this VM deployment: "
-                        "%s") % ("\n " + result_str)
-                LOG.error(msg)
-
-                LOG.debug(messages)
-                return webob.exc.HTTPUnauthorized(explanation=msg)
-        else:
-            LOG.info('Model valid')
-
-        return self.application
diff --git a/contrib/nova/sample_policies b/contrib/nova/sample_policies
deleted file mode 100644
index 8ef67b81..00000000
--- a/contrib/nova/sample_policies
+++ /dev/null
@@ -1,20 +0,0 @@
-Sample policies in Datalog format:
-
-domain_resource_usage_exceeded (domain):-
-    domain_resource(domain, ram, disk, cpus),
-    gt(ram, 550).
-
-domain_resource_usage_exceeded (domain):-
-    domain_resource(domain, ram, disk, cpus),
-    gt(disk, 2).
-
-domain_resource_usage_exceeded (domain):-
-    domain_resource(domain, ram, disk, cpus),
-    gt(cpus, 2).
-
-
-
-# How to push these policies into the congress server
- openstack congress policy rule create classification 'domain_resource_usage_exceeded (domain):- domain_resource(domain, ram, disk, cpus), gt(cpus, 2)'
- openstack congress policy rule create classification 'domain_resource_usage_exceeded (domain):- domain_resource(domain, ram, disk, cpus), gt(disk, 2)'
- openstack congress policy rule create classification 'domain_resource_usage_exceeded (domain):- domain_resource(domain, ram, disk, cpus), gt(ram, 550)'
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
deleted file mode 100755
index a8aab020..00000000
--- a/devstack/plugin.sh
+++ /dev/null
@@ -1,299 +0,0 @@
-#!/usr/bin/env bash
-# Plugin file for congress services
-#----------------------------------
-
-# Dependencies:
-# ``functions`` file
-# ``DEST`` must be defined
-# ``STACK_USER`` must be defined
-
-# Functions in this file are classified into the following categories:
-#
-# - entry points (called from stack.sh or unstack.sh)
-# - internal functions
-# - congress exercises
-
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Functions
-# ---------
-
-# Test if any Congress services are enabled
-# is_congress_enabled
-function is_congress_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"congress" ]] && return 0
-    return 1
-}
-
-# configure_congress()
-# Set common config for the congress server and agents.
-function configure_congress {
-    setup_develop $CONGRESS_DIR
-    # Put config files in ``CONGRESS_CONF_DIR`` for everyone to find
-    if [[ ! -d $CONGRESS_CONF_DIR ]]; then
-        sudo mkdir -p $CONGRESS_CONF_DIR
-    fi
-    sudo chown $STACK_USER $CONGRESS_CONF_DIR
-
-    touch $CONGRESS_CONF
-    sudo chown $STACK_USER $CONGRESS_CONF
-
-    # Format logging
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        setup_colorized_logging $CONGRESS_CONF DEFAULT project_id
-    fi
-    CONGRESS_API_PASTE_FILE=$CONGRESS_CONF_DIR/api-paste.ini
-    CONGRESS_POLICY_FILE=$CONGRESS_CONF_DIR/policy.json
-
-    cp $CONGRESS_DIR/etc/api-paste.ini $CONGRESS_API_PASTE_FILE
-    cp $CONGRESS_DIR/etc/policy.json $CONGRESS_POLICY_FILE
-    mkdir $CONGRESS_LIBRARY_DIR
-    cp -r $CONGRESS_DIR/library/* $CONGRESS_LIBRARY_DIR
-
-    # Update the configuration file
-    iniset $CONGRESS_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $CONGRESS_CONF oslo_policy policy_file $CONGRESS_POLICY_FILE
-    iniset $CONGRESS_CONF DEFAULT auth_strategy $CONGRESS_AUTH_STRATEGY
-    iniset $CONGRESS_CONF DEFAULT datasource_sync_period 30
-    iniset $CONGRESS_CONF DEFAULT replicated_policy_engine "$CONGRESS_REPLICATED"
-
-    # if [ "$CONGRESS_MULTIPROCESS_DEPLOYMENT" == "False" ]; then
-    #     iniset $CONGRESS_CONF DEFAULT transport_url $CONGRESS_TRANSPORT_URL
-    # fi
-
-    CONGRESS_DRIVERS="congress.datasources.neutronv2_driver.NeutronV2Driver,"
-    CONGRESS_DRIVERS+="congress.datasources.glancev2_driver.GlanceV2Driver,"
-    CONGRESS_DRIVERS+="congress.datasources.nova_driver.NovaDriver,"
-    CONGRESS_DRIVERS+="congress.datasources.keystonev3_driver.KeystoneV3Driver,"
-    CONGRESS_DRIVERS+="congress.datasources.ceilometer_driver.CeilometerDriver,"
-    CONGRESS_DRIVERS+="congress.datasources.cinder_driver.CinderDriver,"
-    CONGRESS_DRIVERS+="congress.datasources.swift_driver.SwiftDriver,"
-    CONGRESS_DRIVERS+="congress.datasources.plexxi_driver.PlexxiDriver,"
-    CONGRESS_DRIVERS+="congress.datasources.vCenter_driver.VCenterDriver,"
-    CONGRESS_DRIVERS+="congress.datasources.murano_driver.MuranoDriver,"
-    CONGRESS_DRIVERS+="congress.datasources.ironic_driver.IronicDriver,"
-    CONGRESS_DRIVERS+="congress.datasources.heatv1_driver.HeatV1Driver,"
-    CONGRESS_DRIVERS+="congress.datasources.doctor_driver.DoctorDriver,"
-    CONGRESS_DRIVERS+="congress.datasources.aodh_driver.AodhDriver,"
-    CONGRESS_DRIVERS+="congress.tests.fake_datasource.FakeDataSource"
-
-    iniset $CONGRESS_CONF DEFAULT drivers $CONGRESS_DRIVERS
-
-    iniset $CONGRESS_CONF database connection `database_connection_url $CONGRESS_DB_NAME`
-
-    _congress_setup_keystone $CONGRESS_CONF keystone_authtoken
-
-}
-
-function configure_congress_datasources {
-    _configure_service neutron neutronv2
-    _configure_service nova nova
-    _configure_service key keystonev3
-    _configure_service ceilometer ceilometer
-    _configure_service cinder cinder
-    _configure_service swift swift
-    _configure_service glance glancev2
-    _configure_service murano murano
-    _configure_service ironic ironic
-    _configure_service heat heat
-    _configure_service aodh aodh
-
-}
-
-function _configure_service {
-    if is_service_enabled $1; then
-        openstack congress datasource create $2 "$2" \
-            --config poll_time=10 \
-            --config username=$OS_USERNAME \
-            --config tenant_name=$OS_PROJECT_NAME \
-            --config password=$OS_PASSWORD \
-            --config auth_url=http://$SERVICE_HOST/identity
-    fi
-}
-
-function create_predefined_policy {
-    if [ -n "$CONGRESS_PREDEFINED_POLICY_FILE" ] ; then
-        python $CONGRESS_DIR/scripts/preload-policies/output_policy_command.py \
-            $CONGRESS_PREDEFINED_POLICY_FILE | while read CONGRESS_CMD
-        do
-            $CONGRESS_CMD
-        done
-    fi
-}
-
-function _install_congress_dashboard {
$CONGRESSDASHBOARD_REPO $CONGRESSDASHBOARD_DIR $CONGRESSDASHBOARD_BRANCH - setup_develop $CONGRESSDASHBOARD_DIR - _congress_setup_horizon -} - -# create_congress_cache_dir() - Part of the _congress_setup_keystone() process -function create_congress_cache_dir { - # Create cache dir - sudo mkdir -p $CONGRESS_AUTH_CACHE_DIR - sudo chown $STACK_USER $CONGRESS_AUTH_CACHE_DIR - rm -f $CONGRESS_AUTH_CACHE_DIR/* -} - -# create_congress_accounts() - Set up common required congress accounts - -# Tenant User Roles -# --------------------------------------------------------------------- -# service congress admin # if enabled - -# Migrated from keystone_data.sh -function create_congress_accounts { - if [[ "$ENABLED_SERVICES" =~ "congress" ]]; then - - create_service_user "congress" - - local congress_service=$(get_or_create_service "congress" \ - "policy" "Congress Service") - get_or_create_endpoint $congress_service \ - "$REGION_NAME" \ - "http://$SERVICE_HOST:$CONGRESS_PORT/" \ - "http://$SERVICE_HOST:$CONGRESS_PORT/" \ - "http://$SERVICE_HOST:$CONGRESS_PORT/" - fi -} - -# init_congress() - Initialize databases, etc. -function init_congress { - recreate_database $CONGRESS_DB_NAME utf8 - # Run Congress db migrations - congress-db-manage --config-file $CONGRESS_CONF upgrade head -} - -# install_congress() - install dependencies, collect client source and prepare -function install_congress { - # congress requires java so we install it here - if is_ubuntu; then - install_package default-jre - elif is_fedora; then - install_package jre - else - die $LINENO "Congress devstack only supports Debian and Red Hat-based distributions" - fi - git_clone $CONGRESSCLIENT_REPO $CONGRESSCLIENT_DIR $CONGRESSCLIENT_BRANCH - setup_develop $CONGRESSCLIENT_DIR - - if is_service_enabled horizon; then - _install_congress_dashboard - fi -} - -# Start running processes, including screen -function start_congress_service_and_check { - # build config-file options - local cfg_file - local CFG_FILE_OPTIONS="--config-file $CONGRESS_CONF" - - # Start the congress services in separate processes - echo_summary "Starting congress services" - - if [ "$CONGRESS_MULTIPROCESS_DEPLOYMENT" == "False" ]; then - echo "Starting congress as a single process" - run_process congress "$CONGRESS_BIN_DIR/congress-server --node-id=allinonenode $CFG_FILE_OPTIONS" - else - echo "Starting congress as multiple processes" - run_process congress-api "$CONGRESS_BIN_DIR/congress-server --api --node-id=apinode $CFG_FILE_OPTIONS" - run_process congress-engine "$CONGRESS_BIN_DIR/congress-server --policy-engine --node-id=enginenode $CFG_FILE_OPTIONS" - run_process congress-datasources "$CONGRESS_BIN_DIR/congress-server --datasources --node-id=datanode $CFG_FILE_OPTIONS" - fi - - # Start multiple PE's - if [ "$CONGRESS_REPLICATED" == "True" ]; then - run_process congress-engine "$CONGRESS_BIN_DIR/congress-server --policy-engine --node-id=enginenode-2 $CFG_FILE_OPTIONS" - run_process congress-engine "$CONGRESS_BIN_DIR/congress-server --policy-engine --node-id=enginenode-3 $CFG_FILE_OPTIONS" - fi - - echo "Waiting for Congress to start..." - # FIXME(arosen): using curl right now to check if congress is alive; once we implement version checks, use the check below. - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy $CONGRESS_HOST http://$CONGRESS_HOST:$CONGRESS_PORT; do sleep 1; done"; then - die $LINENO "Congress did not start" - fi -# if ! timeout $SERVICE_TIMEOUT sh -c "while !
wget --no-proxy -q -O- http://$CONGRESS_HOST:$CONGRESS_PORT; do sleep 1; done"; then -# die $LINENO "Congress did not start" -# fi -} - - -# stop_congress() - Stop running processes (non-screen) -function stop_congress { - : -} - -# cleanup_congress() - Remove residual data files, anything left over from previous -# runs that would need to be cleaned up. -function cleanup_congress { - sudo rm -rf $CONGRESS_AUTH_CACHE_DIR $CONGRESS_CONF_DIR -} - - -# Configures keystone integration for congress service -function _congress_setup_keystone { - local conf_file=$1 - local section=$2 - local use_auth_url=$3 - - if [[ -z $skip_auth_cache ]]; then - iniset $conf_file $section signing_dir $CONGRESS_AUTH_CACHE_DIR - # Create cache dir - create_congress_cache_dir - fi - - configure_auth_token_middleware $conf_file $CONGRESS_ADMIN_USERNAME $CONGRESS_AUTH_CACHE_DIR $section -} - -# Set up Horizon integration with Congress -function _congress_setup_horizon { - # Dashboard panels - ln -fs $CONGRESSDASHBOARD_DIR/congress_dashboard/enabled/_50_policy.py $HORIZON_DIR/openstack_dashboard/local/enabled/ - ln -fs $CONGRESSDASHBOARD_DIR/congress_dashboard/enabled/_60_policies.py $HORIZON_DIR/openstack_dashboard/local/enabled/ - ln -fs $CONGRESSDASHBOARD_DIR/congress_dashboard/enabled/_70_datasources.py $HORIZON_DIR/openstack_dashboard/local/enabled/ - - # Restart Horizon - restart_apache_server -} - -# Main dispatcher -#---------------- - -if is_service_enabled congress; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Congress" - install_congress - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Congress" - configure_congress - - if is_service_enabled key; then - create_congress_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize Congress - init_congress - - # Start the congress API and Congress taskmgr components - echo_summary "Starting Congress" - start_congress_service_and_check - configure_congress_datasources - create_predefined_policy - fi - - if [[ "$1" == "unstack" ]]; then - stop_congress - fi - - if [[ "$1" == "clean" ]]; then - cleanup_congress - fi -fi - -# Restore xtrace -$XTRACE diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index 3760937a..00000000 --- a/devstack/settings +++ /dev/null @@ -1,59 +0,0 @@ -# DevStack settings -#------------------ - -# Congress Configuration -# ---------------------- - -# Set up default directories -CONGRESS_DIR=$DEST/congress -CONGRESSCLIENT_DIR=$DEST/python-congressclient -CONGRESS_AUTH_CACHE_DIR=${CONGRESS_AUTH_CACHE_DIR:-/var/cache/congress} -CONGRESSDASHBOARD_DIR=$DEST/congress-dashboard - -# Support entry points installation of console scripts -if [[ -f $CONGRESS_DIR/bin/congress-server ]]; then - CONGRESS_BIN_DIR=$CONGRESS_DIR/bin -else - CONGRESS_BIN_DIR=$(get_python_exec_prefix) -fi - -CONGRESS_CONF_DIR=/etc/congress -CONGRESS_CONF=$CONGRESS_CONF_DIR/congress.conf - -# Default Congress Port -CONGRESS_PORT=${CONGRESS_PORT:-1789} -# Default Congress Host -CONGRESS_HOST=${CONGRESS_HOST:-$SERVICE_HOST} -# Default admin username -CONGRESS_ADMIN_USERNAME=${CONGRESS_ADMIN_USERNAME:-congress} -# Default auth strategy -CONGRESS_AUTH_STRATEGY=${CONGRESS_AUTH_STRATEGY:-keystone} -# Default name for Congress database -CONGRESS_DB_NAME=${CONGRESS_DB_NAME:-congress} -# To run congress in HA mode -CONGRESS_REPLICATED=${CONGRESS_REPLICATED:-False} -# Default messaging driver -CONGRESS_TRANSPORT_URL=${CONGRESS_TRANSPORT_URL:-kombu+memory:////}
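# As a hypothetical usage note (not part of the original file): a deployer
# could override these defaults in devstack's local.conf before stacking.
# The variable names are the ones defined above; the values are illustrative:
#   CONGRESS_PORT=1789
#   CONGRESS_REPLICATED=True
#   CONGRESS_TRANSPORT_URL=rabbit://stackrabbit:secret@$SERVICE_HOST:5672/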
-# Multi-process deployment -CONGRESS_MULTIPROCESS_DEPLOYMENT=${CONGRESS_MULTIPROCESS_DEPLOYMENT:-False} -# Directory path to library policy files -CONGRESS_LIBRARY_DIR=$CONGRESS_CONF_DIR/library -# File path to predefined policy and rules -CONGRESS_PREDEFINED_POLICY_FILE=${CONGRESS_PREDEFINED_POLICY_FILE:-""} - -# Setup default directories for client -#------------------------------------- - -# congress service -CONGRESS_REPO=${CONGRESS_REPO:-${GIT_BASE}/openstack/congress.git} -CONGRESS_BRANCH=${CONGRESS_BRANCH:-master} - -# congress client library test -CONGRESSCLIENT_REPO=${CONGRESSCLIENT_REPO:-${GIT_BASE}/openstack/python-congressclient.git} -CONGRESSCLIENT_BRANCH=${CONGRESSCLIENT_BRANCH:-master} - -# congress dashboard -CONGRESSDASHBOARD_REPO=${CONGRESSDASHBOARD_REPO:-${GIT_BASE}/openstack/congress-dashboard.git} -CONGRESSDASHBOARD_BRANCH=${CONGRESSDASHBOARD_BRANCH:-master} - -enable_service congress congress-api congress-engine congress-datasources diff --git a/doc/source/README.rst b/doc/source/README.rst deleted file mode 100644 index 38ba8043..00000000 --- a/doc/source/README.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../README.rst \ No newline at end of file diff --git a/doc/source/api.rst b/doc/source/api.rst deleted file mode 100644 index 0395c30a..00000000 --- a/doc/source/api.rst +++ /dev/null @@ -1,221 +0,0 @@ - -.. _api: - -=== -API -=== - -The design document for the API can be found below. This document contains -the API as of the current release:: - - https://docs.google.com/document/d/14hM7-GSm3CcyohPT2Q7GalyrQRohVcx77hxEx4AO4Bk/edit# - -There are two top-level concepts in today's API: Policies and Data-sources. - -* Policies have *rules* that describe the permitted states of the cloud, - along with *tables* representing abstractions of the cloud state. -* Data-sources have *tables* representing the current state of the cloud. -* The *tables* of both policies and data-sources have rows that describe - their contents. - - -1. Policy (/v1/) -================ - -You can create and delete policies. Two policies are provided by -the system, and you are not permitted to delete them: *classification* -and *action*. A policy has the following fields: - -* name: a unique name that is human-readable -* abbreviation: a shorter name that appears in traces -* description: an explanation of this policy's purpose -* kind: kind of policy. Supported kinds are - - a) nonrecursive, - b) action, - c) database, - d) materialized - - The default is *nonrecursive* and unless you are writing action - descriptions for use with ``simulate`` you should always use the - default. - - -======= ============================ ================================ -Op URL Result -======= ============================ ================================ -GET .../policies List policies -GET .../policies/<policy-id> Read policy properties -POST .../policies/<policy-id> Create new policy -DELETE .../policies/<policy-id> Delete policy -======= ============================ ================================ - -You can also utilize the simulation API call, which answers hypothetical -questions: if we were to change the state of the cloud in this way, -what would the answer to this query be?
See :ref:`enforcement` for -more details and examples:: - - POST .../policies/<policy-id> - ?action=simulate - [&delta=true] # return just change in - [&trace=true] # also return explanation of result - - Request Body - { - "query" : "", # string query like: 'error(x)' - "sequence": "", # changes to state like: 'p+(1) p-(2)' - "action_policy" : "" # name of a policy: 'action' - } - -2. Policy Rules (/v1/policies/<policy-id>/...) -============================================== - -Each policy is a collection of rules. Congress supports the usual CRUD -operations for changing that collection. A rule has the following fields: - -* ID: a unique identifier -* name: a human-friendly identifier -* rule: a string representing the actual rule as described in :ref:`policy` -* comment: description or comment related to the rule - -======= ======================= ====================== -Op URL Result -======= ======================= ====================== -GET .../rules List policy rules -POST .../rules Create policy rule -GET .../rules/<rule-id> Read policy rule -DELETE .../rules/<rule-id> Delete policy rule -======= ======================= ====================== - - -3. Policy Tables (/v1/policies/<policy-id>/...) -=============================================== - -All the tables mentioned in the rules of a policy can be queried -via the API. They have only an ID field. - -======= ========================== ===================================== -Op URL Result -======= ========================== ===================================== -GET .../tables List tables -GET .../tables/<table-id> Read table properties -======= ========================== ===================================== - - -4. Policy Table Rows (/v1/policies/<policy-id>/tables/<table-id>/...) -===================================================================== - -Rules are used to instruct Congress how to create new tables from existing -tables. Congress allows you to query the actual contents of tables -at any point in time. Congress will also provide a trace of how -it computed a table, to help policy authors understand why -certain rows belong to the table and others do not. - -======= ====================== ===================================================== -Op URL Result -======= ====================== ===================================================== -GET .../rows List rows -GET .../rows?trace=true List rows with explanation (use 'printf' to display) -======= ====================== ===================================================== - - -5. Drivers (/v1/system/) -==================================== -A driver is a piece of code that once instantiated and configured interacts -with a specific cloud service like Nova or Neutron. A driver has the following -fields. - -* ID: a human-friendly unique identifier -* description: an explanation of which type of cloud service this driver - interacts with - -======= ======================== ============================================== -Op URL Result -======= ======================== ============================================== -GET .../drivers List drivers -GET .../drivers/<driver-id> Read driver properties -======= ======================== ============================================== - -Drivers were deprecated in Liberty, but the design changed slightly so that -we could retain them. - - -6. Data sources (/v1/) -====================== - -A data source is an instantiated and configured driver that interacts with a -particular instance of a cloud service (like Nova or Neutron). You can -construct multiple datasources using the same driver.
For example, if you have -two instances of Neutron running, one in production and one in test, and you -want to write policy over both of them, you would create two datasources using -the Neutron driver and give them different names and configuration options. For -example, you might call one datasource 'neutron_prod' and the other -'neutron_test' and configure them with different IP addresses. - -A datasource has the following fields. - -* ID: a unique identifier -* name: a human-friendly name that is unique across datasources and policies -* driver: the name of the driver code that this datasource is running -* config: a dictionary capturing the configuration of this datasource -* description: an explanation of the purpose of this datasource -* enabled: whether or not this datasource is functioning (which is always True) - - -======= ================================ ====================================== -Op URL Result -======= ================================ ====================================== -GET .../data-sources List data sources -POST .../data-sources Create data source -DELETE .../data-sources/<ds-id> Delete data source -GET .../data-sources/<ds-id>/schema Show schema (tables and table-columns) -GET .../data-sources/<ds-id>/status Show data source status -GET .../data-sources/<ds-id>/actions List supported data source actions -======= ================================ ====================================== - - - -7. Data source Tables (/v1/data-sources/<ds-id>/...) -==================================================== - -Each data source maintains a collection of tables (very similar to a Policy). -The list of available tables for each data source is available via the API. -A table just has an ID field. - -======= ========================== ========================================= -Op URL Result -======= ========================== ========================================= -GET .../tables List data source tables -GET .../tables/<table-id> Read data source table properties -GET .../tables/<table-id>/spec Show a table schema -======= ========================== ========================================= - - - -8. Data source Table Rows (/v1/data-sources/<ds-id>/tables/<table-id>/...) -========================================================================== - -The contents of each data source table (the rows of each table) can be queried -via the API as well. A row has just a Data field, which is a list of values. - -======= ========================== ================================= -Op URL Result -======= ========================== ================================= -GET .../rows List rows -======= ========================== ================================= - - - -9. Versions (/) -=============== - -You can see the supported API versions. - -======= ========================== ================================= -Op URL Result -======= ========================== ================================= -GET .../ List supported versions -GET .../<version-id> Read version -======= ========================== ================================= - - - diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst deleted file mode 100644 index b7169ea9..00000000 --- a/doc/source/architecture.rst +++ /dev/null @@ -1,110 +0,0 @@ - -.. _concepts: - - -============ -Architecture -============ - -Congress consists of the Congress policy engine and a driver for any number of -other cloud services that act as sources of information about the cloud:: - - Policy Engine - | - ------------------+------------------ - | | | - Nova Driver Neutron Driver Keystone Driver - | | | ....
- Nova Neutron Keystone - - -1. Cloud Services, Drivers, and State -------------------------------------- - -A service is anything that manages cloud state. For example, -OpenStack components like Nova, Neutron, Cinder, Swift, Heat, and -Keystone are all services. Software like ActiveDirectory, inventory management -systems, anti-virus scanners, intrusion detection systems, and -relational databases are also services. - -Congress uses a driver to connect each service to the policy engine. -A driver fetches cloud state from its respective cloud service, and -then feeds that state to the policy engine in the form of tables. -A table is a collection of rows; each row is a collection of columns; -each row-column entry stores simple data like numbers or strings. - -For example, the Nova driver periodically makes API calls to Nova to fetch -the list of virtual machines in the cloud, and the properties -associated with each VM. The Nova driver then populates a table in -the policy engine with the Nova state. For example, the Nova driver -populates a table like this:: - - --------------------------------------------- - | VM id | Name | Status | Power State | ... | - --------------------------------------------- - | 12345 | foo | ACTIVE | Running | ... | - | ... | | | | | - --------------------------------------------- - - -The state for each service will be unique to that service. For -Neutron, the existing logical networks, subnets, and ports make up -that state. For Nova, the existing VMs along with their disk and -memory space make up that state. For an anti-virus scanner, the -results of all its most recent scans are the state. The -:ref:`Services <cloudservices>` section describes services and drivers in -more detail. - - -2. Policy ---------- - -A Congress policy defines all those states of the cloud that are permitted: -all those combinations of service tables that are possible when the cloud is -behaving as intended. Since listing the permitted states explicitly is an -insurmountable task, policy authors describe the permitted states implicitly -by writing a collection of if-then statements that are always true when the -cloud is behaving as intended. - -More precisely, Congress uses Datalog as its policy language. Datalog is a -declarative language and is similar in many ways to SQL, Prolog, and -first-order logic. Datalog has been the subject of research and -development for the past 50 years, which means there is -a wealth of tools, algorithms, and deployment experience surrounding it. -The :ref:`Policy <policy>` section describes policies in more detail. - -3. Capabilities ---------------- - -Once Congress is given a policy, it has three -capabilities: - -* monitoring the cloud for policy violations -* preventing violations before they occur -* correcting violations after they occur - -In the future, Congress will also record the history of policy and its -violations for the purpose of audit. -The :ref:`Monitoring and Enforcement <enforcement>` section describes -these capabilities in more detail. - - -4. Congress Server and API --------------------------- - -Congress runs as a standalone server process and presents a RESTful -API for clients; drivers run as part of the server. -Instructions for installing and starting the Congress server can be -found in the Readme file.
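As a concrete illustration, a typical single-node invocation looks like the following (mirroring the flags used by the devstack plugin shown earlier; the node ID and config path are devstack's defaults and may differ in your deployment)::

    $ congress-server --node-id=allinonenode --config-file /etc/congress/congress.conf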
- - -The API allows clients to perform the following operations: - -* insert and delete policy statements -* check for policy violations -* ask hypothetical questions: if the cloud were to undergo these changes, - would that cause any policy violations? -* execute actions - -The :ref:`API <api>` section describes the API in more detail. - diff --git a/doc/source/cloudservices.rst b/doc/source/cloudservices.rst deleted file mode 100644 index 9b5e5a60..00000000 --- a/doc/source/cloudservices.rst +++ /dev/null @@ -1,622 +0,0 @@ - -.. _cloudservices: - -============== -Cloud Services -============== - -1. Congress Works With All Services -=================================== - -Congress will work with any cloud service, as long as Congress can -represent the service's state in *table* format. A table is a -collection of rows, where each row is a collection of columns, and -each row-column entry contains a string or a number. - -For example, Neutron contains a mapping between IP addresses and the -ports they are assigned to; Neutron represents this state as the -following table:: - - ====================================== ========== - ID IP - ====================================== ========== - "66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.1" - "66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.2" - "73e31d4c-a49c-11e3-be40-425861b86ab6" "10.0.0.3" - ====================================== ========== - -2. Drivers -========== - -To plug a new service into Congress, you write a small piece of code, -called a *driver*, that queries the new service (usually through API calls) -and translates the service state into tables of data. Out of the box -Congress includes drivers for a number of common services (see below). - -For example, the driver for Neutron invokes the Neutron API calls that list -networks, ports, security groups, and routers. The driver translates each of -the JSON objects that the API calls return into tables (where in Python a table -is a list of tuples). The Neutron driver is implemented here:: - - congress/datasources/neutronv2_driver.py - -Once the driver is available, you install it into Congress, -you configure it (such as with an IP address/port/username), and you -write policy that references the tables populated by that driver. - -2.1 Driver installation ------------------------ -To install a new driver, you must add its location to the Congress -configuration file and restart the server. Congress has a single -configuration parameter (called `drivers`) that is a list of all the -installed drivers. To install a new driver, simply add it to this list -and restart. - -For example, to install the Neutron driver, you add the following to the -list of drivers in the configuration file:: - - congress.datasources.neutronv2_driver.NeutronV2Driver - -If you have Nova and Neutron installed, you configure Congress as:: - - drivers = congress.datasources.neutronv2_driver.NeutronV2Driver,congress.datasources.nova_driver.NovaDriver - - -2.2 Driver configuration (DEPRECATED) and writing policy --------------------------------------------------------- -Once the driver code is in place, you can use it to create a `datasource` whose -data is available to Congress policies. To create a datasource, you use the API and -provide a unique name (the name you will use in policy to refer to the service), the -name of the datasource driver you want to use, and additional connection details -needed by your service (such as an IP and a username/password).
- -For example, using the Congress CLI, you can create a datasource named 'neutron_test' using the -'neutronv2' driver:: - - $ openstack congress datasource create <driver-name> <datasource-name> - --config username=<username> - --config password=<password> - --config tenant_name=<tenant-name> - --config auth_url=<auth-url> - - $ openstack congress datasource create neutronv2 neutron_test - --config username=neutron - --config password=password - --config tenant_name=cloudservices - --config auth_url=http://10.10.10.10:5000/v2.0 - -And if you had a second instance of Neutron running to manage -your production network, you could create a second datasource (named, say, 'neutron_prod') -using the neutronv2 driver so that you could write policy over both instances of Neutron. - -When you write policy, you use the name 'neutron_test:ports' to reference the 'ports' -table generated by the 'neutron_test' datasource, and you use 'neutron_test:networks' to -reference the 'networks' table generated by the 'neutron_test' datasource. Similarly, -you use 'neutron_prod:ports' and 'neutron_prod:networks' to reference the -tables populated by the 'neutron_prod' datasource. -(More details about writing policy can be found in the -:ref:`Policy <policy>` section.) - - -3. Currently Supported Drivers -============================== - -Congress currently has drivers for each of the following services. Each driver -has a differing degree of coverage for the available API calls. - - - OpenStack Aodh - - OpenStack Ceilometer - - OpenStack Cinder - - OpenStack Glance (v2) - - OpenStack Heat - - OpenStack Ironic - - OpenStack Keystone (v2 & v3) - - OpenStack Monasca - - OpenStack Murano - - OpenStack Neutron (v2) - - OpenStack Nova - - OpenStack Swift - - Cloud Foundry - - Plexxi - - vCenter - - OPNFV Doctor - -Using the API or CLI, you can review the list of tables and columns that a driver supports. -Roughly, you can think of each table as a collection of objects (like networks or servers), -and the columns of that table as the attributes of those objects (like name, status, or ID). -The value of each row-column entry is a (Python) string or number. If -the attribute as returned by the API call is a complex object, that object -is flattened into its own table (or tables).
- -For example:: - - $ openstack congress datasource schema show nova - +--------------+------------------------------------------------+ - | table | columns | - +--------------+------------------------------------------------+ - | flavors | {'name': 'id', 'description': 'None'}, | - | | {'name': 'name', 'description': 'None'}, | - | | {'name': 'vcpus', 'description': 'None'}, | - | | {'name': 'ram', 'description': 'None'}, | - | | {'name': 'disk', 'description': 'None'}, | - | | {'name': 'ephemeral', 'description': 'None'}, | - | | {'name': 'rxtx_factor', 'description': 'None'} | - | | | - | hosts | {'name': 'host_name', 'description': 'None'}, | - | | {'name': 'service', 'description': 'None'}, | - | | {'name': 'zone', 'description': 'None'} | - | | | - | floating_IPs | {'name': 'fixed_ip', 'description': 'None'}, | - | | {'name': 'id', 'description': 'None'}, | - | | {'name': 'ip', 'description': 'None'}, | - | | {'name': 'host_id', 'description': 'None'}, | - | | {'name': 'pool', 'description': 'None'} | - | | | - | servers | {'name': 'id', 'description': 'None'}, | - | | {'name': 'name', 'description': 'None'}, | - | | {'name': 'host_id', 'description': 'None'}, | - | | {'name': 'status', 'description': 'None'}, | - | | {'name': 'tenant_id', 'description': 'None'}, | - | | {'name': 'user_id', 'description': 'None'}, | - | | {'name': 'image_id', 'description': 'None'}, | - | | {'name': 'flavor_id', 'description': 'None'} | - | | | - +--------------+------------------------------------------------+ - -.. _datasource_driver: - -4. Writing a Datasource Driver ============================== - -This section is a tutorial for those of you interested in writing your own -datasource driver. It can be safely skipped otherwise. - -4.1 Implementing a Datasource Driver ------------------------------------- - -All the Datasource drivers extend the code found in:: - - congress/datasources/datasource_driver.py - -Typically, you will create a subclass of -``datasource_driver.PollingDataSourceDriver`` or -``datasource_driver.PushedDataSourceDriver`` depending on the type of your -datasource driver. Each instance of that class will correspond to a different -service using that driver. - -The following steps detail how to implement a polling datasource driver. - -1. Create a new Python module ``congress/datasources/new_driver.py`` - -2. Create a subclass of ``PollingDataSourceDriver``. - - ``from congress.datasources.datasource_driver import PollingDataSourceDriver`` - - ``class MyDriver(PollingDataSourceDriver)`` - -3. Implement the constructor :func:`MyDriver.__init__` - - ``def __init__(name, args)`` - - You must call the DataSourceDriver's constructor. - - ``super(MyDriver, self).__init__(name, args)`` - -4. Implement the function :func:`MyDriver.update_from_datasource` - - ``def update_from_datasource(self)`` - - This function is called to update ``self.state`` to reflect the new - state of the service. ``self.state`` is a dictionary that maps a - tablename (as a string) to a set of tuples (to a collection of tables). - Each tuple element must be either a number or string. This function - implements the polling logic for the service. - -5. By convention, it is useful for debugging purposes to include a -``main`` that calls update_from_datasource and prints out the raw -API results along with the tables that were generated. - -To install and test the newly written driver, please follow the new driver -installation procedure mentioned in the Driver installation section (2.1) -above.
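Putting steps 1-5 together, a minimal sketch might look like the following (``MyDriver``, ``my_table``, and ``_poll_my_service`` are illustrative names only, and the exact ``PollingDataSourceDriver`` constructor signature may vary between releases):

.. code-block:: python

    from congress.datasources.datasource_driver import PollingDataSourceDriver


    class MyDriver(PollingDataSourceDriver):

        def __init__(self, name, args):
            # Step 3: invoke the parent constructor first.
            super(MyDriver, self).__init__(name, args)

        def update_from_datasource(self):
            # Step 4: poll the service and rebuild self.state, a dict
            # mapping each table name to a set of tuples whose elements
            # are numbers or strings.
            rows = self._poll_my_service()  # hypothetical helper
            self.state = {'my_table': set(rows)}

        def _poll_my_service(self):
            # Placeholder for the real API calls to the external service.
            return [(1, 'alice'), (2, 'bob')]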
- - -4.2 Converting API results into Tables --------------------------------------- -Since Congress requires the state of each dataservice to be represented as -tables, we must convert the results of each API call (which may be comprised -of dictionaries, lists, dictionaries embedded within lists, etc.) into tables. - -4.2.1 Convenience translators -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Congress provides a translation method to make the translation from API -results into tables convenient. The translation method takes a description of -the API data structure, and converts objects of that structure into rows of -one or more tables (depending on the data structure). For example, this is a -partial snippet from the Neutron driver:: - - networks_translator = { - 'translation-type': 'HDICT', - 'table-name': 'networks', - 'selector-type': 'DICT_SELECTOR', - 'field-translators': - ({'fieldname': 'id', 'translator': value_trans}, - {'fieldname': 'name', 'translator': value_trans}, - {'fieldname': 'tenant_id', 'translator': value_trans}, - {'fieldname': 'subnets', 'col': 'subnet_group_id', - 'translator': {'translation-type': 'LIST', - 'table-name': 'networks.subnets', - 'id-col': 'subnet_group_id', - 'val-col': 'subnet', - 'translator': value_trans}})} - -This networks_translator describes a python dictionary data structure that -contains four keys: id, name, tenant_id, and subnets. The value for the -subnets key is a list of subnet_group_ids each of which is a number. For -example: - - { "id": 1234, - "name": "Network Foo", - "tenant_id": 5678, - "subnets": [ 100, 101 ] } - -Given the networks_translator description, the translator creates two tables. -The first table is named "networks" with a column for name, subnets, -tenant_id, and id. The second table will be named "networks.subnet" and will -contain two columns, one containing the subnet_group_id, and the second -containing an ID that associates the row in the network to the rows in the -networks.subnets table. - -To use the translation methods, the driver defines a translator such as -networks_translator and then passes the API response objects to -translate_objs() which is defined in congress/datasources/datasource_driver.py -See congress/datasources/neutron_driver.py as an example. - -4.2.2 Custom data conversion -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - -The convenience translators may be insufficient in some cases, for example, -the data source may provide data in an unusual format, the convenience -translators may be inefficient, or the fixed translation method may result in -an unsuitable table schema. In such cases, a driver may need to implement its -own translation. In those cases, we have a few recommendations. - -**Recommendation 1: Row = object.** Typically an API call will return a -collection of objects (e.g. networks, virtual machines, disks). Conceptually -it is convenient to represent each object with a row in a table. The columns -of that row are the attributes of each object. For example, a table of all -virtual machines will have columns for memory, disk, flavor, and image. 
- -Table: virtual_machine - -====================================== ====== ==== ====== ===================================== -ID Memory Disk Flavor Image -====================================== ====== ==== ====== ===================================== -66dafde0-a49c-11e3-be40-425861b86ab6 256GB 1TB 1 83e31d4c-a49c-11e3-be40-425861b86ab6 -73e31d4c-a49c-11e3-be40-425861b86ab6 10GB 2TB 2 93e31d4c-a49c-11e3-be40-425861b86ab6 -====================================== ====== ==== ====== ===================================== - - -**Recommendation 2. Avoid wide tables.** Wide tables (i.e. tables with many -columns) are hard to use for a policy-writer. Breaking such tables up into -smaller ones is often a good idea. In the above example, we could create 4 -tables with 2 columns instead of 1 table with 5 columns. - -Table: virtual_machine.memory - -====================================== ====== -ID Memory -====================================== ====== -66dafde0-a49c-11e3-be40-425861b86ab6 256GB -73e31d4c-a49c-11e3-be40-425861b86ab6 10GB -====================================== ====== - -Table: virtual_machine.disk - -====================================== ====== -ID Disk -====================================== ====== -66dafde0-a49c-11e3-be40-425861b86ab6 1TB -73e31d4c-a49c-11e3-be40-425861b86ab6 2TB -====================================== ====== - -Table: virtual_machine.flavor - -====================================== ====== -ID Flavor -====================================== ====== -66dafde0-a49c-11e3-be40-425861b86ab6 1 -73e31d4c-a49c-11e3-be40-425861b86ab6 2 -====================================== ====== - -Table: virtual_machine.image - -====================================== ===================================== -ID Image -====================================== ===================================== -66dafde0-a49c-11e3-be40-425861b86ab6 83e31d4c-a49c-11e3-be40-425861b86ab6 -73e31d4c-a49c-11e3-be40-425861b86ab6 93e31d4c-a49c-11e3-be40-425861b86ab6 -====================================== ===================================== - - -**Recommendation 3. Try these design patterns.** Below we give a few design -patterns. Notice that when an object has an attribute whose value is a -structured object itself (e.g. a list of dictionaries), we must recursively -flatten that subobject into tables. - -- A List of dictionary converted to tuples - - Original data:: - - [{'key1':'value1','key2':'value2'}, - {'key1':'value3','key2':'value4'} - ] - - Tuple:: - - [('value1', 'value2'), - ('value3', 'value4') - ] - -- List of Dictionary with a nested List - - Original data:: - - [{'key1':'value1','key2':['v1','v2']}, - {'key1':'value2','key2':['v3','v4']} - ] - - Tuple:: - - [('value1', 'uuid1'), - ('value1', 'uuid2'), - ('value2', 'uuid3'), - ('value2', 'uuid4') - ] - - [('uuid1', 'v1'), - ('uuid2', 'v2'), - ('uuid3', 'v3'), - ('uuid4', 'v4') - ] - - *Note* : uuid* are congress generated uuids - -- List of Dictionary with a nested dictionary - - Original data:: - - [{'key1':'value1','key2':{'k1':'v1'}}, - {'key1':'value2','key2':{'k1':'v2'}} - ] - - Tuple:: - - [('value1', 'uuid1'), - ('value2', 'uuid2') - ] - - [('uuid1', 'k1', 'v1'), - ('uuid2', 'k1', 'v2'), - ] - - *Note* : uuid* are congress generated uuids - -4.3 Writing a Datasource driver test ------------------------------------- - -Once you've written a driver, you'll want to add a unit test for it. To help, this section describes how the unit test for the Glance driver works. Here are the relevant files. 
- -* Driver code: congress/datasources/glancev2_driver.py -* Test code: congress/tests/datasources/test_glancev2_driver.py (appearing in full at the end of this section) - -The test code has two methods: setUp() and test_update_from_datasource(). - -4.3.1 Glance setup -~~~~~~~~~~~~~~~~~~ - -We begin our description with the setUp() method of the test. - -.. code-block:: python - - def setUp(self): - -First the test creates a fake (actually a mock) Keystone. Most clients talk to Keystone, so having a fake one seems to be necessary to make the Glance client work properly. - -.. code-block:: python - - self.keystone_client_p = mock.patch( - "keystoneclient.v2_0.client.Client") - self.keystone_client_p.start() - -Next the test creates a fake Glance client. Glance is an OpenStack service that stores (among other things) operating-system images that you can use to create a new VM. The Glance datasource driver makes a call to .images.list() to retrieve the list of those images, and then turns that list of images into tables. The test creates a fake Glance client so it can control the return value of .images.list(). - -.. code-block:: python - - self.glance_client_p = mock.patch("glanceclient.v2.client.Client") - self.glance_client_p.start() - -Next the test instantiates the GlanceV2Driver class, which contains the code for the Glance driver. Passing 'poll_time' as 0 is probably unnecessary here, but it tells the driver not to poll automatically. Passing 'client' is important because it tells the GlanceV2Driver class to use a mocked version of the Glance client instead of creating its own. - -.. code-block:: python - - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - args['client'] = mock.MagicMock() - self.driver = glancev2_driver.GlanceV2Driver(args=args) - -Next the test defines which value it wants .images.list() to return. The test itself will check if the Glance driver code properly translates this return value into tables. So this is the actual input to the test. You can either write this by hand, or run the Glance client and print out the results. - -.. code-block:: python - - self.mock_images = {'images': [ - {u'checksum': u'9e486c3bf76219a6a37add392e425b36', - u'container_format': u'bare', - u'created_at': u'2014-10-01T20:28:08Z', - ... - - -4.3.2 Glance test -~~~~~~~~~~~~~~~~~ - - -test_update_from_datasource() is the actual test, where we have the datasource driver grab the list of Glance images and translate them to tables. The test runs the update_from_datasource() method like normal except it ensures the return value of .images.list() is self.mock_images. - -.. code-block:: python - - def test_update_from_datasource(self): - -The first thing the method does is set the return value of self.driver.glance.images.list() to self.mock_images['images']. Then it calls update_from_datasource() in the usual way, which translates self.mock_images['images'] into tables and stores the result into the driver's self.state dictionary. - -.. code-block:: python - - with mock.patch.object(self.driver.glance.images, "list") as img_list: - img_list.return_value = self.mock_images['images'] - self.driver.update_from_datasource() - -Next the test defines the tables that update_from_datasource() should construct. Actually, the test defines the expected value of Glance's self.state when update_from_datasource() finishes. Remember that self.state is a dictionary mapping a table name to the set of tuples that belong to the table.
For Glance, there’s just one table: ‘images’, and so the expected self.state is a dictionary with one key ‘images’ and one value: a set of tuples. - -.. code-block:: python - - expected = {'images': set([ - (u'6934941f-3eef-43f7-9198-9b3c188e4aab', - u'active', - u'cirros-0.3.2-x86_64-uec', - u'ami', - u'2014-10-01T20:28:06Z', - u'2014-10-01T20:28:07Z', - u'ami', - u'4dfdcf14a20940799d89c7a5e7345978', - 'False', - 0, - 0, - u'4eada48c2843d2a262c814ddc92ecf2c', - 25165824, - u'/v2/images/6934941f-3eef-43f7-9198-9b3c188e4aab/file', - u'15ed89b8-588d-47ad-8ee0-207ed8010569', - u'c244d5c7-1c83-414c-a90d-af7cea1dd3b5', - u'/v2/schemas/image', - u'public'), - ... - - -At this point in the test, update_from_datasource() has already been run, so all it does is check that the driver's self.state has the expected value. - -.. code-block:: python - - self.assertEqual(self.driver.state, expected) - - -4.3.3 Glance test code in full -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - -.. code-block:: python - - import mock - - from congress.datasources import glancev2_driver - from congress.tests import base - from congress.tests import helper - - - class TestGlanceV2Driver(base.TestCase): - - def setUp(self): - super(TestGlanceV2Driver, self).setUp() - self.keystone_client_p = mock.patch( - "keystoneclient.v2_0.client.Client") - self.keystone_client_p.start() - self.glance_client_p = mock.patch("glanceclient.v2.client.Client") - self.glance_client_p.start() - - args = helper.datasource_openstack_args() - args['poll_time'] = 0 - args['client'] = mock.MagicMock() - self.driver = glancev2_driver.GlanceV2Driver(args=args) - - self.mock_images = {'images': [ - {u'checksum': u'9e486c3bf76219a6a37add392e425b36', - u'container_format': u'bare', - u'created_at': u'2014-10-01T20:28:08Z', - u'disk_format': u'qcow2', - u'file': u'/v2/images/c42736e7-8b09-4906-abd2-d6dc8673c297/file', - u'id': u'c42736e7-8b09-4906-abd2-d6dc8673c297', - u'min_disk': 0, - u'min_ram': 0, - u'name': u'Fedora-x86_64-20-20140618-sda', - u'owner': u'4dfdcf14a20940799d89c7a5e7345978', - u'protected': False, - u'schema': u'/v2/schemas/image', - u'size': 209649664, - u'status': u'active', - u'tags': ['type=xen2', 'type=xen'], - u'updated_at': u'2014-10-01T20:28:09Z', - u'visibility': u'public'}, - {u'checksum': u'4eada48c2843d2a262c814ddc92ecf2c', - u'container_format': u'ami', - u'created_at': u'2014-10-01T20:28:06Z', - u'disk_format': u'ami', - u'file': u'/v2/images/6934941f-3eef-43f7-9198-9b3c188e4aab/file', - u'id': u'6934941f-3eef-43f7-9198-9b3c188e4aab', - u'kernel_id': u'15ed89b8-588d-47ad-8ee0-207ed8010569', - u'min_disk': 0, - u'min_ram': 0, - u'name': u'cirros-0.3.2-x86_64-uec', - u'owner': u'4dfdcf14a20940799d89c7a5e7345978', - u'protected': False, - u'ramdisk_id': u'c244d5c7-1c83-414c-a90d-af7cea1dd3b5', - u'schema': u'/v2/schemas/image', - u'size': 25165824, - u'status': u'active', - u'tags': [], - u'updated_at': u'2014-10-01T20:28:07Z', - u'visibility': u'public'}]} - - def test_update_from_datasource(self): - with mock.patch.object(self.driver.glance.images, "list") as img_list: - img_list.return_value = self.mock_images['images'] - self.driver.update_from_datasource() - expected = {'images': set([ - (u'6934941f-3eef-43f7-9198-9b3c188e4aab', - u'active', - u'cirros-0.3.2-x86_64-uec', - u'ami', - u'2014-10-01T20:28:06Z', - u'2014-10-01T20:28:07Z', - u'ami', - u'4dfdcf14a20940799d89c7a5e7345978', - 'False', - 0, - 0, - u'4eada48c2843d2a262c814ddc92ecf2c', - 25165824, - u'/v2/images/6934941f-3eef-43f7-9198-9b3c188e4aab/file', - 
u'15ed89b8-588d-47ad-8ee0-207ed8010569', - u'c244d5c7-1c83-414c-a90d-af7cea1dd3b5', - u'/v2/schemas/image', - u'public'), - (u'c42736e7-8b09-4906-abd2-d6dc8673c297', - u'active', - u'Fedora-x86_64-20-20140618-sda', - u'bare', - u'2014-10-01T20:28:08Z', - u'2014-10-01T20:28:09Z', - u'qcow2', - u'4dfdcf14a20940799d89c7a5e7345978', - 'False', - 0, - 0, - u'9e486c3bf76219a6a37add392e425b36', - 209649664, - u'/v2/images/c42736e7-8b09-4906-abd2-d6dc8673c297/file', - 'None', - 'None', - u'/v2/schemas/image', - u'public')]), - 'tags': set([ - (u'c42736e7-8b09-4906-abd2-d6dc8673c297', 'type=xen'), - (u'c42736e7-8b09-4906-abd2-d6dc8673c297', 'type=xen2')])} - self.assertEqual(self.driver.state, expected) diff --git a/doc/source/codeoverview.rst b/doc/source/codeoverview.rst deleted file mode 100644 index f4a8b9bd..00000000 --- a/doc/source/codeoverview.rst +++ /dev/null @@ -1,153 +0,0 @@ - - -.. _codeoverview: - -============== -Code Overview -============== -This page gives a brief overview of the code structure that implements -Congress. - - -1. External information -======================= - -The main source of information is the Congress wiki. There are two separate -codebases that implement Congress: the server and the python client bindings. - -* wiki: https://wiki.openstack.org/wiki/Congress -* server: https://git.openstack.org/cgit/openstack/congress -* client: https://git.openstack.org/cgit/openstack/python-congressclient - -The structure of the client code is the same as that for other recent -OpenStack python clients. The bulk of the Congress code is contained -within the server. The remainder of this page describes the layout -of the server code. - - -2. Server directory structure -============================= - -Here are the most important components of the code, described by how they are -laid out in the repository. - -* ``congress/harness.py``: instantiates message bus and installs datasource - drivers and policy engine onto the bus -* ``congress/datalog``: implementation of Datalog policy language -* ``congress/policy_engines``: entities running on the message bus that - understand policy languages -* ``congress/datasources``: datasource drivers: thin wrappers/adapters for - integrating services like Nova, Neutron -* ``congress/dse2``: message bus that the policy engine and datasources use to - communicate -* ``congress/api``: API data models (entry points into the system from the API) -* ``contrib``: code for integrating into other services - - -3. Datalog -========== - -First is a description of the files and folders in congress/datalog. These files -implement Datalog: the language Congress uses for describing policies. - -* ``congress/datalog/Congress.g``: Antlr3 grammar defining the syntax of Datalog. - ``make`` uses Congress.g to generate CongressLexer.py and CongressParser.py, - which contain the code used to convert strings into Python datastructures. -* ``congress/datalog/compile.py``: - - * Convert policy strings into Python datastructures that represent those - strings. - * Includes datastructures for individual policy statements. - * Also includes additional syntax checks that are not handled by the grammar. - -* ``congress/datalog/unify.py``: unification routines used at the heart of the - policy reasoning algorithms. - - -Second is a brief overview of the fundamental datastructures used to represent -individual policy statements. - -* ``congress/datalog/compile.py:Rule``: represents a single rule of the form - ``head1, ..., headn :- body1, ..., bodym``. 
Each headi and bodyi are - Literals. -* ``congress/datalog/compile.py:Literal``: represents a possibly negated atom of - the form ``[not] table(arg1, ..., argn)``. Each argi is a term. -* ``congress/datalog/compile.py:Term``: represents an argument to a Literal. Is - either a Variable or an ObjectConstant. -* ``congress/datalog/compile.py:ObjectConstant``: special kind of Term that - represents a fixed string or number. -* ``congress/datalog/compile.py:Variable``: special kind of Term that is a - placeholder used in a rule to represent an ObjectConstant. - -Third is an overview of the datastructures used to represent entire policies. -There are several different kinds of policies that you can choose from when -creating a new policy. Each makes different tradeoffs in terms of time/space -or in terms of the kind of policy statements that are permitted. Internally -these are called 'theories'. - -* ``congress/datalog/nonrecursive.py:NonrecursiveRuleTheory``: represents an - arbitrary collection of rules (without recursion). No precomputation of - table contents is performed. Small memory footprint, but query time can be - large. (A Prolog implementation of rules.) This is the default - datastructure used when creating a new policy. - -* ``congress/datalog/ruleset.py:Ruleset``: represents a collection of - rules, with indexing for faster query evaluation. - Used by NonrecursiveRuleTheory. - -* ``congress/datalog/factset.py:FactSet``: represents a collection of - non-negated Literals without variables, e.g. ``p(1, "alice")``. - Designed for minimal memory overhead. - -* ``congress/datalog/materialized.py:MaterializedViewTheory``: represents an - arbitrary collection of rules (even allows recursion). Contents of all - tables are computed and stored each time policy changes. Large memory - footprint, but query time is small when asking for the contents of any - table. Not actively maintained. - -* ``congress/datalog/database.py:Database``: represents a - collection of non-negated Literals without variables, e.g. ``p(1, "alice")``. - Similar to a FactSet but with additional overhead. Used by the - MaterializedViewTheory internally. Not actively maintained. - - -4. Policy engines -================= -The congress/policy_engines directory contains implementations and wrappers for -policy engines. At the time of writing, there are two policy engines in this -directory: the domain-agnostic policy engine (agnostic.py) and the skeleton -of a policy engine specialized for VM-placement (vm_placement.py). We -detail only the domain-agnostic policy engine. - -4.1 Domain-agnostic policy engine ---------------------------------- - -Source code found in ``congress/policy_engines/agnostic.py``. - -* class ``Runtime`` is the top-level class for the policy engine. It - implements the creation/deletion of (different kinds of) policies, the - insertion/deletion of policy statements, and all the other functionality - built on top of the Datalog implementation. - -* class ``DseRuntime`` inherits from ``Runtime`` to make it run on the DSE - message bus. It handles publishing/subscribing to the tables exported by the - datasources. - -Below we give a list of the top-level entry points to the domain-agnostic -Runtime class. - -* ``create_policy``, ``delete_policy``: implement multiple policies -* ``select``: ask for the answer to a standard database query - (e.g.
the contents of a table) for a specified policy -* ``insert``, ``delete``: insert or delete a single policy statement - into a specified policy -* ``update``: batch of inserts/deletes into multiple policies -* ``simulate``: apply a sequence of updates (temporarily), answer a - query, and roll-back the updates. -* ``TriggerRegistry``: central datastructure for triggers - (the mechanism used to implement manual-reactive-enforcement rules). - See ``initialize_tables`` and ``_update_obj`` to see how and when - triggers are executed. - - - diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index e9464549..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.todo', - 'openstackdocstheme', - #'sphinx.ext.intersphinx', -] - -# openstackdocstheme options -repository_name = 'openstack/congress' -bug_project = 'congress' -bug_tag = '' -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'congress' -copyright = u'2013, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['congress.'] - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -html_theme = 'openstackdocs' -# html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. 
-#intersphinx_mapping = {'http://docs.python.org/': None} - -todo_include_todos = True - diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst deleted file mode 100644 index 8cb3146f..00000000 --- a/doc/source/contributing.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../CONTRIBUTING.rst \ No newline at end of file diff --git a/doc/source/deployment.rst b/doc/source/deployment.rst deleted file mode 100644 index 8b8eb96e..00000000 --- a/doc/source/deployment.rst +++ /dev/null @@ -1,108 +0,0 @@ - -.. _deployment: - -========== -Deployment -========== -Congress has two modes for deployment: single-process and multi-process. -If you are interested in test-driving Congress or are not concerned -about high availability, the single-process deployment is best because it -is easiest to set up. If you are interested in making Congress highly -available, you want the multi-process deployment. - -In the single-process version, you run Congress as a single operating-system -process on one node (e.g. a container, VM, or physical machine). - -In the multi-process version, you start with the three components of Congress -(the API, the policy engine, and the datasource drivers). You choose how many -copies of each component you want to run, how you want to distribute those -components across processes, and how you want to distribute those processes -across nodes. - -Section :ref:`config` describes the common configuration options for both -single-process and multi-process deployments. After that, :ref:`ha_overview` -and :ref:`ha_deployment` describe how to set up the multi-process deployment. - - -.. _config: - ---------------------- -Configuration Options ---------------------- - -In this section we highlight the configuration options that are specific -to Congress. To generate a sample configuration file that lists all -available options, along with descriptions, run the following commands:: - - $ cd /path/to/congress - $ tox -egenconfig - -The tox command will create the file ``etc/congress.conf.sample``, which has -a comprehensive list of options. All options have default values, which -means that even if you specify no options Congress will run. - -The options most important to Congress are described below, all of which -appear under the [DEFAULT] section of the configuration file. - -``drivers`` - The list of permitted datasource drivers. Default is the empty list. - The list is a comma-separated list of Python class paths. For example: - drivers = congress.datasources.neutronv2_driver.NeutronV2Driver,congress.datasources.glancev2_driver.GlanceV2Driver - -``datasource_sync_period`` - The number of seconds to wait between synchronizing datasource config - from the database. Default is 0. - -``enable_execute_action`` - Whether or not Congress will execute actions. If false, Congress will - never execute any actions to do manual reactive enforcement, even if there - are policy statements that say actions should be executed and the - conditions of those actions become true. Default is True. - -One of Congress's new experimental features is distributing its services -across multiple processes and even hosts. Here are the options for using -that feature. - -``bus_id`` - Unique ID of the DSE bus. Can be any string. Defaults to 'bus'. - The ID should be the same across all the processes of a single congress instance - and should be unique across different congress instances.
-  This option is used when you create multiple, distributed instances of
-  Congress; it can be ignored if only one Congress instance is deployed as a
-  single process in the RabbitMQ cluster. Appears in the [dse] section.
-
-Here are the most often used of the standard OpenStack options. These
-are specified in the [DEFAULT] section of the configuration file.
-
-``auth_strategy``
-  Method for authenticating Congress users.
-  Can be assigned to either 'keystone', meaning that the user must provide
-  Keystone credentials, or to 'noauth', meaning that no authentication is
-  required. Default is 'keystone'.
-
-``verbose``
-  Controls whether the INFO-level of logging is enabled. If false, logging
-  level will be set to WARNING. Default is true. Deprecated.
-
-``debug``
-  Whether or not the DEBUG-level of logging is enabled. Default is false.
-
-``transport_url``
-  URL to the shared messaging service. It is not needed in a single-process
-  Congress deployment, but must be specified in a multi-process Congress
-  deployment.
-
-.. code-block:: text
-
-    [DEFAULT]
-    transport_url = rabbit://<user>:<password>@<host>:<port>
-
-=============
-HA Deployment
-=============
-
-.. toctree::
-   :maxdepth: 2
-
-   ha-overview
-   ha-deployment
diff --git a/doc/source/enforcement.rst b/doc/source/enforcement.rst
deleted file mode 100644
index 74471b29..00000000
--- a/doc/source/enforcement.rst
+++ /dev/null
@@ -1,372 +0,0 @@
-
-.. _enforcement:
-
-
-==========================
-Monitoring and Enforcement
-==========================
-
-Congress is given two inputs: the other cloud
-services in the datacenter and a policy describing the desired state of those
-services. Congress does two things with those inputs: monitoring and
-enforcement. *Monitoring* means passively comparing the actual state of the
-other cloud services and the desired state (i.e. policy) and flagging
-mismatches. *Enforcement* means actively working
-to ensure that the actual state of the other cloud services is also a desired
-state (i.e. that the other services obey policy).
-
-1. Monitoring
-=============
-Recall from :ref:`Policy <policy>` that policy violations are represented with
-the table *error*. To ask Congress for a list of all policy violations, we
-simply ask it for the contents of the *error* table.
-
-For example, recall our policy from :ref:`Policy <policy>`: each Neutron port
-has at most one IP address. For that policy, the *error* table has one row for
-each Neutron port that has more than one IP address. Each of those rows
-specifies the UUID for the port and two different IP addresses. So if we
-had the following mapping of Neutron ports to IP addresses:
-
-====================================== ==========
-ID                                     IP
-====================================== ==========
-"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.1"
-"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.2"
-"73e31d4c-e89b-12d3-a456-426655440000" "10.0.0.3"
-"73e31d4c-e89b-12d3-a456-426655440000" "10.0.0.4"
-"8caead95-67d5-4f45-b01b-4082cddce425" "10.0.0.5"
-====================================== ==========
-
-the *error* table would be something like the one shown below.
-
-====================================== ========== ==========
-ID                                     IP 1       IP 2
-====================================== ========== ==========
-"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.1" "10.0.0.2"
-"73e31d4c-e89b-12d3-a456-426655440000" "10.0.0.3" "10.0.0.4"
-====================================== ========== ==========
-
-The API would return this table as the following collection of Datalog facts
-(encoded as a string)::
-
-    error("66dafde0-a49c-11e3-be40-425861b86ab6", "10.0.0.1", "10.0.0.2")
-    error("73e31d4c-e89b-12d3-a456-426655440000", "10.0.0.3", "10.0.0.4")
-
-It is the responsibility of the client to periodically ask the server for the
-contents of the error table.
-
-
-2. Proactive Enforcement
-========================
-Often we want policy to be enforced, not just monitored. *Proactive
-enforcement* is the term we use to mean preventing policy violations before
-they occur. Proactive enforcement requires having enforcement points in the
-cloud that stop changes before they happen. Cloud services like Nova,
-Neutron, and Cinder are good examples of enforcement points. For example,
-Nova could refuse to provision a VM that would cause a policy violation,
-thereby proactively enforcing policy.
-
-To enable other cloud services like Nova to check if a proposed change in the
-cloud state would violate policy, the cloud service can consult Congress
-using its :func:`simulate` functionality. The idea for :func:`simulate` is
-that we ask Congress to answer a query after having
-temporarily made some changes to data and policies. Simulation allows us to
-explore the effects of proposed changes. Typically simulation is used to ask:
-if I made these changes, would there be any new policy violations?
-For example, provisioning a new VM might add rows to several of Nova's tables.
-After receiving an API call that requests a new VM be provisioned, Nova could
-ask Congress if adding those rows would create any new policy violations.
-If new violations arise, Nova could refuse to provision the VM, thereby
-proactively enforcing the policy.
-
-
-In this writeup we assume you are using the python-client.
-
-Suppose you want to know the policy violations after making the following
-changes.
-
-  1. insert a row into the *nova:servers* table with ID "uuid1", 2TB of disk,
-     and 10GB of memory
-  2. delete the row from *neutron:security_groups* with the ID "uuid2" and
-     name "alice_default_group"
-
-(Here we assume the *nova:servers* table has columns ID, disk-size, and memory
-and that *neutron:security_groups* has columns ID and name.)
-
-To do a simulation from the command line, you use the following command::
-
-    $ openstack congress policy simulate <policy-name> <query> <change-sequence> <action-policy-name>
-
-* <policy-name>: the name of the policy in which to run the query
-* <query>: a string representing the query you would like to run after
-  applying the change sequence
-* <change-sequence>: a string codifying a sequence of insertions and deletions
-  of data and rules. Insertions are denoted by '+' and deletions by '-'
-* <action-policy-name>: the name of another policy of type 'action' describing
-  the effects of any actions occurring in <change-sequence>. Actions are not
-  necessary and are explained later. Without actions, this argument can be
-  anything (and will in the future be optional).
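-
-As a quick reference, the general shape of the command, assembled from the
-examples that follow, is sketched here (the angle-bracketed placeholder names
-are illustrative rather than literal; ``--delta`` and ``--trace`` are the
-optional flags demonstrated below)::
-
-    $ openstack congress policy simulate <policy-name> '<query>'
-    '<change-sequence>' <action-policy-name> [--delta] [--trace]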
-
-For our nova:servers and neutron:security_groups example, we would run the
-following command to find all of the policy violations after inserting a row
-into nova:servers and then deleting a row out of neutron:security_groups::
-
-    $ openstack congress policy simulate classification
-    'error(x)'
-    'nova:servers+("uuid1", "2TB", "10 GB")
-    neutron:security_groups-("uuid2", "alice_default_group")'
-    action
-
-**More examples**
-
-Suppose the table 'p' is a collection of key-value pairs: p(key, value).
-Let's begin by creating a policy and adding some key/value pairs for 'p'::
-
-    $ openstack congress policy create alice
-    $ openstack congress policy rule create alice 'p(101, 0)'
-    $ openstack congress policy rule create alice 'p(202, "abc")'
-    $ openstack congress policy rule create alice 'p(302, 9)'
-
-Let's also add a statement that says there's an error if a single key has
-multiple values or if any value is assigned 9::
-
-    $ openstack congress policy rule create alice
-    'error(x) :- p(x, val1), p(x, val2), not equal(val1, val2)'
-    $ openstack congress policy rule create alice 'error(x) :- p(x, 9)'
-
-
-Each of the following is an example of a simulation query you might want to run.
-
-a) **Basic usage**. Simulate adding the value 5 to key 101 and ask for the contents of p::
-
-    $ openstack congress policy simulate alice 'p(x,y)' 'p+(101, 5)' action
-    p(101, 0)
-    p(101, 5)
-    p(202, "abc")
-    p(302, 9)
-
-b) **Error table**. Simulate adding the value 5 to key 101 and ask for the contents of error::
-
-    $ openstack congress policy simulate alice 'error(x)' 'p+(101, 5)' action
-    error(101)
-    error(302)
-
-c) **Inserts and Deletes**. Simulate adding the value 5 to key 101 and deleting 0 and ask for the contents of error::
-
-    $ openstack congress policy simulate alice 'error(x)'
-    'p+(101, 5) p-(101, 0)' action
-    error(302)
-
-
-d) **Error changes**. Simulate changing the value of key 101 to 9 and query the **change** in the error table::
-
-    $ openstack congress policy simulate alice 'error(x)'
-    'p+(101, 9) p-(101, 0)' action --delta
-    error+(101)
-
-
-e) **Multiple error changes**. Simulate changing 101:9, 202:9, 302:1 and query the *change* in the error table::
-
-    $ openstack congress policy simulate alice 'error(x)'
-    'p+(101, 9) p-(101, 0) p+(202, 9) p-(202, "abc") p+(302, 1) p-(302, 9)'
-    action --delta
-    error+(202)
-    error+(101)
-    error-(302)
-
-
-f) **Order matters**. Simulate changing 101:9, 202:9, 302:1, and finally 101:15 (in that order). Then query the *change* in the error table::
-
-    $ openstack congress policy simulate alice 'error(x)'
-    'p+(101, 9) p-(101, 0) p+(202, 9) p-(202, "abc") p+(302, 1) p-(302, 9)
-    p+(101, 15) p-(101, 9)' action --delta
-    error+(202)
-    error-(302)
-
-
-g) **Tracing**. Simulate changing 101:9 and query the *change* in the error table, while asking for a debug trace of the computation::
-
-    $ openstack congress policy simulate alice 'error(x)'
-    'p+(101, 9) p-(101, 0)' action --delta --trace
-    error+(101)
-    RT    : ** Simulate: Querying error(x)
-    Clas  : Call: error(x)
-    Clas  : | Call: p(x, 9)
-    Clas  : | Exit: p(302, 9)
-    Clas  : Exit: error(302)
-    Clas  : Redo: error(302)
-    Clas  : | Redo: p(302, 9)
-    Clas  : | Fail: p(x, 9)
-    Clas  : Fail: error(x)
-    Clas  : Found answer [error(302)]
-    RT    : Original result of error(x) is [error(302)]
-    RT    : ** Simulate: Applying sequence [set(101, 9)]
-    Action: Call: action(x)
-    ...
-
-h) **Changing rules**. Simulate adding 101:5 (which results in 101 having 2 values) and deleting the rule that says each key must have at most 1 value. Then query the error table::
-
-    $ openstack congress policy simulate alice 'error(x)'
-    'p+(101, 5) error-(x) :- p(x, val1), p(x, val2), not equal(val1, val2)'
-    action
-    error(302)
-
-The syntax for inserting/deleting rules is a bit awkward since we just affix
-a + or - to the head of the rule. Ideally we would affix the +/- to the rule
-as a whole. This syntactic sugar will be added in a future release.
-
-There is also currently the limitation that you can only insert/delete rules
-from the policy you are querying. And you cannot insert/delete action
-description rules.
-
-
-2.1 Simulation with Actions
----------------------------
-
-The downside to the simulation functionality just described is that the
-cloud service wanting to prevent policy violations would need to compute the
-proposed changes in terms of the *tables* that Congress uses to represent its
-internal state. Ideally a cloud service would have no idea which tables
-Congress uses to represent its internals. But even if each cloud service
-knew which tables Congress was using, it would still need to convert each API
-call into a collection of changes on its internal tables.
-
-For example, an API call for Nova to provision a new VM might change several
-tables. An API call to Heat to provision a new app might change tables in
-several different cloud services. Translating each API call exposed by a
-cloud service into the collection of Congress table changes is sometimes
-impractical.
-
-In the key/value examples above, the caller needed to know the current
-state of the key/value store in order to accurately describe the changes
-she wanted to make. Setting the key 101 to value 9 meant knowing that its
-current value was 0 so that during the simulation we could say to delete the
-assignment of 101 to 0 and add the assignment of 101 to 9.
-
-It would be preferable if an external cloud service could simply ask Congress
-if the API call it is about to execute is permitted by the policy.
-To do that, we must tell Congress what each of those actions does in terms of
-the cloud-service tables. Each of these *action descriptions* describes which
-rows are inserted/deleted from which tables if the action were to be executed
-in the current state of the cloud. Those action descriptions are written in
-Datalog and are stored in a policy of type 'action'.
-
-Action description policy statements are regular Datalog rules with one main
-exception: they use + and - to adorn the table in the head of a rule to indicate
-whether they are describing how to *insert* table rows or to *delete* table rows,
-respectively.
-
-For example in the key-value store, we can define an action 'set(key, value)'
-that deletes the current value assigned to 'key' and adds 'value' in its place.
-To describe this action, we write two things: a declaration to Congress that
-*set* is indeed an action, using the reserved table name *action*, and
-rules that describe which table rows *set* inserts and which rows it deletes::
-
-    action("set")
-    p+(x,y) :- set(x,y)
-    p-(x,oldy) :- set(x,y), p(x,oldy)
-
-Note: Insertion takes precedence over deletion, which means that if a row is
-both inserted and deleted by an action, the row will be inserted.
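-
-For instance (a sketch using only the *set* rules above): executing
-``set(101, 0)`` in a state where ``p(101, 0)`` already holds derives both an
-insert and a delete of the very same row::
-
-    p+(101, 0)     derived via  p+(x,y) :- set(x,y)
-    p-(101, 0)     derived via  p-(x,oldy) :- set(x,y), p(x,oldy)
-
-Because insertion takes precedence, ``p(101, 0)`` survives, so re-setting a
-key to its current value leaves the table unchanged, as one would expect.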
-
-To insert these rules, we create a policy of type 'action' and then insert
-them into that policy::
-
-    $ openstack congress policy create aliceactions --kind 'action'
-    $ openstack congress policy rule create aliceactions 'action("set")'
-    $ openstack congress policy rule create aliceactions 'p+(x,y) :- set(x,y)'
-    $ openstack congress policy rule create aliceactions 'p-(x,oldy) :- set(x,y), p(x,oldy)'
-
-Below we illustrate how to use *set* to simplify the simulation queries
-shown previously.
-
-a) **Inserts and Deletes**. Set key 101 to value 5 and ask for the contents of error::
-
-    $ openstack congress policy simulate alice 'error(x)' 'set(101, 5)' aliceactions
-    error(302)
-
-
-b) **Multiple error changes**. Simulate changing 101:9, 202:9, 302:1 and query the *change* in the error table::
-
-    $ openstack congress policy simulate alice 'error(x)'
-    'set(101, 9) set(202, 9) set(302, 1)' aliceactions --delta
-    error+(202)
-    error+(101)
-    error-(302)
-
-
-c) **Order matters**. Simulate changing 101:9, 202:9, 302:1, and finally 101:15 (in that order). Then query the *change* in the error table::
-
-    $ openstack congress policy simulate alice 'error(x)'
-    'set(101, 9) set(202, 9) set(302, 1) set(101, 15)' aliceactions --delta
-    error+(202)
-    error-(302)
-
-d) **Mixing actions and state-changes**. Simulate changing 101:9 and adding value 7 for key 202. Then query the *change* in the error table::
-
-    $ openstack congress policy simulate alice 'error(x)'
-    'set(101, 9) p+(202, 7)' aliceactions --delta
-    error+(202)
-    error+(101)
-
-
-3. Manual Reactive Enforcement
-==============================
-Not all policies can be enforced proactively on all clouds, which means that sometimes
-the cloud will violate policy. Once policy violations happen, Congress can take action
-to transition the cloud back into one of the states permitted by policy. We call this
-*reactive enforcement*. Currently, to reactively enforce policy,
-Congress relies on people to tell it which actions to execute and when to execute them,
-hence we call it *manual* reactive enforcement.
-
-Of course, Congress tries to make it easy for people to tell it how to react to policy
-violations. People write policy statements
-that look almost the same as standard Datalog rules, except the rules use the modal *execute* in
-the head. For more information about the Datalog language and how to write these rules,
-see :ref:`Policy <policy>`.
-
-Take a simple example that is easy and relatively safe to try out. The policy we want is
-that no server should have an ACTIVE status. The policy we write tells Congress
-how to react when this policy is violated: it says to ask Nova to execute ``pause()``
-every time it sees a server with ACTIVE status::
-
-    $ openstack congress policy create reactive
-    $ openstack congress policy rule create reactive
-    'execute[nova:servers.pause(x)] :- nova:servers(id=x, status="ACTIVE")'
-
-The way this works is that every time Congress gets new data about the state of the cloud,
-it figures out whether that new data causes any new rows to be added to the
-``nova:servers.pause(x)`` table. (While policy writers know that nova:servers.pause isn't a table
-in the usual sense, the Datalog implementation treats it like a normal table and computes
-all the rows that belong to it in the usual way.) If there are new rows added to the
-``nova:servers.pause(x)`` table, Congress asks Nova to execute ``servers.pause`` for every row
-that was newly created. The arguments passed to ``servers.pause`` are the columns in each row.
-
-For example, if two servers have their status set to ACTIVE, Congress receives the following
-data (in actuality the data comes in with all the columns set, but here we use column references
-for the sake of pedagogy)::
-
-    nova:servers(id="66dafde0-a49c-11e3-be40-425861b86ab6", status="ACTIVE")
-    nova:servers(id="73e31d4c-a49c-11e3-be40-425861b86ab6", status="ACTIVE")
-
-Congress will then ask Nova to execute the following commands::
-
-    nova:servers.pause("66dafde0-a49c-11e3-be40-425861b86ab6")
-    nova:servers.pause("73e31d4c-a49c-11e3-be40-425861b86ab6")
-
-Congress will not wait for a response from Nova. Nor will it change the status of the two servers that it
-asked Nova to pause in its ``nova:servers`` table. Congress will simply execute the pause() actions and
-wait for new data to arrive, just like always.
-Eventually Nova executes the pause() requests, the status of
-those servers changes, and Congress receives another data update::
-
-    nova:servers(id="66dafde0-a49c-11e3-be40-425861b86ab6", status="PAUSED")
-    nova:servers(id="73e31d4c-a49c-11e3-be40-425861b86ab6", status="PAUSED")
-
-At this point, Congress updates the status of those servers in its ``nova:servers`` table to PAUSED.
-But this time, Congress will find that no new rows were **added** to the ``nova:servers.pause(x)``
-table and so will execute no actions. (Two rows were deleted, but Congress ignores deletions.)
-
-In short, Congress executes actions exactly when new rows are inserted into a table augmented
-with the *execute* modal.
-
diff --git a/doc/source/ha-deployment.rst b/doc/source/ha-deployment.rst
deleted file mode 100644
index 329b480b..00000000
--- a/doc/source/ha-deployment.rst
+++ /dev/null
@@ -1,185 +0,0 @@
-
-.. _ha_deployment:
-
-#############
-HA Deployment
-#############
-
-Overview
-========
-
-This section shows how to deploy Congress with High Availability (HA). For an
-architectural overview, please see the :ref:`HA Overview <ha_overview>`.
-
-An HA deployment of Congress involves five main steps.
-
-#. Deploy messaging and database infrastructure to be shared by all the
-   Congress nodes.
-#. Prepare the hosts to run Congress nodes.
-#. Deploy N (at least 2) policy-engine nodes.
-#. Deploy one datasource-drivers node.
-#. Deploy a load-balancer to load-balance between the N policy-engine nodes.
-
-The following sections describe each step in more detail.
-
-
-Shared Services
-===============
-
-All the Congress nodes share a database backend. To set up a database backend
-for Congress, please follow the database portion of
-`separate install instructions`__.
-
-__ http://docs.openstack.org/developer/congress/README.html?highlight=readme#separate-install
-
-Various solutions exist to avoid creating a single point of failure with the
-database backend.
-
-Note: If a replicated database solution is used, it must support table
-locking. Galera, for example, would not work. This limitation is expected to
-be removed in the Ocata release.
-
-A shared messaging service is also required. Refer to `Shared Messaging`__ for
-instructions for installing and configuring RabbitMQ.
-
-__ http://docs.openstack.org/ha-guide/shared-messaging.html
-
-
-Hosts Preparation
-=================
-
-Congress should be installed on each host expected to run a Congress node.
-Please follow the directions in `separate install instructions`__ to install
-Congress on each host, skipping the local database portion.
-
-__ http://docs.openstack.org/developer/congress/README.html?highlight=readme#separate-install
-
-In the configuration file, a ``transport_url`` should be specified to use the
-RabbitMQ messaging service configured in step 1.
-
-For example:
-
-.. code-block:: text
-
-    [DEFAULT]
-    transport_url = rabbit://<user>:<password>@<rabbit-host>:5672
-
-In addition, the ``replicated_policy_engine`` option should be set to ``True``.
-
-.. code-block:: text
-
-    [DEFAULT]
-    replicated_policy_engine = True
-
-All hosts should be configured with a database connection that points to the
-shared database deployed in step 1, not the local address shown in
-`separate install instructions`__.
-
-__ http://docs.openstack.org/developer/congress/README.html?highlight=readme#separate-install
-
-For example:
-
-.. code-block:: text
-
-    [database]
-    connection = mysql+pymysql://root:<password>@<database-host>/congress?charset=utf8
-
-
-Datasource Drivers Node
-=======================
-
-In this step, we deploy a single datasource-drivers node in warm-standby style.
-
-The datasource-drivers node can be started directly with the following command:
-
-.. code-block:: console
-
-    $ python /usr/local/bin/congress-server --datasources --node-id=<unique-node-id>
-
-A unique node-id (distinct from all the policy-engine nodes) must be specified.
-
-For warm-standby deployment, an external manager is used to launch and manage
-the datasource-drivers node. In this document, we sketch how to deploy the
-datasource-drivers node with `Pacemaker`_ .
-
-See the `OpenStack High Availability Guide`__ for general usage of Pacemaker
-and how to deploy the Pacemaker cluster stack. The guide also has some HA
-configuration guidance for other OpenStack projects.
-
-__ http://docs.openstack.org/ha-guide/index.html
-.. _Pacemaker: http://clusterlabs.org/
-
-Prepare OCF resource agent
---------------------------
-
-You need a custom Resource Agent (RA) for DataSource Node HA. The custom RA is
-located in the Congress repository at
-``/path/to/congress/script/ocf/congress-datasource``. Install the RA with the
-following steps:
-
-.. code-block:: sh
-
-    $ cd /usr/lib/ocf/resource.d
-    $ mkdir openstack
-    $ cd openstack
-    $ cp /path/to/congress/script/ocf/congress-datasource ./congress-datasource
-    $ chmod a+rx congress-datasource
-
-Configuring the Resource Agent
-------------------------------
-
-You can now add the Pacemaker configuration for the Congress DataSource Node
-resource. Connect to the Pacemaker cluster with the *crm configure* command
-and add the following cluster resources. After adding the resource, make sure
-to *commit* the change.
-
-.. code-block:: sh
-
-    primitive ds-node ocf:openstack:congress-datasource \
-      params config="/etc/congress/congress.conf" \
-      node_id="datasource-node" \
-      op monitor interval="30s" timeout="30s"
-
-Make sure that all nodes in the cluster have the same config file, with the
-same name and path, since the DataSource Node resource, ``ds-node``, uses the
-config file defined by the *config* parameter to launch the resource.
-
-The RA has the following configurable parameters:
-
-* config: the path of Congress's config file
-* node_id (optional): the node id of the datasource node. Default is
-  "datasource-node".
-* binary (optional): the path of the Congress binary. Default is
-  "/usr/local/bin/congress-server".
-* additional_parameters (optional): additional parameters of congress-server
-
-
-Policy Engine Nodes
-===================
-
-In this step, we deploy N (at least 2) policy-engine nodes, each with an
-associated API server. This step should be done only after the
-`Datasource Drivers Node`_ is deployed. Each node can be started as follows:
-
-.. code-block:: console
-
-    $ python /usr/local/bin/congress-server --api --policy-engine --node-id=<unique-node-id>
-
-Each node must have a unique node-id specified as a command-line option.
-
-For high availability, each node is usually deployed on a different host. If
-multiple nodes are to be deployed on the same host, each node must have a
-different port specified using the ``bind_port`` configuration option in the
-congress configuration file.
-
-
-Load-balancer
-=============
-
-A load-balancer should be used to distribute incoming API requests to the N
-policy-engine (and API service) nodes deployed in step 3.
-It is recommended that a sticky configuration be used to avoid exposing a user
-to out-of-sync artifacts when the user hits different policy-engine nodes.
-
-`HAProxy <http://www.haproxy.org/>`_ is a popular load-balancer for this
-purpose. The HAProxy section of the `OpenStack High Availability Guide`__
-has instructions for deploying HAProxy for high availability.
-
-__ http://docs.openstack.org/ha-guide/index.html
\ No newline at end of file
diff --git a/doc/source/ha-overview.rst b/doc/source/ha-overview.rst
deleted file mode 100644
index 6a9ca716..00000000
--- a/doc/source/ha-overview.rst
+++ /dev/null
@@ -1,169 +0,0 @@
-
-.. _ha_overview:
-
-###########
-HA Overview
-###########
-Some applications require Congress to be highly available. Some
-applications require a Congress Policy Engine (PE) to handle a high volume of
-queries. This guide describes Congress support for High Availability (HA) and
-High Throughput (HT) deployment.
-
-Please see the `OpenStack High Availability Guide`__ for details on how to
-install and configure OpenStack for High Availability.
-
-__ http://docs.openstack.org/ha-guide/index.html
-
-
-HA Types
-========
-
-Warm Standby
-------------
-Warm Standby is when a software component is installed and available on the
-secondary node. The secondary node is up and running. In the case of a
-failure on the primary node, the software component is started on the
-secondary node. This process is usually automated using a cluster manager.
-Data is regularly mirrored to the secondary system using disk-based replication
-or shared disk. This generally provides a recovery time of a few minutes.
-
-Active-Active (Load-Balanced)
------------------------------
-In this method, both the primary and secondary systems are active and
-processing requests in parallel. Data replication happens through software
-capabilities and would be bi-directional. This generally provides a recovery
-time that is instantaneous.
-
-
-Congress HAHT
-=============
-Congress provides Active-Active for the Policy Engine and Warm Standby for
-the Datasource Drivers.
-
-Run N instances of the Congress Policy Engine in active-active
-configuration, so both the primary and secondary systems are active
-and processing requests in parallel.
-
-One Datasource Driver (DSD) per physical datasource, publishing data on
-oslo-messaging to all policy engines.
-
-.. code-block:: text
-
-    +-------------------------------------+      +--------------+
-    | Load Balancer (e.g. HAProxy)        | <----+ Push client  |
-    +----+-------------+-------------+----+      +--------------+
-         |             |             |
-      PE |          PE |          PE |            all+DSDs node
-    +---------+   +---------+   +---------+   +-----------------+
-    | +-----+ |   | +-----+ |   | +-----+ |   | +-----+ +-----+ |
-    | | API | |   | | API | |   | | API | |   | | DSD | | DSD | |
-    | +-----+ |   | +-----+ |   | +-----+ |   | +-----+ +-----+ |
-    | +-----+ |   | +-----+ |   | +-----+ |   | +-----+ +-----+ |
-    | | PE  | |   | | PE  | |   | | PE  | |   | | DSD | | DSD | |
-    | +-----+ |   | +-----+ |   | +-----+ |   | +-----+ +-----+ |
-    +---------+   +---------+   +---------+   +--------+--------+
-         |             |             |                 |
-         |             |             |                 |
-         +--+----------+-------------+--------+--------+
-            |                                 |
-            |                                 |
-    +-------+----+   +------------------------+-----------------+
-    |  Oslo Msg  |   | DBs (policy, config, push data, exec log)|
-    +------------+   +------------------------------------------+
-
-
-Details
--------
-
-- Datasource Drivers (DSDs):
-
-  - One datasource driver per physical datasource
-  - All DSDs run in a single DSE node (process)
-  - Push DSDs: optionally persist data in push data DB, so a new snapshot
-    can be obtained whenever needed
-  - Warm Standby:
-
-    - Only one set of DSDs running at a given time; backup instances ready
-      to launch
-    - For pull DSDs, warm standby is most appropriate because warm startup
-      time is low (seconds) relative to frequency of data pulls
-    - For push DSDs, warm standby is generally sufficient except for use cases
-      that demand sub-second latency even during a failover
-
-- Policy Engine (PE):
-
-  - Replicate policy engine in active-active configuration.
-  - Policy synchronized across PE instances via Policy DB
-  - Every instance subscribes to the same data on oslo-messaging
-  - Reactive Enforcement:
-    All PE instances initiate reactive policy actions, but each DSD locally
-    selects a leader to listen to. The DSD ignores execution requests
-    initiated by all other PE instances.
-
-    - Every PE instance computes the required reactive enforcement actions and
-      initiates the corresponding execution requests over oslo-messaging
-    - Each DSD locally picks a PE instance as leader (say the first instance
-      the DSD hears from in the asymmetric node deployment, or the PE
-      instance on the same node as the DSD in a symmetric node deployment) and
-      executes only requests from that PE
-    - If heartbeat contact is lost with the leader, the DSD selects a new
-      leader
-    - Each PE instance is unaware of whether it is a leader
-
-  - Node Configurations:
-
-    - Congress supports the Two Node-Types (API+PE nodes, all-DSDs) node
-      configuration because it gives reasonable support for high-load DSDs
-      while keeping the deployment complexities low.
-
-  - Local Leader for Action Execution:
-
-    - Local Leader: every PE instance sends action-execution requests, but
      each receiving DSD locally picks a "leader" to listen to
-    - Because there is a single active DSD for a given data source,
-      it is a natural spot to locally choose a "leader" among the PE instances
-      sending reactive enforcement action execution requests. Congress
-      supports the local leader style because it avoids the deployment
-      complexities associated with global leader election. Furthermore,
-      because all PE instances perform reactive enforcement and send action
-      execution requests, the redundancy opens up the possibility for zero
-      disruption to reactive enforcement when a PE instance fails.
-- API:
-
-  - Each node has an active API service
-  - Each API service routes requests for the PE to its associated intranode PE
-  - Requests for any other service (e.g. get data source status) are routed to
-    the Datasource and/or Policy Engine, which will be fielded by some active
-    instance of the service on some node
-
-- Load balancer:
-
-  - Layer 7 load balancer (e.g. HAProxy) distributes incoming API calls among
-    the nodes (each running an API service).
-  - The load balancer is optionally configured to use sticky sessions to pin
-    each API caller to a particular node. This configuration avoids the
-    experience of going back in time.
-
-- External components (load balancer, DBs, and oslo messaging bus) can be made
-  highly available using standard solutions (e.g. clustered LB, HA rabbitMQ)
-
-
-Performance Impact
-==================
-- Increased latency due to network communication required by multi-node
-  deployment
-- Increased reactive enforcement latency if action executions are persistently
-  logged to facilitate smoother failover
-- PE replication can achieve greater query throughput
-
-Cautions and Limitations
-========================
-- Replicated PE deployment is new in the Newton release and a major departure
-  from the previous model. As a result, the deployer may be more likely to
-  experience unexpected issues.
-- In the Newton release, creating a new policy requires locking a database
-  table. As a result, it should not be deployed with a database backend that
-  does not support table locking (e.g., Galera). The limitation is expected to
-  be removed in the Ocata release.
-- Different PE instances may be out-of-sync in their data and policies
-  (eventual consistency).
-  The issue is generally made transparent to the end user by
-  configuring the load balancer to make each user sticky to a particular PE
-  instance. But if a user reaches a different PE instance (say because of
-  load balancer configuration or because the original instance went down),
-  the user may experience out-of-sync artifacts.
diff --git a/doc/source/inactive/enforcement_experimental.rst.inactive b/doc/source/inactive/enforcement_experimental.rst.inactive
deleted file mode 100644
index f634e0e2..00000000
--- a/doc/source/inactive/enforcement_experimental.rst.inactive
+++ /dev/null
@@ -1,65 +0,0 @@
-
-Reactive Enforcement
---------------------
-There's no requirement that a cloud service consult with Congress before changing the cloud (and even if it does, the cloud could change between the time Congress is consulted and when the service actually makes the changes). So Congress expects that sometimes the Classification policy will be violated and attempts to take action that brings the cloud back into compliance when that happens. We call this *reactive enforcement*.
-
-Congress's ability to monitor policy violations is the key to its ability to reactively enforce that policy. Every time a cloud service sends an update for one of its tables (or Congress polls the cloud service and finds an update), Congress checks if that update caused any new policy violations and if so attempts to find actions that when executed will eliminate them.
-
-For example, recall the Classification policy "every network connected to a VM must either be public or owned by someone in the same group as the VM owner," written below in Datalog::
-
-    error(vm) :- nova:virtual_machine(vm),
-        nova:network(vm, network),
-        not neutron:public_network(network),
-        neutron:owner(network, netowner),
-        nova:owner(vm, vmowner),
-        not same_group(netowner, vmowner)
-
-    same_group(user1, user2) :- ad:group(user1, group), ad:group(user2, group)
-
-If this policy is violated, it means there is a network connected to a VM whose owners are not in the same group. After identifying this violation, Congress could attempt to eliminate it by executing an action that disconnects the offending network from the VM.
-
-To find actions that eliminate violations, Congress combines information from the Classification theory, which explains why the error exists in terms of the cloud-service tables, and information from the Action theory, which explains how each API call can be used to change the cloud-service tables. Conceptually, Congress tries to match up the actions it has available with the changes in cloud-service tables that would eliminate a given violation.
-
-One important observation is that there are sometimes multiple ways of eliminating a violation, and not all of them are equally preferred. Congress is currently ultra-conservative about making changes to the cloud and does not enforce policy reactively without guidance from an administrator; however, in the future we expect to provide administrators with the ability to let Congress sometimes enforce policy reactively without guidance.
-
-In the example above, there are several ways of eliminating the violation:
-
-* Disconnect the network from the VM
-* Delete the VM
-* Make the network public
-* Change the owner of the VM
-* Change the owner of the network
-* Change the group membership of the VM owner and/or network owner
-
-In this case, disconnecting the network is a far better choice than destroying the VM, but without additional guidance from an administrator, there is not enough information contained in the Classification and Action policies for Congress to figure that out.
-
-Until we provide a mechanism that enables administrators to give Congress proper guidance for choosing among options for eliminating a violation, Congress supports two of the crucial building blocks for reactive enforcement: a way to enumerate remediation options and a cache of remediation decisions.
-
-To ask Congress for actions that will eliminate a policy violation (or more generally cause any row from any table defined in the Classification policy to be deleted), we can use the function :func:`remediate`.
-
-.. function:: remediate(atom)
-   :noindex:
-
-   ATOM is a string representing a Datalog atom. :func:`remediate` returns a string representing a list of action sequences that will cause ATOM to become false.
-
-Once a decision is made about which remediation action to take, an administrator (or other process) can choose to have Congress make the same decision for future violations. To do so, she modifies Congress's **Enforcement Policy**. The Enforcement policy is a Datalog policy that dictates which actions to take under what circumstances. Each rule in the policy has an action (as declared in the Action policy) in the head. Every time an update from a cloud-service table arrives at Congress, Congress executes any actions that the update caused to become true.
-
-For example, to execute the *disconnectNetwork* action each time a network is connected to a VM where the owners are not in the same group, we would insert the following statement into the Enforcement policy::
-
-    disconnectNetwork(vm, network) :-
-        nova:virtual_machine(vm),
-        nova:network(vm, network),
-        not neutron:public_network(network),
-        neutron:owner(network, netowner),
-        nova:owner(vm, vmowner),
-        not same_group(netowner, vmowner)
-
-
-In this example, the statement added to the Enforcement policy is identical to the one in the Classification policy, except in the head we use *disconnectNetwork* instead of *error*. But in other examples, the administrator might decide to apply an action in broader or narrower circumstances. See :ref:`other-enforcement` for a discussion of how Congress's policies relate to one another and how they relate to other approaches to policy-based management.
-
-
-.. warning: The functionality in this section is the least tested.
-
-.. todo: Add mechanism for admins to control when actions are taken automatically.
-
-
diff --git a/doc/source/inactive/ifallelse.rst.inactive b/doc/source/inactive/ifallelse.rst.inactive
deleted file mode 100644
index fa9a821c..00000000
--- a/doc/source/inactive/ifallelse.rst.inactive
+++ /dev/null
@@ -1,12 +0,0 @@
-
-If all else fails
-==================
-
-Congress's policy language was designed to balance the needs of the people who write complex policies (e.g. encoding the relevant fragments of HIPAA) and the needs of the software that enforces that policy. Too rich a policy language and the software cannot properly enforce it; too poor and people cannot write the policy they care about.
-
-Because the policy language is less expressive than a traditional programming language, there will undoubtedly arise situations where we need to hit Congress with a hammer. There are several ways to do that.
-
-- Create your own cloud service
-- Write the enforcement policy
-- Access control policy (unimplemented)
-
diff --git a/doc/source/inactive/intro.rst.inactive b/doc/source/inactive/intro.rst.inactive
deleted file mode 100644
index 7336ad41..00000000
--- a/doc/source/inactive/intro.rst.inactive
+++ /dev/null
@@ -1,16 +0,0 @@
-
-.. _introduction:
-
-Introduction
-============
-
-
-The user documentation is broken into 4 pieces:
-
-* :ref:`Concepts `: Understanding the core ideas of Congress
-* :ref:`Services <cloudservices>`: Configuring cloud services in Congress
-* :ref:`Policy <policy>`: Writing Congress policies
-* :ref:`API `: Interacting with Congress
-
-
-
diff --git a/doc/source/inactive/other_enforcement.rst.inactive b/doc/source/inactive/other_enforcement.rst.inactive
deleted file mode 100644
index 37e42835..00000000
--- a/doc/source/inactive/other_enforcement.rst.inactive
+++ /dev/null
@@ -1,35 +0,0 @@
-
-.. _other-enforcement:
-
-Other Enforcement Techniques
-============================
-
-Congress's policy language was designed to balance the needs of the people who write complex policies (e.g. encoding the relevant fragments of HIPAA) and the needs of the software that enforces that policy. Too rich a policy language and the software cannot properly enforce it; too poor and people cannot write the policy they care about.
-
-Because the policy language is less expressive than a traditional programming language, there will undoubtedly arise situations where we need to hit Congress with a hammer. There are several ways to do that.
-
-- Create your own cloud service
-- Write the enforcement policy
-- Access control policy (unimplemented)
-
-
-
-
-* Write a separate *Action Description* policy that describes how each of the API calls (which we call *actions*) changes the state of the cloud. Congress can then be asked to :func:`simulate` the effects of any action and check if an action execution would cause any new policy violations. External cloud services like Nova and Heat can then more directly pose the question of whether or not a given API call should be rejected.
-
-If the cloud and policy are such that all potential violations can be prevented before they occur, the Access Control policy approach is the right one, and the policy described in :ref:`policy` (called the *Classification policy*) is unnecessary because it will never be violated. But if there is ever a time at which some fragment of the policy might be violated, the Action-description approach is superior. Instead of writing two separate policies (the Classification policy and the Access Control policy) that have similar contents, we write two separate policies that have almost entirely independent contents (the Classification policy and the Action policy).
-
-
-
-
-
-
-
-
-Customizing Enforcement
------------------------
-- can choose which cloud services must consult Congress before taking action
-- can choose which actions to make available in the Action policy
-- can change condition-action rules in the Enforcement policy
-- can change the Access Control Policy
-
diff --git a/doc/source/inactive/policy_engine.rst.inactive b/doc/source/inactive/policy_engine.rst.inactive
deleted file mode 100644
index 016e573c..00000000
--- a/doc/source/inactive/policy_engine.rst.inactive
+++ /dev/null
@@ -1,99 +0,0 @@
-
-.. _policy_engine:
-
-Policy Engine
--------------
-
-The policy engine is the component of Congress responsible for evaluating, analyzing, and enforcing policy. To tell the policy engine what the policy is, you use the methods *insert* and *delete* (which are methods of the class congress.runtime.Runtime).
-
-.. function:: insert(formula)
-
-   Inserts FORMULA into the current policy. FORMULA is a string encoding a single Datalog rule.
-
-.. function:: delete(formula)
-
-   Deletes FORMULA from the current policy. FORMULA is a string encoding a single Datalog rule.
-
-
-Formulas may be inserted and deleted at any time. Once Congress has a policy that it is trying to enforce, there are a number of additional functions we can call to have Congress tell us about the current state of the cloud's policy compliance. Many of these methods take a string encoding of Datalog as input and return a string encoding of Datalog as output. When working in Python, we can parse a Datalog string into Python objects using **compile.parse**.
-
-.. function:: compile.parse(string)
-
-   Takes a string, parses it, and returns a list of compile.Rule objects representing the Datalog contained within that string. Does not check any of the Syntactic Restrictions from :ref:`datalog`.
-
-
-We can utilize the method :func:`select` to query the contents of any table, including *error*.
-
-.. function:: select(formula)
-
-   FORMULA is either a Datalog rule or atom. If it is an atom, SELECT returns a string representing all instances of FORMULA that are true. If it is a rule, it returns all instances of that rule where the body is true.
-
-:func:`select` takes either an atom or a rule as an argument. If it is an atom, Congress returns all instances of the atom that are true.
For example, suppose we have the following instance of the table *neutron:port*. - -====================================== ========== -ID IP -====================================== ========== -"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.1" -"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.2" -"73e31d4c-a49c-11e3-be40-425861b86ab6" "10.0.0.3" -====================================== ========== - -If the argument to :func:`select` is:: - - 'neutron:port("66dafde0-a49c-11e3-be40-425861b86ab6", x)' - -then Congress would return the following statements encoded as a string.:: - - neutron:port("66dafde0-a49c-11e3-be40-425861b86ab6", "10.0.0.1") - neutron:port("66dafde0-a49c-11e3-be40-425861b86ab6", "10.0.0.2") - - -If the argument to :func:`select` is a rule then Congress finds all instances of the body of the rule that are true and instantiates the variables in the head accordingly. For example, if the rule argument were the string:: - - multi_port(port) :- neutron:port(port, ip1), neutron:port(port, ip2), not equal(ip1, ip2) - -then Congress would return the following string. Notice that there are two results because there are two different reasons that "66dafde0-a49c-11e3-be40-425861b86ab6" belongs to *multi_port*.:: - - multi_port("66dafde0-a49c-11e3-be40-425861b86ab6") :- - neutron:port("66dafde0-a49c-11e3-be40-425861b86ab6", "10.0.0.1"), - neutron:port("66dafde0-a49c-11e3-be40-425861b86ab6", "10.0.0.2"), - not equal("10.0.0.1", "10.0.0.2") - multi_port("66dafde0-a49c-11e3-be40-425861b86ab6") :- - neutron:port("66dafde0-a49c-11e3-be40-425861b86ab6", "10.0.0.2"), - neutron:port("66dafde0-a49c-11e3-be40-425861b86ab6", "10.0.0.1"), - not equal("10.0.0.2", "10.0.0.1") - -We can also ask Congress for an explanation as to why a row belongs to a particular table. - -.. function:: explain(atom, tablenames=None, find_all=False) - - At the time of writing, this function needs an overhaul. In theory it should return a rule describing why ATOM is true. The head of the rule is ATOM. The body of the rule has tables only from TABLENAMES (which if not supplied Congress is free to choose) that constitute the causes of ATOM being true. If FIND_ALL is True, then the result is a list of all such rules. - -We can also ask Congress to enumerate possible actions that will cause a given policy violation (or any other row) to be eliminated. - -.. function:: remediate(atom) - - ATOM is a string representing a Datalog atom. :func:`remediate` returns a string representing a list of action sequences that will cause ATOM to become false. - -We can also ask Congress to simulate a sequence of actions and data or policy updates and answer a :func:`select` query in the resulting state. - -.. function:: simulate(query, sequence) - - QUERY is any :func:`select` query. SEQUENCE is a string of Datalog rules, described in more detail below. SIMULATE returns select(QUERY) after applying the updates described by SEQUENCE. Does not actually apply SEQUENCE--it only simulates its application. - -SEQUENCE is a mini-programming language built out of Datalog rules. Each Datalog rule in SEQUENCE is one of the following types. - -* Data update. q+(1) means that q(1) should be inserted. q-(1) means that q(1) should be deleted. -* Policy update. p+(x) :- q(x) means that p(x) :- q(x) should be inserted. p-(x) :- q(x) means that p(x) :- q(x) should be deleted. -* Action invocation. See :ref:`enforcement` for more details on actions. In short, an Action is analogous to an API call that changes state in the cloud. 
An action invocation is described by a Datalog rule, such as the following.:: - - create_network(x, 17), options:value(17, "name", "net1") :- result(x) - -Here the action being executed is *create_network(x, 17)*, where 17 is a "pointer" to the list of arguments (or options) for that API call, and the "name" argument of that argument list has value "net1". The value of *x* depends on the return value of a previously executed action. If the action does not depend on the previous result, you can use *true* in place of *result(x)*. - - - - - - - diff --git a/doc/source/inactive/related.rst.inactive b/doc/source/inactive/related.rst.inactive deleted file mode 100644 index bf2fa91e..00000000 --- a/doc/source/inactive/related.rst.inactive +++ /dev/null @@ -1,40 +0,0 @@ - -.. _relatedwork: - -============ -Related Work -============ -This page describes comparisons of Congress to other well-known systems. - - -1. ActiveDirectory -================== - -In many ways Congress is similar to ActiveDirectory (AD). - -* Both Congress and AD are cloud services whose main responsibility is policy. -* Both Congress and AD are concerned with providing a single touchpoint for - policy across the cloud so that users may understand and control their cloud - from a holistic point of view. -* Both Congress and AD support a policy language that includes abstractions - like groups that make it easy to express policies over large numbers of - servers, networks, users, files, etc. - -Congress generalizes ActiveDirectory in several dimensions. - -* AD is primarily used for managing a collection of servers. Congress is - designed to manage any collection of cloud services. Congress does not - require any of its own code running on the services it is managing. -* AD's policy language provides a list of several thousand actions that the - policy controls (e.g. changing the screen saver). Congress provides a - high-level, general-purpose policy language where a policy controls which - states of the cloud are permitted (independent of which actions were - executed to achieve that state). -* AD enforces policy by relying on the OS to prevent violations before they - occur. Congress makes no assumptions about the enforcement points it has - available; rather, once we add enforcement capabilities, it will prevent - policy violations when possible and correct them when not. And Congress - enables administrators to control the extent to which enforcement is - automatic. - - diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 03c36bb9..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. Congress documentation master file, created by - sphinx-quickstart on Tue Jul 9 22:26:36 2013. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Congress! -==================== - -**For Users** - -.. toctree:: - :maxdepth: 2 - - README - - architecture - cloudservices - policy - enforcement - api - - tutorial-tenant-sharing - troubleshooting - -**For Operators** - -.. toctree:: - :maxdepth: 2 - - deployment - release - - -**For Developers** - -.. toctree:: - :maxdepth: 2 - - contributing - codeoverview - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/source/policy.rst b/doc/source/policy.rst deleted file mode 100644 index d6f7c89a..00000000 --- a/doc/source/policy.rst +++ /dev/null @@ -1,570 +0,0 @@ - -.. _policy: - -====== -Policy -====== - -1. 
What Does a Policy Look Like
===============================
-
-A policy describes how services (either individually or as a whole)
-ought to behave. More specifically, a policy describes which
-**states** of the cloud are permitted and which are not. Or a policy describes
-which **actions** to take in each state of the cloud, in order to
-transition the cloud to one of those permitted states. For example,
-a policy might simply state that the minimum password length on all
-systems is eight characters, or a policy might state that if the
-minimum password length on some system is less than eight characters,
-then the minimum length should be reset to eight.
-
-In both cases, the policy relies on knowing the state of the cloud.
-The state of the cloud is the amalgamation of the states of all the
-services running in the cloud. In Congress, the state of each service
-is represented as a collection of tables (see :ref:`cloudservices`).
-The policy language determines whether any violation exists given the
-content of the state tables.
-
-For example, one desirable policy is that each Neutron port has at
-most one IP address. That means that the following table mapping port
-ID to IP address with the schema "port(id, ip)" is permitted by the
-policy.
-
-====================================== ==========
-ID                                     IP
-====================================== ==========
-"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.1"
-"73e31d4c-e89b-12d3-a456-426655440000" "10.0.0.3"
-====================================== ==========
-
-Whereas, the following table is a violation.
-
-====================================== ==========
-ID                                     IP
-====================================== ==========
-"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.1"
-"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.2"
-"73e31d4c-e89b-12d3-a456-426655440000" "10.0.0.3"
-====================================== ==========
-
-This is the policy written in Congress's policy language::
-
-    error(port_id, ip1, ip2) :-
-        port(port_id, ip1),
-        port(port_id, ip2),
-        not equal(ip1, ip2)
-
-Note that the policy above does not mention specific table content;
-instead it describes the general condition of tables. The policy says
-that for every row in the port table, no two rows should have the same
-ID and different IPs.
-
-This example verifies a single table within Neutron, but a
-policy can use many tables as well. Those tables
-might all come from the same cloud service (e.g. all the tables might be
-Neutron tables), or the tables may come from different cloud services (e.g.
-some tables from Neutron, others from Nova).
- -For example, if we have the following table schemas from Nova, Neutron, and -ActiveDirectory, we could write a policy that says every network connected to a VM must -either be public or owned by someone in the same group as the VM owner.:: - - error(vm, network) :- - nova:virtual_machine(vm) - nova:network(vm, network) - nova:owner(vm, vm_owner) - neutron:owner(network, network_owner) - not neutron:public_network(network) - not same_group(vm_owner, network_owner) - - same_group(user1, user2) :- - ad:group(user1, group) - ad:group(user2, group) - -And if one of these errors occurs, the right solution is to disconnect -the offending network (as opposed to deleting the VM, changing the owner, -or any of the other feasible options):: - - execute[neutron:disconnectNetwork(vm, network)] :- - error(vm, network) - -The language Congress supports for expressing policy is called Datalog, -a declarative language derived from SQL and first-order logic that has been -the subject of research and development for decades. - - - -.. _datalog: - -2. Datalog Policy Language -========================== - -As a policy writer, your goal is to define the contents of the *error* table, and -in so doing to describe exactly those conditions that must be true -when policy is being obeyed. - -As a policy writer, you can also describe which actions Congress should take when policy -is being violated by using the *execute* operator and thinking of the action -to be executed as if it were a table itself. - -Either when defining policy directly or describing the conditions under which -actions should be executed to eliminate policy violations, it is often useful -to use higher-level concepts than -the cloud services provide natively. Datalog allows us to do this by defining -new tables (higher-level concepts) in terms of existing tables (lower-level -concepts) by writing *rules*. For example, OpenStack does not tell us directly -which VMs are connected to the internet; rather, it provides a collection of -lower-level API calls from which we can derive that information. Using Datalog -we can define a table that lists all of the VMs connected to the internet in -terms of the tables that Nova/Neutron support directly. As another example, if -Keystone stores some collection of user groups and Active Directory stores a -collection of user groups, we might want to create a new table that represents -all the groups from either Keystone or Active Directory. - -Datalog has a collection of core features for manipulating tables, and it -has a collection of more advanced features that become important when you -go beyond toy examples. - - -2.1 Core Datalog Features -------------------------- - -Since Datalog is entirely concerned with tables, it's not surprising that -Datalog allows us to represent concrete tables directly in the language. - -**Concrete tables.** Suppose we want to use Datalog to represent a Neutron -table that lists which ports have been assigned which IPs, such as the one -shown below. 
-
-Table: neutron:port_ip
-
-====================================== ==========
-ID                                     IP
-====================================== ==========
-"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.1"
-"66dafde0-a49c-11e3-be40-425861b86ab6" "10.0.0.2"
-"73e31d4c-e89b-12d3-a456-426655440000" "10.0.0.3"
-====================================== ==========
-
-To represent this table, we write the following Datalog::
-
-    neutron:port_ip("66dafde0-a49c-11e3-be40-425861b86ab6", "10.0.0.1")
-    neutron:port_ip("66dafde0-a49c-11e3-be40-425861b86ab6", "10.0.0.2")
-    neutron:port_ip("73e31d4c-e89b-12d3-a456-426655440000", "10.0.0.3")
-
-Each of the Datalog statements above is called a *ground atom* (or *ground
-fact*). A ground atom takes the form ``tablename(arg1, ..., argn)``,
-where each ``argi`` is either a double-quoted Python string or a Python
-number.
-
-**Basic rules** The real power of Datalog is that it allows you to write recipes
-for constructing new tables out of existing tables, regardless of which rows are
-in those existing tables.
-
-To create a new table out of an existing table, we write Datalog *rules*.
-A *rule* is a simple if-then statement, where the *if* part is called the
-*body* and the *then* part is called the *head*. The head is always a single
-Datalog atom. The body is an AND of several possibly negated Datalog atoms.
-OR is accomplished by writing multiple rules with the same table in the head.
-
-Suppose we want to create a new table ``has_ip`` that is just a list of
-the Neutron ports that have been assigned at least one IP address. We want
-our table to work regardless of what IDs and IPs appear in the neutron:port_ip
-table so we use variables in place of strings/numbers. Variables have the
-same meaning as in algebra: they are placeholders for any value.
-(Syntactically, a variable is any symbol other than a number or a string.)::
-
-    has_ip(x) :- neutron:port_ip(x, y)
-
-This rule says that a port *x* belongs to the *has_ip* table if there exists
-some IP *y* such that row <x, y> belongs to the *neutron:port_ip* table.
-Conceptually, this rule says to look at all of the ground atoms for the
-neutron:port_ip table, and for each one assign *x* to the port UUID and *y*
-to the IP. Then create a row in the *has_ip* table for *x*. This rule when
-applied to the neutron:port_ip table shown above would generate the following
-table::
-
-    has_ip("66dafde0-a49c-11e3-be40-425861b86ab6")
-    has_ip("73e31d4c-e89b-12d3-a456-426655440000")
-
-Notice here that there are only 2 rows in *has_ip* despite there being 3 rows
-in *neutron:port_ip*. That happens because one of the ports in
-neutron:port_ip has been assigned 2 distinct IPs.
-
-**AND operator** As a slightly more complex example, we could define a table
-*same_ip* that lists all the pairs of ports that are assigned the same IP::
-
-    same_ip(port1, port2) :- neutron:port_ip(port1, ip), neutron:port_ip(port2, ip)
-
-This rule says that the row <port1, port2> must be included in the
-*same_ip* table if there exists some *ip* where both <port1, ip> and <port2, ip>
-are rows in the *neutron:port_ip* table (where notice that *ip* is the same in the two
-rows). Notice here the variable *ip* appears in two different places in the body,
-thereby requiring the value assigned to that variable be the same in both cases.
-This is called a *join* in the realm of relational databases and SQL.
-
-**NOT operator** As another example, suppose we want a list of all the ports
-that have NOT been assigned any IP address. We can use the *not* operator to
-check if a row fails to belong to a table::
-
-**NOT operator.** As another example, suppose we want a list of all the ports
-that have NOT been assigned any IP address. We can use the *not* operator to
-check if a row fails to belong to a table::
-
-    no_ip(port) :- neutron:port(port), not has_ip(port)
-
-There are special restrictions that you must be aware of when using *not*.
-See the next section for details.
-
-**OR operator**. Some examples require an OR, which in Datalog means writing
-multiple rules with the same table in the head. Imagine we have two tables
-representing group membership information from two different services:
-Keystone and Active Directory. We can create a new table *group* that says a
-person is a member of a group if she is a member of that group either according
-to Keystone or according to Active Directory. In Datalog we create this table
-by writing two rules.::
-
-    group(user, grp) :- ad:group(user, grp)
-    group(user, grp) :- keystone:group(user, grp)
-
-These rules happen to have only one atom in each of their bodies, but there is
-no requirement for that.
-
-
-2.2 Extended Datalog Features
------------------------------
-
-In addition to writing basic rules with and/or/not, the version of Datalog used
-by Congress includes the features described in this section.
-
-**Builtins**. Often we want to write rules that are conditioned on things that
-are difficult or impossible to define within Datalog. For example, we might
-want to create a table that lists all of the virtual machines that have at
-least 100 GB of memory. To write that rule, we would need a way to check
-whether the memory of a given machine is greater than 100 or not.
-Basic arithmetic, string manipulation, etc. are operations
-that are built into Datalog, but they look as though they are just ordinary
-tables. Below, *gt* is a builtin table implementing greater-than::
-
-    plenty_of_memory(vm) :- nova:virtual_machine.memory(vm, mem), gt(mem, 100)
-
-In a later section we include the list of available builtins.
-
-**Column references**. Some tables have 5+ columns, and writing rules against
-tables with that many columns can be awkward. Typically when
-we write a rule, we only want 1 or 2 columns, but if there are 10 columns, then
-we end up needing to invent variable names to fill all the unneeded columns.
-
-For example, Neutron's *ports* table has 10 columns. If you want to create a
-table that includes just the port IDs (as we used above), you would write the
-following rule::
-
-    port(id) :-
-        neutron:ports(id, tenant_id, name, network_id, mac_address, admin_state_up,
-                      status, device_owner, fixed_ips, security_groups)
-
-To simplify such rules, we can write rules that reference only those columns
-that we care about by using the column's name. Since the name of the first
-column of the *neutron:ports* table is "ID", we can write the rule above as
-follows::
-
-    port(x) :- neutron:ports(id=x)
-
-You can only use these column references for tables provided by cloud services
-(since Congress only knows the column names for the cloud service tables).
-Column references like these are translated automatically to the version
-without column-references, which is something you may notice from time to
-time.
-
-**Table hierarchy**. The tables in the body of rules can either be the
-original cloud-service tables or tables that are defined by other rules
-(with some limitations, described in the next section). We can think of a
-Datalog policy as a hierarchy of tables, where each table is defined in
-terms of the tables at a lower level in the hierarchy. At the bottom of that
-hierarchy are the original cloud-service tables representing the state of the
-cloud.
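-For instance, the rules written so far already form a small hierarchy:
-*neutron:port_ip* sits at the bottom, *has_ip* is defined on top of it, and
-*no_ip* on top of *has_ip*. A sketch of one more level, built only from the
-tables defined above, might flag every port that lacks an IP::
-
-    error(port) :- no_ip(port)
-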
-
-**Order irrelevance**. One noteworthy feature of Datalog is that the order
-in which rules appear is irrelevant. The rows that belong to a table are
-the minimal ones required by the rules if we were to compute their contents
-starting with the cloud-service tables (whose contents are given to us) and
-working our way up the hierarchy of tables. For more details, search the web
-for the term *stratified Datalog semantics*.
-
-**Execute modal**. To write a policy that tells Congress the conditions
-under which it should execute a certain action, we write rules that utilize
-the *execute* modal in the head of the rule.
-
-For example, to dictate that Congress should ask Nova to pause() all of the
-servers whose state is ACTIVE, we would write the following policy statement::
-
-    execute[nova:servers.pause(x)] :- nova:servers(id=x, status="ACTIVE")
-
-We discuss this modal operator in greater detail in Section 3.
-
-**Grammar**. Here is the grammar for Datalog policies::
-
-    <policy> ::= <rule>*
-    <rule> ::= <head> COLONMINUS <literal> (COMMA <literal>)*
-    <head> ::= <atom>
-    <head> ::= EXECUTE[<atom>]
-    <literal> ::= <atom>
-    <literal> ::= NOT <atom>
-    <atom> ::= TABLENAME LPAREN <arg> (COMMA <arg>)* RPAREN
-    <arg> ::= <term>
-    <arg> ::= COLUMNNAME=<term>
-    <term> ::= INTEGER | FLOAT | STRING | VARIABLE
-
-
-2.3 Datalog Syntax Restrictions
--------------------------------
-
-There are a number of syntactic restrictions on Datalog that are, for the most
-part, common sense.
-
-**Head Safety**: every variable in the head of a rule must appear in the body.
-
-Head Safety is natural because if a variable appears in the head of the rule
-but not the body, we have not given a prescription for which strings/numbers
-to use for that variable when adding rows to the table in the head.
-
-**Body Safety**: every variable occurring in a negated atom or in the input
-of a built-in table must appear in a non-negated, non-builtin atom in the body.
-
-Body Safety is important for ensuring that the sizes of our tables are always
-finite. There are always infinitely many rows that DO NOT belong to a table,
-and there are often infinitely many rows that DO belong to a builtin
-(like equal). Body safety ensures that the number of rows belonging to
-the table in the head is always finite. (Concrete examples of rules that
-violate these two safety restrictions appear at the end of this section.)
-
-**No recursion**: You are not allowed to define a table in terms of itself.
-
-A classic example starts with a table that tells us which network nodes
-are directly adjacent to which other nodes (by a single network hop). Then you
-want to write a policy about which nodes are connected to which other nodes
-(by any number of hops). Expressing such a policy requires recursion, which
-is not allowed.
-
-**Modal safety**: The *execute* modal may only appear in the heads of rules.
-
-The Datalog language we have is called a condition-action language, meaning
-that action-execution depends on conditions on the state of the cloud. But
-it is not an event-condition-action language, which would enable
-action-execution to depend on the conditions of the cloud plus the action
-that was just executed. An event-condition-action language would allow
-the *execute* modal to appear in the body of rules.
-
-**Schema consistency**: Every time a rule references one of the cloud service
-tables, the rule must use the same (number of) columns that the cloud service
-provides for that table.
-
-This restriction catches mistakes in rules that use the wrong number of columns
-or the wrong column names.
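-To make the two safety restrictions concrete, here are two rules Congress
-would reject (a minimal sketch reusing the tables defined earlier; the head
-table names are hypothetical). The first violates Head Safety because *y*
-never appears in the body; the second violates Body Safety because *port*
-appears only under negation::
-
-    bad_head(x, y) :- neutron:port_ip(x, z)
-    bad_body(port) :- not has_ip(port)
-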
-
-
-.. **Stratification [Recursion is not currently supported]**
-.. No table may be defined in terms of its negation.
-
-.. In Datalog, a table may be defined in terms of itself. These are called
-   *recursive* tables. A classic example is defining all pairs of nodes that
-   are connected in a network given a table that records which nodes are adjacent
-   to which other nodes (i.e. by a single network hop).::
-
-..     connected(x,y) :- adjacent(x,y)
-..     connected(x,y) :- connected(x,z), connected(z,y)
-
-.. The Stratification restriction says that we cannot define a table in terms of
-   its *negation*. For example, the following rule is disallowed.::
-
-..     p(x) :- not p(x)   // NOT valid Datalog
-
-.. More precisely, the Stratification restriction says that there is no cycle
-   through the dependency graph of a Datalog policy that includes an edge
-   labeled with *negation*. The dependency graph of a Datalog policy has
-   one node for every table. It has an edge from table u to table v if
-   there is a rule with u in the head and v in the body; that edge is labeled
-   with *negation* if NOT is applied to the atom for v.
-
-
-
-2.4 Datalog builtins
---------------------
-
-You can think of builtins as tables that are defined for you. All builtins
-are referenced in rules using the prefix *builtin:*. For example, to check
-if the value of x is less than the value of y, you write *builtin:lt(x,y)*.
-
-In previous releases, the *builtin:* prefix was unnecessary. You could reference
-builtin tables in their bare form, e.g. *lt(x,y)*. As of Ocata, bare references
-to builtins are deprecated.
-
-**Bare builtin references are deprecated as of Ocata. They will be removed
-in the Q release.**
-
-Here is a list of the currently supported builtins. A builtin that has
-N inputs means that the leftmost N columns are the inputs, and the
-remaining columns (if any) are the outputs. If a builtin has no outputs, it
-simply returns the boolean value True or False. First are the comparison
-builtins.
-
-====================================== ======= =============================
-Comparison Builtin                     Inputs  Description
-====================================== ======= =============================
-lt(x, y)                               2       True if x < y
-lteq(x, y)                             2       True if x <= y
-equal(x, y)                            2       True if x == y
-gt(x, y)                               2       True if x > y
-gteq(x, y)                             2       True if x >= y
-max(x, y, z)                           2       z = max(x, y)
-====================================== ======= =============================
-
-
-Next are the arithmetic builtins.
-
-====================================== ======= =============================
-Arithmetic Builtin                     Inputs  Description
-====================================== ======= =============================
-plus(x, y, z)                          2       z = x + y
-minus(x, y, z)                         2       z = x - y
-mul(x, y, z)                           2       z = x * y
-div(x, y, z)                           2       z = x / y
-float(x, y)                            1       y = float(x)
-int(x, y)                              1       y = int(x)
-====================================== ======= =============================
-
-
-Next are the string builtins.
-
-====================================== ======= =============================
-String Builtin                         Inputs  Description
-====================================== ======= =============================
-concat(x, y, z)                        2       z = concatenate(x, y)
-len(x, y)                              1       y = number of characters in x
-====================================== ======= =============================
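-Builtins compose with ordinary tables just like any other atom. As a small
-sketch (the *vm_disk_mb* table is hypothetical), here is a rule that converts
-a disk size from MB to GB using an arithmetic builtin with the *builtin:*
-prefix described above::
-
-    vm_disk_gb(vm, gb) :- vm_disk_mb(vm, mb), builtin:div(mb, 1024, gb)
-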
-
-Next are the builtins for manipulating dates and times. These builtins
-are based on the Python DateTime object.
-
-====================================== ======= ===============================
-Datetime Builtin                       Inputs  Description
-====================================== ======= ===============================
-now(x)                                 0       The current date-time
-unpack_date(x, year, month, day)       1       Extract year/month/day
-unpack_time(x, hours, minutes, secs)   1       Extract hours/minutes/seconds
-unpack_datetime(x, y, m, d, h, i, s)   1       Extract date and time
-pack_time(hours, minutes, seconds, x)  3       Create date-time with time
-pack_date(year, month, day, x)         3       Create date-time with date
-pack_datetime(y, m, d, h, i, s, x)     6       Create date-time with date/time
-extract_date(x, date)                  1       Extract date obj from date-time
-extract_time(x, time)                  1       Extract time obj from date-time
-datetime_to_seconds(x, secs)           1       secs from 1900 to date-time x
-datetime_plus(x, y, z)                 2       z = x + y
-datetime_minus(x, y, z)                2       z = x - y
-datetime_lt(x, y)                      2       True if x is before y
-datetime_lteq(x, y)                    2       True if x is no later than y
-datetime_gt(x, y)                      2       True if x is later than y
-datetime_gteq(x, y)                    2       True if x is no earlier than y
-datetime_equal(x, y)                   2       True if x == y
-====================================== ======= ===============================
-
-Last are the builtins for handling network addresses. These builtins
-are based on the Python netaddr package. Both IPv4 and IPv6 are supported.
-For more details see the *netaddr* documentation.
-
-========================= ======= =============================================
-Network Address Builtins  Inputs  Description
-========================= ======= =============================================
-ips_equal(x, y)           2       True if IP x is equal to IP y
-ips_lt(x, y)              2       True if IP x is less than IP y
-ips_lteq(x, y)            2       True if IP x is less than or equal to IP y
-ips_gt(x, y)              2       True if IP x is greater than IP y
-ips_gteq(x, y)            2       True if IP x is greater than or equal to IP y
-networks_equal(x, y)      2       True if network x and network y are equal
-networks_overlap(x, y)    2       True if the same IP is in networks x and y
-ip_in_network(x, y)       2       True if IP x belongs to network y
-========================= ======= =============================================
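-As one example, the network-address builtins make it easy to classify the
-*neutron:port_ip* rows from Section 2.1 by address range. A sketch (the
-*private_port* table name and the choice of range are hypothetical)::
-
-    private_port(port) :- neutron:port_ip(port, ip), builtin:ip_in_network(ip, "10.0.0.0/8")
-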
-
-
-
-3. Multiple Policies
-====================
-
-One of the goals of Congress is for several different people in an organization
-to collaboratively define a single, overarching policy that governs a cloud.
-For example, the compute admin might create some tables that are good building
-blocks for writing policy about compute. Similarly the network and storage admins
-might create tables that help define policy about networking and storage, respectively.
-Using those building blocks, the cloud administrator might then write
-policy about compute, storage, and networking.
-
-To make it easier for several people to collaborate (or for a single person
-to write more modular policies) Congress allows you to organize your Datalog
-statements using policy modules. Each policy module is simply a collection of
-Datalog statements. You create and delete policy modules using the API,
-and then you insert/delete Datalog statements into a particular policy module,
-also using the API.
-
-The rules you insert into one policy module can reference tables defined in
-other policy modules. To do that, you prefix the name of the table with
-the name of the policy and separate the policy module and table name with
-a colon.
-
-For example, if the policy module *compute* has a table that lists all the
-servers that have not been properly secured *insecure(server)*
-and the policy module *network* has a table of all devices connected to
-the internet *connected_to_internet*, then as a
-cloud administrator, you might write a policy that says there is an error
-whenever a server is insecure and connected to the internet::
-
-    error(x) :- compute:insecure(x), network:connected_to_internet(x)
-
-Notice that this is exactly the same syntax you use to reference tables exported
-directly by cloud services::
-
-    has_ip(x) :- neutron:port_ip(x, y)
-
-In fact, the tables exported by cloud services are stored in a policy module
-with the same name as the service.
-
-While the term *policy module* is accurate, we usually abbreviate it to *policy*,
-and say that Congress supports multiple policies. Note, however, that supporting
-multiple policies is not the same thing as supporting multi-tenancy.
-Currently, all of
-the policies are visible to everyone using the system, and everyone using
-the system has the same view of the tables the cloud services export. For
-true multi-tenancy, you would expect different tenants to have different
-sets of policies and potentially a different view of the data exported
-by cloud services.
-
-See section :ref:`API <api>` for details about creating, deleting, and
-populating policies.
-
-
-3.1 Syntactic Restrictions for Multiple Policies
-------------------------------------------------
-
-There are a couple of additional syntactic restrictions imposed when using
-multiple policies.
-
-**No recursion across policies**. Just as there is no recursion permitted
-within a single policy, there is no recursion permitted across policies.
-
-For example, the following is prohibited::
-
-    # Not permitted because of recursion
-    Module compute:  p(x) :- storage:q(x)
-    Module storage:  q(x) :- compute:p(x)
-
-**No policy name may be referenced in the head of a rule**. A rule may
-not mention any policy in the head (unless the head uses the modal *execute*).
-
-This restriction prohibits one policy from changing the tables
-defined within another policy. The following example is prohibited
-(in all policy modules, including 'compute')::
-
-    # Not permitted because 'compute' is in the head
-    compute:p(x) :- q(x)
-
-The following rule is permitted, because it utilizes *execute* in the
-head of the rule::
-
-    # Permitted because of execute[]
-    execute[nova:pause(x)] :- nova:servers(id=x, status="ACTIVE")
-
-Congress will stop you from inserting rules that violate these restrictions.
-
diff --git a/doc/source/release.rst b/doc/source/release.rst
deleted file mode 100644
index b902be1d..00000000
--- a/doc/source/release.rst
+++ /dev/null
@@ -1,58 +0,0 @@
-
-.. _release:
-
-=============
-Release Notes
-=============
-
-Newer release notes (after Mitaka) are published in OpenStack official
-release notes.
-To see the latest updates, please visit the
-`OpenStack official release notes <http://docs.openstack.org/>`_.
-
-
-Liberty
--------
-
-**Main updates**
-
-* Added datasource driver for Heat
-* Designed and began implementation of new distributed architecture
-* Added API call to list available actions for manual reactive enforcement
-* Refactored all datasource drivers for improved consistency
-* Extended grammar to include insert and delete events
-* Improved tempest/devstack support for running in gate
-* Added version API
-* Improved support for python3
-* Reduced debug log volume by reducing messages sent on message bus
-* Enabled action execution for all datasources
-* Eliminated busy-loop in message bus for reduced cpu consumption
-* Improved unit test coverage for API
-* Added experimental vm-migration policy enforcement engine
-
-
-Kilo
-----
-
-**Main features**
-
-* Datalog: basic rules, column references, multiple policies,
-  action-execution rules
-* Monitoring: check for policy violations by asking for the rows of
-  the ``error`` table
-* Proactive enforcement: prevent violations by asking Congress before making
-  changes using the ``simulate`` API call
-* Manual reactive enforcement: correct violations by writing Datalog
-  statements that say which actions to execute to eliminate violations
-* Datasource drivers for Ceilometer, Cinder, CloudFoundry, Glance, Ironic,
-  Keystone, Murano, Neutron, Nova, Plexxi, Swift, vCenter
-
-**Known issues**
-
-* ``GET /v1/policies/<policy-id>/rules`` fails to return 404 if the policy name
-  is not found. There are similar issues for other
-  ``/v1/policies/<policy-id>/rules`` API calls.
-
-* Within a policy, you may not use both ``execute[<table>(<args>)]`` and
-  ``<table>(<args>)`` in the heads of rules.
-
-
diff --git a/doc/source/troubleshooting.rst b/doc/source/troubleshooting.rst
deleted file mode 100644
index d5eaa114..00000000
--- a/doc/source/troubleshooting.rst
+++ /dev/null
@@ -1,516 +0,0 @@
-
-.. _troubleshooting:
-
-===============
-Troubleshooting
-===============
-
-So you've installed Congress with devstack as per the README,
-and now something is not behaving the way you think it should.
-Let's say you're using the policy that follows (from the tutorial),
-but the *error* table does not contain the rows you expect. In
-this document, we describe how to figure out what the problem is
-and hopefully how to fix it. At the end we also detail a collection
-of problems and tips for how to fix them. ::
-
-    error(name2) :-
-        neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p),
-        nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2),
-        neutron:networks(a3, b3, c3, d3, e3, f3, tenant_id3, h3, i3, j3, network_id, l3),
-        not same_group(tenant_id, tenant_id2)
-
-    error(name2) :-
-        neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p),
-        nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2),
-        neutron:networks(a3, b3, c3, d3, e3, f3, tenant_id3, h3, i3, j3, network_id, l3),
-        not same_group(tenant_id2, tenant_id3)
-
-    same_group(x, y) :-
-        group(x, g),
-        group(y, g)
-
-    group("7320f8345acb489e8296ddb3b1ad1262", "IT") :- true()
-    group("81084a94769c4ce0accb6968c397a085", "Marketing") :- true()
-
-
-Policy-engine troubleshooting
------------------------------
-
-Make sure the policy engine knows about the rules you think it knows about.
-It is possible that the policy engine rejected a rule because of a syntax
-error. Let's assume you're using the *classification* policy.
-
-
-**Check**: Ensure the policy engine has the right rules::
-
-    $ curl -X GET localhost:1789/v1/policies/<policy-name>/rules
-
-For example::
-
-    $ curl -X GET localhost:1789/v1/policies/classification/rules
-    {
-      "results": [
-        {
-          "comment": "None",
-          "id": "2be98841-953d-44f0-91f6-32c5f4dd4f83",
-          "rule": "group(\"d0a7ff9e5d5b4130a586a7af1c855c3e\", \"IT\") :- true()"
-        },
-        {
-          "comment": "None",
-          "id": "c01067ef-10e4-498f-8aaa-0c1cce1272a3",
-          "rule": "group(\"e793326db18847e1908e791daa69a5a3\", \"Marketing\") :- true()"
-        },
-        {
-          "comment": "None",
-          "id": "3c6e48ee-2783-4b5e-94ff-aab00ccffd42",
-          "rule": "error(name2) :- neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p), nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2), neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, network_id, i3), not same_group(tenant_id, tenant_id2)"
-        },
-        {
-          "comment": "None",
-          "id": "36264def-d917-4f39-a6b0-4aaf12d4b349",
-          "rule": "error(name2) :- neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p), nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2), neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, network_id, i3), not same_group(tenant_id2, tenant_id3)"
-        },
-        {
-          "comment": "None",
-          "id": "31922bb0-711c-43da-9f11-cef69828a2c4",
-          "rule": "same_group(x, y) :- group(x, g), group(y, g)"
-        }
-      ]
-    }
-
-It is also possible that you might have a typo in one of the table names that appear
-in the rules. To eliminate that possibility, ask for the list of tables
-that occur in the rules and compare those to the ones the datasources export.
-You might also look for near-duplicates, such as *same_group* and *samegroup*,
-in case tables are spelled differently in different rules.
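-When the rule list is long, it can help to filter it on the command line.
-A quick sketch, assuming Python is available for pretty-printing::
-
-    $ curl -X GET localhost:1789/v1/policies/classification/rules | python -m json.tool | grep same_group
-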
-
-**Check**: Ensure there are no typos in any of the table names by asking
-for the list of tables occurring in the rules::
-
-    $ curl -X GET localhost:1789/v1/policies/<policy-name>/tables
-
-For example::
-
-    $ curl -X GET localhost:1789/v1/policies/classification/tables
-    {
-      "results": [
-        {
-          "id": "nova:servers"
-        },
-        {
-          "id": "neutron:ports"
-        },
-        {
-          "id": "group"
-        },
-        {
-          "id": "neutron:networks"
-        },
-        {
-          "id": "error"
-        },
-        {
-          "id": "same_group"
-        },
-        {
-          "id": "true"
-        }
-      ]
-    }
-
-Next we want to check that tables have the rows we would expect. A good place
-to start is with the tables exported by external datasources like Nova and
-Neutron. If these tables are empty, that points to a problem with the
-datasources (see below for troubleshooting datasources).
-If they are not empty, it points to a problem with the rules::
-
-    $ curl -X GET localhost:1789/v1/policies/<policy-name>/tables/<table-name>/rows
-
-For example, below are the rows in the *neutron:ports* table. There are 2
-rows (each of which represents a port), and each row has 16 columns::
-
-    $ curl -X GET localhost:1789/v1/policies/classification/tables/neutron:ports/rows
-    {
-      "results": [
-        {
-          "data": [
-            "795a4e6f-7cc8-4052-ae43-80d4c3ad233a",
-            "5f1f9b53-46b2-480f-b653-606f4aaf61fd",
-            "1955273c-242d-46a6-8063-7dc9c20cbba9",
-            "None",
-            "ACTIVE",
-            "",
-            "True",
-            "37eee894-a65f-414d-bd8c-a9363293000a",
-            "e793326db18847e1908e791daa69a5a3",
-            "None",
-            "network:router_interface",
-            "fa:16:3e:28:ab:0b",
-            "4b7e5f9c-9ba8-4c94-a7d0-e5811207d26c",
-            "882911e9-e3cf-4682-bb18-4bf8c559e22d",
-            "c62efe5d-d070-4dff-8d9d-3df8ac08b0ec",
-            "None"
-          ]
-        },
-        {
-          "data": [
-            "ebeb4ee6-14be-4ba2-a723-fd62f220b6b9",
-            "f999de49-753e-40c9-9eed-d01ad76bc6c3",
-            "ad058c04-05be-4f56-a76f-7f3b42f36f79",
-            "None",
-            "ACTIVE",
-            "",
-            "True",
-            "07ecce19-d7a4-4c79-924e-1692713e53a7",
-            "e793326db18847e1908e791daa69a5a3",
-            "None",
-            "compute:None",
-            "fa:16:3e:3c:0d:13",
-            "af179309-65f7-4662-a087-e583d6a8bc21",
-            "149f4271-41ca-4b1a-875b-77909debbeac",
-            "bc333f0f-b665-4e2b-97db-a4dd985cb5c8",
-            "None"
-          ]
-        }
-      ]
-    }
-
-After checking the tables exported by datasources like Nova and Neutron,
-it is useful to check the contents of the other tables that build upon
-those tables.
-
-In our running example, we should check the rows of the *group* table. Here
-we see what we expect: that there are two users, each of which belongs to
-a different group::
-
-    $ curl -X GET localhost:1789/v1/policies/classification/tables/group/rows
-    {
-      "results": [
-        {
-          "data": [
-            "d0a7ff9e5d5b4130a586a7af1c855c3e",
-            "IT"
-          ]
-        },
-        {
-          "data": [
-            "e793326db18847e1908e791daa69a5a3",
-            "Marketing"
-          ]
-        }
-      ]
-    }
-
-Once you have found a table that contains the wrong rows, it may be obvious
-looking at the rules for that table what the problem is. But if there
-are many rules or if some of the rules are long, it can be difficult to
-pinpoint the problem. When this happens, you can ask for a trace that
-describes how the rows of that table were computed::
-
-    $ curl -X GET localhost:1789/v1/policies/<policy-name>/tables/<table-name>/rows?trace=true
-
-The trace is similar to a function-call trace.
It uses the following -annotations:: - -* Call Q: a query for all the rows that match Q -* Exit Q: successfully discovered row Q -* Fail Q: failure to find a row matching Q (in the current context) -* Redo Q: attempt to find another row matching Q - -In our example, we know the contents of the *error* table is empty, but -all of the tables used to construct *error* look reasonable. So we ask -for a trace showing why the *error* table is empty. The trace is returned -as a string and be quite large.:: - - $ curl -X GET localhost:1789/v1/policies//tables/error/rows?trace=true - { - "results": [], - "trace": "Clas : Call: error(x0)\nClas : | Call: neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p)\nClas : | Exit: neutron:ports(\"795a4e6f-7cc8-4052-ae43-80d4c3ad233a\", \"5f1f9b53-46b2-480f-b653-606f4aaf61fd\", \"1955273c-242d-46a6-8063-7dc9c20cbba9\", \"None\", \"ACTIVE\", \"\", \"True\", \"37eee894-a65f-414d-bd8c-a9363293000a\", \"e793326db18847e1908e791daa69a5a3\", \"None\", \"network:router_interface\", \"fa:16:3e:28:ab:0b\", \"4b7e5f9c-9ba8-4c94-a7d0-e5811207d26c\", \"882911e9-e3cf-4682-bb18-4bf8c559e22d\", \"c62efe5d-d070-4dff-8d9d-3df8ac08b0ec\", \"None\")\nClas : | Call: nova:servers(\"c62efe5d-d070-4dff-8d9d-3df8ac08b0ec\", x0, c2, d2, tenant_id2, f2, g2, h2)\nClas : | Fail: nova:servers(\"c62efe5d-d070-4dff-8d9d-3df8ac08b0ec\", x0, c2, d2, tenant_id2, f2, g2, h2)\nClas : | Redo: neutron:ports(\"795a4e6f-7cc8-4052-ae43-80d4c3ad233a\", \"5f1f9b53-46b2-480f-b653-606f4aaf61fd\", \"1955273c-242d-46a6-8063-7dc9c20cbba9\", \"None\", \"ACTIVE\", \"\", \"True\", \"37eee894-a65f-414d-bd8c-a9363293000a\", \"e793326db18847e1908e791daa69a5a3\", \"None\", \"network:router_interface\", \"fa:16:3e:28:ab:0b\", \"4b7e5f9c-9ba8-4c94-a7d0-e5811207d26c\", \"882911e9-e3cf-4682-bb18-4bf8c559e22d\", \"c62efe5d-d070-4dff-8d9d-3df8ac08b0ec\", \"None\")\nClas : | Exit: neutron:ports(\"ebeb4ee6-14be-4ba2-a723-fd62f220b6b9\", \"f999de49-753e-40c9-9eed-d01ad76bc6c3\", \"ad058c04-05be-4f56-a76f-7f3b42f36f79\", \"None\", \"ACTIVE\", \"\", \"True\", \"07ecce19-d7a4-4c79-924e-1692713e53a7\", \"e793326db18847e1908e791daa69a5a3\", \"None\", \"compute:None\", \"fa:16:3e:3c:0d:13\", \"af179309-65f7-4662-a087-e583d6a8bc21\", \"149f4271-41ca-4b1a-875b-77909debbeac\", \"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", \"None\")\nClas : | Call: nova:servers(\"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", x0, c2, d2, tenant_id2, f2, g2, h2)\nClas : | Exit: nova:servers(\"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", \"vm-demo\", \"c5dd62237226c4f2eaddea823ca8b4f5c1a3c2d3a27e5e51e407954d\", \"ACTIVE\", \"e793326db18847e1908e791daa69a5a3\", \"cf23fabed97742a9af463002e68068bd\", \"6183d5a6-e26c-4f48-af4d-5d0b6770a976\", \"1\")\nClas : | Call: neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, \"07ecce19-d7a4-4c79-924e-1692713e53a7\", i3)\nClas : | Fail: neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, \"07ecce19-d7a4-4c79-924e-1692713e53a7\", i3)\nClas : | Redo: nova:servers(\"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", \"vm-demo\", \"c5dd62237226c4f2eaddea823ca8b4f5c1a3c2d3a27e5e51e407954d\", \"ACTIVE\", \"e793326db18847e1908e791daa69a5a3\", \"cf23fabed97742a9af463002e68068bd\", \"6183d5a6-e26c-4f48-af4d-5d0b6770a976\", \"1\")\nClas : | Fail: nova:servers(\"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", x0, c2, d2, tenant_id2, f2, g2, h2)\nClas : | Redo: neutron:ports(\"ebeb4ee6-14be-4ba2-a723-fd62f220b6b9\", \"f999de49-753e-40c9-9eed-d01ad76bc6c3\", \"ad058c04-05be-4f56-a76f-7f3b42f36f79\", \"None\", 
\"ACTIVE\", \"\", \"True\", \"07ecce19-d7a4-4c79-924e-1692713e53a7\", \"e793326db18847e1908e791daa69a5a3\", \"None\", \"compute:None\", \"fa:16:3e:3c:0d:13\", \"af179309-65f7-4662-a087-e583d6a8bc21\", \"149f4271-41ca-4b1a-875b-77909debbeac\", \"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", \"None\")\nClas : | Fail: neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p)\nClas : | Call: neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p)\nClas : | Exit: neutron:ports(\"795a4e6f-7cc8-4052-ae43-80d4c3ad233a\", \"5f1f9b53-46b2-480f-b653-606f4aaf61fd\", \"1955273c-242d-46a6-8063-7dc9c20cbba9\", \"None\", \"ACTIVE\", \"\", \"True\", \"37eee894-a65f-414d-bd8c-a9363293000a\", \"e793326db18847e1908e791daa69a5a3\", \"None\", \"network:router_interface\", \"fa:16:3e:28:ab:0b\", \"4b7e5f9c-9ba8-4c94-a7d0-e5811207d26c\", \"882911e9-e3cf-4682-bb18-4bf8c559e22d\", \"c62efe5d-d070-4dff-8d9d-3df8ac08b0ec\", \"None\")\nClas : | Call: nova:servers(\"c62efe5d-d070-4dff-8d9d-3df8ac08b0ec\", x0, c2, d2, tenant_id2, f2, g2, h2)\nClas : | Fail: nova:servers(\"c62efe5d-d070-4dff-8d9d-3df8ac08b0ec\", x0, c2, d2, tenant_id2, f2, g2, h2)\nClas : | Redo: neutron:ports(\"795a4e6f-7cc8-4052-ae43-80d4c3ad233a\", \"5f1f9b53-46b2-480f-b653-606f4aaf61fd\", \"1955273c-242d-46a6-8063-7dc9c20cbba9\", \"None\", \"ACTIVE\", \"\", \"True\", \"37eee894-a65f-414d-bd8c-a9363293000a\", \"e793326db18847e1908e791daa69a5a3\", \"None\", \"network:router_interface\", \"fa:16:3e:28:ab:0b\", \"4b7e5f9c-9ba8-4c94-a7d0-e5811207d26c\", \"882911e9-e3cf-4682-bb18-4bf8c559e22d\", \"c62efe5d-d070-4dff-8d9d-3df8ac08b0ec\", \"None\")\nClas : | Exit: neutron:ports(\"ebeb4ee6-14be-4ba2-a723-fd62f220b6b9\", \"f999de49-753e-40c9-9eed-d01ad76bc6c3\", \"ad058c04-05be-4f56-a76f-7f3b42f36f79\", \"None\", \"ACTIVE\", \"\", \"True\", \"07ecce19-d7a4-4c79-924e-1692713e53a7\", \"e793326db18847e1908e791daa69a5a3\", \"None\", \"compute:None\", \"fa:16:3e:3c:0d:13\", \"af179309-65f7-4662-a087-e583d6a8bc21\", \"149f4271-41ca-4b1a-875b-77909debbeac\", \"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", \"None\")\nClas : | Call: nova:servers(\"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", x0, c2, d2, tenant_id2, f2, g2, h2)\nClas : | Exit: nova:servers(\"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", \"vm-demo\", \"c5dd62237226c4f2eaddea823ca8b4f5c1a3c2d3a27e5e51e407954d\", \"ACTIVE\", \"e793326db18847e1908e791daa69a5a3\", \"cf23fabed97742a9af463002e68068bd\", \"6183d5a6-e26c-4f48-af4d-5d0b6770a976\", \"1\")\nClas : | Call: neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, \"07ecce19-d7a4-4c79-924e-1692713e53a7\", i3)\nClas : | Fail: neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, \"07ecce19-d7a4-4c79-924e-1692713e53a7\", i3)\nClas : | Redo: nova:servers(\"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", \"vm-demo\", \"c5dd62237226c4f2eaddea823ca8b4f5c1a3c2d3a27e5e51e407954d\", \"ACTIVE\", \"e793326db18847e1908e791daa69a5a3\", \"cf23fabed97742a9af463002e68068bd\", \"6183d5a6-e26c-4f48-af4d-5d0b6770a976\", \"1\")\nClas : | Fail: nova:servers(\"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", x0, c2, d2, tenant_id2, f2, g2, h2)\nClas : | Redo: neutron:ports(\"ebeb4ee6-14be-4ba2-a723-fd62f220b6b9\", \"f999de49-753e-40c9-9eed-d01ad76bc6c3\", \"ad058c04-05be-4f56-a76f-7f3b42f36f79\", \"None\", \"ACTIVE\", \"\", \"True\", \"07ecce19-d7a4-4c79-924e-1692713e53a7\", \"e793326db18847e1908e791daa69a5a3\", \"None\", \"compute:None\", \"fa:16:3e:3c:0d:13\", \"af179309-65f7-4662-a087-e583d6a8bc21\", 
\"149f4271-41ca-4b1a-875b-77909debbeac\", \"bc333f0f-b665-4e2b-97db-a4dd985cb5c8\", \"None\")\nClas : | Fail: neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p)\nClas : Fail: error(x0)\n" - } - -We can print the trace using 'printf ' (without the quotes):: - - $ printf "Clas : Call: error(x0)... - Clas : Call: error(x0) - Clas : | Call: neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p) - Clas : | Exit: neutron:ports("795a4e6f-7cc8-4052-ae43-80d4c3ad233a", "5f1f9b53-46b2-480f-b653-606f4aaf61fd", "1955273c-242d-46a6-8063-7dc9c20cbba9", "None", "ACTIVE", "", "True", "37eee894-a65f-414d-bd8c-a9363293000a", "e793326db18847e1908e791daa69a5a3", "None", "network:router_interface", "fa:16:3e:28:ab:0b", "4b7e5f9c-9ba8-4c94-a7d0-e5811207d26c", "882911e9-e3cf-4682-bb18-4bf8c559e22d", "c62efe5d-d070-4dff-8d9d-3df8ac08b0ec", "None") - Clas : | Call: nova:servers("c62efe5d-d070-4dff-8d9d-3df8ac08b0ec", x0, c2, d2, tenant_id2, f2, g2, h2) - Clas : | Fail: nova:servers("c62efe5d-d070-4dff-8d9d-3df8ac08b0ec", x0, c2, d2, tenant_id2, f2, g2, h2) - Clas : | Redo: neutron:ports("795a4e6f-7cc8-4052-ae43-80d4c3ad233a", "5f1f9b53-46b2-480f-b653-606f4aaf61fd", "1955273c-242d-46a6-8063-7dc9c20cbba9", "None", "ACTIVE", "", "True", "37eee894-a65f-414d-bd8c-a9363293000a", "e793326db18847e1908e791daa69a5a3", "None", "network:router_interface", "fa:16:3e:28:ab:0b", "4b7e5f9c-9ba8-4c94-a7d0-e5811207d26c", "882911e9-e3cf-4682-bb18-4bf8c559e22d", "c62efe5d-d070-4dff-8d9d-3df8ac08b0ec", "None") - Clas : | Exit: neutron:ports("ebeb4ee6-14be-4ba2-a723-fd62f220b6b9", "f999de49-753e-40c9-9eed-d01ad76bc6c3", "ad058c04-05be-4f56-a76f-7f3b42f36f79", "None", "ACTIVE", "", "True", "07ecce19-d7a4-4c79-924e-1692713e53a7", "e793326db18847e1908e791daa69a5a3", "None", "compute:None", "fa:16:3e:3c:0d:13", "af179309-65f7-4662-a087-e583d6a8bc21", "149f4271-41ca-4b1a-875b-77909debbeac", "bc333f0f-b665-4e2b-97db-a4dd985cb5c8", "None") - Clas : | Call: nova:servers("bc333f0f-b665-4e2b-97db-a4dd985cb5c8", x0, c2, d2, tenant_id2, f2, g2, h2) - Clas : | Exit: nova:servers("bc333f0f-b665-4e2b-97db-a4dd985cb5c8", "vm-demo", "c5dd62237226c4f2eaddea823ca8b4f5c1a3c2d3a27e5e51e407954d", "ACTIVE", "e793326db18847e1908e791daa69a5a3", "cf23fabed97742a9af463002e68068bd", "6183d5a6-e26c-4f48-af4d-5d0b6770a976", "1") - Clas : | Call: neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, "07ecce19-d7a4-4c79-924e-1692713e53a7", i3) - Clas : | Fail: neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, "07ecce19-d7a4-4c79-924e-1692713e53a7", i3) - Clas : | Redo: nova:servers("bc333f0f-b665-4e2b-97db-a4dd985cb5c8", "vm-demo", "c5dd62237226c4f2eaddea823ca8b4f5c1a3c2d3a27e5e51e407954d", "ACTIVE", "e793326db18847e1908e791daa69a5a3", "cf23fabed97742a9af463002e68068bd", "6183d5a6-e26c-4f48-af4d-5d0b6770a976", "1") - Clas : | Fail: nova:servers("bc333f0f-b665-4e2b-97db-a4dd985cb5c8", x0, c2, d2, tenant_id2, f2, g2, h2) - Clas : | Redo: neutron:ports("ebeb4ee6-14be-4ba2-a723-fd62f220b6b9", "f999de49-753e-40c9-9eed-d01ad76bc6c3", "ad058c04-05be-4f56-a76f-7f3b42f36f79", "None", "ACTIVE", "", "True", "07ecce19-d7a4-4c79-924e-1692713e53a7", "e793326db18847e1908e791daa69a5a3", "None", "compute:None", "fa:16:3e:3c:0d:13", "af179309-65f7-4662-a087-e583d6a8bc21", "149f4271-41ca-4b1a-875b-77909debbeac", "bc333f0f-b665-4e2b-97db-a4dd985cb5c8", "None") - Clas : | Fail: neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p) - - Clas : | Call: neutron:ports(a, b, 
c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p) - Clas : | Exit: neutron:ports("795a4e6f-7cc8-4052-ae43-80d4c3ad233a", "5f1f9b53-46b2-480f-b653-606f4aaf61fd", "1955273c-242d-46a6-8063-7dc9c20cbba9", "None", "ACTIVE", "", "True", "37eee894-a65f-414d-bd8c-a9363293000a", "e793326db18847e1908e791daa69a5a3", "None", "network:router_interface", "fa:16:3e:28:ab:0b", "4b7e5f9c-9ba8-4c94-a7d0-e5811207d26c", "882911e9-e3cf-4682-bb18-4bf8c559e22d", "c62efe5d-d070-4dff-8d9d-3df8ac08b0ec", "None") - Clas : | Call: nova:servers("c62efe5d-d070-4dff-8d9d-3df8ac08b0ec", x0, c2, d2, tenant_id2, f2, g2, h2) - Clas : | Fail: nova:servers("c62efe5d-d070-4dff-8d9d-3df8ac08b0ec", x0, c2, d2, tenant_id2, f2, g2, h2) - Clas : | Redo: neutron:ports("795a4e6f-7cc8-4052-ae43-80d4c3ad233a", "5f1f9b53-46b2-480f-b653-606f4aaf61fd", "1955273c-242d-46a6-8063-7dc9c20cbba9", "None", "ACTIVE", "", "True", "37eee894-a65f-414d-bd8c-a9363293000a", "e793326db18847e1908e791daa69a5a3", "None", "network:router_interface", "fa:16:3e:28:ab:0b", "4b7e5f9c-9ba8-4c94-a7d0-e5811207d26c", "882911e9-e3cf-4682-bb18-4bf8c559e22d", "c62efe5d-d070-4dff-8d9d-3df8ac08b0ec", "None") - Clas : | Exit: neutron:ports("ebeb4ee6-14be-4ba2-a723-fd62f220b6b9", "f999de49-753e-40c9-9eed-d01ad76bc6c3", "ad058c04-05be-4f56-a76f-7f3b42f36f79", "None", "ACTIVE", "", "True", "07ecce19-d7a4-4c79-924e-1692713e53a7", "e793326db18847e1908e791daa69a5a3", "None", "compute:None", "fa:16:3e:3c:0d:13", "af179309-65f7-4662-a087-e583d6a8bc21", "149f4271-41ca-4b1a-875b-77909debbeac", "bc333f0f-b665-4e2b-97db-a4dd985cb5c8", "None") - Clas : | Call: nova:servers("bc333f0f-b665-4e2b-97db-a4dd985cb5c8", x0, c2, d2, tenant_id2, f2, g2, h2) - Clas : | Exit: nova:servers("bc333f0f-b665-4e2b-97db-a4dd985cb5c8", "vm-demo", "c5dd62237226c4f2eaddea823ca8b4f5c1a3c2d3a27e5e51e407954d", "ACTIVE", "e793326db18847e1908e791daa69a5a3", "cf23fabed97742a9af463002e68068bd", "6183d5a6-e26c-4f48-af4d-5d0b6770a976", "1") - Clas : | Call: neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, "07ecce19-d7a4-4c79-924e-1692713e53a7", i3) - Clas : | Fail: neutron:networks(a3, b3, c3, d3, e3, tenant_id3, f3, g3, h3, "07ecce19-d7a4-4c79-924e-1692713e53a7", i3) - Clas : | Redo: nova:servers("bc333f0f-b665-4e2b-97db-a4dd985cb5c8", "vm-demo", "c5dd62237226c4f2eaddea823ca8b4f5c1a3c2d3a27e5e51e407954d", "ACTIVE", "e793326db18847e1908e791daa69a5a3", "cf23fabed97742a9af463002e68068bd", "6183d5a6-e26c-4f48-af4d-5d0b6770a976", "1") - Clas : | Fail: nova:servers("bc333f0f-b665-4e2b-97db-a4dd985cb5c8", x0, c2, d2, tenant_id2, f2, g2, h2) - Clas : | Redo: neutron:ports("ebeb4ee6-14be-4ba2-a723-fd62f220b6b9", "f999de49-753e-40c9-9eed-d01ad76bc6c3", "ad058c04-05be-4f56-a76f-7f3b42f36f79", "None", "ACTIVE", "", "True", "07ecce19-d7a4-4c79-924e-1692713e53a7", "e793326db18847e1908e791daa69a5a3", "None", "compute:None", "fa:16:3e:3c:0d:13", "af179309-65f7-4662-a087-e583d6a8bc21", "149f4271-41ca-4b1a-875b-77909debbeac", "bc333f0f-b665-4e2b-97db-a4dd985cb5c8", "None") - Clas : | Fail: neutron:ports(a, b, c, d, e, f, g, network_id, tenant_id, j, k, l, m, n, device_id, p) - Clas : Fail: error(x0) - - -Recall that there are 2 rules defining *error*. The part of the trace -occurring before the line break is from one of the rules; the part of the trace -after the line break is from the other. (The line break does not appear in -the trace--we inserted it for the sake of pedagogy.) - -Both rules join the tables neutron:ports, nova:servers, and neutron:networks. 
The
-trace shows the join being computed one row at a time. In this case,
-we see that there is some port (from neutron:ports) connected to a VM
-(from nova:servers) for which there is no record of the port's network
-(from neutron:networks). Specifically, there is a row missing from
-neutron:networks: the one with ID 07ecce19-d7a4-4c79-924e-1692713e53a7.
-
-At this point, it seems clear that the problem is with the Neutron datasource,
-not the rules.
-
-
-
-Datasource troubleshooting
---------------------------
-
-At this point, you believe the problem is with one of the datasources. The first
-thing to consider is whether Congress can properly connect to the associated cloud service.
-The best way to do that is to examine the tables that the problematic datasource
-is exporting. If the tables being exported by a service are empty, the datasource
-driver is not properly connecting to the datasource.
-
-**Check**: Ensure each (relevant) datasource is exporting the tables
-the documentation says should be exported::
-
-    $ curl -X GET localhost:1789/v1/data-sources/<datasource-id>/tables
-
-To fix connection problems, do both of the following.
-
-  * Ensure the datasource component is enabled in devstack (if you're using devstack)
-  * Fix the configuration of the datasource by asking to see its current configuration,
-    and if it is wrong, delete that datasource and create a new one with the proper
-    configuration. Don't forget that datasources sometimes return different
-    information for different username/password combinations.
-
-Below are examples of how to list datasource configuration, delete an existing datasource,
-and create a new datasource::
-
-    # show list and configuration options for each
-    $ curl -X GET localhost:1789/v1/data-sources
-
-    # delete old datasource
-    $ curl -X DELETE http://127.0.0.1:1789/v1/data-sources/<datasource-id>
-
-    # create new datasource
-    $ curl -X POST localhost:1789/v1/data-sources -d
-       '{"config": {"username": "admin",
-                    "tenant_name": "admin",
-                    "password": "password",
-                    "auth_url": "http://127.0.0.1:5000/v2"},
-         "driver": "neutronv2",
-         "name": "neutronv2"}'
-
-
-For example, below we see that the *neutron* datasource is exporting all
-the right tables::
-
-    $ curl -X GET localhost:1789/v1/data-sources/<datasource-id>/tables
-    {
-      "results": [
-        {
-          "id": "ports.binding_capabilities"
-        },
-        {
-          "id": "routers"
-        },
-        {
-          "id": "ports.extra_dhcp_opts"
-        },
-        {
-          "id": "ports.fixed_ips"
-        },
-        {
-          "id": "ports"
-        },
-        {
-          "id": "ports.fixed_ips_groups"
-        },
-        {
-          "id": "ports.security_groups"
-        },
-        {
-          "id": "networks.subnets"
-        },
-        {
-          "id": "networks"
-        },
-        {
-          "id": "security_groups"
-        },
-        {
-          "id": "ports.address_pairs"
-        }
-      ]
-    }
-
-
-Once the datasource is properly configured and is returning the proper
-list of tables, the next potential problem is that the rows of
-one of the tables are incorrect.
-
-**Check**: Ensure the rows of each of the tables exported by the
-datasource are correct::
-
-    $ curl -X GET localhost:1789/v1/data-sources/<datasource-id>/tables/<table-name>/rows
-
-To check that the rows are correct, you'll need to look at the datasource's schema
-to see what each column means and compare that to the
-current contents of the actual datasource.
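-For a service like Neutron, one way to get the ground truth for that comparison
-is to query the service directly with its own CLI and compare by eye. A sketch,
-assuming the devstack credentials used elsewhere in these docs::
-
-    $ source ~/devstack/openrc admin admin
-    $ neutron net-list
-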
-For example, we can look at the rows of the *networks* table in the *neutron*
-service. In this example, there are two rows. Each row is the value of
-the *data* key::
-
-    $ curl -X GET localhost:1789/v1/data-sources/<datasource-id>/tables/networks/rows
-    {
-      "results": [
-        {
-          "data": [
-            "ACTIVE",
-            "public",
-            "faf2c578-2893-11e4-b1e3-fa163ebf1676",
-            "None",
-            "True",
-            "d0a7ff9e5d5b4130a586a7af1c855c3e",
-            "None",
-            "True",
-            "False",
-            "0d31bf61-c749-4791-8cf2-345f624bad8d",
-            "None"
-          ]
-        },
-        {
-          "data": [
-            "ACTIVE",
-            "private",
-            "faf31ec4-2893-11e4-b1e3-fa163ebf1676",
-            "None",
-            "True",
-            "e793326db18847e1908e791daa69a5a3",
-            "None",
-            "False",
-            "False",
-            "37eee894-a65f-414d-bd8c-a9363293000a",
-            "None"
-          ]
-        }
-      ]
-    }
-
-Compare these rows to the schema for this datasource::
-
-    $ curl -X GET localhost:1789/v1/data-sources/<datasource-id>/schema
-
-    {'tables': [
-        ...
-        {'columns': [
-            {'description': 'None', 'name': 'status'},
-            {'description': 'None', 'name': 'name'},
-            {'description': 'None', 'name': 'subnet_group_id'},
-            {'description': 'None', 'name': 'provider_physical_network'},
-            {'description': 'None', 'name': 'admin_state_up'},
-            {'description': 'None', 'name': 'tenant_id'},
-            {'description': 'None', 'name': 'provider_network_type'},
-            {'description': 'None', 'name': 'router_external'},
-            {'description': 'None', 'name': 'shared'},
-            {'description': 'None', 'name': 'id'},
-            {'description': 'None', 'name': 'provider_segmentation_id'}],
-         'table_id': 'networks'},
-        ...]}
-
-The schema says the 1st column is the network's status, which in both the
-rows above has the value "ACTIVE".
-The schema says the 10th column is the network's ID, which in the
-two rows above are 0d31bf61-c749-4791-8cf2-345f624bad8d and
-37eee894-a65f-414d-bd8c-a9363293000a. Notice that the network that was missing
-in our earlier analysis of the policy trace is missing from here as well:
-07ecce19-d7a4-4c79-924e-1692713e53a7.
-
-This points to a problem in the configuration of the datasource, in particular
-using a username/password combination that does not return all the networks.
-
-
-Message bus troubleshooting
----------------------------
-
-One thing that sometimes happens is that the datasource has the right rows,
-but the policy engine does not. For example, the *networks* table of the
-*neutron* service is not identical to the *neutron:networks* table.
-Typically, this means that the policy engine simply hasn't received and
-processed the update from the datasource on the message bus. Waiting
-several seconds should fix the problem.
-
-
-**Check**: Compare the policy engine's version of a table to the datasource's
-version. Remember that the policy engine's name for table T in datasource
-D is D:T, e.g. the *networks* table for service *neutron* is named *neutron:networks*::
-
-    curl -X GET localhost:1789/v1/policies/classification/tables/<datasource-name>:<table-name>/rows
-    curl -X GET localhost:1789/v1/data-sources/<datasource-id>/tables/<table-name>/rows
-
-
-**Warning**: In the current datasource drivers for Neutron and Nova, a
-single API call can generate several different tables. Each table is sent independently
-on the message bus, which can lead to inconsistencies between tables (e.g.
-table *neutron:ports* might be out of sync with *neutron:ports.security_groups*).
-This kind of data skew is an artifact of our implementation and will be
-addressed in a future release. The best solution currently is to wait
-until all the messages from the latest polling reach the policy engine.
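-For instance, to spot-check the *networks* data from the running example, one
-might compare the two views directly (the datasource id comes from
-``GET /v1/data-sources``, shown earlier)::
-
-    $ curl -X GET localhost:1789/v1/policies/classification/tables/neutron:networks/rows
-    $ curl -X GET localhost:1789/v1/data-sources/<datasource-id>/tables/networks/rows
-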
-
-A similar problem can arise when two datasources are out of sync with each other.
-This happens because the two datasources are polled independently. If something
-changes one of the datasources in between when those datasources are polled,
-the local cache Congress has will be out of sync. In a future release, we
-will provide machinery for mitigating the impact of these kinds of synchronization
-problems.
-
-
-Production troubleshooting
---------------------------
-
-Another class of problems arises most often in production deployments.
-Below is one such problem, along with tips for solving it.
-
-1. Log file too big
-
-**Symptom**: slower than normal performance, log size not changing
-
-**Solution**: set up logrotate (a Linux service). In the directory
-  `/etc/logrotate.d`, include a file `congress` and add an entry
-  such as the one shown below. (Here we're assuming the congress log
-  is in /var/log/congress)::
-
-    /var/log/congress
-    {
-        rotate 7
-        daily
-        missingok
-        notifempty
-        delaycompress
-        compress
-    }
-
-
diff --git a/doc/source/tutorial-tenant-sharing.rst b/doc/source/tutorial-tenant-sharing.rst
deleted file mode 100644
index 285c47f7..00000000
--- a/doc/source/tutorial-tenant-sharing.rst
+++ /dev/null
@@ -1,360 +0,0 @@
-Congress Tutorial - Tenant Sharing Policy
-=========================================
-
-Overview
---------
-This tutorial illustrates how to create a Congress monitoring policy
-that detects when one OpenStack tenant shares a network with another
-OpenStack tenant, and then flags that sharing as a policy violation.
-
-**Data Source Tables**
-
-* Neutron networks: list of each network and its owner tenant.
-* Neutron ports: list of each port and its owner tenant.
-* Nova servers: list of each server, and its owner tenant.
-
-
-**Detailed Policy Description**
-
-This policy collects the owner information for each server, any ports
-that server is connected to, and each network those ports are part of.
-It then verifies that the owner (tenant_id) is the same for the
-server, ports, and networks. If the tenant_id does not match, the
-policy will insert the server's name into the Congress error table.
-
-
-Setting up Devstack
--------------------
-
-The first step is to install and configure Devstack + Congress:
-
-1) Install Devstack and Congress using the directions in the following
-   README. When asked for a password, type "password" without the quotes.
-
-   https://github.com/openstack/congress/blob/master/README.rst#41-devstack-install
-
-2) The Devstack installation script will automatically create a data source
-   instance of the neutronv2 driver. If you are not using Devstack, you will
-   need to create the data source:
-
-   a.) If your environment supports identity v3 then::
-
-       $ AUTH_URL=`openstack endpoint list --service identity | grep "public" | awk '{print $14}'`
-
-   b.) If your environment only supports identity v2 then::
-
-       $ AUTH_URL=`openstack endpoint show identity | grep "public" | awk '{print $4}'`
-
-   Then, in either case::
-
-       $ openstack congress datasource create neutronv2 neutronv2 --config username=admin --config tenant_name=admin --config password=password --config auth_url=$AUTH_URL
-
-3) Change auth_strategy from "keystone" to "noauth" in
-   /etc/congress/congress.conf
-
-4) Restart congress-server:
-
-   a.) Devstack uses systemd by default, so::
-
-       $ sudo systemctl restart devstack@congress.service
-
-   b.) If you still use screen then::
-
-       $ screen -x stack
-       switch to the congress window: <ctrl-a> ' <congress-window-number> <enter>
-       <ctrl-c> to stop the congress service
-       <up-arrow> <enter> to restart it
-       <ctrl-a> d to detach from the screen session
-
-
-Setting up an OpenStack VM and network
---------------------------------------
-
-At this point, Devstack and Congress are running and ready to accept
-API calls. Now you can setup the OpenStack environment, including a
-network and subnet owned by the "admin" tenant, a port owned by the
-"demo" tenant, and a VM owned by the "demo" tenant.
-
-5) Change to the congress directory::
-
-    $ cd /opt/stack/congress
-
-6) Login as the admin tenant::
-
-    $ source ~/devstack/openrc admin admin
-
-7) Create a network called "network-admin". Note this is owned by the admin
-   tenant::
-
-    $ neutron net-create network-admin
-    Created a new network:
-    +-----------------------+--------------------------------------+
-    | Field                 | Value                                |
-    +-----------------------+--------------------------------------+
-    | admin_state_up        | True                                 |
-    | id                    | a4130b34-81b4-46df-af3a-f133b277592e |
-    | name                  | network-admin                        |
-    | port_security_enabled | True                                 |
-    | shared                | False                                |
-    | status                | ACTIVE                               |
-    | subnets               |                                      |
-    | tenant_id             | 7320f8345acb489e8296ddb3b1ad1262     |
-    +-----------------------+--------------------------------------+
-
-8) Create a subnet called "subnet-admin". Note this is owned by the admin
-   tenant::
-
-    $ neutron subnet-create network-admin 2.2.2.0/24 --name subnet-admin
-    Created a new subnet:
-    +-------------------+------------------------------------------+
-    | Field             | Value                                    |
-    +-------------------+------------------------------------------+
-    | allocation_pools  | {"start": "2.2.2.2", "end": "2.2.2.254"} |
-    | cidr              | 2.2.2.0/24                               |
-    | dns_nameservers   |                                          |
-    | enable_dhcp       | True                                     |
-    | gateway_ip        | 2.2.2.1                                  |
-    | host_routes       |                                          |
-    | id                | 6ff5faa3-1752-4b4f-b744-2e0744cb9208     |
-    | ip_version        | 4                                        |
-    | ipv6_address_mode |                                          |
-    | ipv6_ra_mode      |                                          |
-    | name              | subnet-admin                             |
-    | network_id        | a4130b34-81b4-46df-af3a-f133b277592e     |
-    | tenant_id         | 7320f8345acb489e8296ddb3b1ad1262         |
-    +-------------------+------------------------------------------+
-
-9) Create a port owned by the demo tenant::
-
-    $ source ~/devstack/openrc admin demo
-    $ neutron port-create network-admin | tee port-create.log
-    Created a new port:
-    +-----------------------+--------------------------------------------------------------------------------+
-    | Field                 | Value                                                                          |
-    +-----------------------+--------------------------------------------------------------------------------+
-    | admin_state_up        | True                                                                           |
-    | allowed_address_pairs |                                                                                |
-    | binding:host_id       |                                                                                |
-    | binding:profile       | {}                                                                             |
-    | binding:vif_details   | {}                                                                             |
-    | binding:vif_type      | unbound                                                                        |
-    | binding:vnic_type     | normal                                                                         |
-    | device_id             |                                                                                |
-    | device_owner          |                                                                                |
-    | fixed_ips             | {"subnet_id": "6ff5faa3-1752-4b4f-b744-2e0744cb9208", "ip_address": "2.2.2.2"} |
-    | id                    | 066c5cfc-949e-4d56-ad76-15528c68c8b8                                           |
-    | mac_address           | fa:16:3e:e9:f8:2a                                                              |
-    | name                  |                                                                                |
-    | network_id            | a4130b34-81b4-46df-af3a-f133b277592e                                           |
-    | security_groups       | dd74db4f-fe35-4a51-b920-313fd36837f2                                           |
-    | status                | DOWN                                                                           |
-    | tenant_id             | 81084a94769c4ce0accb6968c397a085                                               |
-    +-----------------------+--------------------------------------------------------------------------------+
-
-    $ PORT_ID=`grep " id " port-create.log | awk '{print $4}'`
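-Before booting the VM, it is worth a quick sanity check that the port ID was
-captured from the log (the UUID shown is the one created in step 9)::
-
-    $ echo $PORT_ID
-    066c5cfc-949e-4d56-ad76-15528c68c8b8
-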
-10) Create a VM named "vm-demo" with the newly created port. The VM is owned by
-    the demo tenant::
-
-    $ nova boot --image cirros-0.3.4-x86_64-uec --flavor 1 vm-demo --nic port-id=$PORT_ID
-    +--------------------------------------+----------------------------------------------------------------+
-    | Property                             | Value                                                          |
-    +--------------------------------------+----------------------------------------------------------------+
-    | OS-DCF:diskConfig                    | MANUAL                                                         |
-    | OS-EXT-AZ:availability_zone          | nova                                                           |
-    | OS-EXT-SRV-ATTR:host                 | Ubuntu1204Server                                               |
-    | OS-EXT-SRV-ATTR:hypervisor_hostname  | Ubuntu1204Server                                               |
-    | OS-EXT-SRV-ATTR:instance_name        | instance-00000001                                              |
-    | OS-EXT-STS:power_state               | 0                                                              |
-    | OS-EXT-STS:task_state                | networking                                                     |
-    | OS-EXT-STS:vm_state                  | building                                                       |
-    | OS-SRV-USG:launched_at               | -                                                              |
-    | OS-SRV-USG:terminated_at             | -                                                              |
-    | accessIPv4                           |                                                                |
-    | accessIPv6                           |                                                                |
-    | adminPass                            | js6ZnNjX82rQ                                                   |
-    | config_drive                         |                                                                |
-    | created                              | 2014-08-15T00:08:11Z                                           |
-    | flavor                               | m1.tiny (1)                                                    |
-    | hostId                               | 930764f06a4a5ffb8e433b24efce63fd5096ddaee5e62b439169fbdf       |
-    | id                                   | 19b6049e-fe69-416a-b6f1-c02afaf54a34                           |
-    | image                                | cirros-0.3.4-x86_64-uec (e8dc8305-c9de-42a8-b3d1-6b1bc9869f32) |
-    | key_name                             | -                                                              |
-    | metadata                             | {}                                                             |
-    | name                                 | vm-demo                                                        |
-    | os-extended-volumes:volumes_attached | []                                                             |
-    | progress                             | 0                                                              |
-    | security_groups                      | default                                                        |
-    | status                               | BUILD                                                          |
-    | tenant_id                            | 81084a94769c4ce0accb6968c397a085                               |
-    | updated                              | 2014-08-15T00:08:12Z                                           |
-    | user_id                              | 3d6c6119e5c94c258a26ab246cdcac12                               |
-    +--------------------------------------+----------------------------------------------------------------+
-
-11) Get tenant ids::
-
-    $ openstack project list | tee tenant-list.log
-    +----------------------------------+--------------------+
-    | id                               | name               |
-    +----------------------------------+--------------------+
-    | 7320f8345acb489e8296ddb3b1ad1262 | admin              |
-    | 81084a94769c4ce0accb6968c397a085 | demo               |
-    | 315d4a5892ed4da1bdf717845e8959df | invisible_to_admin |
-    | b590e27c87fa40c18c850954dca4c879 | service            |
-    +----------------------------------+--------------------+
-
-    $ ADMIN_ID=`grep " admin " tenant-list.log | awk '{print $2}'`
-    $ DEMO_ID=`grep " demo " tenant-list.log | awk '{print $2}'`
-
-Creating a Congress Policy
---------------------------
-
-At this point, demo's VM exists and its port is connected to a
-network belonging to admin. This is a violation of the policy. Now
-you will add the congress policy to detect the violation.
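-Stripped of the API plumbing, the heart of the policy you are about to create
-is the group-membership table below; the two *error* rules added in steps 12
-and 13 then compare tenant IDs through it (tenant IDs shown as the shell
-variables defined in step 11)::
-
-    same_group(x, y) :- group(x, g), group(y, g)
-    group("$ADMIN_ID", "IT") :- true
-    group("$DEMO_ID", "Marketing") :- true
-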
- -12) Add a rule that detects when a VM is connected to a port belonging to a - different group:: - - CongressClient: - $ openstack congress policy rule create classification "error(name2) :- neutronv2:ports(a, tenant_id, c, network_id, e, f, g, device_id, i), nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2), neutronv2:networks(network_id, tenant_id3, c3, d3, e3, f3), not same_group(tenant_id, tenant_id2)" - +---------+--------------------------------------------------------------------------+ - | Field | Value | - +---------+--------------------------------------------------------------------------+ - | comment | None | - | id | c235f3a6-44cc-4222-8201-80188f9601ce | - | name | None | - | rule | error(name2) :- | - | | neutronv2:ports(a, tenant_id, c, network_id, e, f, g, device_id, i), | - | | nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2), | - | | neutronv2:networks(network_id, tenant_id3, c3, d3, e3, f3), | - | | not same_group(tenant_id, tenant_id2) | - +---------+--------------------------------------------------------------------------+ - - or:: - - $ curl -X POST localhost:1789/v1/policies/classification/rules -d '{"rule": "error(name2) :- neutronv2:ports(a, tenant_id, c, network_id, e, f, g, device_id, i), nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2), neutronv2:networks(network_id, tenant_id3, c3, d3, e3, f3), not same_group(tenant_id, tenant_id2)"}' - {"comment": null, "id": "783ff249-6a52-4691-baf7-3cdfb8f9d200", "rule": "error(name2) :- \n neutronv2:ports(a, tenant_id, c, network_id, e, f, g, device_id, i),\n nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2),\n neutronv2:networks(network_id, tenant_id3, c3, d3, e3, f3),\n not same_group(tenant_id, tenant_id2)", "name": null} - - -13) Add a rule that detects when a port is connected to a network belonging to - a different group:: - - CongressClient: - $ openstack congress policy rule create classification "error(name2) :- neutronv2:ports(a, tenant_id, c, network_id, e, f, g, device_id, i), nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2), neutronv2:networks(network_id, tenant_id3, c3, d3, e3, f3), not same_group(tenant_id2, tenant_id3)" - +---------+--------------------------------------------------------------------------+ - | Field | Value | - +---------+--------------------------------------------------------------------------+ - | comment | None | - | id | f7369e20-8b1b-4315-9b68-68197d740521 | - | name | None | - | rule | error(name2) :- | - | | neutronv2:ports(a, tenant_id, c, network_id, e, f, g, device_id, i), | - | | nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2), | - | | neutronv2:networks(network_id, tenant_id3, c3, d3, e3, f3), | - | | not same_group(tenant_id2, tenant_id3) | - +---------+--------------------------------------------------------------------------+ - - or:: - - $ curl -X POST localhost:1789/v1/policies/classification/rules -d '{"rule": "error(name2) :- neutronv2:ports(a, tenant_id, c, network_id, e, f, g, device_id, i), nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2), neutronv2:networks(network_id, tenant_id3, c3, d3, e3, f3), not same_group(tenant_id2, tenant_id3)"}' - {"comment": null, "id": "f7708411-a0fc-4ee8-99e6-0f4be7e980ff", "rule": "error(name2) :- \n neutronv2:ports(a, tenant_id, c, network_id, e, f, g, device_id, i),\n nova:servers(device_id, name2, c2, d2, tenant_id2, f2, g2, h2),\n neutronv2:networks(network_id, tenant_id3, c3, d3, e3, f3),\n not same_group(tenant_id2, tenant_id3)", 
"name": null} - -14) Define a table mapping a tenant_id to any other tenant in the same group:: - - CongressClient: - $ openstack congress policy rule create classification "same_group(x, y) :- group(x, g), group(y, g)" - +---------+--------------------------------------+ - | Field | Value | - +---------+--------------------------------------+ - | comment | None | - | id | a3d0cfcb-d013-4578-ac60-3e8cefb4ab35 | - | name | None | - | rule | same_group(x, y) :- | - | | group(x, g), | - | | group(y, g) | - +---------+--------------------------------------+ - - or:: - - $ curl -X POST localhost:1789/v1/policies/classification/rules -d '{"rule": "same_group(x, y) :- group(x, g), group(y, g)"}' - {"comment": null, "id": "e919d62e-b9af-4b50-a22c-c266379417b8", "rule": "same_group(x, y) :- \n group(x, g),\n group(y, g)", "name": null} - -15) Create a table mapping tenant_id to a group name. admin and demo are in - two separate groups called "IT" and "Marketing" respectively. In practice, - this "group" table would receive group membership information from a system - like Keystone or ActiveDirectory. In this tutorial, we'll populate the - group table with membership information manually:: - - CongressClient: - $ openstack congress policy rule create classification "group(\"$ADMIN_ID\", \"IT\") :- true" - +---------+-----------------------------------------------------+ - | Field | Value | - +---------+-----------------------------------------------------+ - | comment | None | - | id | 97a6aeb0-0c9d-493b-8b0c-77691c1c3547 | - | name | None | - | rule | group("14a3eb4f5b234b578ff905a4bec71605", "IT") :- | - | | true() | - +---------+-----------------------------------------------------+ - - or:: - - $ curl -X POST localhost:1789/v1/policies/classification/rules -d "{\"rule\": \"group(\\\"$ADMIN_ID\\\", \\\"IT\\\") :- true \"}" - {"comment": null, "id": "4a51b768-1458-4c68-881f-1cf2f1edb344", "rule": "group(\"14a3eb4f5b234b578ff905a4bec71605\", \"IT\") :- \n true()", "name": null} - - Then:: - - CongressClient: - $ openstack congress policy rule create classification "group(\"$DEMO_ID\", \"Marketing\") :- true" - +---------+------------------------------------------------------------+ - | Field | Value | - +---------+------------------------------------------------------------+ - | comment | None | - | id | 67c0d86d-f7cf-4db1-9efa-4d46960a3905 | - | name | None | - | rule | group("8f08a89de9c945d4ac7f945f1d93b676", "Marketing") :- | - | | true() | - +---------+------------------------------------------------------------+ - - or:: - - $ curl -X POST localhost:1789/v1/policies/classification/rules -d "{\"rule\": \"group(\\\"$DEMO_ID\\\", \\\"Marketing\\\") :- true \"}" - {"comment": null, "id": "e6b57c8f-ffd2-4acf-839c-83284519ae3c", "rule": "group(\"8f08a89de9c945d4ac7f945f1d93b676\", \"Marketing\") :- \n true()", "name": null} - -Listing Policy Violations -------------------------- - -Finally, we can print the error table to see if there are any -violations (which there are). - -16) List the errors. 
You should see one entry for "vm-demo"::
-
- $ curl -X GET localhost:1789/v1/policies/classification/tables/error/rows
- {
- "results": [
- {
- "data": [
- "vm-demo"
- ]
- }
- ]
- }
-
-Fix the Policy Violation
------------------------
-
-17) To fix the policy violation, we'll remove demo's port from admin's
- network::
-
- $ neutron port-delete $PORT_ID
- Deleted port: 066c5cfc-949e-4d56-ad76-15528c68c8b8
-
-Relisting Policy Violations
----------------------------
-
-18) Now, when we print the error table, it will be empty because there are no
- violations::
-
- $ curl -X GET localhost:1789/v1/policies/classification/tables/error/rows
- {
- "results": []
- }
-
diff --git a/etc/README-congress.conf.txt b/etc/README-congress.conf.txt
deleted file mode 100644
index 312d2790..00000000
--- a/etc/README-congress.conf.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-To generate the sample congress.conf file, run the following
-command from the top level of the congress directory (may need root
-privilege):
-
-tox -egenconfig #Generates etc/congress.conf.sample
-
-If tox is not installed, you may install it as follows:
-
-$ sudo pip install tox
-
-If you experience error(s) generating a sample conf file,
-you may be able to resolve them by ensuring you have
-virtualenv version 12.0.0 or higher installed.
-
-$ virtualenv --version # check virtualenv version
diff --git a/etc/api-paste.ini b/etc/api-paste.ini
deleted file mode 100644
index f07e9222..00000000
--- a/etc/api-paste.ini
+++ /dev/null
@@ -1,38 +0,0 @@
-[composite:congress]
-use = egg:Paste#urlmap
-/: congressversions
-/v1: congress_api_v1
-
-[pipeline:congressversions]
-pipeline = cors http_proxy_to_wsgi catch_errors congressversionapp
-
-[app:congressversionapp]
-paste.app_factory = congress.api.versions:Versions.factory
-
-[composite:congress_api_v1]
-use = call:congress.auth:pipeline_factory
-keystone = cors http_proxy_to_wsgi request_id catch_errors authtoken keystonecontext congress_api
-noauth = cors http_proxy_to_wsgi request_id catch_errors congress_api
-
-[app:congress_api]
-paste.app_factory = congress.service:congress_app_factory
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:catch_errors]
-paste.filter_factory = oslo_middleware:CatchErrors.factory
-
-[filter:keystonecontext]
-paste.filter_factory = congress.auth:CongressKeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = congress
-
-[filter:http_proxy_to_wsgi]
-paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
-oslo_config_project = congress
diff --git a/etc/congress-config-generator.conf b/etc/congress-config-generator.conf
deleted file mode 100644
index a16ec9a7..00000000
--- a/etc/congress-config-generator.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-[DEFAULT]
-output_file = etc/congress.conf.sample
-wrap_width = 79
-namespace = congress
-namespace = oslo.log
-namespace = oslo.policy
-namespace = oslo.db
-namespace = oslo.middleware.cors
-namespace = oslo.messaging
-namespace = keystonemiddleware.auth_token
diff --git a/etc/policy.json b/etc/policy.json
deleted file mode 100644
index 4476051d..00000000
--- a/etc/policy.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "context_is_admin": "role:admin",
- "admin_only": "rule:context_is_admin",
- "regular_user": "",
- "default": "rule:admin_only"
-}
diff --git a/examples/murano/predeploy_simulate.sh
b/examples/murano/predeploy_simulate.sh deleted file mode 100755 index b95539eb..00000000 --- a/examples/murano/predeploy_simulate.sh +++ /dev/null @@ -1,111 +0,0 @@ -cd /opt/stack/congress -source ~/devstack/openrc admin demo - -echo "Creating datasource murano: user=admin, tenant=demo" -openstack congress datasource create murano "murano" \ - --config username="admin" \ - --config tenant_name="demo" \ - --config password="password" \ - --config auth_url="http://127.0.0.1:5000/v2.0" - -echo "Deleting all existing rules of murano_system policy" -rule_ids=(`openstack congress policy rule list murano_system | \ -grep "// ID:" | awk '{print $3}'`) -for i in "${rule_ids[@]}" -do - echo "delete rule ${i}" - openstack congress policy rule delete murano_system ${i} -done - -echo "Deleting murano_system policy if exists" -murano_system_uuid=(`openstack congress policy list | \ - grep murano_system | awk '{print $2}'`) -if [ -n "$murano_system_uuid" ] -then - echo "Found existing $murano_system_uuid" - openstack congress policy delete $murano_system_uuid - echo "$murano_system_uuid deleted" -fi - -echo -echo "Create murano_system policy" -openstack congress policy create murano_system - -openstack congress policy rule create murano_system ' -allowed_flavors(flavor) :- - nova:flavors(flavor_id, flavor, vcpus, ram, disk, ephemeral, rxtx_factor), - equal(flavor, "m1.medium")' - -openstack congress policy rule create murano_system ' -allowed_flavors(flavor) :- - nova:flavors(flavor_id, flavor, vcpus, ram, disk, ephemeral, rxtx_factor), - equal(flavor, "m1.small")' - -openstack congress policy rule create murano_system ' -allowed_flavors(flavor) :- - nova:flavors(flavor_id, flavor, vcpus, ram, disk, ephemeral, rxtx_factor), - equal(flavor, "m1.tiny")' - -openstack congress policy rule create murano_system ' -murano_pending_envs(env_id) :- - murano:objects(env_id, tenant_id, "io.murano.Environment"), - murano:states(env_id, env_state), - equal(env_state, "pending")' - -openstack congress policy rule create murano_system ' -murano_instances(env_id, instance_id) :- - murano:objects(env_id, tenant_id, "io.murano.Environment"), - murano:objects(service_id, env_id, service_type), - murano:parent_types(service_id, "io.murano.Object"), - murano:parent_types(service_id, "io.murano.Application"), - murano:parent_types(service_id, service_type), - murano:objects(instance_id, service_id, instance_type), - murano:parent_types(instance_id, "io.murano.resources.Instance"), - murano:parent_types(instance_id, "io.murano.Object"), - murano:parent_types(instance_id, instance_type)' - -openstack congress policy rule create murano_system ' -murano_instance_flavors(instance_id, flavor) :- - murano:properties(instance_id, "flavor", flavor)' - -openstack congress policy rule create murano_system ' -predeploy_error(env_id) :- - murano_pending_envs(env_id), - murano_instances(env_id, instance_id), - murano_instance_flavors(instance_id, flavor), - not allowed_flavors(flavor)' - -echo "" -echo "--- simulate policy ---" -echo 'env_id = "env_uuid", flavor = "m1.small"' -openstack congress policy simulate murano_system 'predeploy_error(env_id)' ' - murano:objects+("env_uuid", "tenant_uuid", "io.murano.Environment") - murano:states+("env_uuid", "pending") - - murano:objects+("service_uuid", "env_uuid", "service_type") - murano:parent_types+("service_uuid", "io.murano.Object") - murano:parent_types+("service_uuid", "io.murano.Application") - murano:parent_types+("service_uuid", "service_type") - murano:objects+("instance_uuid", 
"service_uuid", "instance_type") - murano:parent_types+("instance_uuid", "io.murano.resources.Instance") - murano:parent_types+("instance_uuid", "io.murano.Object") - murano:parent_types+("instance_uuid", "instance_type") - - murano:properties+("instance_uuid", "flavor", "m1.small")' action - -echo "---" -echo 'env_id = "env_uuid", flavor = "m1.large"' -openstack congress policy simulate murano_system 'predeploy_error(env_id)' ' - murano:objects+("env_uuid", "tenant_uuid", "io.murano.Environment") - murano:states+("env_uuid", "pending") - - murano:objects+("service_uuid", "env_uuid", "service_type") - murano:parent_types+("service_uuid", "io.murano.Object") - murano:parent_types+("service_uuid", "io.murano.Application") - murano:parent_types+("service_uuid", "service_type") - murano:objects+("instance_uuid", "service_uuid", "instance_type") - murano:parent_types+("instance_uuid", "io.murano.resources.Instance") - murano:parent_types+("instance_uuid", "io.murano.Object") - murano:parent_types+("instance_uuid", "instance_type") - - murano:properties+("instance_uuid", "flavor", "m1.large")' action \ No newline at end of file diff --git a/examples/neutron.action b/examples/neutron.action deleted file mode 100644 index 15dee364..00000000 --- a/examples/neutron.action +++ /dev/null @@ -1,422 +0,0 @@ - -// import("options") -// import("mac") -// import("ip") - -/////////////////////////////////////////////////////////// -// Issues -/////////////////////////////////////////////////////////// - -// Some actions invoke other actions (e.g. delete_network invokes -// delete_subnet). There's a simple way of writing this, but -// when we do projection, we would need to use a fixed-point -// computation to compute all the changes. Seemingly simple to do -// but haven't done it; hence, the delete_network example has this -// fragment commented out. Note: this impacts how we store/represent -// results. -// Notion of 'results' is clunky and may be problematic down the road. -// Need mapping from real neutron actions to these actions when simulating. -// - Each Bulk operation maps to a sequence of our actions -// - Subnet creation maps to one subnet_create and a series of -// assign_subnet_allocation_pools -// Not capturing all of the constraints on when an action can be execed -// - create_subnet and delete_subnet require manipulating IP addresses -// - see notes -// Not properly handling access control policy -- additional conditions should -// be grafted onto the policy below, after analyzing AC policy -// Because we're not necessarily grounding everything, builtins/negation -// may be encountered that are not ground. Perhaps the right answer is -// to delay their evaluation until they are ground. For builtins, it -// is probably okay to just return the unground ones, e.g. that way -// skolemization turns into constraint solving. For negatives, -// it is an error if we never get to evaluate them. I suppose this is -// just a variant of abduction. But it means that we should always be using -// abduction when processing the action theory. -// Should be 'import'ing modules that we care about. Implementationally, -// this is straightforward to implement using theory includes. Need -// to enable cycles through imports, which would cause an infinite loop -// during top_down_evaluation. But since we -// are always querying the top-level action theory, we should be -// able to eliminate cycles. Or we could modify top_down_includes -// to avoid the infinite loop. 
-
-///////////////////////////////////////////////////////////
-// Congress-defined "actions"/builtins
-///////////////////////////////////////////////////////////
-
-// Representing optional parameters to API calls with options:value
-// Maybe use 'support' instead of 'action' so that during
-// abduction we can save both 'support' and 'action' but
-// we do not get confused about what the legit actions are.
-action("options:value")
-
-// hacks to avoid dealing with builtins
-action("sys:userid") // the name of the user currently logged in
-action("sys:user") // a list of all users
-action("sys:mac_address") // 'list' of all mac addresses
-action("cms:admin") // group of users considered administrators
-
-// Options module
-// Check if a key is present
-options:present(options, key) :-
- options:value(options, key, value)
-// Compute value of one of the options, providing a default if not present
-// options:lookup(options, key, default, result)
-options:lookup(options, key, old, old) :-
- not options:present(options, key)
-options:lookup(options, key, old, new) :-
- options:value(options, key, new)
-
-// Mac address module
-// mac:mac_address(mac) //builtin
-
-// IP module
-// ip:ip_address(ip) // builtin
-// ip:ip_in_range(ip, start, end) //builtin
-
-
-///////////////////////////////////////////////////////////
-// Neutron helpers
-///////////////////////////////////////////////////////////
-
-// tenant_id creation
-// If not present or not admin, login ID
-// Otherwise, supplied value
-neutron:compute.create.tenant(options, userid) :-
- sys:user(userid),
- not options:present(options, "tenant_id")
-neutron:compute.create.tenant(options, userid) :-
- sys:user(userid),
- not cms:admin(userid)
-neutron:compute.create.tenant(options, value) :-
- sys:user(userid),
- cms:admin(userid),
- options:value(options, "tenant_id", value)
-
-
-// tenant_id update
-// Cannot update tenant_id unless user requesting action is admin
-neutron:compute.update.tenant(options, old, old) :-
- not options:present(options, "tenant_id")
-neutron:compute.update.tenant(options, old, tenant_op) :-
- options:present(options, "tenant_id"),
- options:value(options, "tenant_id", tenant_op),
- sys:user(userid),
- cms:admin(userid)
-
-
-///////////////////////////////////////////////////////////
-// Ports
-///////////////////////////////////////////////////////////
-
-/////////////////////////////
-// Create port
-// Modeling differences from reality:
-// - Fixed_ips and security_groups cannot be provided at creation-time
-// - Fixed_ips and security_groups can only be updated after creation
-// - Storing fixed_ips and security_groups in separate tables, keyed
-// on the port's ID.
-
-// Tables we're manipulating:
-// neutron:port(id, network_id, name, mac_address, device_id, device_owner, status, admin_state_up, tenant_id)
-// neutron:port.ip(id, ip)
-// neutron:available_ip(ip)
-// neutron:port.security_group(id, group_id)
-
-action("neutron:create_port")
-
-result(id) :-
- neutron:port+(id, network_id, name, mac_address, "null", "null", status, admin_state_up, tenant_id)
-
-neutron:port+(id, network_id, name, mac_address, "null", "null", status, admin_state_up, tenant_id) :-
- neutron:create_port(network_id, options),
- options:lookup(options, "name", "", name),
- neutron:create_port.compute.mac_address(options, mac_address),
- neutron:compute.create.tenant(options, tenant_id)
-
-// If a value is given but it is not a valid mac_address, it's a noop.
-neutron:create_port.compute.mac_address(options, mac_address) :- - not options:present(options, "mac_address") -neutron:create_port.compute.mac_address(options, value) :- - options:value(options, "mac_address", value), - mac:mac_address(value) - -///////////////////////////// -// Update port -// Note: updating a port is not the same as deleting old and adding new -// but that's how we have to model it. If we generate a remediation -// with both a delete and an add, we need to postprocess it to create -// an update. Of course, that requires knowing what the add/delete/update -// actions are and how to combine an add and a delete to produce an update. -// Annoying, but no way around it I can see. Maybe for the common case -// we'd want a macro language that spits out the code above and below. - -// Semantics: first delete and then insert - -action("neutron:update_port") - -result(id) :- - neutron:port+(id, network_id, name, mac_address, device_id, device_owner, status, admin_state_up, tenant_id) - -// delete the old -neutron:port-(id, network_id_old, name_old, mac_address_old, device_id_old, device_owner_old, status_old, admin_state_up_old, tenant_id_old) :- - neutron:update_port(id, options), - neutron:port(id, network_id_old, name_old, mac_address_old, device_id_old, device_owner_old, status_old, admin_state_up_old, tenant_id_old) - -// add the new -neutron:port+(id, network_id, name, mac_address, device_id, device_owner, status, admin_state_up, tenant_id) :- - neutron:update_port(id, options), - neutron:port(id, network_id_old, name_old, mac_address_old, device_id_old, device_owner_old, status_old, admin_state_up_old, tenant_id_old), - // look up optional params -- old if not present - options:lookup(options, "network_id", network_id_old, network_id), - options:lookup(options, "name", name_old, name), - options:lookup(options, "mac_address", mac_address_old, mac_address), - options:lookup(options, "device_id", device_id_old, device_id), - options:lookup(options, "device_owner", device_owner_old, device_owner), - neutron:compute.update.tenant(options, tenant_id_old, tenant_id) - - -// Since options:value is a relation, we can handle sets natively. -// We don't have an ordering on those sets, though I could imagine -// making options:value ternary so that we at least have the index. -// This seems like a bad idea though--since we're then writing code -// that updates the index of port.ip, for example. -// However, we won't *generate* a single API call that adds multiple -// ports. Again, post-processing should be able to help; we just need -// to know how to combine multiple API calls into a single one: -// create_port(1, x), options:value(x,"ports","a") -// + create_port(1, y), options:value(y,"ports","b") -// -------------------------------------------------- -// create_port(1,z), options:value(z, "ports", "a"), -// options:value(z, "ports", "b") - -// Should be more careful here so as to not depend on the conflict -// resolution semantics: delete before insert. Arguably, if an action -// results in both the insertion and deletion of a tuple, then -// there's something wrong with the policy. 
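// Worked example (illustrative handles and addresses only; these atoms are
// not part of this policy): a request to move port "p1" onto IP 10.0.0.5
// would be encoded as
//   neutron:update_port("p1", "opts-2")
//   options:value("opts-2", "ip", "10.0.0.5")
// Given those facts, the rules below retract every existing
// neutron:port.ip("p1", _) row, insert neutron:port.ip("p1", "10.0.0.5")
// provided the address is in neutron:available_ip, return the old
// addresses to neutron:available_ip, and remove the new one from it.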
-
-// delete old IPs, if value included
-neutron:port.ip-(id, ip) :-
- neutron:update_port(id, options),
- options:present(options, "ip"),
- neutron:port.ip(id, ip)
-
-// add new IPs
-neutron:port.ip+(id, ip) :-
- neutron:update_port(id, options),
- options:value(options, "ip", ip),
- neutron:available_ip(ip)
-
-// old IPs become available
-neutron:available_ip+(ip) :-
- neutron:update_port(id, options),
- options:present(options, "ip"),
- neutron:port.ip(id, ip)
-
-// new IPs become unavailable
-neutron:available_ip-(ip) :-
- neutron:update_port(id, options),
- options:value(options, "ip", ip)
-
-// delete old groups, if value included
-neutron:port.security_group-(id, group) :-
- neutron:update_port(id, options),
- options:present(options, "security_group"),
- neutron:port.security_group(id, group)
-
-// add new security groups
-neutron:port.security_group+(id, group) :-
- neutron:update_port(id, options),
- options:value(options, "security_group", group)
-
-
-/////////////////////////////
-// Delete port
-
-action("neutron:delete_port")
-
-neutron:port-(id, network_id, name, mac_address, device_id, device_owner, status, admin_state_up, tenant_id) :-
- neutron:delete_port(id),
- neutron:port(id, network_id, name, mac_address, device_id, device_owner, status, admin_state_up, tenant_id)
-
-neutron:port.ip-(id,ip) :-
- neutron:delete_port(id),
- neutron:port.ip(id, ip)
-
-neutron:available_ip+(ip) :-
- neutron:delete_port(id),
- neutron:port.ip(id, ip)
-
-neutron:port.security_group-(id, group) :-
- neutron:delete_port(id),
- neutron:port.security_group(id, group)
-
-///////////////////////////////////////////////////////////
-// Subnets
-///////////////////////////////////////////////////////////
-
-// Tables:
-// neutron:subnet(id, name, network_id, gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)
-// neutron:subnet.allocation_pool(subnetid, start, end)
-
-// Modeling approximation: instead of creating a subnet and including
-// unchangeable allocation pools, we create a subnet and then execute
-// the action "neutron:assign_subnet_allocation_pools" several times
-
-/////////////////////////////
-// Create subnet
-
-action("neutron:create_subnet")
-result(id) :-
- neutron:subnet+(id, name, network_id, gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)
-
-// What do we do about the fact that in reality you must supply
-// a valid IP if you provide a gateway_ip? Could add that constraint,
-// but then our simulation will be *generating*
-// an IP address here WITHIN the logic. One option:
-// we extract constraints (builtins that are unground)
-// and then satisfy them when skolemizing.
-// We can always bail out since this is an approximation anyway.
-neutron:subnet+(id, name, network_id, gateway_ip, ip_version, cidr, enable_dhcp, tenant_id) :-
- neutron:create_subnet(network_id, cidr, options),
- options:lookup(options, "name", "", name),
- options:lookup(options, "gateway_ip", gateway_ip, gateway_ip),
- options:lookup(options, "ip_version", 4, ip_version),
- options:lookup(options, "enable_dhcp", "true", enable_dhcp),
- neutron:compute.create.tenant(options, tenant_id)
-
-action("neutron:assign_subnet_allocation_pools")
-neutron:subnet.allocation_pool+(id, start, end) :-
- neutron:assign_subnet_allocation_pools(id, start, end)
-
-
-/////////////////////////////
-// Update subnet
-
-action("neutron:update_subnet")
-
-neutron:subnet-(id, name, network_id, gateway_ip, ip_version, cidr, enable_dhcp, tenant_id) :-
- neutron:update_subnet(id, options),
- neutron:subnet(id, name, network_id, gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)
-
-neutron:subnet+(id, name, network_id, gateway_ip, ip_version, cidr, enable_dhcp, tenant_id) :-
- neutron:update_subnet(id, options),
- neutron:subnet(id, name_old, network_id_old, gateway_ip_old, ip_version, cidr, enable_dhcp_old, tenant_id_old),
- options:lookup(options, "name", name_old, name),
- options:lookup(options, "network_id", network_id_old, network_id),
- options:lookup(options, "gateway_ip", gateway_ip_old, gateway_ip),
- options:lookup(options, "enable_dhcp", enable_dhcp_old, enable_dhcp),
- neutron:compute.update.tenant(options, tenant_id_old, tenant_id)
-
-/////////////////////////////
-// Delete subnet
-
-action("neutron:delete_subnet")
-
-neutron:subnet-(id, name, network_id, gateway_ip, ip_version, cidr, enable_dhcp, tenant_id) :-
- neutron:delete_subnet(id),
- not neutron:some_allocated_ip(id),
- neutron:subnet(id, name, network_id, gateway_ip, ip_version, cidr, enable_dhcp, tenant_id)
-
-neutron:some_allocated_ip(subnet_id) :-
- neutron:port.ip(port_id, ip),
- neutron:subnet.allocation_pool(subnet_id, start, end),
- ip:ip_in_range(ip, start, end) // empty until we attach it
-
-
-
-///////////////////////////////////////////////////////////
-// Networks
-///////////////////////////////////////////////////////////
-
-// Tables:
-// neutron:network(id, name, status, admin_state, shared, tenant_id)
-// neutron:network.subnet(id, subnet)
-
-/////////////////////////////
-// Create network
-
-result(id) :-
- neutron:network+(id, name, status, admin_state, shared, tenant_id)
-
-action("neutron:create_network")
-neutron:network+(id, name, status, admin_state, shared, tenant_id) :-
- neutron:create_network(options),
- options:lookup(options, "name", "", name),
- options:lookup(options, "admin_state", "true", admin_state),
- options:lookup(options, "shared", "true", shared),
- neutron:compute.create.tenant(options, tenant_id)
-
-/////////////////////////////
-// Update network
-// Note: updating a network is not the same as deleting old and adding new
-// but that's how we have to model it. If we generate a remediation
-// with both a delete and an add, we need to postprocess it to create
-// an update. Of course, that requires knowing what the add/delete/update
-// actions are and how to combine an add and a delete to produce an update.
-// Annoying, but no way around it I can see. Maybe for the common case
-// we'd want a macro language that spits out the code above and below.
-
-// Semantics: first delete and then insert
-
-action("neutron:update_network")
-
-result(id) :-
- neutron:network+(id, name, status, admin_state, shared, tenant_id)
-
-// delete the old
-neutron:network-(id, name, status, admin_state, shared, tenant_id) :-
- neutron:update_network(id, options),
- neutron:network(id, name, status, admin_state, shared, tenant_id)
-
-// add the new
-neutron:network+(id, name, status, admin_state, shared, tenant_id) :-
- neutron:update_network(id, options),
- neutron:network(id, name_old, status, admin_state_old, shared_old, tenant_id_old),
- // look up optional params -- old if not present
- options:lookup(options, "name", name_old, name),
- options:lookup(options, "admin_state", admin_state_old, admin_state),
- options:lookup(options, "shared", shared_old, shared),
- neutron:compute.update.tenant(options, tenant_id_old, tenant_id)
-
-// Should be more careful here so as to not depend on the conflict
-// resolution semantics: delete before insert. Arguably, if a change
-// results in both the insertion and deletion of a tuple, then
-// there's something wrong with the policy.
-
-// delete old subnets, if value included
-neutron:network.subnet-(id, subnet) :-
- neutron:update_network(id, options),
- options:present(options, "subnet"),
- neutron:network.subnet(id, subnet)
-
-// add new subnets
-neutron:network.subnet+(id, subnet) :-
- neutron:update_network(id, options),
- options:value(options, "subnet", subnet)
-
-/////////////////////////////
-// Delete network
-// Can only be executed if no ports configured for network
-
-action("neutron:delete_network")
-
-neutron:network-(id, name, status, admin_state, shared, tenant_id) :-
- neutron:delete_network(id),
- not neutron:some_port_configured(id),
- neutron:network(id, name, status, admin_state, shared, tenant_id)
-
-neutron:network.subnet-(id,subnet) :-
- neutron:delete_network(id),
- not neutron:some_port_configured(id),
- neutron:network.subnet(id, subnet)
-
-// CASCADING DELETE -- WANT TO BE ABLE TO SAY THAT DELETE_NETWORK CAUSES
-// DELETE_SUBNET. CAN'T REALLY DO THAT. MAYBE WE WANT TO DO IT OUTSIDE.
-// neutron:delete_subnet*(subnet_id) :-
-// neutron:delete_network(id),
-// not some_port_configured(id),
-// neutron:network.subnet(id, subnet_id)
-
-neutron:some_port_configured(network) :-
- neutron:port+(id, network, name, mac_address, device_id, device_owner, status, admin_state_up, tenant_id)
diff --git a/examples/recursion b/examples/recursion
deleted file mode 100644
index 79d7dae6..00000000
--- a/examples/recursion
+++ /dev/null
@@ -1,4 +0,0 @@
-
-connected(x,y) :- link(x,y)
-connected(x,y) :- link(x,z), connected(z,y)
-
diff --git a/future-features.txt b/future-features.txt
deleted file mode 100644
index 9ef92177..00000000
--- a/future-features.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-* Basic Policy language implementation
-** Multi-threaded Datalog implementation
-** Bottom-up datalog evaluation
-** Query optimization
-** Materialized implementation: automated view selection/inlining
-
-* Enhanced Policy language
-** ActiveDirectory facade
-** Syntax improvements (modals like insert/delete)
-** Support for compound objects (safe, stratified recursion)
-** Richer support for describing actions in Enforcement policy
-** Modules
-
-* Policy structure
-** Multi-tenant
-** Multi-stakeholder (tenants, finance, operations, etc.)
-
-* Enforcement
-** Execution of (rich) actions
-** Carve out subpolicies and push to other components, e.g. Neutron
-** Add consultation with Congress to other OS components, e.g.
Nova/Neutron -** Proper remediation enumeration with Classification+Action policies -** Find ways of automatically choosing the proper remediation strategy (e.g. priorities/monotonicity) -** Give cloud owner way of configuring how proactive/reactive/etc. based on information from separate policies. - -* Libraries -** Data source drivers for common OS and non-OS components -** HIPAA, etc. encoding -** Ontologies for different sectors, e.g. finance - -* Policy Analysis -** Look for infinite loops through Enforcement policy (using Action policy) -** Compare Access control policy and Classification policy for redundancy -** Change impact analysis - -* Dashboard -** IDE for policy (different levels: raw-Datalog, AD, checkbox-based) -** List violations -** Explain violations (step-by-step tracing through policy) -** Simulate state change and action execution -** Enumerate remediations for a given violation - -* Architecture and API -** Formalize and implement full introspection and query APIs -** Distribute across multiple nodes -** Ensure Congress can use another Congress instance as data - -* Authentication and Access Control -** Add authentication and access controls on API/dashboard -** When remediating, which user(s) are executing actions? Does Congress need admin credentials for all its cloud services or are user credentials part of actions? Need proper storage for those credentials. Etc. - diff --git a/library/pause_disallowed_flavors.yaml b/library/pause_disallowed_flavors.yaml deleted file mode 100644 index 8b96875d..00000000 --- a/library/pause_disallowed_flavors.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -name: PauseBadFlavors -description: "Pause any server using a flavor that is not permitted" -rules: - - - comment: "User should customize this. Permitted flavors." - rule: permitted_flavor('m1.tiny') - - - comment: "User should customize this. Permitted flavors." - rule: permitted_flavor('m1.large') - - - rule: > - server_with_bad_flavor(id) :- nova:servers(id=id,flavor_id=flavor_id), - nova:flavors(id=flavor_id, name=flavor), not permitted_flavor(flavor) - - - comment: "Remediation: Pause any VM that shows up in the server_with_bad_flavor table" - rule: "execute[nova:servers.pause(id)] :- server_with_bad_flavor(id), nova:servers(id,status='ACTIVE')" \ No newline at end of file diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/notes/add_aodh_datasource-e0e3891a73f391d4.yaml b/releasenotes/notes/add_aodh_datasource-e0e3891a73f391d4.yaml deleted file mode 100644 index 9f207e02..00000000 --- a/releasenotes/notes/add_aodh_datasource-e0e3891a73f391d4.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added Aodh datasource driver to poll alarms data from aodh service diff --git a/releasenotes/notes/bp-lazy-datasource-6cc39bee817548de.yaml b/releasenotes/notes/bp-lazy-datasource-6cc39bee817548de.yaml deleted file mode 100644 index 5a0eeb93..00000000 --- a/releasenotes/notes/bp-lazy-datasource-6cc39bee817548de.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added lazy_tables flag in OpenStack datasource drivers, which enables - datasource drivers to postpone pulling data from real datasource services - until policy rules refer to the specified tables. 
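As a rough illustration of the lazy-table behavior described in the note
above (the --config pattern matches the datasource-creation commands used
elsewhere in this repository, but the exact key/value syntax for naming
lazy tables is an assumption here; consult the datasource driver
documentation for the authoritative form)::

    $ openstack congress datasource create nova nova_lazy \
        --config username=admin --config password=password \
        --config tenant_name=admin \
        --config auth_url=http://127.0.0.1:5000/v3 \
        --config lazy_tables=servers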
diff --git a/releasenotes/notes/ceilometer_alarms_fix-142b13092a779a5f.yaml b/releasenotes/notes/ceilometer_alarms_fix-142b13092a779a5f.yaml
deleted file mode 100644
index ff022deb..00000000
--- a/releasenotes/notes/ceilometer_alarms_fix-142b13092a779a5f.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-fixes:
- - Ceilometer driver alarms issue resolved. When Aodh is available,
- alarms work as expected. When Aodh is unavailable, an empty alarms
- list is reported.
-deprecations:
- - Ceilometer driver alarms table is now deprecated. Instead, the alarms
- table in Aodh driver should be used.
\ No newline at end of file
diff --git a/releasenotes/notes/cinder-volume-attribs-cd525393381b5838.yaml b/releasenotes/notes/cinder-volume-attribs-cd525393381b5838.yaml
deleted file mode 100644
index 8702e3c8..00000000
--- a/releasenotes/notes/cinder-volume-attribs-cd525393381b5838.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-prelude: >
-features:
- - The following Cinder volume attributes are now
- available through the Congress driver for Cinder:
- encrypted, availability_zone, replication_status,
- multiattach, snapshot_id, source_volid, consistencygroup_id,
- migration_status, attachments.
- - The Cinder schema version is set to 2.1, backward
- compatible with policy rules written under the
- previous Cinder driver data schema.
diff --git a/releasenotes/notes/haht-replicated-pe-affb7dcf83effd68.yaml b/releasenotes/notes/haht-replicated-pe-affb7dcf83effd68.yaml
deleted file mode 100644
index 3ee69143..00000000
--- a/releasenotes/notes/haht-replicated-pe-affb7dcf83effd68.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-prelude: >
- Congress now supports high-availability and high-query-throughput
- deployments with load-balanced policy engines and warm-standby data-source
- drivers.
-features:
- - Added support for replicated (load-balanced) policy engines for
- high-availability and high-query-throughput deployments. See HA-Overview
- and HA-Deployment documentation for more information.
- - Added support for warm-standby data-source drivers managed by Pacemaker.
- See HA-Overview and HA-Deployment documentation for more information.
-upgrade:
- - Added a new database table dstabledata to persist pushed data from
- a datasource. Necessary migration scripts are included in
- congress/db/migration/
\ No newline at end of file
diff --git a/releasenotes/notes/keystonev3-driver-fix-408ec81797bffeaf.yaml b/releasenotes/notes/keystonev3-driver-fix-408ec81797bffeaf.yaml
deleted file mode 100644
index 0e890c02..00000000
--- a/releasenotes/notes/keystonev3-driver-fix-408ec81797bffeaf.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-prelude: >
-upgrade:
- - In the keystonev3_driver (experimental) `users` table,
- the columns `description` and `email` have been
- removed because they are not present in the keystone
- V3 API response. These columns should be removed
- from existing policy rules referring to the `users`
- table. The `project_id` column has been replaced
- by `default_project_id` because the previous
- column name was incorrect. Named column references
- should be similarly replaced in existing policy
- rules referring to the `users` table.
diff --git a/releasenotes/notes/load-lib-policies-a5cca19f58f9030c.yaml b/releasenotes/notes/load-lib-policies-a5cca19f58f9030c.yaml
deleted file mode 100644
index aafce99c..00000000
--- a/releasenotes/notes/load-lib-policies-a5cca19f58f9030c.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-prelude: >
-upgrade:
- - A new config option `policy_library_path` is added to the [DEFAULT]
- section.
The string option specifies the directory from which
- Congress will load pre-written policies for easy activation later
- by an administrator.
- This option can be ignored if you do not want
- Congress to load pre-written policies from files. Due to MySQL limitations,
- the full path to each policy file cannot exceed 760 characters.
diff --git a/releasenotes/notes/namespaced-builtins-5e742106e90015bc.yaml b/releasenotes/notes/namespaced-builtins-5e742106e90015bc.yaml
deleted file mode 100644
index a98a0f0d..00000000
--- a/releasenotes/notes/namespaced-builtins-5e742106e90015bc.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-prelude: >
-features:
- - Builtins like plus, times, lt now have the
- namespace 'builtin'. For example, instead
- of writing 'plus(x,y,z)' you now write
- 'builtin:plus(x,y,z)'
-upgrade:
- - Policies using the bare version of builtins,
- such as 'plus(x,y,z)', should be modified to
- include the 'builtin' namespace, such as
- 'builtin:plus(x,y,z)'.
-deprecations:
- - Builtins without the 'builtin' namespace are
- now deprecated. They will be supported in the
- near term, but all policies should be modified
- to include the 'builtin:' prefix on all builtins.
- For example, use 'builtin:plus(x,y,z)' instead
- of 'plus(x,y,z)'.
diff --git a/releasenotes/notes/newton-other-notes-c885979502f3f540.yaml b/releasenotes/notes/newton-other-notes-c885979502f3f540.yaml
deleted file mode 100644
index e1ddf2b7..00000000
--- a/releasenotes/notes/newton-other-notes-c885979502f3f540.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-features:
- - Congress now supports multi-node deployment where different components
- (policy engine, API server, and datasource drivers) may be deployed in
- separate processes on separate hosts. See deployment documentation for
- more information.
-issues:
- - Ceilometer driver experiences an error when retrieving the list of alarms.
- The impact is that some Ceilometer data may not be updated in Congress.
- We are currently working with the Ceilometer team to find a resolution.
- Expect a resolution in the next minor release.
- - The specialized policy engine for VM placement (previously released as
- experimental) is not available in this release. Expect it to be available
- again in a future release.
diff --git a/releasenotes/notes/policy-lib-db-656f809410706e6a.yaml b/releasenotes/notes/policy-lib-db-656f809410706e6a.yaml
deleted file mode 100644
index 1cee0fb2..00000000
--- a/releasenotes/notes/policy-lib-db-656f809410706e6a.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-prelude: >
-upgrade:
- - A new database table `library_policies` is added;
- alembic migration scripts included.
diff --git a/releasenotes/notes/policy_name_unique_db_constraint-22d658e4b17e0388.yaml b/releasenotes/notes/policy_name_unique_db_constraint-22d658e4b17e0388.yaml
deleted file mode 100644
index 0da8a128..00000000
--- a/releasenotes/notes/policy_name_unique_db_constraint-22d658e4b17e0388.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-upgrade:
- - Added a new database table policiesdeleted to hold soft-deleted
- policies. table:policies column:name type changed from Text() to
- String(255) to support uniqueness constraint.
- Necessary migration scripts are included in congress/db/migration/.
- Migration aborts without effect if any existing policy name is longer than
- 255 characters.
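To make the upgrade described in namespaced-builtins-5e742106e90015bc.yaml
above concrete: a policy rule that uses a bare builtin, such as::

    p(x, z) :- q(x, y), plus(y, 1, z)

should be rewritten with the 'builtin' namespace (p and q are illustrative
table names)::

    p(x, z) :- q(x, y), builtin:plus(y, 1, z)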
diff --git a/releasenotes/notes/remove-nova-floatingips-74e2548d1e381e8b.yaml b/releasenotes/notes/remove-nova-floatingips-74e2548d1e381e8b.yaml deleted file mode 100644 index c395d22f..00000000 --- a/releasenotes/notes/remove-nova-floatingips-74e2548d1e381e8b.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -prelude: > -upgrade: - - | - In Nova driver, the `floating_IPs` table is removed because nova networking - had been deprecated and is now removed from nova client. - Workaround: replace in policy rules all references to the Nova - `floating_IPs` table by the Neutron `floating_IPs` table. - diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 8fe73759..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,277 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Congress Release Notes documentation build configuration file, created by -# sphinx-quickstart on Mon Sep 5 11:50:32 2016. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# openstackdocstheme options -repository_name = 'openstack/congress' -bug_project = 'congress' -bug_tag = '' -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Congress Release Notes' -copyright = u'2016, Congress developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -from congress.version import version_info as congress_version -# The short X.Y version. 
-version = congress_version.canonical_version_string() -# The full version, including alpha/beta/rc tags. -release = congress_version.version_string_with_vcs() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. 
-# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'CongressReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'CongressReleaseNotes.tex', - u'Congress Release Notes Documentation', - u'Congress developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'congressreleasenotes', u'Congress Release Notes Documentation', - [u'Congress developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'CongressReleaseNotes', u'Congress Release Notes Documentation', - u'Congress developers', 'CongressReleaseNotes', - 'One line description of project.', 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. 
-# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 5739c845..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================================================ -Welcome to Congress Release Notes documentation! -================================================ - -Contents -======== - -.. toctree:: - :maxdepth: 2 - - unreleased - ocata - newton - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index 97036ed2..00000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Newton Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f42..00000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 875030f9..00000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================ -Current Series Release Notes -============================ - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 5fa45aa0..00000000 --- a/requirements.txt +++ /dev/null @@ -1,42 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -Babel!=2.4.0,>=2.3.4 # BSD -eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT -jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -PuLP>=1.4.1 # MIT -keystoneauth1>=2.21.0 # Apache-2.0 -keystonemiddleware>=4.12.0 # Apache-2.0 -Paste # MIT -PasteDeploy>=1.5.0 # MIT -pbr!=2.1.0,>=2.0.0 # Apache-2.0 -aodhclient>=0.7.0 # Apache-2.0 -python-keystoneclient>=3.8.0 # Apache-2.0 -python-heatclient>=1.6.1 # Apache-2.0 -python-monascaclient>=1.1.0 # Apache-2.0 -python-muranoclient>=0.8.2 # Apache-2.0 -python-novaclient>=9.0.0 # Apache-2.0 -python-neutronclient>=6.3.0 # Apache-2.0 -python-ceilometerclient>=2.5.0 # Apache-2.0 -python-cinderclient>=2.1.0 # Apache-2.0 -python-swiftclient>=3.2.0 # Apache-2.0 -python-ironicclient>=1.14.0 # Apache-2.0 -alembic>=0.8.10 # MIT -python-dateutil>=2.4.2 # BSD -python-glanceclient>=2.7.0 # Apache-2.0 -Routes>=2.3.1 # MIT -six>=1.9.0 # MIT -oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.context>=2.14.0 # Apache-2.0 -oslo.db>=4.24.0 # Apache-2.0 -oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 -oslo.policy>=1.23.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0 -oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.20.0 # Apache-2.0 -oslo.middleware>=3.27.0 # Apache-2.0 -oslo.vmware>=2.17.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -WebOb>=1.7.1 # MIT diff --git a/run_tests.sh b/run_tests.sh deleted file mode 100755 index 47b0c7ae..00000000 --- a/run_tests.sh +++ /dev/null @@ -1,225 +0,0 @@ -#!/bin/bash - -set -eu - -function usage { - echo "Usage: $0 [OPTION]..." - echo "Run Congress's test suite(s)" - echo "" - echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" - echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" - echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" - echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." - echo " -n, --no-recreate-db Don't recreate the test database." - echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." - echo " -u, --update Update the virtual environment with any newer package versions" - echo " -p, --pep8 Just run PEP8 and HACKING compliance check" - echo " -P, --no-pep8 Don't run static code checks" - echo " -c, --coverage Generate coverage report" - echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." - echo " -h, --help Print this usage message" - echo " --virtual-env-path Location of the virtualenv directory" - echo " Default: \$(pwd)" - echo " --virtual-env-name Name of the virtualenv directory" - echo " Default: .venv" - echo " --tools-path Location of the tools directory" - echo " Default: \$(pwd)" - echo "" - echo "Note: with no options specified, the script will try to run the tests in a virtual environment," - echo " If no virtualenv is found, the script will ask if you would like to create one. If you " - echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
- exit -} - -function process_options { - i=1 - while [ $i -le $# ]; do - case "${!i}" in - -h|--help) usage;; - -V|--virtual-env) always_venv=1; never_venv=0;; - -N|--no-virtual-env) always_venv=0; never_venv=1;; - -s|--no-site-packages) no_site_packages=1;; - -r|--recreate-db) recreate_db=1;; - -n|--no-recreate-db) recreate_db=0;; - -f|--force) force=1;; - -u|--update) update=1;; - -p|--pep8) just_pep8=1;; - -P|--no-pep8) no_pep8=1;; - -c|--coverage) coverage=1;; - -d|--debug) debug=1;; - --virtual-env-path) - (( i++ )) - venv_path=${!i} - ;; - --virtual-env-name) - (( i++ )) - venv_dir=${!i} - ;; - --tools-path) - (( i++ )) - tools_path=${!i} - ;; - -*) testropts="$testropts ${!i}";; - *) testrargs="$testrargs ${!i}" - esac - (( i++ )) - done -} - -tool_path=${tools_path:-$(pwd)} -venv_path=${venv_path:-$(pwd)} -venv_dir=${venv_name:-.venv} -with_venv=tools/with_venv.sh -always_venv=0 -never_venv=0 -force=0 -no_site_packages=0 -installvenvopts= -testrargs= -testropts= -wrapper="" -just_pep8=0 -no_pep8=0 -coverage=0 -debug=0 -recreate_db=1 -update=0 - -LANG=en_US.UTF-8 -LANGUAGE=en_US:en -LC_ALL=C - -process_options $@ -# Make our paths available to other scripts we call -export venv_path -export venv_dir -export venv_name -export tools_dir -export venv=${venv_path}/${venv_dir} - -if [ $no_site_packages -eq 1 ]; then - installvenvopts="--no-site-packages" -fi - - -function run_tests { - # Cleanup *pyc - ${wrapper} find . -type f -name "*.pyc" -delete - - if [ $debug -eq 1 ]; then - if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then - # Default to running all tests if specific test is not - # provided. - testrargs="discover ./congress/" - fi - ${wrapper} python -m testtools.run $testropts $testrargs - - # Short circuit because all of the testr and coverage stuff - # below does not make sense when running testtools.run for - # debugging purposes. - return $? - fi - - if [ $coverage -eq 1 ]; then - TESTRTESTS="$TESTRTESTS --coverage" - else - TESTRTESTS="$TESTRTESTS --slowest" - fi - - # Just run the test suites in current environment - set +e - testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` - TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testropts $testrargs'" - OS_TEST_PATH=`echo $testrargs|grep -o 'congress\.tests[^[:space:]:]*\+'|tr . /` - if [ -d "$OS_TEST_PATH" ]; then - wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper" - elif [ -d "$(dirname $OS_TEST_PATH)" ]; then - wrapper="OS_TEST_PATH=$(dirname $OS_TEST_PATH) $wrapper" - fi - echo "Running \`${wrapper} $TESTRTESTS\`" - bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit" - RESULT=$? - set -e - - copy_subunit_log - - if [ $coverage -eq 1 ]; then - echo "Generating coverage report in covhtml/" - # Don't compute coverage for common code, which is tested elsewhere - ${wrapper} coverage combine - ${wrapper} coverage html --include='congress/*' -d covhtml -i - fi - - return $RESULT -} - -function copy_subunit_log { - LOGNAME=`cat .testrepository/next-stream` - LOGNAME=$(($LOGNAME - 1)) - LOGNAME=".testrepository/${LOGNAME}" - cp $LOGNAME subunit.log -} - -function run_pep8 { - echo "Running flake8 ..." - ${wrapper} flake8 -} - - -TESTRTESTS="lockutils-wrapper python setup.py testr" - -if [ $never_venv -eq 0 ] -then - # Remove the virtual environment if --force used - if [ $force -eq 1 ]; then - echo "Cleaning virtualenv..." - rm -rf ${venv} - fi - if [ $update -eq 1 ]; then - echo "Updating virtualenv..." 
- python tools/install_venv.py $installvenvopts - fi - if [ -e ${venv} ]; then - wrapper="${with_venv}" - else - if [ $always_venv -eq 1 ]; then - # Automatically install the virtualenv - python tools/install_venv.py $installvenvopts - wrapper="${with_venv}" - else - echo -e "No virtual environment found...create one? (Y/n) \c" - read use_ve - if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then - # Install the virtualenv and run the test suite in it - python tools/install_venv.py $installvenvopts - wrapper=${with_venv} - fi - fi - fi -fi - -# Delete old coverage data from previous runs -if [ $coverage -eq 1 ]; then - ${wrapper} coverage erase -fi - -if [ $just_pep8 -eq 1 ]; then - run_pep8 - exit -fi - -if [ $recreate_db -eq 1 ]; then - rm -f tests.sqlite -fi - -run_tests - -# NOTE(sirp): we only want to run pep8 when we're running the full-test suite, -# not when we're running tests individually. To handle this, we need to -# distinguish between options (testropts), which begin with a '-', and -# arguments (testrargs). -if [ -z "$testrargs" ]; then - if [ $no_pep8 -eq 0 ]; then - run_pep8 - fi -fi diff --git a/scripts/manual_testing/doctor_pushdriver.sh b/scripts/manual_testing/doctor_pushdriver.sh deleted file mode 100755 index 9c705783..00000000 --- a/scripts/manual_testing/doctor_pushdriver.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -x - -############################################################################# -### doctor push data driver manual testing script ### -# -# Note: -# The following OpenStack environment variables must set first: -# OS_USERNAME, OS_PASSWORD, OS_PROJECT_NAME, OS_TENANT_NAME, OS_AUTH_URL -# For example by running (under a devstack setup) -# $ source devstack/openrc admin admin -############################################################################# - -UUID=`openstack congress datasource create doctor doctor -f value | sed -n '5p'` - -openstack congress datasource row update doctor events '[{"id": "0123-4567-89ab", "time": "2016-02-22T11:48:55Z", "type": "compute.host.down", "details": {"hostname": "compute1", "status": "down", "monitor": "zabbix1", "monitor_event_id": "111"}}]' - -openstack congress datasource row list doctor events - -openstack congress datasource row update $UUID events '[{"id": "1123-4567-89ab", "time": "2016-02-22T11:48:55Z", "type": "compute.host.down", "details": {"hostname": "compute2", "status": "down", "monitor": "zabbix2", "monitor_event_id": "222"}}]' - -openstack congress datasource row list doctor events - -openstack congress datasource delete doctor diff --git a/scripts/manual_testing/general.sh b/scripts/manual_testing/general.sh deleted file mode 100755 index 96e9d7af..00000000 --- a/scripts/manual_testing/general.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash -x - -############################################################################# -### exercise all the congress CLI commands ### -# except datasource push update command -# -# Note: -# The following OpenStack environment variables must set first: -# OS_USERNAME, OS_PASSWORD, OS_PROJECT_NAME, OS_TENANT_NAME, OS_AUTH_URL -# For example by running (under a devstack setup) -# $ source devstack/openrc admin admin -############################################################################# - -openstack congress version list -UUID=`openstack congress datasource create --config username=admin --config tenant_name=admin --config auth_url=http://127.0.0.1:5000/v3 --config password=password --config poll_time=5 nova nova2 -f value | sed -n 
'5p'` -openstack congress datasource actions show nova2 -openstack congress datasource actions show $UUID -openstack congress datasource list -openstack congress datasource request-refresh nova2 -openstack congress datasource request-refresh $UUID -openstack congress datasource schema show nova2 -openstack congress datasource schema show $UUID -openstack congress datasource status show nova2 -openstack congress datasource status show $UUID -openstack congress datasource table list nova2 -openstack congress datasource table list $UUID -openstack congress datasource table schema show nova2 flavors -openstack congress datasource table schema show $UUID flavors -openstack congress datasource table show nova2 flavors -openstack congress datasource table show $UUID flavors -openstack congress driver config show nova -openstack congress driver list -openstack congress driver schema show nova -openstack congress datasource row list nova flavors -openstack congress datasource row list nova2 flavors -openstack congress datasource row list $UUID flavors -openstack congress datasource delete nova2 - -PUUID=`openstack congress policy create policy1 -f value | sed -n '3p'` -openstack congress policy show policy1 -openstack congress policy show $PUUID -openstack congress policy list -UUID=`openstack congress policy rule create policy1 'temp(1,2)' -f value | sed -n '2p'` -openstack congress policy rule show policy1 $UUID -openstack congress policy rule delete policy1 $UUID -# UUID=`openstack congress policy rule create $PUUID 'temp(1,2)' -f value | sed -n '2p'` -# openstack congress policy rule show $PUUID $UUID -# openstack congress policy rule delete $PUUID $UUID -openstack congress policy rule create policy1 'q(1,2)' -openstack congress policy rule list policy1 # 1 rules -# openstack congress policy rule list $PUUID # 1 rules -openstack congress policy rule create policy1 'q(2,3)' -openstack congress policy rule create policy1 'p(x,y) :- q(x,y), r(y,x)' -openstack congress policy row list policy1 q # 2 tuples -# openstack congress policy row list $PUUID q # 2 tuples -openstack congress policy row list policy1 p # should be empty -openstack congress policy rule create policy1 'r(2,1)' -openstack congress policy rule create policy1 'r(3,2)' -openstack congress policy rule create policy1 'r(5,7)' -openstack congress policy rule create policy1 'r(9,9)' -openstack congress policy rule create policy1 'q(5,7)' - -openstack congress policy table list policy1 -# openstack congress policy table list $PUUID -openstack congress policy table show policy1 'p' -# openstack congress policy table show $PUUID 'p' -openstack congress policy row list policy1 q # 3 tuples -openstack congress policy row list policy1 r # 4 tuples -openstack congress policy row list policy1 p # 2 tuples -openstack congress policy rule create policy1 'p(x,y) :- r(x,y), NOT equal(x,9)' -openstack congress policy row list policy1 p # 5 tuples -openstack congress policy rule create policy1 's(x) :- nova:flavors(vcpus=x), p(x,y)' -openstack congress policy rule create policy1 't(x) :- nova:flavors(vcpus=x), NOT s(x)' -openstack congress policy row list policy1 s # (2),(1) env dep -openstack congress policy row list policy1 t # (4), (8) env dep -openstack congress policy create policy2 -openstack congress policy rule create policy2 'a(1,2)' -openstack congress policy row list policy2 a -openstack congress policy table list policy2 -openstack congress policy rule create policy1 'u(x,y) :- q(x,y), NOT policy2:a(x,y)' -openstack congress policy row 
list policy1 u # 2 tuples -openstack congress policy delete policy2 - -# restart openstack congress -openstack congress policy row list policy1 q # 3 tuples -openstack congress policy row list policy1 r # 4 tuples -openstack congress policy row list policy1 p # 5 tuples -openstack congress policy row list policy1 s # (2),(1) env dep -openstack congress policy row list policy1 t # (4), (8) env dep - -# test execute -openstack congress policy rule create policy1 'execute[nova:flavors.delete(id)] :- nova:flavors(id=id,vcpus=x),s(x), q(10,10)' # change to action -openstack congress policy row list policy1 s -# TODO make action undoable. undo. - -openstack congress datasource delete nova -UUID=`openstack congress datasource create --config username=admin --config tenant_name=admin --config auth_url=http://127.0.0.1:5000/v3 --config password=password --config poll_time=5 nova nova -f value | sed -n '5p'` -openstack congress datasource row list nova flavors -openstack congress policy rule create policy1 'q(10,10)' -openstack congress policy row list policy1 s # 0 tuples, could take a little time to realize -openstack congress datasource row list $UUID flavors # removed all entries with vcpus 1,2 - -# test simulate -openstack congress policy rule create policy1 'simA(x) :- simB(x)' -openstack congress policy simulate policy1 "simA(x)" "simB+(1)" action # 1 tuple -# openstack congress policy simulate $PUUID "simA(x)" "simB+(1)" action # 1 tuple - -openstack congress policy delete $PUUID -openstack congress policy list # action, classification diff --git a/scripts/ocf/congress-datasource b/scripts/ocf/congress-datasource deleted file mode 100644 index c1b46ae6..00000000 --- a/scripts/ocf/congress-datasource +++ /dev/null @@ -1,313 +0,0 @@ -#!/bin/sh -# -# -# OpenStack Congress DataSource Node -# -# Description: Manages an OpenStack Congress DataSource Node as an HA resource -# -# Authors: Masahito Muroi -# -# Support: openstack-dev@lists.openstack.org -# License: Apache Software License (ASL) 2.0 -# -# -# See usage() function below for more details ... -# -# OCF instance parameters: -# OCF_RESKEY_binary -# OCF_RESKEY_config -# OCF_RESKEY_pid -####################################################################### -# Initialization: - -: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} -. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs - -####################################################################### - -# Fill in some defaults if no values are specified - -OCF_RESKEY_binary_default="/usr/local/bin/congress-server" -OCF_RESKEY_config_default="/etc/congress/congress.conf" -OCF_RESKEY_node_id_default="datasource-node" -OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" - -: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} -: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} -: ${OCF_RESKEY_node_id=${OCF_RESKEY_node_id_default}} -: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} - -####################################################################### - -usage() { - cat < - - -1.0 - - -Resource agent for the OpenStack Congress DataSource Node -May manage a dsenode that all datasources run on. 
- -Manages the OpenStack DataSource Node - - - - -Location of the OpenStack Congress binary - -OpenStack Congress binary - - - - - -Location of the OpenStack Congress configuration file - -OpenStack Congress config file - - - - - -ID of the DataSource node - -DataSource Node ID - - - - - -The pid file to use for this OpenStack Congress instance - -OpenStack Congress pid file - - - - - -Additional parameters to pass on to the OpenStack Congress - -Additional parameters for Congress - - - - - - - - - - - - - - -END -} - -####################################################################### -# Functions invoked by resource manager actions - -congress_validate() { - local rc - - check_binary $OCF_RESKEY_binary - - if [ ! -f $OCF_RESKEY_config ]; then - if ! ocf_is_probe; then - ocf_log err "Config $OCF_RESKEY_config doesn't exist" - return $OCF_ERR_INSTALLED - fi - ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" - fi - - true -} - -congress_status() { - local pid - local rc - - if [ ! -f $OCF_RESKEY_pid ]; then - ocf_log info "OpenStack Congress is not running" - return $OCF_NOT_RUNNING - else - pid=`cat $OCF_RESKEY_pid` - fi - - ocf_run -warn kill -s 0 $pid - rc=$? - if [ $rc -eq 0 ]; then - return $OCF_SUCCESS - else - ocf_log info "Old PID file found, but OpenStack Congress" \ - "is not running" - return $OCF_NOT_RUNNING - fi -} - -congress_monitor() { - local rc - local pid - - congress_status - rc=$? - - # If status returned anything but success, return that immediately - if [ $rc -ne $OCF_SUCCESS ]; then - return $rc - fi - - ocf_pidfile_status $OCF_RESKEY_pid - rc=$? - if [ $rc -ne 0 ]; then - pid=`cat $OCF_RESKEY_pid` - ocf_log error "This pid: $pid from Congress is not running." - return $OCF_NOT_RUNNING - fi - - ocf_log debug "OpenStack Congress monitor succeeded" - return $OCF_SUCCESS -} - -congress_start() { - local rc - - congress_status - rc=$? - if [ $rc -eq $OCF_SUCCESS ]; then - ocf_log info "OpenStack Congress already running" - return $OCF_SUCCESS - fi - - su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} \ - --config-file=$OCF_RESKEY_config --datasources \ - --node-id=$OCF_RESKEY_node_id \ - $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' \ - > $OCF_RESKEY_pid - - # Spin waiting for the server to come up. - # Let the CRM/LRM time us out if required - while true; do - congress_monitor - rc=$? - [ $rc -eq $OCF_SUCCESS ] && break - if [ $rc -ne $OCF_NOT_RUNNING ]; then - ocf_log err "OpenStack Congress start failed" - exit $OCF_ERR_GENERIC - fi - sleep 1 - done - - ocf_log info "OpenStack Congress started" - return $OCF_SUCCESS -} - -congress_stop() { - local rc - local pid - - congress_status - rc=$? - if [ $rc -eq $OCF_NOT_RUNNING ]; then - ocf_log info "OpenStack Congress already stopped" - return $OCF_SUCCESS - fi - - # Try SIGTERM - pid=`cat $OCF_RESKEY_pid` - ocf_run kill -s TERM $pid - rc=$? - if [ $rc -ne 0 ]; then - ocf_log err "OpenStack Congress couldn't be stopped" - exit $OCF_ERR_GENERIC - fi - - # stop waiting - shutdown_timeout=15 - if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then - shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) - fi - count=0 - while [ $count -lt $shutdown_timeout ]; do - congress_status - rc=$? - if [ $rc -eq $OCF_NOT_RUNNING ]; then - break - fi - count=`expr $count + 1` - sleep 1 - ocf_log debug "OpenStack Congress still hasn't stopped yet. Waiting ..." - done - - congress_status - rc=$? 
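-    # Final status check: if the daemon outlived shutdown_timeout
-    # despite SIGTERM, escalate to SIGKILL below.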
- if [ $rc -ne $OCF_NOT_RUNNING ]; then - # SIGTERM didn't help either, try SIGKILL - ocf_log info "OpenStack Congress failed to stop" \ - "after ${shutdown_timeout}s using SIGTERM. Trying SIGKILL ..." - ocf_run kill -s KILL $pid - fi - - ocf_log info "OpenStack Congress stopped" - - rm -f $OCF_RESKEY_pid - - return $OCF_SUCCESS -} - -####################################################################### - -case "$1" in - meta-data) - meta_data - exit $OCF_SUCCESS - ;; - usage|help) - usage - exit $OCF_SUCCESS - ;; -esac - -# Anything except meta-data and help must pass validation -congress_validate || exit $? - -# What kind of method was invoked? -case "$1" in - start) - congress_start - ;; - stop) - congress_stop - ;; - status) - congress_status - ;; - monitor) - congress_monitor - ;; - validate-all) - ;; - *) - usage - exit $OCF_ERR_UNIMPLEMENTED - ;; -esac diff --git a/scripts/preload-policies/output_policy_command.py b/scripts/preload-policies/output_policy_command.py deleted file mode 100644 index 68131953..00000000 --- a/scripts/preload-policies/output_policy_command.py +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2017 NTT All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script outputs pre definded policies and rules in a config file. 
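-# (The script only prints the resulting openstack CLI commands to
-# stdout; pipe them through a shell to actually create the policies.)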
-# The config file content should look like the following:
-# {
-#     "policies":
-#     [
-#         {
-#             "name": "sample_policy",
-#             "rules": [
-#                 {
-#                     "rule": "p(x):- q(x)",
-#                     "name": "rule1"
-#                 },
-#                 {
-#                     "rule": "q(1)"
-#                 },
-#                 {
-#                     "rule": "q('sample-row')"
-#                 },
-#                 {
-#                     "rule": "server_ids(x):- nova:servers(id=x)"
-#                 }
-#             ]
-#         }
-#     ]
-# }
-#
-# Config file option:
-#   - the "name" key in a rule object is optional
-#
-# A sample config file is located at
-# /path/to/congress/scripts/preload-policies/policy-rules.json.sample
-
-
-import argparse
-import json
-import sys
-
-OPENSTACK_COMMAND = 'openstack congress'
-POLICY_CREATE = 'policy create'
-RULE_CREATE = 'policy rule create'
-NAME_OPTION = '--name %s'
-
-
-def load_policies(policy_file):
-    # Read the path that was passed in (the original read the global
-    # 'args' instead of this parameter).
-    with open(policy_file, 'r') as f:
-        data = f.read()
-    policies = json.loads(data)
-    return policies
-
-
-def main(args):
-    defined_policies = load_policies(args.policy_file)
-
-    for p in defined_policies['policies']:
-        # create the defined policy
-        sys.stdout.write(' '.join([OPENSTACK_COMMAND, POLICY_CREATE,
-                                   p['name'], '\n']))
-        # create the defined rules
-        for r in p['rules']:
-            cmd_string = [OPENSTACK_COMMAND, RULE_CREATE]
-            if r.get('name'):
-                cmd_string.append(NAME_OPTION % r['name'])
-            cmd_string.extend([p['name'], '"%s"' % r['rule'], '\n'])
-            sys.stdout.write(' '.join(cmd_string))
-
-
-parser = argparse.ArgumentParser(description='Output pre-defined policies in '
-                                 'openstack command style.')
-parser.add_argument('policy_file', type=str,
-                    help='Path to predefined policies and rules.')
-
-
-if __name__ == '__main__':
-    args = parser.parse_args()
-    main(args)
diff --git a/scripts/preload-policies/policy-rules.json.sample b/scripts/preload-policies/policy-rules.json.sample
deleted file mode 100644
index e3eaeedf..00000000
--- a/scripts/preload-policies/policy-rules.json.sample
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "policies":
-    [
-        {
-            "name": "sample_policy",
-            "rules": [
-                {
-                    "rule": "p(x):- q(x)",
-                    "name": "rule1"
-                },
-                {
-                    "rule": "q(1)"
-                },
-                {
-                    "rule": "q('sample-row')"
-                },
-                {
-                    "rule": "server_ids(x):- nova:servers(id=x)"
-                }
-            ]
-        }
-    ]
-}
\ No newline at end of file
diff --git a/scripts/sample_process_config.json b/scripts/sample_process_config.json
deleted file mode 100644
index 51c7aae9..00000000
--- a/scripts/sample_process_config.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "datasource_drivers":
-    [
-        { "cmd": "ls",
-          "args": ["-l"],
-          "restart_delay": 5,
-          "name": "foo"
-        },
-
-        { "cmd": "watch",
-          "args": ["ls", "-l", "/tmp"],
-          "name": "bar"
-        }
-    ],
-    "output_directory": "/tmp/runner",
-    "poll_interval_ms": 100
-}
diff --git a/scripts/start_process.py b/scripts/start_process.py
deleted file mode 100644
index 9e2901a7..00000000
--- a/scripts/start_process.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright (c) 2015 VMware, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# This script starts a number of child processes defined in a config file.
-# If restart_delay is not None, this will restart a crashed process after -# restart_delay seconds. The config file content should look like this: -# { -# "datasource_drivers": -# [ -# { "cmd": "ls", -# "args": ["-l"], -# "restart_delay": 5, -# "name": "foo" -# }, -# -# { "cmd": "watch", -# "args": ["ls", "-l", "/tmp"], -# "name": "bar" -# } -# ], -# "output_directory": "/tmp/runner" -# } -# -# Config file requirements: the "name" fields must be unique within a config -# file. -# -import json -import os -import signal -import subprocess -import sys -import time - -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) - -children = [] - - -class ProcessEntry(object): - def __init__(self, process, cmd, args, restart_delay, name): - self.process = process - self.cmd = cmd - self.args = args - self.restart_delay = restart_delay - self.time_of_death = None - self.name = name - - -def _pid_file_name(name): - return name + '.pid' - - -def stop_process(name, output_dir): - filename = _pid_file_name(name) - if os.path.isfile(os.path.join(output_dir, filename)): - try: - f = open(os.path.join(output_dir, filename)) - pid = int(f.read().strip()) - LOG.debug("Killing process %s, pid %d", name, pid) - os.kill(pid, signal.SIGKILL) - f.close() - os.unlink(filename) - except ValueError: - LOG.debug("Could not parse pid file: %s (process %s)", filename, - name) - except OSError: - LOG.debug("No such process %s, pid %d", pid) - except IOError: - LOG.debug("Failed to stop process %s, pid %d", name, pid) - - -def start_process(cmd, args, restart_delay, name, output_dir): - out = open(os.path.join(output_dir, name + '.stdout'), 'w') - err = open(os.path.join(output_dir, name + '.stderr'), 'w') - - LOG.debug("Starting process (" + name + "): " + cmd + ' ' + ' '.join(args)) - p = subprocess.Popen([cmd] + args, stdout=out, stderr=err) - LOG.debug("Started as pid %d", p.pid) - f = open(os.path.join(output_dir, _pid_file_name(name)), 'w') - f.write('%d\n' % p.pid) - f.close() - children.append(ProcessEntry(p, cmd, args, restart_delay, name)) - - -def wait_all(output_dir, poll_interval_ms): - LOG.debug("Monitoring %d children", len(children)) - while(True): - for c in children: - c.process.poll() - if c.process.returncode is not None: - if c.time_of_death is None: - LOG.debug("pid %d ended at %s with return code %d, " - "process %s", c.process.pid, c.time_of_death, - c.process.returncode, - c.name) - c.time_of_death = time.time() - if c.restart_delay is not None: - if c.time_of_death + c.restart_delay < time.time(): - LOG.debug("Restarting " + c.cmd + ' ' + - ' '.join(c.args)) - children.remove(c) - start_process(c.cmd, c.args, c.restart_delay, - c.name, output_dir) - else: - children.remove(c) - - if not children: - break - time.sleep(poll_interval_ms/1000) - - -def main(): - if len(sys.argv) != 2: - sys.stderr.write("usage: start_process.py config_file\n") - sys.exit(1) - - with open(sys.argv[1]) as f: - txt = f.read() - f.close() - config = json.loads(txt) - - if os.path.exists(config['output_directory']): - if os.path.isfile(config['output_directory']): - sys.stderr.write('output_directory %s already exists as ' - 'a file\n', config['output_directory']) - sys.exit(1) - else: - os.makedirs(config['output_directory']) - - names = set() - for driver in config['datasource_drivers']: - if driver['name'] in names: - sys.stderr.write("Duplicate name '%s' in config file\n" - % driver['name']) - sys.exit(1) - names.add(driver['name']) - stop_process(driver['name'], config['output_directory']) - - 
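-    # Every stale instance from a previous run has been stopped above;
-    # now launch each configured driver and monitor it via wait_all().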
for driver in config['datasource_drivers']: - start_process(driver['cmd'], driver['args'], - driver.get('restart_delay'), driver['name'], - config['output_directory']) - wait_all(config['output_directory'], config['poll_interval_ms']) - - -if __name__ == '__main__': - main() diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index a8f9ff9c..00000000 --- a/setup.cfg +++ /dev/null @@ -1,79 +0,0 @@ -[metadata] -name = congress -summary = Congress: The open policy framework for the cloud. -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://docs.openstack.org/developer/congress/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -[files] -packages = - congress - congress_tempest_tests - antlr3runtime/Python/antlr3 - antlr3runtime/Python3/antlr3 - -[global] -setup-hooks = - pbr.hooks.setup_hook - -# FIXME: fix the docstrings to comply with format. Re-enable this module -# when warning-errors resolved. -# [pbr] -# autodoc_index_modules = True -# autodoc_exclude_modules = -# congress.db.migration.alembic_migrations.* -# congress_tempest_tests.* -# thirdparty.* -# antlr3runtime.* -# warnerrors = True - -[entry_points] -oslo.config.opts = - congress = congress.opts:list_opts - -oslo.config.opts.defaults = - congress = congress.common.config:set_config_defaults - -console_scripts = - congress-server = congress.server.congress_server:main - congress-db-manage = congress.db.migration.cli:main - -tempest.test_plugins = - congress_tests = congress_tempest_tests.plugin:CongressTempestPlugin - - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source -warning-is-error = 1 - -[upload_sphinx] -upload-dir = doc/build/html - -[compile_catalog] -directory = congress/locale -domain = congress - -[update_catalog] -domain = congress -output_dir = congress/locale -input_file = congress/locale/congress.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = congress/locale/congress.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index 566d8443..00000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. 
-# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 7e5a7590..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,23 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 - -coverage!=4.4,>=4.0 # Apache-2.0 -fixtures>=3.0.0 # Apache-2.0/BSD -mock>=2.0 # BSD -mox3!=0.19.0,>=0.7.0 # Apache-2.0 -oslotest>=1.10.0 # Apache-2.0 -requests-mock>=1.1 # Apache-2.0 -python-subunit>=0.0.18 # Apache-2.0/BSD -testrepository>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=1.4.0 # MIT -tenacity>=3.2.1 # Apache-2.0 - -# Doc requirements -openstackdocstheme>=1.11.0 # Apache-2.0 -sphinx>=1.6.2 # BSD - -# release note requirements -reno!=2.3.1,>=1.8.0 # Apache-2.0 diff --git a/thirdparty-requirements.txt b/thirdparty-requirements.txt deleted file mode 100644 index 648af028..00000000 --- a/thirdparty-requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -# This file contains a list of optional requirements that are not in the -# official global-requirements.txt -python-cloudfoundryclient>=1.0.2 diff --git a/thirdparty/antlr3-antlr-3.5/.gitignore b/thirdparty/antlr3-antlr-3.5/.gitignore deleted file mode 100644 index aa2e3293..00000000 --- a/thirdparty/antlr3-antlr-3.5/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# Maven build folders -target/ - -# IntelliJ project files -*.iml -*.ipr -*.iws -.idea/ - -# Eclipse project files -.project -.classpath -.settings/ - -# NetBeans user configuration -nbactions.xml -nb-configuration.xml - -# Python runtime files -*.py[co] diff --git a/thirdparty/antlr3-antlr-3.5/CHANGES.txt b/thirdparty/antlr3-antlr-3.5/CHANGES.txt deleted file mode 100644 index 0770dcb5..00000000 --- a/thirdparty/antlr3-antlr-3.5/CHANGES.txt +++ /dev/null @@ -1,3564 +0,0 @@ -ANTLR 3.5 Release -January 4, 2012 - -Terence Parr, parrt at cs usfca edu -ANTLR project lead and supreme dictator for life -University of San Francisco - -CHANGES - -January 4 2012 -- release 3.5 - -January 3, 2012 - -* Improve error reporting and recovery for STRAY_BRACKET, fixes antlr/antlr3#42 -* Do not write output files if errors were reported, fixes antlr/antlr3#61 -* Fix AST operator on labeled set of terminals e.g. x=(A|B)^ -* Fix labeled set of terminals with rewrite e.g. x=(A|B) -> $x - -December 1, 2012 - -* Added error msg for .. in parser - -September 17, 2012 - -* Add Gokulakannan Somasundaram's C++ target based upon C target. - Use language=Cpp in options. It's a header-only library, runtime/Cpp/include, - so installation is not required. - -September 16, 2012 - -* Python 3.3 target added by Benjamin Wolf based upon Python 2 target - https://github.com/antlr/antlr3/pull/23 - -September 15, 2012 - -* LookaheadStream bug fixes; - https://github.com/antlr/antlr3/pull/21 - -* Pulled "Fix Python handling of syntactic predicates" - https://github.com/antlr/antlr3/pull/33 - -July 15, 2012 - -* GUnit improvements - https://github.com/antlr/antlr3/pull/27 - -May 2012: - -* ANTLR3 update of ObjC runtime to go with latest ST4-ObjC - https://github.com/antlr/antlr3/pull/17 - -August 9, 2012 - -* Provide Lexer get end of file method so people can override it. 
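For reference, a minimal sketch of driving a recognizer built with the Python targets mentioned above; MyLexer, MyParser, and the start rule prog are hypothetical generated names, and only the stock antlr3 runtime stream classes are assumed:

    import antlr3
    from MyLexer import MyLexer    # hypothetical generated lexer
    from MyParser import MyParser  # hypothetical generated parser

    # Wire characters -> tokens -> parser, then invoke the start rule.
    chars = antlr3.ANTLRStringStream('x = 9;')
    tokens = antlr3.CommonTokenStream(MyLexer(chars))
    parser = MyParser(tokens)
    parser.prog()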
- -November 25, 2011 - -* stderr not test correctly in gunit examineExecResult - -October 27, 2011 - -* Dieter Habelitz reported bug in java code gen with synpreds. labels were - being carried from grammar into synpreds but weren't typed properly (they - were "void x=null;" for x=ruleref labels) - -October 25, 2011 - -* (Sam) Rule.setOption didn't do memoize option right. -* (Sam) Fix labels in synpreds -* (Sam) Fix input index for NoViableAltException during inline prediction -* (Sam) Fix label aliasing errors in cases like (x=y|x=z) - -August 10, 2011 - -* (Sam) fix stack overflow in semantic context analysis - -July 30, 2011 - -* added range check to BaseTree.insertChild() - -July 18, 2011 -- release 3.4 - -* Added tree method insertChild(int i, Object t). - -July 14, 2011 - -* Added BaesTree.freshenParentAndChildIndexesDeeply() to recursively - walk tree and set ptrs. - -July 6, 2011 - -* reset() for token stream didn't skip initial off-channel tokens. - -July 5, 2011 - -* Sam fixes rare infinite loop upon error in nextToken(). -* rewrites weren't pulled from syntactic predicates. - -June 29, 2011 - -* Sam noticed CommonToken.getText() cached substring pulled from input, which - defeated purpose of pointing into input char array. Altered to not cache. - Should reduce memory footprint. - -June 24, 2011 - -* empty alts with actions didn't have EPSILON dummy alt node. - -June 19, 2011 - -* Udo noticed that Parser.getMissingSymbol() didn't set invented token's input - -June 8, 2011 - -* Added inContext(String context) back into TreeParser. - -April 21, 2011 - -* Updated for ST v4.0.2 (setting iterateAcrossValues = true as instance var) -* Needed throws condition for delegatedRules. - -April 20, 2011 (Sam Harwell) - -* Implement the 'throwsSpec' feature of parser rules for the Java target -* Improve algorithm for SemanticContext Boolean predicate reduction - -April 13, 2011 - -* Unmangled region names in STViz hiearchy tree display. -* Removed conversion timeout thing again - -April 11, 2011 - -* Added option -Xconversiontimeout back in. Turns out we hit NFA conversion - time landmine occasionally with huge grammars; fails over to backtracking - (if turned on) if it can't make DFA. - -March 29 - April 10, 2011 - -* Uses ST v4 now!!! Massive change. Only updated Java target so far. - Ripped out ST v3 usage to remove indirect dependency on ANTLR v2. - -March 28, 2011 - -* Sam Harwell ported all v2 grammars to v3! - -March 3, 2011 - -* left-recursion pattern off until all targets catch up - -* ANTLRCore.sti no longer used; removed from all targets. - -* Adding type, text terminal options - -* Replaced hetero arg with terminalOptions arg in all templates that reference hetero - (this is the class name / node type used for TOKEN references - in grammar). Widespread but trivial changes to all targets. hetero is - now terminalOptions.node. Can also get terminalOptions.type and - terminalOptions.text from dictionary of options. - -* Fixed mispelling of license in BSD license headers - -March 3, 2011 - -* Add tree, getTree() to ParserRuleReturnScope to do away with specific ones like: - public static class rewrite_template_args_return extends ParserRuleReturnScope { - CommonTree tree; - public Object getTree() { return tree; } - }; - Removed these special classes if it's just AST; keep if they have defined "returns" - values - -February 26, 2011 - -* All finally {} have comment now to suppress warning. 
- -* removed ; from {;} blank method - -* Added @SuppressWarnings({"all"}) to front of each - generated class. - -* -print wasn't always showing ^ and ! in grammar - -* Added java-left-recur/Java.g example. - -* left-recursion pattern detection handles backtracking mode now - -February 25, 2011 - -* -Xmaxinlinedfastates went to 60 from 10 for forcing prediction in left- - recursive expression rules to stay in rule; preds use a parameter. - -* trees know where they came from now start..stop tokens; todo: use for better err handling. - -* Got immediate left-recursion working for rules. Added TestLeftRecursion.java - -February 21, 2011 - -* Fixed http://www.antlr.org/jira/browse/ANTLR-437 thanks to Vladislav Kuzkokov. - added unit test. - -February 17, 2011 - -* Add -language L option to override language=L option in grammar. Same - grammar can yield multiple parsers in different languages now. - -February 10, 2011 - -* Added method to generated Java code to return the array of delegates; e.g., - import Sub1, Sub2; - yields: - - public Parser[] getDelegates() { - return new Parser[] {gSub1, gSub2}; - } - -January 25, 2011 - -* Improve error messages for no viable alt parse exceptions - -January 20, 2011 - -* TokenRewriteStream had dead code; also updated insertBefore followed by - replace or delete. If input is abc and I did insertBefore(2,"y"), where - 'c' is index 2, then did delete of 2 previously defined functionality - was to ignore the insert. that's weird; fixed to keep insert. Also - Delete special case of replace (text==null): - D.i-j.u D.x-y.v | boundaries overlap => combine to max(min)..max(right) - -December 12, 2010 - -* Send couldBacktrack now to enterDecision in debug protocol - -December 4, 2010 - -* TreeWizard ctor needed a default tree adapator. - -November 29, 2010 -- ANTLR v3.3 - -November 23, 2010 - -* CodeGenerator.loadLanguageTarget is now static and available to load - targets so we can ask them questions during analysis. - -* Fixed and added unit test - http://www.antlr.org/jira/browse/ANTLR-370 - http://www.antlr.org/jira/browse/ANTLR-375 - -November 23, 2010 - -* Added source name to syntax error msgs - -October 20, 2010 - -Added boolean couldBacktrack to enterDecision in dbg interface. Breaks AW -interface and other tools! [BREAKS BACKWARD COMPATIBILITY] - -October 17, 2010 - -* Missing -trace in help msg - -November 22, 2010 - -* Added GrammarAST: public int getCharPositionInLine() { return getColumn()-1; } - and Grammar.getHasDelegates() for C# guys - -October 16, 2010 - -* Doesn't write profile data to file anymore; emits decision data to stderr - -October 14, 2010 - -* Make OrderedHashSet have deterministic iteration - -July 20, 2010 - -* greedy=true option shuts off nondeterminism warning. - -* code gen for AST and -profile didn't compile. had useless line: - - proxy.setTreeAdaptor(adap); - - -July 17, 2010 - -* Removed conversion timeout failsafe; no longer needed. - -* Stats updated to be correct for -report. - -June 10, 2010 - -* added toArray in OrderedHashSet to make addAll calls get same order for DFA edges and possibly code gen in some areas. - -June 5, 2010 - -* Added -Xsavelexer - -May 24, 2010 - -* lexerStringRef was missing elementIndex attribute. i='import' didn't work - in lexer. Altered all target stg files. Set in codegen.g - -* output=AST, rewrite=true for tree rewriters broken. nextNode for subtree - streams didn't dup node, it gave whole tree back. 
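The insertBefore-then-delete change above (January 20, 2011) is easiest to see in code. A sketch, assuming the Python runtime mirrors the Java TokenRewriteStream API and that MyLexer is a hypothetical lexer yielding one token per input character:

    import antlr3
    from MyLexer import MyLexer  # hypothetical: one token per character

    tokens = antlr3.TokenRewriteStream(
        MyLexer(antlr3.ANTLRStringStream('abc')))
    tokens.insertBefore(2, 'y')  # queue text before token 2 ('c')
    tokens.delete(2)             # then delete token 2
    # The old behavior dropped the insert ('ab'); the fix keeps it:
    print(tokens.toString())     # -> 'aby'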
- -March 17, 2010 - -* Added MachineProbe class to make it easier to highlight ambig paths in - grammar. More accurate than DecisionProbe; retrofitted from v4. - -February 20, 2010 - -* added range to TokenStream and implementors: - /** How far ahead has the stream been asked to look? The return - * value is a valid index from 0..n-1. - */ - int range(); - -* added new method to get subset of tokens to buffered token streams: - public List get(int start, int stop); - -February 15, 2010 - -* Refs to other tokens in a lexer rule didn't get its line/charpos right. - altered Java.stg. - -January 31, 2010 - -* Creating token from another token didn't copy input stream in CommonToken. - makes sense to copy too; i don't think anybody relies on it being null after - a copy. We might want to know where token came from. - -January 26, 2009 - -* TreeParser.getMissingSymbol() use CommonTree instead of using - adaptor.create() - -December 8, 2009 - -* Instead of sharing Token.EOF_TOKEN, I'm now creating EOF tokens so I can set the char position for better error messages. - -December 5, 2009 - -* Fixed bug in TreeVisitor when rewrites altered number of children. Thanks to Chris DiGiano. - -* added new buffered on-demand streams: BufferedTokenStream. Renamed CommonTokenStream to LegacyCommonTokenStream and made new one as subclass of BufferedTokenStream. - -November 3, 2009 - -* Added org.antlr.runtime.UnbufferedTokenStream. Was trivial and works! - -November 1, 2009 - -* Couldn't properly reuse parser state; ctor reset the state; fixed. - Parser(TokenStream input, RecognizerSharedState state) - -* LookaheadStream used some hardcoded Object return types for LT, etc... - uses T now. - -September 23, 2009 -- ANTLR v3.2 - -September 21, 2009 [Jim Idle] - -* Added new options for tool invocation to control the points at which the code - generator tells the target code to use its equivalent of switch() instead of - inline ifs. - -Xmaxswitchcaselabels m don't generate switch() statements for dfas - bigger than m [300] - -Xminswitchalts m don't generate switch() statements for dfas smaller - than m [3] -* Upgraded -X help output to include new optins and provide the default - settings, as well as provide units for those settings that need them. - -* Change the C Target to overide the deafults for the new settings to - generate the most optimizable C code from the modern C compiler point of - view. This is essentially to always use swtich statements unless there - is absolutely no other option. C defaults are to use 1 for minimum and - 3000 for maximum number of alts that trigger switch(). This results in - object code that is 30% smaller and up to 20% faster. - -April 23, 2009 - -* Added reset to TreeNodeStream interface. - -April 22, 2009 - -* Fixed ANTLR-374. Was caused by moved of grammars. %foo() stuff didn't work - -April 9, 2009 - -* .g and .g3 file extensions work again. -* introduced bug in 3.1.3: gives exception not error msg upon - missing .g file - -March 26, 2009 - -* Made ctor in TreeRewriter and TreeFilter call this not super. - -March 21, 2009 - -* Added ctor to RecognizerSharedState to allow cloning it. - -March 17, 2009 -- ANTLR v3.1.3 - -* improved ANTLRv3.g to handle <...> element options - -March 15, 2009 - -* Fixed ANTLR-389. Strip didn't ignore options in subrules; also seemed - to demand stdin. - -March 15, 2009 - -* ANTLR always sorts incoming grammar list by dependency. For example, - If W.g depends on tokens from P.g then P.g is done first even if - W.g mentioned first on command line. 
It does not ignore any files you - specify the commandline. If you do *.g and that includes some - imported grammars, it will run antlr on them. - -* -make option prevents ANTLR from running on P.g if P older than - generated files. - -* Added org.antlr.tool.GrammarSpelunker to build a faster dependency - checker (what grammars depend on etc...). Totally independent of any - ANTLR code; easy to pull into other tools. - -* Added org.antlr.misc.Graph, a general graph with nodes - containing an Object payload. It knows how to do a topological sort - on the nodes. - -March 10, 2009 - -* Added associativity token option to support upcoming special expression - parsing. Added rule option strategy=precedence also - -March 1, 2009 - -* Changed ANTLRWorks debug port from 49153 to 49100. Apparently we change the port in - ANTLRWorks to 49100 in 1.2 but forgot to do so in the ANTLR targets. - -START CHANGES FOR TREE FILTER MODE (pulled from dev branch) - -This feature will be announced in 3.2, but I am integrating from my development branch now into the mainline so target developers have a chance to implement. We might release 3.1.3 bug fix release before 3.2. - -* CommonTreeNodeStream -> BufferedTreeNodeStream. Now, - CommonTreeNodeStream is completely unbuffered unless you are - backtracking. No longer making a list of all nodes before tree parsing. - -* Added tree grammar filter=true mode. - - Altered templates: - Java.stg: added filterMode to genericParser and treeParser. - This required a change to ANTLRCore.sti - Defined a default parameter in treeParser to set the superclass - to TreeFilter for tree grammar with filter=true. It sets - superclass to TreeRewriter if filter=true and output=AST. - Other them that, I only had to change ANTLR itself a little bit. - Made filter mode valid for tree grammars and have it automatically set - the necessary elements: @synpredgate, backtrack=true, rewrite=true - (if output=AST). Added error message for detecting conflicting - options. - -* Added misc.FastQueue and TestFastQueue: - A queue that can dequeue and get(i) in O(1) and grow arbitrarily large. - A linked list is fast at dequeue but slow at get(i). An array is - the reverse. This is O(1) for both operations. - -* Added tree.TreeIterator, a generator that walks a doubly linked tree. - The nodes must know what index they are. It's an Iterator but - remove() is not supported. Returns navigation nodes always: - UP, DOWN, EOF. - -* Added misc.LookaheadStream: A lookahead queue that knows how - to mark/release locations in the buffer for backtracking purposes. - I hope to use for both tree nodes and tokens. Just implement - nextElement() to say how to get next node or token. - -END CHANGES FOR TREE FILTER MODE - -February 23, 2009 -- ANTLR v3.1.2 - -February 18, 2009 - -* Added org.antlr.tool.Strip (reads from file arg or stdin, emits to stdout) - to strip actions from a grammar. - -February 4, 2009 - -* Added CommonTree.setUnknownTokenBoundaries(). Sometimes we build trees - in a grammar and some of the token boundaries are not set properly. - This only matters if you want to print out the original text associated - with a subtree. Check this out rule: - - postfixExpression - : primary ('.'^ ID)* - ; - - For a.b.c, we get a '.' that does not have the token boundaries set. - ANTLR only sets token boundaries for subtrees returned from a rule. - SO, the overall '.' operator has the token boundaries set from 'a' - to 'c' tokens, but the lower '.' subtree does not get the boundaries - set (they are -1,-1). 
Calling setUnknownTokenBoundaries() on - the returned tree sets the boundaries appropriately according to the - children's token boundaries. - -January 22, 2009 - -* fixed to be listeners.add(listener); in addListener() of DebugEventHub.java - -January 20, 2009 - -* Removed runtime method: mismatch in BaseRecognizer and TreeParser. Seems - to be unused. Had to override method recoverFromMismatchedToken() in - TreeParser to get rid of single token insertion and deletion for - tree parsing because it makes no sense with all of the up-and-down nodes. - -* Changed JIRA port number from 8888 to no port spec (aka port 80) and all - refs to it in this file. - -* Changed BaseTree to Tree typecase in getChild and toStringTree() and - deleteChild() to make more generic. - -December 16, 2008 - -* Added -verbose cmd-line option and turned off standard header - and list of read files. Silent now without -verbose. - -November 24, 2008 - -* null-ptr protected getParent and a few others. - -* Added new ctor to CommonTreeNodeStream for walking subtrees. Avoids - having to make new serialized stream as it can reuse overall node stream - buffer. - -November 20, 2008 - -* Updated BaseTest to isolate tests better. - -November 17, 2008 - -* BaseTreeAdaptor.getType() was hosed; always gave 0. Thanks to Sam Harwell. - -November 8, 2008 - -* Added methods to BaseRecognizer: - public void setBacktrackingLevel(int n) { state.backtracking = n; } - /** Return whether or not a backtracking attempt failed. */ - public boolean failed() { return state.failed; } - -November 5, 2008 - -* Tweaked traceIn/Out to say "fail/succeeded" - -* Bug in code gen for tree grammar wildcard list label x+=. - -* Use of backtrack=true anywhere in grammar causes backtracking sensitive - code to be generated. Actions are gated etc... Previously, that only - happened when a syntactic predicate appeared in a DFA. But, we need - to gate actions when backtracking option is set even if no decision - is generated to support filtering of trees. - -October 25, 2008 - -* Fixed debug event socket protocol to allow spaces in filenames. - -* Added TreeVisitor and TreeVisitorAction to org.antlr.runtime.tree. - -October 22, 2008 - -* Added inContext() to TreeParser. Very useful for predicating - tree grammar productions according to context (their parent list). - Added new TestTreeContext unit tests (15). - - /** Check if current node in input has a context. Context means sequence - * of nodes towards root of tree. For example, you might say context - * is "MULT" which means my parent must be MULT. "CLASS VARDEF" says - * current node must be child of a VARDEF and whose parent is a CLASS node. - * You can use "..." to mean zero-or-more nodes. "METHOD ... VARDEF" - * means my parent is VARDEF and somewhere above that is a METHOD node. - * The first node in the context is not necessarily the root. The context - * matcher stops matching and returns true when it runs out of context. - * There is no way to force the first node to be the root. - */ - public boolean inContext(String context) {...} - -* Added 3 methods to Tree interface [BREAKS BACKWARD COMPATIBILITY] - - /** Is there is a node above with token type ttype? */ - public boolean hasAncestor(int ttype); - - /** Walk upwards and get first ancestor with this token type. */ - public Tree getAncestor(int ttype); - - /** Return a list of all ancestors of this node. The first node of - * list is the root and the last is the parent of this node. 
- */ - public List getAncestors(); - -October 21, 2008 - -* Updated unit tests to be correct for \uFFFE->\uFFFF change - -* Made . in tree grammar look like ^(. .*) to analysis, though ^(. foo) - is illegal (can't have . at root). Wildcard is subtree or node. - Fixed bugs: - http://www.antlr.org/browse/ANTLR-248 - http://www.antlr.org/browse/ANTLR-344 - -October 1, 2008 -- ANTLR v3.1.1 - -September 8, 2008 - -* Labels on tokens, rules carry into synpreds now so semantic predicates work. - This didn't work since labels were stripped in the synpred and they weren't - defined in the generated method. - - a : x=A z=a {$x.text.equals($z.text)}? A - | y=A a A A - ; - -September 3, 2008 - -* Made a REV static variable in Tool so that we can change the rev for - daily builds. - -* Made \uFFFF a valid character. Token types are 32-bit clean using -1 - not 0x0000FFFF as -1 so it should be okay. Label.java: - public static final int MIN_CHAR_VALUE = '\u0000'; - public static final int MAX_CHAR_VALUE = '\uFFFF'; - -August 30, 2008 - -* Changed messages in en.stg so that TOKEN_NONDETERMINISM correctly - indicates when actions hid semantic predicates. - -August 15, 2008 - -* Tweaked build properties and build.xml - -August 13, 2008 - -* Fixed ANTLR-314; 3.1 introduced a problem with list labels += - -August 12, 2008 -- ANTLR v3.1 - -* Added JavaScript target - -August 7, 2008 - -* an NFA target of EOF predicate transition in DFA cause an exception in - getPredicatesPerNonDeterministicAlt(). - -* Kay Roepke found a nasty bug when debugging AST-constructing - composite recognizers. If the input state was null to the constructor, - super class constructor created a new parser state object. - Later, though we passed the argument state not this.state - to the delegate constructors, forcing them to share a different - state objects! Changed state to this.state in Dbg.stg constructors. - -* Ack. messed up debug/AST. Have to set proxy's tree adaptor; it's - a circular ref. Just an ASTDbg.stg change. - -August 4, 2008 - -* superClass works now for lexers - -* Made Grammar.defineNamedAction propogate header actions down to all - delegates if root grammar; regardless of lexer/parser scope. - -* Rejiggered AST templates to propogate changes to tree adaptor - for delegate grammars. Fixes ANTLR-302 - -August 4, 2008 - -* FOLLOW set computations altered constant FOLLOW bit sets. - -* Added (...) are all predicate evaluations. - -* Extra init code for tree parser nonrewrite mode removed. - -* Added empty child list check in becomeRoot - -August 3, 2008 - -* Was using RuleReturnScope not Rulename_return for list labels in tree - parser. - -* Didn't set _last in tree parser for rule ref track stuff (rewrite=true) - -August 2, 2008 - -* Benjamin found another rewrite engine bug. - -July 30, 2008 - -* CommonTreeNodeStream / CommonTokenStream did not reset properly. - -July 29, 2008 - -* Fixed another bug in TokenRewriteStream; didn't like inserts after end. - -July 28, 2008 - -* Fixed bug in TokenRewriteStream.toString(start,stop); it ignored - parameters. ;) - -July 17, 2008 - -* allow qualified type names in hetero <...> options like T - -July 5, 2008 - -* treeLevel not set for setBlock alts; added unit test - -July 3, 2008 - -* Fixed ANTLR-267. parse tree added nodes during backtracking and - cyclic DFAs. tracks hidden tokens too now. Added toInputString() to - get text back including hidden tokens. Shows for rules - that match nothing. - -June 26, 2008 - -* Added gParent ptr that points to immediate parent grammar. 
E.g., - // delegators - public MParser gM; - public M_S gS; - public M_S gParent = gS; // NEW - -* Grammar imports didn't set all the delegate pointers...lots of imported - grammars would cause a null ptr exception. Fixes ANTLR-292. - -June 25, 2008 - -* List labels in tree construction didn't always track the tree; sometimes - had a rule result structure. - -June 4, 2008 - -* Improved unit testing so that each test suite executes and builds grammars - in a separate temporary directory. This means they can execute concurrently. - Also seem to be a problem with my class path during execution. Moved - tmpdir for ahead of standard CLASSPATH. - -* By virtue of an improvement to StringTemplate, output newlines - in generated files should be normalized to whatever your host uses. - -June 3, 2008 - -* Restrict legality of grammar options; for example you cannot use output option - in lexer anymore. - -June 2, 2008 - -* Throw illegal arg exception upon invalid TokenRewriteStream ops. Rewrote - core of engine. Slightly different operation. Added many more unit tests. - -3.1b1 - May 20, 2008 - -May 11, 2008 - -* rewrite=true, output=AST for tree grammar was not working. Altered trees were not - propagated back up the rule reference chain. Required a number of mods to - ASTTreeParser.stg. Added unit tests. - -May 10, 2008 - -* [BACKWARD INCOMPATIBLE if you override match()] - I had turned off single token insertion and deletion because I could not figure - out how to work with trees and actions. Figure that out and so I turned it back on. - match() returns Object matched now (parser, tree parser) so we can set labels - on token refs properly after single token ins/del error recovery. Allows actions - and tree construction to proceed normally even though we recover in the middle of - an alternative. Added methods for conjuring up missing symbols: getMissingSymbol(). - -* refactored BaseRecognizer error handling routines - -* Single token error recovery was not properly taking into consideration EOF. - -* ANTLR no longer tries to recover in tree parsers inline using single node deletion or insertion; throw exception. Trees should be well formed as they are not created by users. - -* Added empty constructors to the exception classes that did not have them so that ANTLRWorks can create the exceptions. - -* Made debug tree adaptor deal with tokens conjured up during error recovery. - -* Removed extra location() debug element that was emitted. - -May 8, 2008 - -* ANTLR didn't update line/col to DFA map for AW. - -May 6-7, 2008 - -* Insufficiently covered (with semantic predicates) alt warnings are now emitted before - nondeterminisms so it's clear the nondeterminism is a result of insufficient preds. - -* Improved insufficiently covered alt warnings from: - warning(203): T.g:2:3: The following alternatives are insufficiently covered with predicates: 1 - to: - warning(203): T.g:2:3: Input B is insufficiently covered with predicates at loca -tions: alt 1: line 3:15, alt 2: line 2:9 - -* Improved nondeterminism warning to have: - Semantic predicates were present but were hidden by actions. -parser grammar U; -a : (A B)? ; -b : X a {p1}? A B | Y a {a1} {p2}? A B | Z a ; - -To create the prediction DFA for the optional sub rule in 'a', ANTLR must find all references to 'a' to determine what can follow. A B can follow 'a' in the first two alts rule 'b'. To resolve the conflict between matching A B immediately in the sub rule and exiting rule 'a' to match it in 'b', ANTLR looks for predicates. 
In this case, there are two predicates that indicate the semantic context in which the surrounding alternatives are valid. The problem is that one of the predicates is hidden by an action. It took me 1.5 days, but I've finally have gotten ANTLR to properly track the insufficiently covered alternatives. Further, I have gotten it to tell you precisely where the uncovered predicates are even if they are simply hidden by actions. I have also updated all of the nondeterminism warnings so that it tells you if there was a predicate but one hidden by an action (this could be a separate condition from insufficiently covered predicates). here are your messages from ANTLR: - -ANTLR Parser Generator Version 3.1b1 (??) 1989-2007 -warning(203): U.g:2:5: Input such as "A B" is insufficiently covered with predicates at locations: alt 2: line 3:38 at B -Semantic predicates were present but were hidden by actions. -warning(200): U.g:2:5: Decision can match input such as "A B" using multiple alternatives: 1, 2 -As a result, alternative(s) 2 were disabled for that input -Semantic predicates were present but were hidden by actions. - -* Fixed issue where -r41 - : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';' - ; -still warned about $r41 being ambig. - -* actions are now added to the NFA. - -* Fixed ANTLR-222. ANTLR now ignores preds after actions. - -May 5, 2008 - -* Fixed ANTLR-235 by backing out a change from 12-31-07. - -* Fixed ANTLR-249; I include semantic context again in closure busy signal. - -May 3, 2008 - -* Fixed ANTLR-208. Looks in library or in -o output path. antlr -o foo T.g U.g where U needs T.tokens won't work unless we look in foo too. fixed. - -* Refactored assign.types.g to move methods to a class called AssignTokenTypesBehavior. - -* Fixed ANTLR-207. Lexers importing vocabs didn't see ';'=4 type aliases in .tokens. - -* Fixed ANTLR-228. Couldn't use wildcard in alts with AST rewrites. - -May 2, 2008 - -* Fixed ANTLR-230; can use \' now in action. - -* Scope attributes no longer have a stack depth check on front. If you ref $r::a when r has not invoked you, then you get an exception not a default value. Back to the way 3.0.1 worked. - -* $channel was a global variable in 3.0.1 unlike $type which did not affect an invoking lexer rule. Now it's local too. Only $type and $channel are ever set with regularity. Setting those should not affect an invoking lexer rule as in the following should work: - - X : ID WS? '=' ID ; // result is X on normal channel - WS : ' '+ {$channel = HIDDEN; } ; - - STRING : '"' (ESC|.)* '"' ; // result is STRING not ESC - - FLOAT : INT '.' INT? ; // should be FLOAT - INT : Digit+ ; - fragment - Digit : '0'..'9' ; - -* Fixed bug in interpreter regarding (...)* loops - -May 1, 2008 - -* Fixed ANTLR-202. These now give warnings about ambig ref to $a. - a : ID a -> $a | INT ; - and - a : A a {$a.text} | B ; - -April 30, 2008 - -* Fixed ANTLR-237. updated -depend to know about imported grammars. -$ java org.antlr.Tool -depend -lib foo T.g - ANTLR Parser Generator Version 3.1b1 (??) 1989-2007 - T.g: foo/Java.g - TParser.java : T.g - T.tokens : T.g - TLexer.java : T.g - T_Java : T.g - -April 29, 2008 - -* Fixed ANTLR-217; scope A,B,C; didn't work - -* Fixed ANTLR-224; ! or ^ on item in alt with rewrite gave exception - -* Added token options to terminals: ID etc... - node is default so you can do ID for hetero tree types. most common. 
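A sketch of the $channel semantics above as seen from the token stream, assuming the Python runtime; MyLexer is a hypothetical generated lexer whose WS rule executes $channel = HIDDEN:

    import antlr3
    from MyLexer import MyLexer  # hypothetical; WS goes to the hidden channel

    tokens = antlr3.CommonTokenStream(
        MyLexer(antlr3.ANTLRStringStream('x = y')))
    # LT(k) only counts default-channel tokens, so the whitespace
    # around '=' is invisible to parser lookahead:
    assert tokens.LT(2).text == '='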
- -April 17, 2008 - -* Use default msg if unknown recog type in getErrorMessage(): - String msg = e.getMessage(); - -April 14, 2008 - -* %x.y = foo; was not working in @members section - -March 29, 2008 - -* Import couldn't handle A imports B imports C. - -March 27, 2008 - -* Added get/setInputStream to Token interface and affected classes. - -February 26, 2008 - -* made fillBuffer public in CommonTreeNodeStream so we can add trees - to the stream for interactive interpreters. - -February 14, 2008 - -* Fixed a bug in the code generation where tree level 0 was used - no matter what to rewrite trees in tree grammars. Added unit test. - -* Fixed ANTLR-221. Exceptions were generated when using - AST construction operators and no output=AST option. - -February 13, 2008 - -* Improved error msgs for unreachable alts and tokens. - -February 11-12, 2008 - -* Fixed ANTLR-219. - It looks like the AST construction code for sets was totally messed up. - This was for not only the new tree parser AST construction, but also - the regular tree construction for parsers. I had to introduce templates - in the ASTTreeParser.stg file to deal with this. Added unit tests: - TestTreeGrammarRewriteAST.testSetMatchNoRewrite(), - testSetMatchNoRewriteLevel2(), testSetMatchNoRewriteLevel2Root(). - Had to add template matchRuleBlockSet() - to differentiate between a regular set and one that is an entire rule. - If it is an entire rule, it has to set the return value, retval.tree. - -* Fixed ANTLR-220. - Made TreeAdaptor dupNode and dupTree events emit debugging events - so AW could see tree node duplications. - -February 4, 2008 - -* BACKWARD INCOMPATIBILITY - Added getSourceName to IntStream and TokenSource interfaces and also the - BaseRecognizer. Have to know where chars come from for error messages. - Widespread change, but a trivial one. - -January 17, 2008 - -* Interpreter throws FailedPredicateException now when it sees a predicate; - before it was silently failing. I'll make it work one of these days. ;) - -January 12, 2008 - -* Copy ctor was not copying start and stop for common token. Fixes ANTLR-212 - -* Removed single token insertion and deletion for tokens, sets. - Required a change to the code generation for the matchSet() template - and a tweak inside the BaseRecognizer. To engage this again is easy, - just override mismatch() to call mismatchRecover(). I changed it to simply - throw an exception. - -* Added syntaxError recognizer state var so you can easily tell if - a recognizer failed. Added getNumberOfSyntaxErrors() to recognizers. - -* Added doc for the error node stuff: - http://www.antlr.org/wiki/display/ANTLR3/Tree+construction - -* Fixed ANTLR-193 - -* Added recognizer methods to answer questions about the current mismatched - token error. Useful now since I don't automatically recover inline - from such errors (I throw an exception): - mismatchIsUnwantedToken(IntStream input, int ttype) - mismatchIsMissingToken(IntStream input, BitSet follow) - -* Added UnwantedTokenException and MissingTokenException to make - match() problems more precise in case you want to catch differently. - Updated getErrorMessage() to be more precise.
Says: - - line 2:9 missing EQ at '0' - - now instead of - - line 2:9 mismatched input '0' expecting EQ - - Input "x=9 9;" gives - - line 3:8 extraneous input '9' expecting ';' - - When very confused, "x=9 for;", you still get old mismatched message: - - line 3:8 extraneous input 'for' expecting ';' - line 3:11 mismatched input ';' expecting '(' - -* Added unit tests to TestAutoAST and copied to TestRewriteAST with - suitable rewrites to ensure basic error node insertion works. - -January 11, 2008 - -* Adding errorNode to TreeAdaptor and various debug - events/listeners. Had to add new class runtime.tree.CommonErrorNode - to hold all the goodies: input stream, start/stop objects. - -* Tweaked CommonTree.getType() to return INVALID_TOKEN_TYPE - instead of literal 0 (same thing at moment though). - -* Updated ANTLRWorks to show error nodes in tree as much as I could; Jean - will get to rest of it. - -January 9-10, 2008 - -* Continued work on debugging/profiling composite grammars. - -* Updated debug protocol for debugging composite grammars. enter/exit - rule needs grammar to know when to flip display in AW. - -* Fixed ANTLR-209. ANTLR consumed 2 not 1 char to recover in lexer. - -* Added two faqs instead of making changes to antlr runtime about - lexer error handling: - http://www.antlr.org/wiki/pages/viewpage.action?pageId=5341230 - http://www.antlr.org/wiki/pages/viewpage.action?pageId=5341217 - -January 1-8, 2008 - -* Making debugging/profiling work with composite grammars. - -* Updated ANTLRWorks so it works still for noncomposite grammars. - -* two new examples: import and composite-java (the java example grammar - broken up into multiple pieces using import). - -* Worked on composite grammars. Had to refactor a lot of code to make - ANTLR deal with one meta grammar made up of multiple grammars. I - thought I had it sort of working back in August. Yes, but barely. Lots - of work to do it seemed. Lots of clean up work. Many new unit tests - in TestCompositeGrammars. Had to add new error messages warning about - conflicting tokens inherited from multiple grammars etc... - - TOKEN_ALIAS_CONFLICT(arg,arg2) ::= - "cannot alias ; string already assigned to " - TOKEN_ALIAS_REASSIGNMENT(arg,arg2) ::= - "cannot alias ; token name already assigned to " - TOKEN_VOCAB_IN_DELEGATE(arg,arg2) ::= - "tokenVocab option ignored in imported grammar " - INVALID_IMPORT(arg,arg2) ::= - " grammar cannot import grammar " - IMPORTED_TOKENS_RULE_EMPTY(arg,arg2) ::= - "no lexer rules contributed to from imported grammar " - IMPORT_NAME_CLASH(arg,arg2) ::= - "combined grammar and imported grammar both generate ; import ignored" - - This stuff got really really complicated. Syntactic predicate names even - had to be scoped per grammar so they don't conflict. - -* When using subrules like (atom->atom) to set result tree, it was not - properly setting result (early enough). Future code got null for - $rule.tree. - -December 31, 2007 - -* Added the start of a semantic predicate computation for LL(1) to - solve a problem with slow grammar analysis even with k=1 due to - predicates. Then I realized the problem with that grammar was - elsewhere. Semantic context really shouldn't be used when - preventing closure recomputation (May 2008 I discovered I was - wrong--you do need it). The predicates became huge even though the - reduced value would be no different. The analyzer seems faster now - that I am not testing predicate values all the time. Further it may - terminate sooner just due to reduced closure recursion. 
- -* Moved FIRST/FOLLOW computations to a separate class LL1Analyzer to - tidy up. - -* ANTLR lexer allowed octal escapes, but they didn't work. ;) Rather than - fix, I'm removing. Use '\uxxxx' to get even 8 bit char values: \u00xx. - -December 29, 2007 - -* Fixed ANTLR-206. I wasn't avoiding analyzing decisions in - left-recursive rules. - -* Had to add hetero arg to all tokenRef*() templates. Added _last - local var to track last child so we can do replaceChildren() during - AST rewrite mode for tree grammars. Should be useful later for .text - property. Ack, hetero arg is on lots of templates. :( Moved - ruleCleanUp() template into ASTTreeParser and ASTParser groups. - -* added noRewrite() template (to Java.stg) so we can insert code during - rewrite mode to return the original tree if there is no rewrite. Might be useful - for token rewrites later. For templates too? - -* Had to add if !rewriteMode around tree construction in tree parser - templates. - -* Harald Muller pointed out that we need to use - in our tests for null token/rule property references. For int types - we need 0 not null. (p!=null?p.line:0). Changed scopeAttributeRef, - ruleLabelRef. Also changed the known typed attributes like - lexerRuleLabelPropertyRef_line to yield 0 upon null rule ref to - be consistent with the case when we don't know the type. Fixes ANTLR-195. - Added testTypeOfGuardedAttributeRefIsCorrect test and reset expected - output for 13 tests that now "fail". - -December 28, 2007 - -* added polydiff example (Java target) - -* added "int" property for token and lexer rule refs. Super convenient. E.g., - a : b=INT {int x = $b.int;} ; - -December 27, 2007 - -* Changed -Xnoinlinedfa to -Xmaxinlinedfastates m where m is the - maximum number of states a DFA can have before ANTLR avoids - inlining it. Instead, you get a table-based DFA. This - effectively avoids some acyclic DFA that still have many states - with multiple incident edges. The combinatorial explosion smacks - of infinite loop. Fixes ANTLR-130. - -* [...] are allowed in args now but ] must be escaped as \]. E.g., - a[String[\] ick, int i] : ... ; - And calling a rule: foo[x[i\], 34] - Fixes ANTLR-140. - -* Fixed ANTLR-105. Target.getTargetStringLiteralFromANTLRStringLiteral() - escaped " that were already escaped. - -* Targets can now specify how to encode int as char escape. Moved - DFA.encodeIntAsCharEscape to Target. - -* Bug in runtime.DFA. If a special state (one with a predicate) failed, it - tried to continue (causing an out of range exception due to state = -1) - instead of reporting the error. - -* If -dfa with combined grammar T.g, builds T.dec-*.dot and TLexer.dec-*.dot - -* Fix ANTLR-165. - Generate TParser.java and TLexer.java from T.g if combined, else - use T.java as output regardless of type. - BACKWARD INCOMPATIBILITY since file names change. - I changed the examples-v3/java to be consistent. Required XML.g -> - XMLLexer.java and fuzzy/Main.java change. - -* Fix ANTLR-169. Deletes tmp lexer grammar file. - -December 25, 2007 - -* Fixed ANTLR-111. More unit tests in TestAttributes. - -December 25, 2007 - -* Dangling states ("decision cannot distinguish between alternatives - for at least one input sequence") are now an error, not a warning. - -* Added sample input sequence that leads to a dangling DFA state, one - that cannot reach an accept state. ANTLR ran into a case where - the same input sequence reaches multiple locations in the NFA - (and so is not nondeterministic), but analysis ran out of further - NFA states to look for more input.
Commonly this happens at EOF target states. Now says: - - error(202): CS.g:248:95: the decision cannot distinguish between alternative(s) 1,2 for input such as "DOT IDENTIFIER EOF" - - Also fixed bug where dangling states did not resolve to stop states. - -* Fixed ANTLR-123 - -December 17-21, 2007 - -* k=1 doesn't prevent backtracking anymore as in - (options {k=1;}:'else' statement)? - if backtrack=true for the overall grammar. Set to false in the subrule. - -* Optimized the analysis engine for LL(1). Doesn't attempt LL(*) unless - LL(1) fails. If a decision is not LL(1) but uses autobacktracking and no - other kind of predicate, it also avoids LL(*). This is only important for - really big 4000 line grammars etc... - -* Lots of code clean up - -December 16, 2007 - -* Yet more Kay pair programming. Saved yet more RAM; 15% by - whacking NFA configurations etc in each DFA state after DFA construction. - -* Overall we drop from 2m49s to 1m11s for a huge 4000 line TSQL grammar - with k=*. Only needs -Xconversiontimeout 2000 now not - -Xconversiontimeout 5000 too. With k=1, it's 1m30s down to 40s. - -December 15, 2007 - -* Working with Kay Roepke, we got about 15% speed improvement in - overall ANTLR exec time. Memory footprint seems to be about 50% - smaller. - -December 13-14, 2007 - -* I abort entire DFA construction now when I see recursion in > 1 alt. - The decision is non-LL(*) even if some pieces are LL(*). Safer to bail - out and try with fixed k. If the user set a fixed k then it continues because - analysis will eventually terminate for sure. If a pred is encountered - and k=* and it's non-LL(*), it aborts and retries at k=1 but does NOT - emit an error. - -* Decided that recursion overflow while computing a lookahead DFA is - serious enough that I should bail out of the entire DFA computation. - Previously analysis tried to keep going and made the rules about - how analysis worked more complicated. Better to simply abort when a - decision can't be computed with the current max stack (-Xm option). - User can adjust or add a predicate etc... This is now an error, - not a warning. - -* Recursion overflow and unreachable alt is now a fatal error; no code gen. - The decision will literally not work. - -* Cleaned up how DFA construction/analysis aborts due to non-LL(*) and - overflow etc... Throws exceptions now, which cleans up a bunch of IF - checks etc... Very nice now. Exceptions: - analysis/AnalysisRecursionOverflowException.java - analysis/AnalysisTimeoutException.java - analysis/NonLLStarDecisionException.java - -* ErrorManager.grammarWarning() counted them as errors, not warnings. - -* Unreachable alt warnings are now errors. - -* The upshot of these changes is that I fixed ANTLR-178 and did - lots of refactoring of code handling analysis failure. - -December 11, 2007 - -* Could not deal with spaces, oddly enough, in arg lists: - grammar Bad; - a : A b["foo", $A.text] ; - b[String x, String y] : C ; - -October 28, 2007 - -* Made ANTLR emit a better error message when it cannot write the - implicit lexer file from a combined grammar. Used to say "cannot open - file", now says "cannot write file" and gives a backtrace. - -September 15, 2007 - -* Added getCharStream to Lexer. - -September 10, 2007 - -* Added {{...}} forced action, executed even during backtracking. - -September 9, 2007 - -* r='string' in lexer got a duplicate label definition. - -August 21, 2007 - -* $scope::variable refs now check for an empty stack so that expr == null if - $scope has an empty stack. Works for $scope[...]::variable too. Nice!
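For illustration only, a rough Java sketch of what that guarded read amounts to; the scope class and all names here are hypothetical, not the actual generated code:

    import java.util.Stack;

    class SymScope { int level; }   // hypothetical dynamic scope entry

    class ScopeGuardSketch {
        static Stack<SymScope> symStack = new Stack<SymScope>();

        // A $sym::level style reference behaves like this guarded read
        // after the change: an empty scope stack yields null instead of
        // throwing an exception.
        static Integer level() {
            return symStack.isEmpty() ? null : symStack.peek().level;
        }
    }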
- -August 20, 2007 - -* Added reset() to CommonTreeNodeStream, token stream too - -* Made refs to rule/token properties use ?: to avoid null ptr exception. - $label.st now is label!=null?label.st:null. Updated TestAttributes. - This is useful not only for optional rule/token refs, but also during - error recovery. If ID is not matched, $ID.text won't cause a null ptr. - -August 20, 2007 -* Fixed ANTLR-177: hashCode/equals not consistent for label. - Fixed bug where a Rule was compared to a string; introduced from dev branch - -August 15, 2007 -- Got rough draft of the grammar import working. - Submit to dev and then integrate into mainline. - - All file changes/additions: - - README.txt # edit - CHANGES.txt # add - Factored out the changes from the readme. - - runtime/Java/src/org/antlr/runtime/BaseRecognizer.java # edit - runtime/Java/src/org/antlr/runtime/DFA.java # edit - runtime/Java/src/org/antlr/runtime/Lexer.java # edit - runtime/Java/src/org/antlr/runtime/Parser.java # edit - runtime/Java/src/org/antlr/runtime/debug/DebugParser.java # edit - runtime/Java/src/org/antlr/runtime/tree/TreeParser.java # edit - Factored state fields into RecognizerSharedState - object. You will see a lot of things like - state.errorRecovery = false; - runtime/Java/src/org/antlr/runtime/RecognizerSharedState.java # add - Shares all recognizer state variables, including lexer state, even though - these are superfluous to parsers and tree parsers. There - was a casting issue that I could not resolve. - - src/org/antlr/Tool.java # edit - Broke apart Grammar.setGrammarContent() into - parseAndBuildAST() and analyzeGrammar() to make the grammar - import work. I needed to be able to look at the trees for - imported grammars before analyzing them and building DFA. Added - use of the CompositeGrammar object and handling of multiple - delegate grammars. Changed decision DFA DOT file names to - include the grammar name. - - src/org/antlr/analysis/DFA.java # edit - Just tweaked to use generics, updated a comment. - - src/org/antlr/analysis/DecisionProbe.java # edit - Just tweaked to use generics. - - src/org/antlr/analysis/NFA.java # edit - NFAs now span multiple grammars, so I moved NFA state - tracking to the composite grammar object. - - src/org/antlr/analysis/NFAState.java # edit - Added some null checking and made a field public. - - src/org/antlr/analysis/NFAToDFAConverter.java # edit - Changed a method call to directly access a field. - - src/org/antlr/analysis/RuleClosureTransition.java # edit - Instead of using a rule index, which does not span multiple - grammars, the transition object now tracks a pointer to - the actual Rule definition object. - - src/org/antlr/analysis/SemanticContext.java # edit - Tweaked to use a field instead of a method - - src/org/antlr/codegen/ActionTranslator.g # edit - src/org/antlr/codegen/ActionTranslatorLexer.java # edit - Tweaked to use the new runtime and the changed method name. - - src/org/antlr/codegen/CodeGenerator.java # edit - Tweaked comments. - - src/org/antlr/codegen/codegen.g # edit - Added import grammar syntax and altered rule atom to pass a - scope around so that grammar.rule works. Caution: this - feature is used internally by ANTLR and is not meant to be - used by users at this point. - - src/org/antlr/codegen/templates/ANTLRCore.sti # edit - Added scope to all ruleref template interfaces. - - src/org/antlr/codegen/templates/Java/Java.stg # edit - Grammars can now import other grammars, which I implemented - using a delegation pointer to the other grammar(s).
So if - grammar A imports grammars B and C, then the generated - recognizer for A must have delegation pointers to BParser - and CParser objects. These are now fields: - - // delegates - Lexer ;}; separator="\n"> - - Also, B and C must have back pointers to the delegator so - that they can refer to rules that have been overridden. - This is a mechanism akin to static inheritance: - - // delegators - Lexer ;}; separator="\n"> - - This file also has a lot of changes so that state variables - now are state.backtracking instead of the implied - this.backtracking. - - The file also refers to grammar.delegatedRules attribute - which is the list of Rule objects for which you must - generate manual delegation. This amounts to a stub whereby - rule foo's method foo() simply calls X.foo() if foo is not - defined inside the delegator. - - You will notice that the ruleref templates now take a scope - so that I can have implicit rule Tokens referred to - delegate.Tokens rule in a delegate grammar. This is the way - I do lexer grammar imports. - - I added a template called delegateName which uses the - grammar name to compute a delegate name if the user does not - specify a label in the import statement such as: - - import x=X; - - Oh, note that rule reference templates all receive a Rule - object now instead of the simple rule name as the 'rule' - attribute. You will see me doing instead of - now. - - src/org/antlr/codegen/templates/Java/Dbg.stg # edit - Changes mirroring the constructor and field stuff from - Java.stg. Part of this is a cut and paste because of a bug - in ST. - - src/org/antlr/codegen/templates/Java/AST.stg # edit - src/org/antlr/codegen/templates/Java/ASTParser.stg # edit - src/org/antlr/codegen/templates/Java/ASTTreeParser.stg # edit - Just added the scope attribute. - - src/org/antlr/test/BaseTest.java # edit - Added functionality to support testing composite grammars. - execLexer() - - src/org/antlr/test/TestAttributes.java # edit - Tweak to deal with shared recognizer state. - - src/org/antlr/test/TestCompositeGrammars.java # add - Start of my unit tests. - - src/org/antlr/tool/CompositeGrammar.java # add - src/org/antlr/tool/CompositeGrammarTree.java # add - Tracks main grammar and all delegate grammars. Tracks unique - NFA state numbers and unique token types. This keeps a tree - of grammars computed from the import/delegation chain. When - you want to look up a rule, it starts at the root of the - tree and does a pre-order search to find the rule. - - src/org/antlr/tool/ActionAnalysis.g # edit - src/org/antlr/tool/ActionAnalysisLexer.java # edit - - src/org/antlr/tool/AttributeScope.java # edit - Updated to use generics in one place. - - src/org/antlr/tool/DOTGenerator.java # edit - Updated to indicate when nonlocal rules are referenced. - - src/org/antlr/tool/ErrorManager.java # edit - Added some error messages for import grammars; I need more. - - src/org/antlr/tool/FASerializer.java # edit - Tweaked to use a field not method. - - src/org/antlr/tool/Grammar.java # edit - This is where most of the meat is for the grammar import - stuff as you can imagine. I factored out the token type - tracking into the CompositeGrammar object. I added code to - the addArtificialMatchTokensRule method so that it includes - references to all delegate lexer Tokens rules. Altered the - rule lookup stuff so that it knows about delegate grammars. 
- - src/org/antlr/tool/GrammarAST.java # edit - src/org/antlr/tool/GrammarAnalysisAbortedMessage.java # edit - src/org/antlr/tool/GrammarReport.java # edit - src/org/antlr/tool/NonRegularDecisionMessage.java # edit - Made enclosing rule visible as field. - - src/org/antlr/tool/GrammarSanity.java # edit - General cleanup and addition of generics. - - src/org/antlr/tool/Interpreter.java # edit - Reference fields instead of methods. - - src/org/antlr/tool/NFAFactory.java # edit - General cleanup and use of Rule object instead of rule - index. - - src/org/antlr/tool/NameSpaceChecker.java # edit - A little bit of cleanup and changes to use either the local - or globally visible rule. Added code to check that scopes - are valid on scoped rule references. Again, this is an - internal feature, not to be used by users. - - src/org/antlr/tool/RandomPhrase.java # edit - Tweaked. - - src/org/antlr/tool/Rule.java # edit - Added field imported. Removed some unused methods by - commenting them out. Made toString() more expressive. - - src/org/antlr/tool/antlr.g # edit - src/org/antlr/tool/antlr.print.g # edit - src/org/antlr/tool/assign.types.g # edit - src/org/antlr/tool/buildnfa.g # edit - src/org/antlr/tool/define.g # edit - Added syntax for import statement. assign.types.g is the - grammar that invokes Grammar.importGrammar(). - - src/org/antlr/tool/templates/messages/languages/en.stg # edit - Added error messages. - - Added - - CHANGES.txt - runtime/Java/src/org/antlr/runtime/RecognizerSharedState.java - src/org/antlr/test/TestCompositeGrammars.java - src/org/antlr/tool/CompositeGrammar.java - src/org/antlr/tool/CompositeGrammarTree.java - -3.0.1 - August 13, 2007 - -[See target pages on the wiki for more information on the non-Java targets] - -August 7, 2007 - -* added escaping of double quotes in DOTTreeGenerator - -July 22, 2007 - -* fixed dynamic scope implementation in lexers. They were not creating new scope - entries on the stack. Unsupported feature! - -July 30, 2007 - -* float return values were initialized to 0.0 not 0.0f in Java. - -July 28, 2007 - -* Sam Ellis pointed out an init var bug in ANTLRReaderStream. - -July 27, 2007 (done in dev branch) - -* Moved token type index stuff from CommonTreeNodeStream to TreeWizard - -* Added getChildren to BaseTree. - -* Added heterogeneous tree functionality; rewrite for parser/tree parser - and auto AST constr. for parser. - - org/antlr/runtime/tree/RewriteRuleElementStream.java - org/antlr/runtime/tree/RewriteRuleNodeStream.java - org/antlr/runtime/tree/RewriteRuleTokenStream.java - Renamed method next() and re-factored things to have more - specific methods: nextToken, nextNode, nextTree. - - codegen/codegen.g - Updated to include new AST structure for - token references. Pushed hetero attribute into - all tokenRef* templates. - codegen/templates/Java/AST.stg - Factored out a few templates: - createImaginaryNode(tokenType,hetero,args) - createRewriteNodeFromElement(token,hetero,args) - Converted a lot of stream next() calls to the more specific - nextToken, nextNode, nextTree per above. - codegen/templates/Java/ASTParser.stg - Added createNodeFromToken template and re-factored creation - sites to use that template. Added hetero attribute. - codegen/templates/Java/ASTTreeParser.stg - Added createRewriteNodeFromElement template and re-factored. - - test/TestHeteroAST.java - New file. Unit tests to test new hetero tree construction. - test/TestRewriteAST.java - Fixed test. Nil single-node trees no longer return nil; - they return null.
- - tool/ErrorManager.java - tool/templates/messages/languages/en.stg - Added error message: - HETERO_ILLEGAL_IN_REWRITE_ALT(arg) ::= - "alts with rewrites can't use heterogeneous types left of ->" - - tool/antlr.g - tool/antlr.print.g - tool/assign.types.g - tool/buildnfa.g - tool/define.g - Added syntax for hetero tree node types to token references. - The altered AST structure rippled through different phases. - -July 24, 2007 - -* Deleted DoubleLinkTree.java; CommonTree does that now. - -July 23, 2007 - -* template group outputFile; changed rewrite arg to rewriteMode. - -* added rewrite mode for tree parser build AST. - -July 22, 2007 - -* Kay fixed dynamic scope implementation in lexers. They were not - creating new scope entries on the stack. This is an UNSUPPORTED feature. - -* added getParent and getChildIndex to TreeAdaptor. Added - implementation to CommonTree. It's just too useful having those - parent and child indexes available for rewriting etc... I tried 2x - to make an implementation of tree rewriting w/o this and the - constraints just made it too expensive and complicated. Have to - update adaptors to set parent, child index values. Updated Tree - interface and BaseTree also. Should only affect target developers, - not users. Well, unless they impl Tree. - -* dupNode (via ctor) of CommonTree didn't copy start/stop token indexes. - -TARGET DEVELOPERS WARNING -- AST.stg split with some functionality - going into ASTParser.stg then I added - ASTTreeParser.stg. CodeGenerator - assumes new subgroups exist. - -July 20, 2007 - -* Added AST construction for tree parsers including -> rewrite rules. - Rewrite mode (rewrite=true) alters the tree in place rather than - constructing a whole new tree. Implementation notes: - - org/antlr/runtime/tree/Tree.java - Add methods for parent and child index functionality. - Also added freshenParentAndChildIndexes() which you can use - to ensure that all double linking is set up right after you - manipulate the tree manually. The setChild, addChild, etc. methods do - the proper thing so you shouldn't need this. - Added replaceChildren() to support tree rewrite mode in tree parsers - org/antlr/runtime/tree/BaseTree.java - Updated to set parent and child index stuff. Added replaceChildren - method etc... It still only has a list of children as sole field - but calls methods that subclasses can choose to implement such as - CommonTree. - org/antlr/runtime/tree/CommonTree.java - Added parent and childIndex fields to doubly link. - org/antlr/runtime/tree/TreeAdaptor.java - Added methods for new parent and child index functionality. - Also added method for rewrite mode in tree parsers: - replaceChildren(Object parent, int startChildIndex, - int stopChildIndex, Object t); - Added setChild and deleteChild methods - org/antlr/runtime/tree/BaseTreeAdaptor.java - Moved dupTree here from BaseTree. - Updated rulePostProcessing to deal with parent and child index. - Added setChild and deleteChild implementations - org/antlr/runtime/tree/CommonTreeAdaptor.java - Added methods to deal with the parent and child index for a node. - - org/antlr/runtime/tree/CommonTreeNodeStream.java - Removed token type index and method fillReverseIndex etc... - Probably will move into the tree wizard in the future. - Changed call/seek stack implementation to use IntArray. - Added replaceChildren interface. - org/antlr/runtime/tree/TreeNodeStream.java - Added replaceChildren.
- org/antlr/runtime/tree/UnBufferedTreeNodeStream.java - Added replaceChildren method but no implementation - - codegen/templates/ANTLRCore.sti - Changed rewrite to a better name: rewriteMode. - Added tree level argument to alt, tree so that auto AST - construction can occur while recognizing in tree parsers. - - codegen/templates/Java/AST.stg - Split template group: added two subclasses to handle different - functionality for normal parsing and tree parsing + AST - construction. Tree parsers' default behavior is to dup the tree, - not construct another. Added ASTParser.stg and - ASTTreeParser.stg to handle auto AST construction during - recognition for the two different parser types. I just copied - the token, rule, set, wildcard templates to the subclasses. - The rewrite templates are still in AST.stg. I factored out the - node creation so that the same rewrite templates can be used - for both parsing and tree parsing. - - codegen/templates/Java/ASTParser.stg - The templates needed to build trees with auto construction - during parsing. - codegen/templates/Java/ASTTreeParser.stg - The templates needed to build trees with auto construction - during tree parsing. - codegen/templates/Java/Java.stg - genericParser now has rewriteElementType (Node or Token) so - that the rewrite streams know what kind of elements are inside - during rewrite rule tree construction. - codegen/templates/Java/ST.stg - rewrite attribute name change to rewriteMode - - org/antlr/runtime/debug/DebugTreeAdaptor.java - org/antlr/runtime/debug/DebugTreeNodeStream.java - Updated to handle new interfaces - - test/BaseTest.java - Added test rig update to handle AST construction by tree parsers. - All tree construction runs automatically test the sanity of parent - and child indexes. - test/TestTreeGrammarRewriteAST.java - test/TestTreeNodeStream.java - test/TestTrees.java - new file; tests the new parent and child index stuff in trees. - -July 19, 2007 - -* implemented new unique ID; GC was causing non-unique hash codes. Debugging - tree grammars was messing up. - -* got tree rewrites working in tree grammars. It builds a completely new - tree from the old tree; i.e., you get two trees in memory. W/o a rewrite - rule, the input for that rule is duplicated and returned. -> w/o elements - to the right means don't return anything; i.e., delete. Ooops...way - harder than I thought. Real implementation notes above. - -INCOMPATIBILITY WARNING -- templates have changed; must regen output from - grammars. Runtime libraries have also changed. - Debug event listener interface has changed also. - -July 17, 2007 - -* Added line/charposition to node socket events and event dump so - we have more info during tree parsing. Only works if your - tree adaptor returns a valid Token object from getToken(treenode) - with line/col set. Refactored consumeNode/LN to use deserializeNode(). - -* Fixed mismatched tree node exceptions; for imaginary nodes, it said - "missing null". Now prints the token type we found. - -* Cleaned up exception stuff. MismatchedTreeNodeException was setting - line/col, but only RecognitionException should do that. - -* If an imaginary token gets a mismatch, there is no line info. Search - backwards in the stream, if the input node stream supports it, to find the last - node with good line/col info.
E.g., - -ANTLRv3Tree.g: node from after line 156:72 mismatched tree node: EOA expecting - - which used to be: - -ANTLRv3Tree.g: node from line 0:0 mismatched tree node: null expecting - -* mismatched tree node exceptions were not sent to the debug event stream. - Due to a type being slightly different on recoverFromMismatchedToken() - in DebugTreeParser. Was calling the BaseRecognizer version, not the subclass. - Now we get: - - 9459: Recognition exception MismatchedTreeNodeException(0!=0) - -* List labels were not allowed as root nodes in tree rewrites like - ^($listlabel ...). Had to add a template to AST.stg: - - /** Gen ^($label ...) where label+=... */ - rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot - - -July 16, 2007 - -* fixed bug where nextNode in RewriteRuleSubtreeStream was dup'ing too much, - screwing up the debug event stream. Also there was a bug in how - the rewrite tree stream stuff decided to dup nodes. - -* fixed bug in LT for tree parsing; text was not transmitted properly; - only single words worked. - -* made the decision for a rule put line/col on the colon, not the first token of the first alt. - -* remote ProxyToken now emits the token index for easier debugging when looking - at AW's event stream. For example, the @5 here is the token index: - - 31 Consume hidden [ /<64>,channel=99,30:7, @5] - -* same is true for consume nodes now: - - 25586 Consume node [')'/, <44>, 4712040,@1749] 25 - - When debugging tree parsers, it helps to track errors when you know - what corresponding input symbol created this tree node. - -* Changed debug events associated with trees quite a bit. Passes nodes around - now rather than text, type, unique IDs etc... Mostly affects internal stuff. - Target developers will have some work in their runtime to do to match - this change. :( BUT, there is only a slight tweak in the Dbg.stg - and ASTDbg.stg templates. - The interface just didn't make sense as it was. If you turn on debugging, and - want to track a node creation, you want the node pointer, not its ID, - text, etc... - Added ProxyTree for passing across the socket. Has line/charpos and tokenIndex - -July 15, 2007 - -* added null ptr protection in CommonTreeAdaptor. - -July 14, 2007 - -* null child in TreeAdaptor does nothing now. Changed interface and - implementation. Changed DebugTreeAdaptor to not fire events on null add - as well. - -July 12, 2007 - -* added get method for the line/col to DFA map in Grammar.java - -July 7, 2007 - -* fixed wrong order of test for exceptions in Lexer.getErrorMessage() - -June 28, 2007 - -* Added ability to set the port number in the constructor for the debug parser. - -June 5, 2007 - -* Changed (hidden) option -verbose to -Xnfastates; this just prints out the NFA states along each nondeterministic path for nondeterminism warnings. - -May 18, 2007 - -* there were some dependencies on org.antlr.* that I removed from - org.antlr.runtime.* - -3.0 final - May 17, 2007 - -May 14, 2007 - -* Auto backtracking didn't work with ! and ^ suffixes on the first element - of an alt. - -* Auto backtracking didn't work with an action as the first element. - -May 10, 2007 - -* turn off the warning about no locale messages: - no such locale file org/antlr/tool/templates/messages/languages/ru.stg retrying with English locale - -May 5, 2007 - -* moving org.antlr.runtime to runtime/Java/src/org/... Other target - source / libs are under runtime/targetname. - -May 4, 2007 - -* You could not use arguments on a token reference that was a root in a - tree rewrite rule like -> ^(ID[args] ...). - -May 3, 2007 - -* Fixed ANTLR-82.
Actions after the root were considered part of - an optional child. They were not always executed. Required a change - to the ANTLRCore.sti interface for tree() template. - -May 2, 2007 - -* Fixed ANTLR-117. Wasn't building decisions properly for subrules in - syntactic predicates. - -April 22, 2007 - -* Made build.xml ref all jars in antlr lib. Thanks to Miguel Ping. - -* Fixed ANTLR-11 - -* Now labels on ranges and such in lexer work properly. - -* ActionAnalysisLexer was in wrong package. - -April 21, 2007 - -* Pushing a huge update that fixes: - http://www.antlr.org/browse/ANTLR-112 - http://www.antlr.org/browse/ANTLR-110 - http://www.antlr.org/browse/ANTLR-109 - http://www.antlr.org/browse/ANTLR-103 - http://www.antlr.org/browse/ANTLR-97 - http://www.antlr.org/browse/ANTLR-113 - http://www.antlr.org/browse/ANTLR-66 - http://www.antlr.org/browse/ANTLR-98 - http://www.antlr.org/browse/ANTLR-24 - http://www.antlr.org/browse/ANTLR-114 - http://www.antlr.org/browse/ANTLR-5 - http://www.antlr.org/browse/ANTLR-6 - - Basically, I gutted the way AST rewrites work. MUCH better. - -* Fixed lots of little label issues in the lexer. Couldn't do x+=ID - in lexer, for example. Fixed ANTLR-114, ANTLR-112 - -* Isolated EOT transition in lexer generated dangling else clause. - Fixed ANTLR-113. - -April 17, 2007 - -* Fixed a major problem with gated semantic predicates. Added more - unit tests. - -* Fixed bug in cyclic DFA with syntactic predicates. Wasn't rewinding - properly. Further, mark() in token stream did not fill buffer so - when you rewound back to last marker index was -1 not 0. At same time - I fixed ANTLR-103. Syn preds evaluated only once now. - -* Altered code gen file writing so it writes directly to a file - instead of building a big string and then writing that out. Should - be faster and much less memory intensive. - -* Fixed so antlr writes files to correct location again. See: - -http://www.antlr.org/wiki/pages/viewpage.action?pageId=1862 - -3.0b7 - April 12, 2007 - -April 10, 2007 - -* Allows -> {...} actions now when building ASTs. Fixed ANTLR-14. - -* Allows ! on sets and wildcard now during output=AST option. Fixed ANTLR-17. - -* Fixed ANTLR-92 bug. Couldn't use sets with -> tree construction. - -* No lexer rule for a token type is now a warning. - -* Fixed set labels in lexer; ANTLR-60 bug - -* Fixed problem with duplicate state variable definitions in switch-case - -April 9, 2007 - -* Gated predicates didn't work properly in cyclic DFA. - -April 7, 2007 - -* Couldn't have more than one set per rule it seems. Fixed. - -April 3, 2007 - -* Fix a problem in my unused label optimization. Added new - pass over actions to examine them. - -* RuleReturnScope has method back: - /** Has a value potentially if output=template; Don't use StringTemplate - * type as it then causes a dependency with ST lib. - */ - public Object getTemplate() { return null; } - -March 30, 2007 - -* Fixed ANTLR-8. Labels to rules w/o return values caused compile errors. - -* Fixed ANTLR-89; semantic predicates in lexer sometimes - caused exception in code gen. - -* Fixed ANTLR-36; remove runtime dependency with ST - -March 29, 2007 - -* Over last few days, I've gutted how ANTLR handles sets of chars or - tokens. I cleaned up a lot of stuff in the grammars and added lots - of unit tests. - -March 26, 2007 - -* CommonTreeNodeStream didn't push correctly; couldn't handle very - deeply nested trees. - -* Fixed bug that E : 'a' 'b' ; made E be seen as an alias of 'a'. 
- -March 22, 2007 - -* Working with Egor Ushakov from Sun Optimization / NetBeans team I - made all the Java lexer transition tables static w/o screwing up - ability to reference semantic predicates etc... Only changed Java.stg - -* cached text string in CommonToken.getText(); saves on repeated calls; - Java mode. - -* made all generated methods final; saves a few percent speed according to - Egor Ushakov (Java only). - -* removed most assignments from each lexer rule and even the Lexer.emit() - call! All done in nextToken now. Saves on code gen size and a wee bit of - execution speed probably. Variables became fields: type, channel, line, - etc... Now emit() needs no args even. Again, Egor helped on this. - -March 17, 2007 - -* Jonathan DeKlotz updated C# templates to be 3.0b6 current - -March 14, 2007 - -* Manually-specified (...)=> force backtracking eval of that predicate. - backtracking=true mode does not however. Added unit test. - -March 14, 2007 - -* Fixed bug in lexer where ~T didn't compute the set from rule T. - -* Added -Xnoinlinedfa make all DFA with tables; no inline prediction with IFs - -* Fixed http://www.antlr.org/browse/ANTLR-80. - Sem pred states didn't define lookahead vars. - -* Fixed http://www.antlr.org/browse/ANTLR-91. - When forcing some acyclic DFA to be state tables, they broke. - Forcing all DFA to be state tables should give same results. - -March 12, 2007 - -* setTokenSource in CommonTokenStream didn't clear tokens list. - setCharStream calls reset in Lexer. - -* Altered -depend. No longer printing grammar files for multiple input - files with -depend. Doesn't show T__.g temp file anymore. Added - TLexer.tokens. Added .h files if defined. - -February 11, 2007 - -* Added -depend command-line option that, instead of processing files, - it shows you what files the input grammar(s) depend on and what files - they generate. For combined grammar T.g: - - $ java org.antlr.Tool -depend T.g - - You get: - - TParser.java : T.g - T.tokens : T.g - T__.g : T.g - - Now, assuming U.g is a tree grammar ref'd T's tokens: - - $ java org.antlr.Tool -depend T.g U.g - - TParser.java : T.g - T.tokens : T.g - T__.g : T.g - U.g: T.tokens - U.java : U.g - U.tokens : U.g - - Handles spaces by escaping them. Pays attention to -o, -fo and -lib. - Dir 'x y' is a valid dir in current dir. - - $ java org.antlr.Tool -depend -lib /usr/local/lib -o 'x y' T.g U.g - x\ y/TParser.java : T.g - x\ y/T.tokens : T.g - x\ y/T__.g : T.g - U.g: /usr/local/lib/T.tokens - x\ y/U.java : U.g - x\ y/U.tokens : U.g - - You have API access via org.antlr.tool.BuildDependencyGenerator class: - getGeneratedFileList(), getDependenciesFileList(). You can also access - the output template: getDependencies(). The file - org/antlr/tool/templates/depend.stg contains the template. You can - modify as you want. File objects go in so you can play with path etc... - -February 10, 2007 - -* no more .gl files generated. All .g all the time. - -* changed @finally to be @after and added a finally clause to the - exception stuff. I also removed the superfluous "exception" - keyword. Here's what the new syntax looks like: - - a - @after { System.out.println("ick"); } - : 'a' - ; - catch[RecognitionException e] { System.out.println("foo"); } - catch[IOException e] { System.out.println("io"); } - finally { System.out.println("foobar"); } - - @after executes after bookkeeping to set $rule.stop, $rule.tree but - before scopes pop and any memoization happens. 
Dynamic scopes and - memoization are still in generated finally block because they must - exec even if error in rule. The @after action and tree setting - stuff can technically be skipped upon syntax error in rule. [Later - we might add something to finally to stick an ERROR token in the - tree and set the return value.] Sequence goes: set $stop, $tree (if - any), @after (if any), pop scopes (if any), memoize (if needed), - grammar finally clause. Last 3 are in generated code's finally - clause. - -3.0b6 - January 31, 2007 - -January 30, 2007 - -* Fixed bug in IntervalSet.and: it returned the same empty set all the time - rather than new empty set. Code altered the same empty set. - -* Made analysis terminate faster upon a decision that takes too long; - it seemed to keep doing work for a while. Refactored some names - and updated comments. Also made it terminate when it realizes it's - non-LL(*) due to recursion. just added terminate conditions to loop - in convert(). - -* Sometimes fatal non-LL(*) messages didn't appear; instead you got - "antlr couldn't analyze", which is actually untrue. I had the - order of some prints wrong in the DecisionProbe. - -* The code generator incorrectly detected when it could use a fixed, - acyclic inline DFA (i.e., using an IF). Upon non-LL(*) decisions - with predicates, analysis made cyclic DFA. But this stops - the computation detecting whether they are cyclic. I just added - a protection in front of the acyclic DFA generator to avoid if - non-LL(*). Updated comments. - -January 23, 2007 - -* Made tree node streams use adaptor to create navigation nodes. - Thanks to Emond Papegaaij. - -January 22, 2007 - -* Added lexer rule properties: start, stop - -January 1, 2007 - -* analysis failsafe is back on; if a decision takes too long, it bails out - and uses k=1 - -January 1, 2007 - -* += labels for rules only work for output option; previously elements - of list were the return value structs, but are now either the tree or - StringTemplate return value. You can label different rules now - x+=a x+=b. - -December 30, 2006 - -* Allow \" to work correctly in "..." template. - -December 28, 2006 - -* errors that are now warnings: missing AST label type in trees. - Also "no start rule detected" is warning. - -* tree grammars also can do rewrite=true for output=template. - Only works for alts with single node or tree as alt elements. - If you are going to use $text in a tree grammar or do rewrite=true - for templates, you must use in your main: - - nodes.setTokenStream(tokens); - -* You get a warning for tree grammars that do rewrite=true and - output=template and have -> for alts that are not simple nodes - or simple trees. new unit tests in TestRewriteTemplates at end. - -December 27, 2006 - -* Error message appears when you use -> in tree grammar with - output=template and rewrite=true for alt that is not simple - node or tree ref. - -* no more $stop attribute for tree parsers; meaningless/useless. - Removed from TreeRuleReturnScope also. - -* rule text attribute in tree parser must pull from token buffer. - Makes no sense otherwise. added getTokenStream to TreeNodeStream - so rule $text attr works. CommonTreeNodeStream etc... now let - you set the token stream so you can access later from tree parser. - $text is not well-defined for rules like - - slist : stat+ ; - - because stat is not a single node nor rooted with a single node. - $slist.text will get only first stat. I need to add a warning about - this... 
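To make the setTokenStream requirement above concrete, here is a hedged sketch of the main-program wiring; all grammar-derived names (TLexer, TParser, TWalker, prog) are hypothetical:

    import org.antlr.runtime.*;
    import org.antlr.runtime.tree.*;

    public class Main {
        public static void main(String[] args) throws Exception {
            TLexer lexer = new TLexer(new ANTLRInputStream(System.in));
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            TParser parser = new TParser(tokens);
            TParser.prog_return r = parser.prog();
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(r.tree);
            nodes.setTokenStream(tokens); // needed for $text / rewrite=true
            TWalker walker = new TWalker(nodes);
            walker.prog();
        }
    }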
- -* Fixed http://www.antlr.org/browse/ANTLR-76 for Java. - Enhanced TokenRewriteStream so it accepts any object; converts - to string at the last second. Allows you to rewrite with StringTemplate - templates now :) - -* added rewrite option that makes -> template rewrites do replace ops for - TokenRewriteStream input stream. In output=template and rewrite=true mode - same as before 'cept that the parser does - - ((TokenRewriteStream)input).replace( - ((Token)retval.start).getTokenIndex(), - input.LT(-1).getTokenIndex(), - retval.st); - - after each rewrite so that the input stream is altered. Later refs to - $text will have rewrites. Here's a sample test program for grammar Rew. - - FileReader groupFileR = new FileReader("Rew.stg"); - StringTemplateGroup templates = new StringTemplateGroup(groupFileR); - ANTLRInputStream input = new ANTLRInputStream(System.in); - RewLexer lexer = new RewLexer(input); - TokenRewriteStream tokens = new TokenRewriteStream(lexer); - RewParser parser = new RewParser(tokens); - parser.setTemplateLib(templates); - parser.program(); - System.out.println(tokens.toString()); - groupFileR.close(); - -December 26, 2006 - -* BaseTree.dupTree didn't dup recursively. - -December 24, 2006 - -* Cleaned up some comments and removed field treeNode - from MismatchedTreeNodeException class. It is "node" in - RecognitionException. - -* Changed type from Object to BitSet for expecting fields in - MismatchedSetException and MismatchedNotSetException - -* Cleaned up error printing in lexers and the messages that it creates. - -* Added this to TreeAdaptor: - /** Return the token object from which this node was created. - * Currently used only for printing an error message. - * The error display routine in BaseRecognizer needs to - * display where in the input the error occurred. If your - * tree implementation does not store information that can - * lead you to the token, you can create a token filled with - * the appropriate information and pass that back. See - * BaseRecognizer.getErrorMessage(). - */ - public Token getToken(Object t); - -December 23, 2006 - -* made BaseRecognizer.displayRecognitionError nonstatic so people can - override it. Not sure why it was static before. - -* Removed state/decision message that comes out of no - viable alternative exceptions, as that was too much. - Removed the decision number from the early exit exception - also. During development, you can simply override - displayRecognitionError from BaseRecognizer to add the stuff - back in if you want. - -* made output go to an output method you can override: emitErrorMessage() - -* general cleanup of the error emitting code in BaseRecognizer. Lots - more stuff you can override: getErrorHeader, getTokenErrorDisplay, - emitErrorMessage, getErrorMessage. - -December 22, 2006 - -* Altered TreeParser.matchAny() so that it skips entire trees if - the node has children; otherwise it skips one node. Now this works to - skip the entire body of a function if it's a single-rooted subtree: - ^(FUNC name=ID arg=ID .) - -* Added "reverse index" from node to stream index. Override - fillReverseIndex() in CommonTreeNodeStream if you want to change it. - Use getNodeIndex(node) to find the stream index for a specific tree node. - See getNodeIndex(), reverseIndex(Set tokenTypes), - reverseIndex(int tokenType), fillReverseIndex(). The indexing - costs time and memory to fill, but pulling stuff out will be lots - faster as it can jump from a node ptr straight to a stream index.
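A hedged usage sketch of that reverse index, using only the methods named in the entry above; the tree, node variable, and ID token type are hypothetical:

    // Assumes 'tree' is an AST root and ID is a generated token type.
    CommonTreeNodeStream nodes = new CommonTreeNodeStream(tree);
    nodes.reverseIndex(ID);                   // pre-index all ID nodes
    int i = nodes.getNodeIndex(someIdNode);   // node ptr -> stream index
    nodes.seek(i);                            // jump straight to that node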
- -* Added TreeNodeStream.get(index) to make it easier for interpreters to - jump around in tree node stream. - -* New CommonTreeNodeStream buffers all nodes in stream for fast jumping - around. It now has push/pop methods to invoke other locations in - the stream for building interpreters. - -* Moved CommonTreeNodeStream to UnBufferedTreeNodeStream and removed - Iterator implementation. moved toNodesOnlyString() to TestTreeNodeStream - -* [BREAKS ANY TREE IMPLEMENTATION] - made CommonTreeNodeStream work with any tree node type. TreeAdaptor - now implements isNil so must add; trivial, but does break back - compatibility. - -December 17, 2006 - -* Added traceIn/Out methods to recognizers so that you can override them; - previously they were in-line print statements. The message has also - been slightly improved. - -* Factored BuildParseTree into debug package; cleaned stuff up. Fixed - unit tests. - -December 15, 2006 - -* [BREAKS ANY TREE IMPLEMENTATION] - org.antlr.runtime.tree.Tree; needed to add get/set for token start/stop - index so CommonTreeAdaptor can assume Tree interface not CommonTree - implementation. Otherwise, no way to create your own nodes that satisfy - Tree because CommonTreeAdaptor was doing - - public int getTokenStartIndex(Object t) { - return ((CommonTree)t).startIndex; - } - - Added to Tree: - - /** What is the smallest token index (indexing from 0) for this node - * and its children? - */ - int getTokenStartIndex(); - - void setTokenStartIndex(int index); - - /** What is the largest token index (indexing from 0) for this node - * and its children? - */ - int getTokenStopIndex(); - - void setTokenStopIndex(int index); - -December 13, 2006 - -* Added org.antlr.runtime.tree.DOTTreeGenerator so you can generate DOT - diagrams easily from trees. - - CharStream input = new ANTLRInputStream(System.in); - TLexer lex = new TLexer(input); - CommonTokenStream tokens = new CommonTokenStream(lex); - TParser parser = new TParser(tokens); - TParser.e_return r = parser.e(); - Tree t = (Tree)r.tree; - System.out.println(t.toStringTree()); - DOTTreeGenerator gen = new DOTTreeGenerator(); - StringTemplate st = gen.toDOT(t); - System.out.println(st); - -* Changed the way mark()/rewind() work in CommonTreeNode stream to mirror - more flexible solution in ANTLRStringStream. Forgot to set lastMarker - anyway. Now you can rewind to non-most-recent marker. - -December 12, 2006 - -* Temp lexer now end in .gl (T__.gl, for example) - -* TreeParser suffix no longer generated for tree grammars - -* Defined reset for lexer, parser, tree parser; rewinds the input stream also - -December 10, 2006 - -* Made Grammar.abortNFAToDFAConversion() abort in middle of a DFA. - -December 9, 2006 - -* fixed bug in OrderedHashSet.add(). It didn't track elements correctly. - -December 6, 2006 - -* updated build.xml for future Ant compatibility, thanks to Matt Benson. - -* various tests in TestRewriteTemplate and TestSyntacticPredicateEvaluation - were using the old 'channel' vs. new '$channel' notation. - TestInterpretedParsing didn't pick up an earlier change to CommonToken. - Reported by Matt Benson. - -* fixed platform dependent test failures in TestTemplates, supplied by Matt - Benson. - -November 29, 2006 - -* optimized semantic predicate evaluation so that p||!p yields true. - -November 22, 2006 - -* fixed bug that prevented var = $rule.some_retval from working in anything - but the first alternative of a rule or subrule. 
- -* attribute names containing digits were not allowed; this is now fixed, - allowing attributes like 'name1' but not '1name1'. - -November 19, 2006 - -* Removed LeftRecursionMessage and apparatus because it seems that I check - for left recursion upfront before analysis and everything gets specified as - recursion cycles at this point. - -November 16, 2006 - -* TokenRewriteStream.replace was not passing programName to the next method. - -November 15, 2006 - -* updated DOT files for DFA generation to make smaller circles. - -* made epsilon edges italics in the NFA diagrams. - -3.0b5 - November 15, 2006 - -The biggest thing is that your grammar file names must match the grammar name -inside (your generated class names will also be different) and we use -$channel=HIDDEN now instead of channel=99 inside lexer actions. -Should be compatible other than that. Please look at the complete list of -changes. - -November 14, 2006 - -* Force token index to be -1 for CommonToken in case not set. - -November 11, 2006 - -* getUniqueID for TreeAdaptor now uses identityHashCode instead of hashCode. - -November 10, 2006 - -* No grammar nondeterminism warning now when wildcard '.' is the final alt. - Examples: - - a : A | B | . ; - - A : 'a' - | . - ; - - SL_COMMENT - : '//' (options {greedy=false;} : .)* '\r'? '\n' - ; - - SL_COMMENT2 - : '//' (options {greedy=false;} : 'x'|.)* '\r'? '\n' - ; - - -November 8, 2006 - -* Syntactic predicates did not get hoisted properly upon non-LL(*) decision. Other hoisting issues fixed. Cleaned up code. - -* Removed failsafe that checked whether I'm spending too much time on a single DFA; I don't think we need it anymore. - -November 3, 2006 - -* $text, $line, etc... were not working in assignments. Fixed and added - test case. - -* $label.text translated to label.getText in lexer even if label was on a char - -November 2, 2006 - -* Added error if you don't specify what the AST type is; actions in tree - grammar won't work without it. - - $ cat x.g - tree grammar x; - a : ID {String s = $ID.text;} ; - - ANTLR Parser Generator Early Access Version 3.0b5 (??, 2006) 1989-2006 - error: x.g:0:0: (152) tree grammar x has no ASTLabelType option - -November 1, 2006 - -* $text, $line, etc... were not working properly within lexer rule. - -October 31, 2006 - -* Finally actions now execute before dynamic scopes are popped in the - rule. Previously it was not possible to access the rule's scoped variables - in a finally action. - -October 29, 2006 - -* Altered ActionTranslator to emit errors on setting read-only attributes - such as $start, $stop, $text in a rule. Also forbid setting any attributes - in rules/tokens referenced by a label or name. - Setting dynamic scopes' attributes and your own parameter attributes - is legal. - -October 27, 2006 - -* Altered how ANTLR figures out what decision is associated with which - block of grammar. Makes ANTLRWorks correctly find the DFA for a block. - -October 26, 2006 - -* Fixed bug where EOT transitions led to no NFA configs in a DFA state, - yielding an error in DFA table generation. - -* renamed action.g to ActionTranslator.g - the ActionTranslator class is now called ActionTranslatorLexer, as ANTLR - generates this classname now. Fixed rest of codebase accordingly.
- -* added rules recognizing setting of scopes' attributes to ActionTranslator.g - the Objective C target needed access to the right-hand side of the assignment - in order to generate correct code - -* changed ANTLRCore.sti to reflect the new mandatory templates to support the above - namely: scopeSetAttributeRef, returnSetAttributeRef and the ruleSetPropertyRef_* - templates, with the exception of ruleSetPropertyRef_text. we cannot set this attribute - -October 19, 2006 - -* Fixed 2 bugs in DFA conversion that caused exceptions. - altered functionality of getMinElement so it ignores elements<0. - -October 18, 2006 - -* moved resetStateNumbersToBeContiguous() to after issuing of warnings; - an internal error in that routine should make more sense as issues - with decision will appear first. - -* fixed cut/paste bug I introduced when fixed EOF in min/max - bug. Prevented C grammar from working briefly. - -October 17, 2006 - -* Removed a failsafe that seems to be unnecessary that ensure DFA didn't - get too big. It was resulting in some failures in code generation that - led me on quite a strange debugging trip. - -October 16, 2006 - -* Use channel=HIDDEN not channel=99 to put tokens on hidden channel. - -October 12, 2006 - -* ANTLR now has a customizable message format for errors and warnings, - to make it easier to fulfill requirements by IDEs and such. - The format to be used can be specified via the '-message-format name' - command line switch. The default for name is 'antlr', also available - at the moment is 'gnu'. This is done via StringTemplate, for details - on the requirements look in org/antlr/tool/templates/messages/formats/ - -* line numbers for lexers in combined grammars are now reported correctly. - -September 29, 2006 - -* ANTLRReaderStream improperly checked for end of input. - -September 28, 2006 - -* For ANTLRStringStream, LA(-1) was off by one...gave you LA(-2). - -3.0b4 - August 24, 2006 - -* error when no rules in grammar. doesn't crash now. - -* Token is now an interface. - -* remove dependence on non runtime classes in runtime package. - -* filename and grammar name must be same Foo in Foo.g. Generates FooParser, - FooLexer, ... Combined grammar Foo generates Foo$Lexer.g which generates - FooLexer.java. tree grammars generate FooTreeParser.java - -August 24, 2006 - -* added C# target to lib, codegen, templates - -August 11, 2006 - -* added tree arg to navigation methods in treeadaptor - -August 07, 2006 - -* fixed bug related to (a|)+ on end of lexer rules. crashed instead - of warning. - -* added warning that interpreter doesn't do synpreds yet - -* allow different source of classloader: -ClassLoader cl = Thread.currentThread().getContextClassLoader(); -if ( cl==null ) { - cl = this.getClass().getClassLoader(); -} - - -July 26, 2006 - -* compressed DFA edge tables significantly. All edge tables are - unique. The transition table can reuse arrays. Look like this now: - - public static readonly DFA30_transition0 = - new short[] { 46, 46, -1, 46, 46, -1, -1, -1, -1, -1, -1, -1,...}; - public static readonly DFA30_transition1 = - new short[] { 21 }; - public static readonly short[][] DFA30_transition = { - DFA30_transition0, - DFA30_transition0, - DFA30_transition1, - ... - }; - -* If you defined both a label like EQ and '=', sometimes the '=' was - used instead of the EQ label. 
-
-* made headerFile template have the same arg list as outputFile for
-  consistency
-
-* outputFile, lexer, genericParser, parser, treeParser templates
-  reference cyclicDFAs attribute which was no longer used after I
-  started the new table-based DFA. I made cyclicDFADescriptors an
-  argument to outputFile and headerFile (only). I think this is
-  correct as only OO languages will want the DFA in the recognizer.
-  At the top level, C and friends can use it. Changed the name to use
-  cyclicDFAs again as it's probably a better name. Removed the parameter
-  from the lexer, ... For example, my parser template says this now:
-
-
-* made all token ref token types go thru code gen's
-  getTokenTypeAsTargetLabel()
-
-* no more computing DFA transition tables for acyclic DFA.
-
-July 25, 2006
-
-* fixed a place where I was adding syn predicates into rewrite stuff.
-
-* turned off invalid token index warning in AW support; had a problem.
-
-* bad location event generated with -debug for synpreds in autobacktrack
-  mode.
-
-July 24, 2006
-
-* changed runtime.DFA so that it treats all chars and token types as
-  char (unsigned 16 bit int). -1 becomes '\uFFFF' then, or 65535.
-
-* changed MAX_STATE_TRANSITIONS_FOR_TABLE to be 65534 by default
-  now. This means that all states can use a table to do transitions.
-
-* was not making synpreds on (C)* type loops with backtrack=true
-
-* was copying tree stuff and actions into synpreds with backtrack=true
-
-* was making synpreds even on single alt rules / blocks with backtrack=true
-
-3.0b3 - July 21, 2006
-
-* ANTLR fails to analyze complex decisions much less frequently. It
-  turns out that the set of decisions for which ANTLR fails (times
-  out) is the same set (so far) of non-LL(*) decisions. Moreover, I'm
-  able to detect this situation quickly and report it rather than timing
-  out. Errors look like:
-
-  java.g:468:23: [fatal] rule concreteDimensions has non-LL(*)
-  decision due to recursive rule invocations in alts 1,2. Resolve
-  by left-factoring or using syntactic predicates with fixed k
-  lookahead or use backtrack=true option.
-
-  This message only appears when k=*.
-
-* Shortened no viable alt messages to not include the decision
-  description:
-
-[compilationUnit, declaration]: line 8:8 decision=<<67:1: declaration
-: ( ( fieldDeclaration )=> fieldDeclaration | ( methodDeclaration )=>
-methodDeclaration | ( constructorDeclaration )=>
-constructorDeclaration | ( classDeclaration )=> classDeclaration | (
-interfaceDeclaration )=> interfaceDeclaration | ( blockDeclaration )=>
-blockDeclaration | emptyDeclaration );>> state 3 (decision=14) no
-viable alt; token=[@1,184:187='java',<122>,8:8]
-
-  which was too long and hard to read.
-
-July 19, 2006
-
-* Code gen bug: states with no emanating edges were ignored by ST.
-  Now an empty list is used.
-
-* Added grammar parameter to recognizer templates so they can access
-  properties like getName(), ...
-
-July 10, 2006
-
-* Fixed the gated pred merged state bug. Added unit test.
-
-* added new method to Target: getTokenTypeAsTargetLabel()
-
-July 7, 2006
-
-* I was doing an AND instead of OR in the gated predicate stuff.
-  Thanks to Stephen Kou!
-
-* Reduce op for combining predicates was insanely slow sometimes and
-  didn't actually work well. Now it's fast and works.
-
-* There is a bug in merging of DFA stop states related to gated
-  preds...turned it off for now.
-
-3.0b2 - July 5, 2006
-
-July 5, 2006
-
-* token emission not properly protected in lexer filter mode.
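-
-  The 16-bit char encoding described in the July 24 note above can be
-  checked directly (an illustrative sketch, not part of the original
-  changelog):
-
-    char eof = (char)-1;           // char/token type -1 wraps to '\uFFFF'
-    System.out.println((int)eof);  // prints 65535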
-
-* EOT, EOT DFA state transition tables should be init'd to -1 (I was only
-  doing this for compressed tables). Fixed.
-
-* in trace mode, exit method not shown for memoized rules
-
-* added -Xmaxdfaedges to allow you to increase the number of edges allowed
-  for a single DFA state before it becomes "special" and can't fit in
-  a simple table.
-
-* Bug in tables. Shorts are signed, so min/max tables for DFA are now
-  char[]. Bizarre.
-
-July 3, 2006
-
-* Added a method to reset the tool error state for the current thread.
-  See ErrorManager.java
-
-* [Got this working properly today] backtrack mode that lets you type
-  in any old crap and ANTLR will backtrack if it can't figure out what
-  you meant. No errors are reported by antlr during analysis. It
-  implicitly adds a syn pred in front of every production, using them
-  only if static grammar LL(*) analysis fails. Syn pred code is not
-  generated if the pred is not used in a decision.
-
-  This is essentially a rapid prototyping mode.
-
-* Added backtracking report to the -report option
-
-* Added NFA->DFA conversion early termination report to the -report option
-
-* Added grammar level k and backtrack options to -report
-
-* Added a dozen unit tests to test autobacktrack NFA construction.
-
-* If you are using filter mode, you must manually use option
-  memoize=true now.
-
-July 2, 2006
-
-* Added k=* option so you can set k=2, for example, on the whole grammar,
-  but an individual decision can be LL(*).
-
-* memoize option for grammars, rules, blocks. Removed the -nomemo
-  cmd-line option
-
-* bug in DOT generator for DFA; fixed.
-
-* runtime.DFA reported errors even when backtracking
-
-July 1, 2006
-
-* Added -X option list to help
-
-* Syn preds were being hoisted into other rules, causing lots of extra
-  backtracking.
-
-June 29, 2006
-
-* unnecessary files removed during build.
-
-* Matt Benson updated build.xml
-
-* Detecting use of synpreds in analysis now instead of codegen. In
-  this way, I can avoid analyzing decisions in synpreds for synpreds
-  not used in a DFA for a real rule. This is used to optimize things
-  for the backtrack option.
-
-* Code gen must add _fragment or whatever to the end of the pred name in
-  template synpredRule to avoid having ANTLR know anything about
-  method names.
-
-* Added -IdbgST option to emit ST delimiters at start/stop of all
-  templates spit out.
-
-June 28, 2006
-
-* Tweaked message when ANTLR cannot handle analysis.
-
-3.0b1 - June 27, 2006
-
-June 24, 2006
-
-* syn preds no longer generate little static classes; they also don't
-  generate a whole bunch of extra crap in the rules built to test syn
-  preds. Removed GrammarFragmentPointer class from runtime.
-
-June 23-24, 2006
-
-* added output option to -report output.
-
-* added profiling info:
-    Number of rule invocations in "guessing" mode
-    number of rule memoization cache hits
-    number of rule memoization cache misses
-
-* made DFA DOT diagrams go left to right not top to bottom
-
-* I now try to resolve recursion overflow states with
-  semantic/syntactic predicates if they exist. The DFA is then
-  deterministic rather than simply resolving by choosing the first
-  nondeterministic alt.
-  I used to generate errors:
-
-~/tmp $ java org.antlr.Tool -dfa t.g
-ANTLR Parser Generator   Early Access Version 3.0b2 (July 5, 2006)  1989-2006
-t.g:2:5: Alternative 1: after matching input such as A A A A A decision cannot predict what comes next due to recursion overflow to b from b
-t.g:2:5: Alternative 2: after matching input such as A A A A A decision cannot predict what comes next due to recursion overflow to b from b
-
-  Now, I use predicates if available and emit no warnings.
-
-* made sem preds share accept states. Previously, multiple preds in a
-  decision forked new accepts each time for each nondet state.
-
-June 19, 2006
-
-* Need parens around the prediction expressions in templates.
-
-* Referencing $ID.text in an action forced bad code gen in lexer rule ID.
-
-* Fixed a bug in how predicates are collected. The definition of
-  "last predicated alternative" was incorrect in the analysis. Further,
-  gated predicates incorrectly missed a case where an edge should become
-  true (a tautology).
-
-* Removed an unnecessary input.consume() reference in the runtime/DFA class.
-
-June 14, 2006
-
-* -> ($rulelabel)? didn't generate proper code for ASTs.
-
-* bug in code gen (did not compile)
-
-  a : ID -> ID
-    | ID -> ID
-    ;
-
-  The problem is the repeated ref to ID from the left side. Juergen pointed
-  this out.
-
-* use of tokenVocab with a missing file yielded an exception
-
-* (A|B)=> foo yielded an exception as (A|B) is a set not a block. Fixed.
-
-* Didn't set ID1= and INT1= for this alt:
-    | ^(ID INT+ {System.out.print(\"^(\"+$ID+\" \"+$INT+\")\");})
-
-* Fixed so repeated dangling state errors only occur once, like:
-
-  t.g:4:17: the decision cannot distinguish between alternative(s) 2,1 for at least one input sequence
-
-* tracking of rule elements was on (making list defs at the start of the
-  method) with templates instead of just with ASTs. Turned off.
-
-* Doesn't crash when you give it a missing file now.
-
-* -report: add output info: how many LL(1) decisions.
-
-June 13, 2006
-
-* ^(ROOT ID?) didn't work; nor did any other nullable child list such as
-  ^(ROOT ID* INT?). Now, I check to see if the child list is nullable using
-  Grammar.LOOK() and, if so, I generate an "IF lookahead is DOWN" gate
-  around the child list so the whole thing is optional.
-
-* Fixed a bug in LOOK that made it not look through nullable rules.
-
-* Using AST suffixes or -> rewrite syntax now gives an error w/o a grammar
-  output option. Used to crash ;)
-
-* References to EOF ended up with improper -1 refs instead of EOF in output.
-
-* didn't warn of ambig ref to $expr in rewrite; fixed.
-
-  list
-      : '[' expr 'for' type ID 'in' expr ']'
-        -> comprehension(expr={$expr.st},type={},list={},i={})
-      ;
-
-June 12, 2006
-
-* EOF works in the parser as a token name.
-
-* Rule b:(A B?)*; didn't display properly in AW due to the way ANTLR
-  generated the NFA.
-
-* "scope x;" in a rule for unknown x gave no error. Fixed. Added unit test.
-
-* Label types for refs to start/stop in tree parsers and other parsers were
-  not used. Lots of casting. Ick. Fixed.
-
-* couldn't refer to $tokenlabel in isolation, but it's needed so we can test
-  whether something was matched. Fixed.
-
-* Lots of little bugs fixed in $x.y, %... translation due to the new
-  action translator.
-
-* Improperly tracked block nesting level; the result was that you couldn't
-  see $ID in the action of rule "a : A+ | ID {Token t = $ID;} | C ;"
-
-* a : ID ID {$ID.text;} ; did not get a warning about the ambiguous $ID ref.
-
-* No error was found on $COMMENT.text:
-
-  COMMENT
-      : '/*' (options {greedy=false;} : . )* '*/'
-        {System.out.println("found method "+$COMMENT.text);}
-      ;
-
-  $enclosinglexerrule scope does not exist. Use text or setText() here.
-
-June 11, 2006
-
-* Single return values are initialized now to default or to your spec.
-
-* cleaned up input stream stuff. Added ANTLRReaderStream, ANTLRInputStream
-  and refactored. You can specify encodings now on ANTLRFileStream (and
-  ANTLRInputStream).
-
-* You can set the text local var now in a lexer rule and the token gets that
-  text. start/stop indexes are still set for the token.
-
-* Changed the lexer slightly. Calling a nonfragment rule from a
-  nonfragment rule does not set the overall token.
-
-June 10, 2006
-
-* Fixed bug where unnecessary escapes yield char==0 like '\{'.
-
-* Fixed analysis bug. This grammar didn't report a recursion warning:
-
-  x : y X
-    | y Y
-    ;
-  y : L y R
-    | B
-    ;
-
-  The DFAState.equals() method was messed up.
-
-* Added @synpredgate {...} action so you can tell ANTLR how to gate actions
-  in/out during syntactic predicate evaluation.
-
-* Fuzzy parsing should be more efficient. It should backtrack over a rule
-  and then rewind and do it again "with feeling" to exec actions. It was
-  actually doing it 3x not 2x.
-
-June 9, 2006
-
-* Gutted and rebuilt the action translator for $x.y, $x::y, ...
-  Uses ANTLR v3 now for the first time inside v3 source. :)
-  ActionTranslator.java
-
-* Fixed a bug where referencing a return value on a rule didn't work
-  because later a ref to that rule's predefined properties didn't
-  properly force a return value struct to be built. Added unit test.
-
-June 6, 2006
-
-* New DFA mechanisms. Cyclic DFA are implemented as state tables,
-  encoded via strings as java cannot handle large static arrays :(
-  States with emanating edges that have predicates are specially
-  treated. A method is generated to do these states. The DFA
-  simulation routine uses the "special" array to figure out if the
-  state is special. See the March 25, 2006 entry for a description:
-  http://www.antlr.org/blog/antlr3/codegen.tml. analysis.DFA now has
-  all the state tables generated for code gen. CyclicCodeGenerator.java
-  disappeared as it's unneeded code. :)
-
-* Internal general clean up of the DFA.states vs uniqueStates thing.
-  Fixed lookahead decisions no longer fill uniqueStates. Waste of
-  time. Also noted that when adding sem pred edges, I didn't check
-  for state reuse. Fixed.
-
-June 4, 2006
-
-* When resolving ambig DFA states with predicates, I did not add the new
-  states to the list of unique DFA states. No observable effect on output
-  except that DFA state numbers were not always contiguous for predicated
-  decisions. I needed this fix for the new DFA tables.
-
-3.0ea10 - June 2, 2006
-
-June 2, 2006
-
-* Improved grammar stats and added syntactic pred tracking.
-
-June 1, 2006
-
-* Due to a type mismatch, the DebugParser.recoverFromMismatchedToken()
-  method was not called. Debug events for mismatched token error
-  notification were not sent to ANTLRWorks properly.
-
-* Added getBacktrackingLevel() for any recognizer; needed for the profiler.
-
-* Only writes profiling data for antlr grammar analysis with -profile set
-
-* Major update and bug fix to the (runtime) Profiler.
-
-May 27, 2006
-
-* Added Lexer.skip() to force the lexer to ignore the current token and look
-  for another; no token is created for the current rule and none is passed on
-  to the parser (or other consumer of the lexer).
-
-* Parsers are much faster now. I removed use of java.util.Stack for pushing
-  follow sets and use a hardcoded array stack instead. Dropped from
-  5900ms to 3900ms for parse+lex time parsing the entire java 1.4.2 source.
-  Lex time alone was about 1500ms. Just looking at parse time, we get about
-  2x speed improvement. :)
-
-May 26, 2006
-
-* Fixed NFA construction so it generates the NFA for (A*)* such that
-  ANTLRWorks can display it properly.
-
-May 25, 2006
-
-* added abort method to Grammar so AW can terminate the conversion if it's
-  taking too long.
-
-May 24, 2006
-
-* added method to get left recursive rules from grammar without doing full
-  grammar analysis.
-
-* analysis, code gen not attempted if a serious error (like
-  left-recursion or a missing rule definition) occurred while reading
-  the grammar in and defining symbols.
-
-* added amazing optimization; reduces analysis time by 90% for java
-  grammar; simple IF statement addition!
-
-3.0ea9 - May 20, 2006
-
-* added global k value for grammar to limit lookahead for all decisions unless
-  overridden in a particular decision.
-
-* added failsafe so that any decision taking longer than 2 seconds to create
-  the DFA will fall back on k=1. Use -ImaxtimeforDFA n (in ms) to set the time.
-
-* added an option (turned off for now) to use multiple threads to
-  perform grammar analysis. Not much help on a 2-CPU computer as
-  garbage collection seems to peg the 2nd CPU already. :( Gotta wait for
-  a 4 CPU box ;)
-
-* switched from #src to // $ANTLR src directive.
-
-* CommonTokenStream.getTokens() looked past end of buffer sometimes. fixed.
-
-* unicode literals didn't really work in DOT output and generated code. fixed.
-
-* fixed the unit test rig so it compiles nicely with Java 1.5
-
-* Added ant build.xml file (reads build.properties file)
-
-* predicates sometimes failed to compile/eval properly due to missing (...)
-  in IF expressions. Forced (...)
-
-* (...)? with only one alt were not optimized. Was:
-
-  // t.g:4:7: ( B )?
-  int alt1=2;
-  int LA1_0 = input.LA(1);
-  if ( LA1_0==B ) {
-      alt1=1;
-  }
-  else if ( LA1_0==-1 ) {
-      alt1=2;
-  }
-  else {
-      NoViableAltException nvae =
-          new NoViableAltException("4:7: ( B )?", 1, 0, input);
-      throw nvae;
-  }
-
-is now:
-
-  // t.g:4:7: ( B )?
-  int alt1=2;
-  int LA1_0 = input.LA(1);
-  if ( LA1_0==B ) {
-      alt1=1;
-  }
-
-  Smaller, faster and more readable.
-
-* Allow manual init of return values now:
-  functionHeader returns [int x=3*4, char (*f)()=null] : ... ;
-
-* Added optimization for DFAs that fixed a codegen bug with rules in the
-  lexer:
-    EQ       : '=' ;
-    ASSIGNOP : '=' | '+=' ;
-  EQ is a subset of the other rule. It did not give an error, which is
-  correct, but it generated bad code.
-
-* ANTLR was sending column not char position to ANTLRWorks.
-
-* Bug fix: location 0, 0 emitted for synpreds and empty alts.
-
-* debugging event handshake now sends the grammar file name. Added
-  getGrammarFileName() to recognizers. Java.stg generates it:
-
-  public String getGrammarFileName() { return ""; }
-
-* tree parsers can do arbitrary lookahead now including backtracking. I
-  updated CommonTreeNodeStream.
-
-* added events for debugging tree parsers:
-
-  /** Input for a tree parser is an AST, but we know nothing for sure
-   *  about a node except its type and text (obtained from the adaptor).
-   *  This is the analog of the consumeToken method. Again, the ID is
-   *  usually the hashCode of the node so it only works if hashCode is
-   *  not implemented.
-   */
-  public void consumeNode(int ID, String text, int type);
-
-  /** The tree parser looked ahead */
-  public void LT(int i, int ID, String text, int type);
-
-  /** The tree parser has popped back up from the child list to the
-   *  root node.
-   */
-  public void goUp();
-
-  /** The tree parser has descended to the first child of the current
-   *  root node.
-   */
-  public void goDown();
-
-* Added DebugTreeNodeStream and DebugTreeParser classes
-
-* Added ctor because the debug tree node stream will need to ask questions
-  about nodes and, since nodes are just Object, it needs an adaptor to decode
-  the nodes and get text/type info for the debugger.
-
-  public CommonTreeNodeStream(TreeAdaptor adaptor, Tree tree);
-
-* added getter to TreeNodeStream:
-  public TreeAdaptor getTreeAdaptor();
-
-* Implemented getText/getType in CommonTreeAdaptor.
-
-* Added TraceDebugEventListener that can dump all events to stdout.
-
-* I broke down and made Tree implement getText
-
-* tree rewrites now gen location debug events.
-
-* added AST debug events to the listener; added a blank listener for
-  convenience
-
-* updated debug events to send begin/end backtrack events for debugging
-
-* with a : (b->b) ('+' b -> ^(PLUS $a b))* ; you get b[0] each time as
-  there is no loop in the rewrite rule itself. Need to know the context that
-  the -> is inside the rule and hence b means the last value of b, not all
-  values.
-
-* Bug in TokenRewriteStream; ops at indexes < start index blocked proper op.
-
-* Actions in ST rewrites "-> ({$op})()" were not translated
-
-* Added new action name:
-
-@rulecatch {
-catch (RecognitionException re) {
-    reportError(re);
-    recover(input,re);
-}
-catch (Throwable t) {
-    System.err.println(t);
-}
-}
-
-  Overrides rule catch stuff.
-
-* Isolated $ refs caused an exception
-
-3.0ea8 - March 11, 2006
-
-* added @finally {...} action like @init for rules. Executes in the
-  finally block (java target) after all other stuff like rule memoization.
-  No code changes needed; ST just refs a new action:
-
-* hideous bug fixed: PLUS='+' didn't result in a '+' rule in the lexer
-
-* TokenRewriteStream didn't do toString() right when no rewrites had
-  been done.
-
-* lexer errors in the interpreter were not printed properly
-
-* bitsets are dumped in hex not decimal now for FOLLOW sets
-
-* /* epsilon */ is not printed now when printing out grammars with empty alts
-
-* Fixed another bug in the tree rewrite stuff where it was checking that
-  elements had at least one element. Strange...commented out for now to see
-  if I can remember what's up.
-
-* Tree rewrites had problems when you didn't have x+=FOO variables. Rules
-  like this work now:
-
-  a : (x=ID)? y=ID -> ($x $y)?;
-
-* filter=true for lexers turns on k=1 and backtracking for every token
-  alternative. Put the rules in priority order.
-
-* added getLine() etc... to Tree to support better error reporting for
-  trees. Added MismatchedTreeNodeException.
-
-* $templates::foo() is gone. added % as special template symbol.
-    %foo(a={},b={},...)      ctor (even shorter than $templates::foo(...))
-    %({name-expr})(a={},...) indirect template ctor reference
-
-  The above are parsed by antlr.g and translated by codegen.g
-  The following are parsed manually here:
-
-    %{string-expr}   anonymous template from string expr
-    %{expr}.y = z;   set template attribute y of StringTemplate-typed expr
-                     to z
-    %x.y = z;        set template attribute y of x (always set never get attr)
-                     to z [languages like python without ';' must still use the
-                     ';' which the code generator is free to remove during
-                     code gen]
-
-* -> ({expr})(a={},...) notation for indirect template rewrite.
-  expr is the name of the template.
-
-* $x[i]::y and $x[-i]::y notation for accessing absolute scope stack
-  indexes and relative negative scopes. $x[-1]::y is the y attribute
-  of the previous scope (stack top - 1).
-
-* filter=true mode for lexers; can do this now...upon mismatch, it just
-  consumes a char and tries again:
-
-lexer grammar FuzzyJava;
-options {filter=true;}
-
-FIELD
-    : TYPE WS? name=ID WS? (';'|'=')
-      {System.out.println("found var "+$name.text);}
-    ;
-
-* refactored char streams so ANTLRFileStream is now a subclass of
-  ANTLRStringStream.
-
-* char streams for the lexer now allow nested backtracking in the lexer.
-
-* added TokenLabelType for lexer/parser for all token labels
-
-* line numbers for error messages were not updated properly in antlr.g
-  for strings, char literals and <<...>>
-
-* init action in lexer rules was before the type,start,line,... decls.
-
-* Tree grammars can now specify output; I've only tested output=template
-  though.
-
-* You can reference EOF now in the parser and lexer. It's just token type
-  or char value -1.
-
-* Bug fix: $ID refs in the *lexer* were all messed up. Cleaned up the
-  set of properties available...
-
-* Bug fix: .st not found in rule ref when rule has scope:
-
-field
-scope {
-    StringTemplate funcDef;
-}
-    : ...
-      {$field::funcDef = $field.st;}
-    ;
-
-  it gets field_stack.st instead
-
-* return in backtracking must return retval, or null if there is a return
-  value.
-
-* $property within a rule now works like $text, $st, ...
-
-* AST/Template Rewrites were not gated by backtracking==0 so they
-  executed even when guessing. Auto AST construction is now gated also.
-
-* CommonTokenStream was somehow returning tokens not text in toString()
-
-* added useful methods to runtime.BitSet and also to CommonToken so you can
-  update the text. Added a nice Token stream method:
-
-  /** Given a start and stop index, return a List of all tokens in
-   *  the token type BitSet. Return null if no tokens were found. This
-   *  method looks at both on and off channel tokens.
-   */
-  public List getTokens(int start, int stop, BitSet types);
-
-* literals are now passed in the .tokens files so you can ref them in
-  tree parsers, for example.
-
-* added basic exception handling; no labels, just general catches:
-
-a : {;}A | B ;
-  exception
-    catch[RecognitionException re] {
-      System.out.println("recog error");
-    }
-    catch[Exception e] {
-      System.out.println("error");
-    }
-
-* Added method to TokenStream:
-  public String toString(Token start, Token stop);
-
-* antlr generates #src lines in lexer grammars generated from combined
-  grammars so error messages refer to the original file.
-
-* lexers generated from combined grammars now use the original formatting.
-
-* predicates have $x.y stuff translated now. Warning: predicates might be
-  hoisted out of context.
-
-* return values in return val structs are now public.
-
-* output=template with return values on rules was broken. I assume return
-  values with ASTs were broken too. Fixed.
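-
-  An illustrative use of the getTokens() stream method added above (a
-  sketch, not part of the original changelog; TLexer and its ID token type
-  are assumptions, and fill() is from the later 3.x runtime):
-
-    import java.util.List;
-    import org.antlr.runtime.BitSet;
-    import org.antlr.runtime.CommonTokenStream;
-
-    CommonTokenStream tokens = new CommonTokenStream(lexer);
-    tokens.fill();
-    // all on- and off-channel ID tokens between token indexes 0 and 9:
-    List ids = tokens.getTokens(0, 9, BitSet.of(TLexer.ID));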
-
-3.0ea7 - December 14, 2005
-
-* Added -print option to print out grammar w/o actions
-
-* Renamed BaseParser to be BaseRecognizer and even made Lexer derive from
-  this; nice as it now shares backtracking support code.
-
-* Added syntactic predicates (...)=>. See the December 4, 2005 entry:
-
-  http://www.antlr.org/blog/antlr3/lookahead.tml
-
-  Note that we have a new option for turning off rule memoization during
-  backtracking:
-
-  -nomemo   when backtracking don't generate memoization code
-
-* Predicates are now tested in the order that you specify the alts. If you
-  leave the last alt "naked" (w/o pred), it will assume a true pred rather
-  than the union of the other preds.
-
-* Added gated predicates "{p}?=>" that literally turn off a production,
-  whereas disambiguating predicates are only hoisted into the predictor
-  when syntax alone is not sufficient to uniquely predict alternatives.
-
-A : {p}? => "a" ;
-B : {!p}? => ("a"|"b")+ ;
-
-* bug fixed related to predicates in the predictor
-
-lexer grammar w;
-A : {p}? "a" ;
-B : {!p}? ("a"|"b")+ ;
-
-  The DFA is correct. A state splits for input "a" on the pred.
-  The generated code though was hosed. No pred tests in prediction code!
-  I added testLexerPreds() and others in TestSemanticPredicateEvaluation.java
-
-* added execAction template in case we want to do something in front of
-  each action execution or something.
-
-* left-recursive cycles from rules w/o decisions were not detected.
-
-* undefined lexer rules were not announced! fixed.
-
-* unreachable messages for the Tokens rule now indicate the rule name not
-  the alt. E.g.,
-
-  Ruby.lexer.g:24:1: The following token definitions are unreachable: IVAR
-
-* nondeterminism warnings improved for the Tokens rule:
-
-Ruby.lexer.g:10:1: Multiple token rules can match input such as ""0".."9"": INT, FLOAT
-As a result, token(s) FLOAT were disabled for that input
-
-* DOT diagrams didn't show escaped chars properly.
-
-* Char/string literals are now all 'abc' not "abc".
-
-* action syntax changed "@scope::actionname {action}" where scope defaults
-  to "parser" if parser grammar or combined grammar, "lexer" if lexer
-  grammar, and "treeparser" if tree grammar. The code generation targets
-  decide what scopes are available. Each "scope" yields a hashtable for use
-  in the output templates. The scopes full of actions are sent to all output
-  file templates (currently headerFile and outputFile) as attribute actions.
-  Then you can reference the actions attribute to get the map of actions
-  associated with a scope and, for example, the parser's header action.
-  This should be very flexible. The target should only have
-  to define which scopes are valid, but the action names should be variable
-  so we don't have to recompile ANTLR to add actions to code gen templates.
-
-  grammar T;
-  options {language=Java;}
-  @header { package foo; }
-  @parser::stuff { int i; } // names within scope not checked; target dependent
-  @members { int i; }
-  @lexer::header {head}
-  @lexer::members { int j; }
-  @headerfile::blort {...} // error: this target doesn't have headerfile
-  @treeparser::members {...} // error: this is not a tree parser
-  a
-  @init {int i;}
-    : ID
-    ;
-  ID : 'a'..'z';
-
-  For now, the Java target uses members and header as valid names. Within a
-  rule, the init action name is valid.
-
-* changed $dynamicscope.value to $dynamicscope::value even if value is
-  defined in the same rule, such as $function::name where rule function
-  defines name.
-
-* $dynamicscope gets you the stack
-
-* rule scopes go like this now:
-
-  rule
-  scope {...}
-  scope slist,Symbols;
-    : ...
-    ;
-
-* Created RuleReturnScope as a generic rule return value. Makes it easier
-  to do this:
-
-  RuleReturnScope r = parser.program();
-  System.out.println(r.getTemplate().toString());
-
-* $template, $tree, $start, etc...
-
-* $r.x in the current rule. $r is ignored as a fully-qualified name.
-  $r.start works too
-
-* added warning about $r referring to both the return value of a rule and
-  the dynamic scope of a rule
-
-* integrated StringTemplate in a very simple manner
-
-Syntax:
--> template(arglist) "..."
--> template(arglist) <<...>>
--> namedTemplate(arglist)
--> {free expression}
--> // empty
-
-Predicate syntax:
-a : A B -> {p1}? foo(a={$A.text})
-       -> {p2}? foo(a={$B.text})
-       -> // return nothing
-
-An arg list is just a list of template attribute assignments to actions in
-curlies.
-
-There is a setTemplateLib() method for you to use with named template
-rewrites.
-
-Use a new option:
-
-grammar t;
-options {output=template;}
-...
-
-This all should work for tree grammars too, but I'm still testing.
-
-* fixed bugs where strings were improperly escaped in exceptions, comments,
-  etc. For example, newlines came out as real newlines, not the escaped
-  version
-
-3.0ea6 - November 13, 2005
-
-* turned off -debug/-profile, which were on by default
-
-* completely refactored the output templates; added some missing templates.
-
-* dramatically improved infinite recursion error messages (actually
-  left-recursion was never even printed out before).
-
-* wasn't printing dangling state messages when it reanalyzes with k=1.
-
-* fixed a nasty bug in the analysis engine dealing with infinite recursion.
-  Spent all day thinking about it and cleaned up the code dramatically.
-  Bug fixed and software is more powerful and I understand it better! :)
-
-* improved verbose DFA nodes; organized by alt
-
-* got much better random phrase generation. For example:
-
-  $ java org.antlr.tool.RandomPhrase simple.g program
-  int Ktcdn ';' method wh '(' ')' '{' return 5 ';' '}'
-
-* empty rules like "a : ;" generated code that didn't compile due to
-  try/catch for RecognitionException. Generated code couldn't possibly
-  throw that exception.
-
-* when printing out a grammar, such as in comments in generated code,
-  ANTLR didn't print ast suffix stuff back out for literals.
-
-* This loop never exited:
-  DATA : (options {greedy=false;}: .* '\n' )* '\n' '.' ;
-  and now it works due to the new default nongreedy .*  Also this works:
-  DATA : (options {greedy=false;}: .* '\n' )* '.' ;
-
-* Dot star ".*" syntax didn't work; in the lexer it is nongreedy by
-  default. In the parser it is greedy but also k=1 by default. Added
-  unit tests. Added blog entry to describe.
-
-* ~T where T is the only token yielded an empty set but no error
-
-* Used to generate an unreachable message here:
-
-  parser grammar t;
-  a : ID a
-    | ID
-    ;
-
-  z.g:3:11: The following alternatives are unreachable: 2
-
-  In fact it should really be an error; now it generates:
-
-  no start rule in grammar t (no rule can obviously be followed by EOF)
-
-  Per the next change item, ANTLR cannot know that EOF follows rule 'a'.
-
-* added error message indicating that ANTLR can't figure out what your
-  start rule is. Required to properly generate code in some cases.
-
-* validating semantic predicates now work (if they are false, they
-  throw a new FailedPredicateException)
-
-* two hideous bug fixes in the IntervalSet, which made analysis go wrong
-  in a few cases.
-  Thanks to Oliver Zeigermann for finding lots of bugs
-  and making suggested fixes (including the next two items)!
-
-* cyclic DFAs are now nonstatic and hence can access instance variables
-
-* labels are now allowed on lexical elements (in the lexer)
-
-* added some internal debugging options
-
-* ~'a'* and ~('a')* were not working properly; refactored the antlr.g grammar
-
-3.0ea5 - July 5, 2005
-
-* Using '\n' in a parser grammar resulted in a nonescaped version of '\n' in
-  the token names table, making compilation fail. I fixed this by
-  reorganizing/cleaning up the portion of ANTLR that deals with literals.
-  See the comment in org.antlr.codegen.Target.
-
-* Target.getMaxCharValue() did not use the appropriate max value constant.
-
-* ALLCHAR was a constant when it should use the Target max value def.
-  Set complement for wildcard also didn't use the Target def. Generally
-  cleaned up the max char value stuff.
-
-* Code gen didn't deal with ASTLabelType properly...I think even the 3.0ea7
-  example tree parser was broken! :(
-
-* Added a few more unit tests dealing with escaped literals
-
-3.0ea4 - June 29, 2005
-
-* tree parsers work; added CommonTreeNodeStream. See the simplecTreeParser
-  example in the examples-v3 tarball.
-
-* added superClass and ASTLabelType options
-
-* refactored Parser to have a BaseParser and added TreeParser
-
-* bug fix: actions were being dumped in description strings; compile errors
-  resulted
-
-3.0ea3 - June 23, 2005
-
-Enhancements
-
-* Automatic tree construction operators are in: ! ^ ^^
-
-* Tree construction rewrite rules are in
-
-  -> {pred1}? rewrite1
-  -> {pred2}? rewrite2
-  ...
-  -> rewriteN
-
-  The rewrite rules may be elements like ID, expr, $label, {node expr}
-  and trees ^( ). You may have (...)?, (...)*, (...)+
-  subrules as well.
-
-  You may have rewrites in subrules, not just at the outer level of a rule,
-  but any -> rewrite forces auto AST construction off for that alternative
-  of that rule.
-
-  To avoid cycles, copy semantics are used:
-
-  r : INT -> INT INT ;
-
-  means make two new nodes from the same INT token.
-
-  Repeated references to a rule element imply a copy for at least one
-  tree:
-
-  a : atom -> ^(atom atom) ; // NOT CYCLE! (dup atom tree)
-
-* $ruleLabel.tree refers to the tree created by matching the labeled element.
-
-* A description of the blocks/alts is generated as a comment in output code
-
-* A timestamp / signature is put at the top of each generated code file
-
-3.0ea2 - June 12, 2005
-
-Bug fixes
-
-* Some error messages were missing the stackTrace parameter
-
-* Removed the file locking mechanism as it's not cross platform
-
-* Some absolute vs relative path name problems with writing output
-  files. Rules are now more concrete. -o option takes precedence
-  // -o /tmp /var/lib/t.g       => /tmp/T.java
-  // -o subdir/output /usr/lib/t.g => subdir/output/T.java
-  // -o . /usr/lib/t.g          => ./T.java
-  // -o /tmp subdir/t.g         => /tmp/subdir/t.g
-  // If they didn't specify a -o dir, just write to the location
-  // where the grammar is, absolute or relative
-
-* does error checking on unknown option names now
-
-* Using just the language code not the locale name for the error message
-  file. I.e., the default (and for any English speaking locale) is en.stg
-  not en_US.stg anymore.
-
-* The error manager now asks the Tool to panic rather than simply doing
-  a System.exit().
-
-* Lots of refactoring concerning grammar, rule, subrule options. Now
-  detects invalid options.
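-
-  A sketch of driving a template-output parser per the 3.0ea7 template
-  integration described above (illustrative only, not part of the original
-  changelog; TLexer/TParser, the file names, and the start rule program
-  are assumptions):
-
-    import java.io.FileReader;
-    import org.antlr.runtime.*;
-    import org.antlr.stringtemplate.StringTemplateGroup;
-
-    TLexer lexer = new TLexer(new ANTLRFileStream("input.txt"));
-    TParser parser = new TParser(new CommonTokenStream(lexer));
-    parser.setTemplateLib(new StringTemplateGroup(new FileReader("out.stg")));
-    RuleReturnScope r = parser.program();           // assumed start rule
-    System.out.println(r.getTemplate().toString()); // emit the rule's template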
-
-3.0ea1 - June 1, 2005
-
-Initial early access release
diff --git a/thirdparty/antlr3-antlr-3.5/LICENSE.txt b/thirdparty/antlr3-antlr-3.5/LICENSE.txt
deleted file mode 100644
index a5216ef2..00000000
--- a/thirdparty/antlr3-antlr-3.5/LICENSE.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-[The "BSD license"]
-Copyright (c) 2013 Terence Parr
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/thirdparty/antlr3-antlr-3.5/README.txt b/thirdparty/antlr3-antlr-3.5/README.txt
deleted file mode 100644
index 3dec1caa..00000000
--- a/thirdparty/antlr3-antlr-3.5/README.txt
+++ /dev/null
@@ -1,142 +0,0 @@
-ANTLR v3.5
-January 4, 2013
-
-Terence Parr, parrt at cs usfca edu
-ANTLR project lead and supreme dictator for life
-University of San Francisco
-
-INTRODUCTION
-
-Welcome to ANTLR v3! ANTLR (ANother Tool for Language Recognition) is
-a language tool that provides a framework for constructing
-recognizers, interpreters, compilers, and translators from grammatical
-descriptions containing actions in a variety of target
-languages. ANTLR provides excellent support for tree construction,
-tree walking, translation, error recovery, and error reporting. I've
-been working on parser generators for 25 years and on this particular
-version of ANTLR for 9 years.
-
-You should use v3 in conjunction with ANTLRWorks:
-
-    http://www.antlr.org/works/index.html
-
-and gUnit (grammar unit testing tool included in distribution):
-
-    http://www.antlr.org/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing
-
-The book will also help you a great deal (printed May 15, 2007); you
-can also buy the PDF:
-
-    http://www.pragmaticprogrammer.com/titles/tpantlr/index.html
-
-2nd book, Language Implementation Patterns:
-
-    http://pragprog.com/titles/tpdsl/language-implementation-patterns
-
-See the getting started document:
-
-    http://www.antlr.org/wiki/display/ANTLR3/FAQ+-+Getting+Started
-
-You also have the examples plus the source to guide you.
-
-See the wiki FAQ:
-
-    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
-
-and general doc root:
-
-    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+3+Wiki+Home
-
-Please help add/update FAQ entries.
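-
-To complement the examples mentioned above, a minimal driver for classes
-generated from a combined grammar Test.g might look like this (a sketch,
-not part of the original README; the start rule "prog" is an assumption):
-
-    import org.antlr.runtime.*;
-
-    public class Main {
-        public static void main(String[] args) throws Exception {
-            TestLexer lexer = new TestLexer(new ANTLRFileStream(args[0]));
-            CommonTokenStream tokens = new CommonTokenStream(lexer);
-            TestParser parser = new TestParser(tokens);
-            parser.prog();   // invoke the assumed start rule
-        }
-    }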
-
-If all else fails, you can buy support or ask the antlr-interest list:
-
-    http://www.antlr.org/support.html
-
-Per the license in LICENSE.txt, this software is not guaranteed to
-work and might even destroy all life on this planet:
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-----------------------------------------------------------------------
-
-EXAMPLES
-
-ANTLR v3 sample grammars:
-
-    https://github.com/antlr/examples-v3
-
-Examples from Language Implementation Patterns:
-
-    http://www.pragprog.com/titles/tpdsl/source_code
-
-----------------------------------------------------------------------
-
-What is ANTLR?
-
-ANTLR stands for (AN)other (T)ool for (L)anguage (R)ecognition
-and generates LL(*) recursive-descent parsers. ANTLR is a language tool
-that provides a framework for constructing recognizers, compilers, and
-translators from grammatical descriptions containing actions.
-Target language list:
-
-http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
-
-----------------------------------------------------------------------
-
-How is ANTLR v3 different than ANTLR v2?
-
-See "What is the difference between ANTLR v2 and v3?"
-
-    http://www.antlr.org/wiki/pages/viewpage.action?pageId=719
-
-See migration guide:
-
-    http://www.antlr.org/wiki/display/ANTLR3/Migrating+from+ANTLR+2+to+ANTLR+3
-
-----------------------------------------------------------------------
-
-How do I install this damn thing?
-
-You will have grabbed either of these:
-
-    http://antlr.org/download/antlr-3.5-complete-no-antlrv2.jar
-    http://antlr.org/download/antlr-3.5-complete.jar
-
-It has all of the jars you need combined into one. Then you need to
-add antlr-3.5-complete.jar to your CLASSPATH or add to arg list; e.g., on unix:
-
-$ java -cp "/usr/local/lib/antlr-3.5-complete.jar:$CLASSPATH" org.antlr.Tool Test.g
-
-Source + java binaries: Just untar antlr-3.5.tar.gz and you'll get:
-
-antlr-3.5/BUILD.txt
-antlr-3.5/antlr3-maven-plugin
-antlr-3.5/antlrjar.xml
-antlr-3.5/antlrsources.xml
-antlr-3.5/gunit
-antlr-3.5/gunit-maven-plugin
-antlr-3.5/pom.xml
-antlr-3.5/runtime
-antlr-3.5/tool
-antlr-3.5/lib
-
-Please see the FAQ
-
-    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
-
--------------------------
-
-How can I contribute to ANTLR v3?
-
-http://www.antlr.org/wiki/pages/viewpage.action?pageId=33947666
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/AUTHORS b/thirdparty/antlr3-antlr-3.5/runtime/Python/AUTHORS
deleted file mode 100644
index 01e79eea..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/AUTHORS
+++ /dev/null
@@ -1,2 +0,0 @@
-Benjamin Niemann : Main developer of Python target.
-Clinton Roy : AST templates and runtime.
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/ChangeLog b/thirdparty/antlr3-antlr-3.5/runtime/Python/ChangeLog
deleted file mode 100644
index e9796202..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/ChangeLog
+++ /dev/null
@@ -1,38 +0,0 @@
-2007-11-03  Benjamin Niemann
-
-	* PythonTarget.java, dfa.py, exceptions.py, recognizer.py, streams.py:
-	ANTLRStringStream.LA() now returns the character's ordinal and
-	generated lexers operate on integers. Also made various performance
-	improvements.
-
-2007-10-07  Benjamin Niemann
-
-	* main.py, Python.stg (outputFile): Added simple __main__ section to
-	generated code, so (simple) grammars can be executed as standalone
-	scripts.
-
-	* tree.py (RecognitionException.extractInformationFromTreeNodeStream),
-	exceptions.py (CommonTree): Small bugfixes.
-
-2007-09-30  Benjamin Niemann
-
-	* recognizers.py (TokenSource): Added iterator interface to the
-	TokenSource class, and thus to Lexer.
-
-2007-06-27  Benjamin Niemann
-
-	* Python.stg (genericParser, parser, treeParser): Use correct @init
-	action block for tree parsers.
-
-2007-05-24  Benjamin Niemann
-
-	* Python.stg (rule): Added support for @decorate {...} action for
-	parser rules to add decorators to the rule method.
-
-2007-05-18  Benjamin Niemann
-
-	* Python.stg (isolatedLookaheadRangeTest, lookaheadRangeTest):
-	Minor improvement of generated code (use ' <= <= '
-	instead of ' >= and <= ').
-
-
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/LICENSE b/thirdparty/antlr3-antlr-3.5/runtime/Python/LICENSE
deleted file mode 100644
index 1d1d5d64..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-[The "BSD licence"]
-Copyright (c) 2003-2006 Terence Parr
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/MANIFEST.in b/thirdparty/antlr3-antlr-3.5/runtime/Python/MANIFEST.in
deleted file mode 100644
index 29c4ad64..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/MANIFEST.in
+++ /dev/null
@@ -1,2 +0,0 @@
-include LICENSE AUTHORS ez_setup.py
-
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/README b/thirdparty/antlr3-antlr-3.5/runtime/Python/README
deleted file mode 100644
index 98a50bb7..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/README
+++ /dev/null
@@ -1,90 +0,0 @@
-1) ABOUT
-========
-
-This is the Python package 'antlr3', which is required to use parsers created
-by the ANTLR3 tool. See for more information about ANTLR3.
-
-
-2) STATUS
-=========
-
-The Python target for ANTLR3 is still in beta. Documentation is lacking, some
-bits of the code are not yet done, and some functionality has not been tested
-yet. Also the API might change a bit - it currently mimics the Java
-implementation, but it may be made a bit more pythonic here and there.
-
-WARNING: Currently the runtime library for V3.1 is not compatible with
-recognizers generated by ANTLR V3.0.x. If you are an application developer,
-then the suggested way to solve this is to package the correct runtime with
-your application. Installing the runtime in the global site-packages directory
-may not be a good idea.
-It is still undetermined whether a future release of the V3.1 runtime will be
-compatible with V3.0.x recognizers or whether future runtimes V3.2+ will be
-compatible with V3.1 recognizers.
-Sorry for the inconvenience.
-
-
-3) DOWNLOAD
-===========
-
-This runtime is part of the ANTLR distribution. The latest version can be
-found at .
-
-If you are interested in the latest, most bleeding edge version, have a look
-at the perforce depot at . There are
-tarballs ready to download, so you don't have to install the perforce client.
-
-
-4) INSTALLATION
-===============
-
-Just like any other Python package:
-$ python setup.py install
-
-See for more information.
-
-
-5) DOCUMENTATION
-================
-
-Documentation (as far as it exists) can be found in the wiki
-
-
-6) REPORTING BUGS
-=================
-
-Please send bug reports to the ANTLR mailing list or .
-
-Existing bugs may appear someday in the bugtracker:
-
-
-7) HACKING
-==========
-
-Only the runtime package can be found here. There are also some StringTemplate
-files in 'src/org/antlr/codegen/templates/Python/' and some Java code in
-'src/org/antlr/codegen/PythonTarget.java' (of the main ANTLR3 source
-distribution).
-
-If there are no directories 'tests' and 'unittests' in 'runtime/Python', you
-should fetch the latest ANTLR3 version from the perforce depot. See section
-DOWNLOAD.
-You'll need java and ant in order to compile and use the tool.
-Be sure to properly set up your CLASSPATH.
-(FIXME: is there some generic information on how to build it yourself? I
-should point to it to avoid duplication.)
-
-You can then use the commands
-$ python setup.py unittest
-$ python setup.py functest
-to ensure that changes do not break existing behaviour.
-
-Please send patches to . For larger code contributions you'll
-have to sign the "Developer's Certificate of Origin", which can be found on
- or use the feedback form at .
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/TODO b/thirdparty/antlr3-antlr-3.5/runtime/Python/TODO
deleted file mode 100644
index 4ae5cdd9..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/TODO
+++ /dev/null
@@ -1,82 +0,0 @@
-- new test from CL4832
-- CS4531
-- testcases for error nodes
-- did I miss a change to Python.stg/returnScope?
-  - there are base classes Tree-/ParserRuleReturnScope
-- update old and add new examples
-- need protections in scopeAttributeRef? CL4426
-- testcase for $ID.int CL4413
-- need to override Target.encodeIntAsCharEscape? CL4389
-- look into buildbot
-  - link in report mails is broken
-  - timezone bug in p4 scraper
-  - core:
-    - only look at changes in src/ & runtime/Java
-    - quick
-      - incremental build
-      - sanity check
-    - full
-      - depend on quick
-      - full build
-      - ant test
-  - targets
-    - depend on changes on src/ and runtime/XXX
-    - depend on successful core/quick build
-  - nightlybuild
-    - depend on core/full
-    - somehow check which targets are ok
-- TreeWizard:
-  - raise exception on parse errors
-  - document it in wiki
-- publish runtime on cheeseshop
-- better documentation for output=template w/ full examples
-- antlr3.main:
-  - verbose/quiet flag: show/hide warnings
-  - set options in grammar?
-  - write optionparser descriptions
-  - better output for return objects
-  - st support
-- custom grammar options per target
-  - make Grammar.legalOptions changeable
-  - first extract language option, load target class
-  - pass options not known by Grammar to target
-- patch for CS4010 "null check for $scope::var now" once action parser is fixed
-- rename @members/@init to @classmembers, @instancemembers?
-- gunit?
-- testcases for error handling in tree parsers
-- better test coverage for runtime modules
-- documentation
-  - more documentation in docstrings
-  - tune doxygen output
-  - doxygen frontpage
-- do not use Set* templates for properties for Python target
-  - gate with sempred {target.usePropertySetTemplates()}?
-- special template for empty alternative -> pass
-- complete runtime
-  - tree.DoubleLinkTree
-  - tree.ParseTree
-  - tree.UnBufferedTreeNodeStream
-- default values in rule arguments?
-- turn some methods into attributes
-  - (String|CommonToken)Stream.index()
-  - (String|CommonToken)Stream.size() --> __len__
-- get rid of getter/setter in generated code
-- document differences to java API
-- add methods to emulate java API, but mark 'em as deprecated
-- using Stream.index as a state for 'error-already-reported' or memoization
-  will be a problem when the stream is not a linear buffer
-- optimizations which should be explored:
-  - DFA: perhaps zip() the lists into a tuple
-    (eot, eof, min, max, accept, special, transition)
-    for each state. check out the potential performance gain.
-  - StringStream: look into optimizing LA(). Perhaps use LAk instead of LA(k)
-    and create the attributes when needed.
-  - Perform some magic to improve dfaStateSwitch.
-  - in lexer rules:
-    LA == u'a' or LA == u'b' or LA == u'c'...
-    -> LA in (u'a', u'b', u'c', ...)
-    or "LA in self.set_xyz" with set_xyz as a class member
-  - tweak CodeGenerator.genSetExpr()
-  - make BaseTree.nil() an attribute? or singleton?
-  - psyco??
-- ...
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/__init__.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/__init__.py
deleted file mode 100644
index 40685598..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/__init__.py
+++ /dev/null
@@ -1,152 +0,0 @@
-""" @package antlr3
-@brief ANTLR3 runtime package
-
-This module contains all support classes, which are needed to use recognizers
-generated by ANTLR3.
-
-@mainpage
-
-\note Please be warned that the line numbers in the API documentation do not
-match the real locations in the source code of the package. This is an
-unintended artifact of doxygen, which I could only convince to use the
-correct module names by concatenating all files from the package into a single
-module file...
-
-Here is a little overview of the most commonly used classes provided by
-this runtime:
-
-@section recognizers Recognizers
-
-These recognizers are baseclasses for the code which is generated by ANTLR3.
-
-- BaseRecognizer: Base class with common recognizer functionality.
-- Lexer: Base class for lexers.
-- Parser: Base class for parsers.
-- tree.TreeParser: Base class for %tree parsers.
-
-@section streams Streams
-
-Each recognizer pulls its input from one of the stream classes below. Streams
-handle stuff like buffering, look-ahead and seeking.
-
-A character stream is usually the first element in the pipeline of a typical
-ANTLR3 application. It is used as the input for a Lexer.
-
-- ANTLRStringStream: Reads from a string object. The input should be a unicode
-  object, or ANTLR3 will have trouble decoding non-ascii data.
-- ANTLRFileStream: Opens a file and reads the contents, with optional
-  character decoding.
-- ANTLRInputStream: Reads the data from a file-like object, with optional
-  character decoding.
-
-A Parser needs a TokenStream as input (which in turn is usually fed by a
-Lexer):
-
-- CommonTokenStream: A basic and most commonly used TokenStream
-  implementation.
-- TokenRewriteStream: A modification of CommonTokenStream that allows the
-  stream to be altered (by the Parser). See the 'tweak' example for a usecase.
-
-And tree.TreeParser finally fetches its input from a tree.TreeNodeStream:
-
-- tree.CommonTreeNodeStream: A basic and most commonly used
-  tree.TreeNodeStream implementation.
-
-
-@section tokenstrees Tokens and Trees
-
-A Lexer emits Token objects which are usually buffered by a TokenStream. A
-Parser can build a Tree, if the output=AST option has been set in the grammar.
-
-The runtime provides these Token implementations:
-
-- CommonToken: A basic and most commonly used Token implementation.
-- ClassicToken: A Token object as used in ANTLR 2.x, used for %tree
-  construction.
-
-Tree objects are wrappers for Token objects.
-
-- tree.CommonTree: A basic and most commonly used Tree implementation.
-
-A tree.TreeAdaptor is used by the parser to create tree.Tree objects for the
-input Token objects.
-
-- tree.CommonTreeAdaptor: A basic and most commonly used tree.TreeAdaptor
-  implementation.
-
-
-@section Exceptions
-
-RecognitionExceptions are generated when a recognizer encounters incorrect
-or unexpected input.
-
-- RecognitionException
-  - MismatchedRangeException
-  - MismatchedSetException
-    - MismatchedNotSetException
-    .
-  - MismatchedTokenException
-  - MismatchedTreeNodeException
-  - NoViableAltException
-  - EarlyExitException
-  - FailedPredicateException
-  .
-.
-
-A tree.RewriteCardinalityException is raised when the parser hits a
-cardinality mismatch during AST construction. Although this is basically a
-bug in your grammar, it can only be detected at runtime.
-
-- tree.RewriteCardinalityException
-  - tree.RewriteEarlyExitException
-  - tree.RewriteEmptyStreamException
-  .
-.
-
-"""
-
-# tree.RewriteRuleElementStream
-# tree.RewriteRuleSubtreeStream
-# tree.RewriteRuleTokenStream
-# CharStream
-# DFA
-# TokenSource
-
-# [The "BSD licence"]
-# Copyright (c) 2005-2008 Terence Parr
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. The name of the author may not be used to endorse or promote products
-#    derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-__version__ = '3.4'
-
-# This runtime is compatible with generated parsers using the following
-# API versions. 'HEAD' is only used by unittests.
-compatible_api_versions = ['HEAD', 1]
-
-from constants import *
-from dfa import *
-from exceptions import *
-from recognizers import *
-from streams import *
-from tokens import *
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/compat.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/compat.py
deleted file mode 100644
index b29afcaa..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/compat.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""Compatibility stuff"""
-
-# begin[licence]
-#
-# [The "BSD licence"]
-# Copyright (c) 2005-2008 Terence Parr
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. The name of the author may not be used to endorse or promote products
-#    derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -try: - set = set - frozenset = frozenset -except NameError: - from sets import Set as set, ImmutableSet as frozenset - - -try: - reversed = reversed -except NameError: - def reversed(l): - l = l[:] - l.reverse() - return l - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/constants.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/constants.py deleted file mode 100644 index bf4a47a0..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/constants.py +++ /dev/null @@ -1,57 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2008 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -EOF = -1 - -## All tokens go to the parser (unless skip() is called in that rule) -# on a particular "channel". The parser tunes to a particular channel -# so that whitespace etc... can go to the parser on a "hidden" channel. -DEFAULT_CHANNEL = 0 - -## Anything on different channel than DEFAULT_CHANNEL is not parsed -# by parser. 
-HIDDEN_CHANNEL = 99
-
-# Predefined token types
-EOR_TOKEN_TYPE = 1
-
-##
-# imaginary tree navigation type; traverse "get child" link
-DOWN = 2
-##
-# imaginary tree navigation type; finish with a child list
-UP = 3
-
-MIN_TOKEN_TYPE = UP+1
-
-INVALID_TOKEN_TYPE = 0
-
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/debug.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/debug.py
deleted file mode 100644
index 6668fa58..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/debug.py
+++ /dev/null
@@ -1,1137 +0,0 @@
-# begin[licence]
-#
-# [The "BSD licence"]
-# Copyright (c) 2005-2009 Terence Parr
-# All rights reserved.
-
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. The name of the author may not be used to endorse or promote products
-#    derived from this software without specific prior written permission.
-
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# end[licence]
-
-import socket
-import sys  # used by TraceDebugEventListener.record() further down
-from antlr3 import Parser, TokenStream, RecognitionException, Token
-from antlr3.tree import CommonTreeAdaptor, TreeAdaptor, Tree
-
-class DebugParser(Parser):
-    def __init__(self, stream, state=None, dbg=None, *args, **kwargs):
-        # wrap token stream in DebugTokenStream (unless user already did so).
-        if not isinstance(stream, DebugTokenStream):
-            stream = DebugTokenStream(stream, dbg)
-
-        super(DebugParser, self).__init__(stream, state, *args, **kwargs)
-
-        # Who to notify when events in the parser occur.
-        self._dbg = None
-
-        self.setDebugListener(dbg)
-
-
-    def setDebugListener(self, dbg):
-        """Provide a new debug event listener for this parser. Notify the
-        input stream too that it should send events to this listener.
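For orientation, a minimal sketch of how a listener might be wired up. TLexer and TParser are hypothetical classes generated from some grammar with the -debug option, and the assumption that their constructors forward dbg= to DebugParser is exactly that, an assumption; only the listener plumbing is taken from this module:

    import antlr3
    from antlr3.debug import TraceDebugEventListener

    listener = TraceDebugEventListener()

    # TLexer/TParser are stand-ins for your own generated recognizers.
    lexer = TLexer(antlr3.ANTLRStringStream(u"3+4*5"))
    tokens = antlr3.CommonTokenStream(lexer)
    parser = TParser(tokens, dbg=listener)

    # The dbg property on DebugParser also allows swapping listeners later:
    parser.dbg = listener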
- """ - - if hasattr(self.input, 'dbg'): - self.input.dbg = dbg - - self._dbg = dbg - - def getDebugListener(self): - return self._dbg - - dbg = property(getDebugListener, setDebugListener) - - - def beginResync(self): - self._dbg.beginResync() - - - def endResync(self): - self._dbg.endResync() - - - def beginBacktrack(self, level): - self._dbg.beginBacktrack(level) - - - def endBacktrack(self, level, successful): - self._dbg.endBacktrack(level,successful) - - - def reportError(self, exc): - Parser.reportError(self, exc) - - if isinstance(exc, RecognitionException): - self._dbg.recognitionException(exc) - - -class DebugTokenStream(TokenStream): - def __init__(self, input, dbg=None): - self.input = input - self.initialStreamState = True - # Track the last mark() call result value for use in rewind(). - self.lastMarker = None - - self._dbg = None - self.setDebugListener(dbg) - - # force TokenStream to get at least first valid token - # so we know if there are any hidden tokens first in the stream - self.input.LT(1) - - - def getDebugListener(self): - return self._dbg - - def setDebugListener(self, dbg): - self._dbg = dbg - - dbg = property(getDebugListener, setDebugListener) - - - def consume(self): - if self.initialStreamState: - self.consumeInitialHiddenTokens() - - a = self.input.index() - t = self.input.LT(1) - self.input.consume() - b = self.input.index() - self._dbg.consumeToken(t) - - if b > a+1: - # then we consumed more than one token; must be off channel tokens - for idx in range(a+1, b): - self._dbg.consumeHiddenToken(self.input.get(idx)); - - - def consumeInitialHiddenTokens(self): - """consume all initial off-channel tokens""" - - firstOnChannelTokenIndex = self.input.index() - for idx in range(firstOnChannelTokenIndex): - self._dbg.consumeHiddenToken(self.input.get(idx)) - - self.initialStreamState = False - - - def LT(self, i): - if self.initialStreamState: - self.consumeInitialHiddenTokens() - - t = self.input.LT(i) - self._dbg.LT(i, t) - return t - - - def LA(self, i): - if self.initialStreamState: - self.consumeInitialHiddenTokens() - - t = self.input.LT(i) - self._dbg.LT(i, t) - return t.type - - - def get(self, i): - return self.input.get(i) - - - def index(self): - return self.input.index() - - - def mark(self): - self.lastMarker = self.input.mark() - self._dbg.mark(self.lastMarker) - return self.lastMarker - - - def rewind(self, marker=None): - self._dbg.rewind(marker) - self.input.rewind(marker) - - - def release(self, marker): - pass - - - def seek(self, index): - # TODO: implement seek in dbg interface - # self._dbg.seek(index); - self.input.seek(index) - - - def size(self): - return self.input.size() - - - def getTokenSource(self): - return self.input.getTokenSource() - - - def getSourceName(self): - return self.getTokenSource().getSourceName() - - - def toString(self, start=None, stop=None): - return self.input.toString(start, stop) - - -class DebugTreeAdaptor(TreeAdaptor): - """A TreeAdaptor proxy that fires debugging events to a DebugEventListener - delegate and uses the TreeAdaptor delegate to do the actual work. All - AST events are triggered by this adaptor; no code gen changes are needed - in generated rules. Debugging events are triggered *after* invoking - tree adaptor routines. - - Trees created with actions in rewrite actions like "-> ^(ADD {foo} {bar})" - cannot be tracked as they might not use the adaptor to create foo, bar. - The debug listener has to deal with tree node IDs for which it did - not see a createNode event. 
A single node is sufficient even
-    if it represents a whole tree.
-    """
-
-    def __init__(self, dbg, adaptor):
-        self.dbg = dbg
-        self.adaptor = adaptor
-
-
-    def createWithPayload(self, payload):
-        if payload.getTokenIndex() < 0:
-            # could be token conjured up during error recovery
-            return self.createFromType(payload.getType(), payload.getText())
-
-        node = self.adaptor.createWithPayload(payload)
-        self.dbg.createNode(node, payload)
-        return node
-
-    def createFromToken(self, tokenType, fromToken, text=None):
-        node = self.adaptor.createFromToken(tokenType, fromToken, text)
-        self.dbg.createNode(node)
-        return node
-
-    def createFromType(self, tokenType, text):
-        node = self.adaptor.createFromType(tokenType, text)
-        self.dbg.createNode(node)
-        return node
-
-
-    def errorNode(self, input, start, stop, exc):
-        node = self.adaptor.errorNode(input, start, stop, exc)
-        if node is not None:
-            self.dbg.errorNode(node)
-
-        return node
-
-
-    def dupTree(self, tree):
-        t = self.adaptor.dupTree(tree)
-        # walk the tree and emit create and add child events
-        # to simulate what dupTree has done. dupTree does not call this debug
-        # adapter so I must simulate.
-        self.simulateTreeConstruction(t)
-        return t
-
-
-    def simulateTreeConstruction(self, t):
-        """^(A B C): emit create A, create B, add child, ..."""
-        self.dbg.createNode(t)
-        for i in range(self.adaptor.getChildCount(t)):
-            child = self.adaptor.getChild(t, i)
-            self.simulateTreeConstruction(child)
-            self.dbg.addChild(t, child)
-
-
-    def dupNode(self, treeNode):
-        d = self.adaptor.dupNode(treeNode)
-        self.dbg.createNode(d)
-        return d
-
-
-    def nil(self):
-        node = self.adaptor.nil()
-        self.dbg.nilNode(node)
-        return node
-
-
-    def isNil(self, tree):
-        return self.adaptor.isNil(tree)
-
-
-    def addChild(self, t, child):
-        if isinstance(child, Token):
-            n = self.createWithPayload(child)
-            self.addChild(t, n)
-
-        else:
-            if t is None or child is None:
-                return
-
-            self.adaptor.addChild(t, child)
-            self.dbg.addChild(t, child)
-
-    def becomeRoot(self, newRoot, oldRoot):
-        if isinstance(newRoot, Token):
-            n = self.createWithPayload(newRoot)
-            self.adaptor.becomeRoot(n, oldRoot)
-        else:
-            n = self.adaptor.becomeRoot(newRoot, oldRoot)
-
-        self.dbg.becomeRoot(newRoot, oldRoot)
-        return n
-
-
-    def rulePostProcessing(self, root):
-        return self.adaptor.rulePostProcessing(root)
-
-
-    def getType(self, t):
-        return self.adaptor.getType(t)
-
-
-    def setType(self, t, type):
-        self.adaptor.setType(t, type)
-
-
-    def getText(self, t):
-        return self.adaptor.getText(t)
-
-
-    def setText(self, t, text):
-        self.adaptor.setText(t, text)
-
-
-    def getToken(self, t):
-        return self.adaptor.getToken(t)
-
-
-    def setTokenBoundaries(self, t, startToken, stopToken):
-        self.adaptor.setTokenBoundaries(t, startToken, stopToken)
-        if t is not None and startToken is not None and stopToken is not None:
-            self.dbg.setTokenBoundaries(
-                t, startToken.getTokenIndex(),
-                stopToken.getTokenIndex())
-
-
-    def getTokenStartIndex(self, t):
-        return self.adaptor.getTokenStartIndex(t)
-
-
-    def getTokenStopIndex(self, t):
-        return self.adaptor.getTokenStopIndex(t)
-
-
-    def getChild(self, t, i):
-        return self.adaptor.getChild(t, i)
-
-
-    def setChild(self, t, i, child):
-        self.adaptor.setChild(t, i, child)
-
-
-    def deleteChild(self, t, i):
-        return self.adaptor.deleteChild(t, i)
-
-
-    def getChildCount(self, t):
-        return self.adaptor.getChildCount(t)
-
-
-    def getUniqueID(self, node):
-        return self.adaptor.getUniqueID(node)
-
-
-    def getParent(self, t):
-        return self.adaptor.getParent(t)
-
-
-    def getChildIndex(self, t):
-        return self.adaptor.getChildIndex(t)
-
-
-    def setParent(self, t, parent):
-        self.adaptor.setParent(t, parent)
-
-
-    def setChildIndex(self, t, index):
-        self.adaptor.setChildIndex(t, index)
-
-
-    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
-        self.adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t)
-
-
-    ## support
-
-    def getDebugListener(self):
-        return self.dbg
-
-    def setDebugListener(self, dbg):
-        self.dbg = dbg
-
-
-    def getTreeAdaptor(self):
-        return self.adaptor
-
-
-
-class DebugEventListener(object):
-    """All debugging events that a recognizer can trigger.
-
-    I did not create a separate AST debugging interface as it would create
-    lots of extra classes and DebugParser has a dbg var defined, which makes
-    it hard to change to ASTDebugEventListener. I looked hard at this issue
-    and it is easier to understand as one monolithic event interface for all
-    possible events. Hopefully, adding ST debugging stuff won't be bad. Leave
-    for future. 4/26/2006.
-    """
-
-    # Moved to version 2 for v3.1: added grammar name to enter/exit Rule
-    PROTOCOL_VERSION = "2"
-
-    def enterRule(self, grammarFileName, ruleName):
-        """The parser has just entered a rule. No decision has been made about
-        which alt is predicted. This is fired AFTER init actions have been
-        executed. Attributes are defined and available etc...
-        The grammarFileName allows composite grammars to jump around among
-        multiple grammar files.
-        """
-
-        pass
-
-
-    def enterAlt(self, alt):
-        """Because rules can have lots of alternatives, it is very useful to
-        know which alt you are entering. This is 1..n for n alts.
-        """
-        pass
-
-
-    def exitRule(self, grammarFileName, ruleName):
-        """This is the last thing executed before leaving a rule. It is
-        executed even if an exception is thrown. This is triggered after
-        error reporting and recovery have occurred (unless the exception is
-        not caught in this rule). This implies an "exitAlt" event.
-        The grammarFileName allows composite grammars to jump around among
-        multiple grammar files.
-        """
-        pass
-
-
-    def enterSubRule(self, decisionNumber):
-        """Track entry into any (...) subrule or other EBNF construct"""
-        pass
-
-
-    def exitSubRule(self, decisionNumber):
-        pass
-
-
-    def enterDecision(self, decisionNumber, couldBacktrack):
-        """Every decision, fixed k or arbitrary, has an enter/exit event
-        so that a GUI can easily track what LT/consume events are
-        associated with prediction. You will see a single enter/exit
-        subrule but multiple enter/exit decision events, one for each
-        loop iteration.
-        """
-        pass
-
-
-    def exitDecision(self, decisionNumber):
-        pass
-
-
-    def consumeToken(self, t):
-        """An input token was consumed; matched by any kind of element.
-        Trigger after the token was matched by things like match(), matchAny().
-        """
-        pass
-
-
-    def consumeHiddenToken(self, t):
-        """An off-channel input token was consumed.
-        Trigger after the token was matched by things like match(), matchAny().
-        (unless of course the hidden token is the first thing in the input
-        stream).
-        """
-        pass
-
-
-    def LT(self, i, t):
-        """Somebody (anybody) looked ahead. Note that this actually gets
-        triggered by both LA and LT calls. The debugger will want to know
-        which Token object was examined. Like consumeToken, this indicates
-        what token was seen at that depth. A remote debugger cannot look
-        ahead into a file it doesn't have so LT events must pass the token
-        even if the info is redundant.
-        """
-        pass
-
-
-    def mark(self, marker):
-        """The parser is going to look arbitrarily ahead; mark this location,
-        the token stream's marker is sent in case you need it.
-        """
-        pass
-
-
-    def rewind(self, marker=None):
-        """After an arbitrarily long lookahead as with a cyclic DFA (or with
-        any backtrack), this informs the debugger that the stream should be
-        rewound to the position associated with marker.
-
-        """
-        pass
-
-
-    def beginBacktrack(self, level):
-        pass
-
-
-    def endBacktrack(self, level, successful):
-        pass
-
-
-    def location(self, line, pos):
-        """To watch a parser move through the grammar, the parser needs to
-        inform the debugger what line/charPos it is passing in the grammar.
-        For now, this does not know how to switch from one grammar to the
-        other and back for island grammars etc...
-
-        This should also allow breakpoints because the debugger can stop
-        the parser whenever it hits this line/pos.
-        """
-        pass
-
-
-    def recognitionException(self, e):
-        """A recognition exception occurred such as NoViableAltException. I made
-        this a generic event so that I can alter the exception hierarchy later
-        without having to alter all the debug objects.
-
-        Upon error, the stack of enter rule/subrule must be properly unwound.
-        If no viable alt occurs it is within an enter/exit decision, which
-        also must be rewound. Even the rewind for each mark must be unwound.
-        In the Java target this is pretty easy using try/finally, if a bit
-        ugly in the generated code. The rewind is generated in DFA.predict()
-        actually so no code needs to be generated for that. For languages
-        w/o this "finally" feature (C++?), the target implementor will have
-        to build an event stack or something.
-
-        Across a socket for remote debugging, only the RecognitionException
-        data fields are transmitted. The token object or whatever that
-        caused the problem was the last object referenced by LT. The
-        immediately preceding LT event should hold the unexpected Token or
-        char.
-
-        Here is a sample event trace for grammar:
-
-        b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
-          | D
-          ;
-
-        The sequence for this rule (with no viable alt in the subrule) for
-        input 'c c' (there are 3 tokens) is:
-
-        commence
-        LT(1)
-        enterRule b
-        location 7 1
-        enter decision 3
-        LT(1)
-        exit decision 3
-        enterAlt1
-        location 7 5
-        LT(1)
-        consumeToken [c/<4>,1:0]
-        location 7 7
-        enterSubRule 2
-        enter decision 2
-        LT(1)
-        LT(1)
-        recognitionException NoViableAltException 2 1 2
-        exit decision 2
-        exitSubRule 2
-        beginResync
-        LT(1)
-        consumeToken [c/<4>,1:1]
-        LT(1)
-        endResync
-        LT(-1)
-        exitRule b
-        terminate
-        """
-        pass
-
-
-    def beginResync(self):
-        """Indicates the recognizer is about to consume tokens to resynchronize
-        the parser. Any consume events from here until the recovered event
-        are not part of the parse--they are dead tokens.
-        """
-        pass
-
-
-    def endResync(self):
-        """Indicates that the recognizer has finished consuming tokens in order
-        to resynchronize. There may be multiple beginResync/endResync pairs
-        before the recognizer comes out of errorRecovery mode (in which
-        multiple errors are suppressed). This will be useful
-        in a GUI where you probably want to grey out tokens that are consumed
-        but not matched to anything in the grammar. Anything between
-        a beginResync/endResync pair was tossed out by the parser.
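As an illustration of the resync contract just described, a small listener sketch that collects the dead tokens consumed between beginResync and endResync; the class and attribute names are invented for the example:

    from antlr3.debug import DebugEventListener

    class ResyncTracker(DebugEventListener):
        """Collects tokens thrown away while the parser resynchronizes.

        Built only on the beginResync/endResync/consumeToken contract
        documented above; everything else is left at the no-op defaults.
        """

        def __init__(self):
            self.resyncing = False
            self.deadTokens = []

        def beginResync(self):
            self.resyncing = True

        def endResync(self):
            self.resyncing = False

        def consumeToken(self, t):
            if self.resyncing:
                # anything consumed here was tossed out by the parser
                self.deadTokens.append(t)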
- """ - pass - - - def semanticPredicate(self, result, predicate): - """A semantic predicate was evaluate with this result and action text""" - pass - - - def commence(self): - """Announce that parsing has begun. Not technically useful except for - sending events over a socket. A GUI for example will launch a thread - to connect and communicate with a remote parser. The thread will want - to notify the GUI when a connection is made. ANTLR parsers - trigger this upon entry to the first rule (the ruleLevel is used to - figure this out). - """ - pass - - - def terminate(self): - """Parsing is over; successfully or not. Mostly useful for telling - remote debugging listeners that it's time to quit. When the rule - invocation level goes to zero at the end of a rule, we are done - parsing. - """ - pass - - - ## T r e e P a r s i n g - - def consumeNode(self, t): - """Input for a tree parser is an AST, but we know nothing for sure - about a node except its type and text (obtained from the adaptor). - This is the analog of the consumeToken method. Again, the ID is - the hashCode usually of the node so it only works if hashCode is - not implemented. If the type is UP or DOWN, then - the ID is not really meaningful as it's fixed--there is - just one UP node and one DOWN navigation node. - """ - pass - - - def LT(self, i, t): - """The tree parser lookedahead. If the type is UP or DOWN, - then the ID is not really meaningful as it's fixed--there is - just one UP node and one DOWN navigation node. - """ - pass - - - - ## A S T E v e n t s - - def nilNode(self, t): - """A nil was created (even nil nodes have a unique ID... - they are not "null" per se). As of 4/28/2006, this - seems to be uniquely triggered when starting a new subtree - such as when entering a subrule in automatic mode and when - building a tree in rewrite mode. - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only t.ID is set. - """ - pass - - - def errorNode(self, t): - """Upon syntax error, recognizers bracket the error with an error node - if they are building ASTs. - """ - pass - - - def createNode(self, node, token=None): - """Announce a new node built from token elements such as type etc... - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only t.ID, type, text are - set. - """ - pass - - - def becomeRoot(self, newRoot, oldRoot): - """Make a node the new root of an existing root. - - Note: the newRootID parameter is possibly different - than the TreeAdaptor.becomeRoot() newRoot parameter. - In our case, it will always be the result of calling - TreeAdaptor.becomeRoot() and not root_n or whatever. - - The listener should assume that this event occurs - only when the current subrule (or rule) subtree is - being reset to newRootID. - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only IDs are set. - - @see antlr3.tree.TreeAdaptor.becomeRoot() - """ - pass - - - def addChild(self, root, child): - """Make childID a child of rootID. - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only IDs are set. - - @see antlr3.tree.TreeAdaptor.addChild() - """ - pass - - - def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex): - """Set the token start/stop token index for a subtree root or node. - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only t.ID is set. 
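To watch these AST events fire without running a full parser, one can drive a DebugTreeAdaptor by hand. A sketch, where TraceDebugEventListener is the recording listener defined just below and the numeric token types 10 and 11 are arbitrary placeholders for real grammar token types:

    from antlr3.tree import CommonTreeAdaptor
    from antlr3.debug import DebugTreeAdaptor, TraceDebugEventListener

    listener = TraceDebugEventListener()
    adaptor = DebugTreeAdaptor(listener, CommonTreeAdaptor())

    plus = adaptor.createFromType(10, "+")     # fires createNode
    three = adaptor.createFromType(11, "3")    # fires createNode
    adaptor.addChild(plus, three)              # fires addChild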
- """ - pass - - -class BlankDebugEventListener(DebugEventListener): - """A blank listener that does nothing; useful for real classes so - they don't have to have lots of blank methods and are less - sensitive to updates to debug interface. - - Note: this class is identical to DebugEventListener and exists purely - for compatibility with Java. - """ - pass - - -class TraceDebugEventListener(DebugEventListener): - """A listener that simply records text representations of the events. - - Useful for debugging the debugging facility ;) - - Subclasses can override the record() method (which defaults to printing to - stdout) to record the events in a different way. - """ - - def __init__(self, adaptor=None): - super(TraceDebugEventListener, self).__init__() - - if adaptor is None: - adaptor = CommonTreeAdaptor() - self.adaptor = adaptor - - def record(self, event): - sys.stdout.write(event + '\n') - - def enterRule(self, grammarFileName, ruleName): - self.record("enterRule "+ruleName) - - def exitRule(self, grammarFileName, ruleName): - self.record("exitRule "+ruleName) - - def enterSubRule(self, decisionNumber): - self.record("enterSubRule") - - def exitSubRule(self, decisionNumber): - self.record("exitSubRule") - - def location(self, line, pos): - self.record("location %s:%s" % (line, pos)) - - ## Tree parsing stuff - - def consumeNode(self, t): - self.record("consumeNode %s %s %s" % ( - self.adaptor.getUniqueID(t), - self.adaptor.getText(t), - self.adaptor.getType(t))) - - def LT(self, i, t): - self.record("LT %s %s %s %s" % ( - i, - self.adaptor.getUniqueID(t), - self.adaptor.getText(t), - self.adaptor.getType(t))) - - - ## AST stuff - def nilNode(self, t): - self.record("nilNode %s" % self.adaptor.getUniqueID(t)) - - def createNode(self, t, token=None): - if token is None: - self.record("create %s: %s, %s" % ( - self.adaptor.getUniqueID(t), - self.adaptor.getText(t), - self.adaptor.getType(t))) - - else: - self.record("create %s: %s" % ( - self.adaptor.getUniqueID(t), - token.getTokenIndex())) - - def becomeRoot(self, newRoot, oldRoot): - self.record("becomeRoot %s, %s" % ( - self.adaptor.getUniqueID(newRoot), - self.adaptor.getUniqueID(oldRoot))) - - def addChild(self, root, child): - self.record("addChild %s, %s" % ( - self.adaptor.getUniqueID(root), - self.adaptor.getUniqueID(child))) - - def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex): - self.record("setTokenBoundaries %s, %s, %s" % ( - self.adaptor.getUniqueID(t), - tokenStartIndex, tokenStopIndex)) - - -class RecordDebugEventListener(TraceDebugEventListener): - """A listener that records events as strings in an array.""" - - def __init__(self, adaptor=None): - super(RecordDebugEventListener, self).__init__(adaptor) - - self.events = [] - - def record(self, event): - self.events.append(event) - - -class DebugEventSocketProxy(DebugEventListener): - """A proxy debug event listener that forwards events over a socket to - a debugger (or any other listener) using a simple text-based protocol; - one event per line. ANTLRWorks listens on server socket with a - RemoteDebugEventSocketListener instance. These two objects must therefore - be kept in sync. New events must be handled on both sides of socket. 
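A toy sketch of the other end of the wire, standing in for ANTLRWorks. It assumes only the proxy behaviour implemented below: the proxy listens on DEFAULT_DEBUGGER_PORT, writes one event per line, and its ack() merely reads a line back, so the literal "ack" payload here is an arbitrary choice:

    import socket

    sock = socket.create_connection(('localhost', 49100))
    rfile = sock.makefile('r')
    wfile = sock.makefile('w', 0)

    while True:
        event = rfile.readline()
        if not event:
            break          # proxy closed the connection (terminate)
        print event.rstrip()
        wfile.write("ack\n")   # satisfy the proxy's ack() after each event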
- """ - - DEFAULT_DEBUGGER_PORT = 49100 - - def __init__(self, recognizer, adaptor=None, port=None, - debug=None): - super(DebugEventSocketProxy, self).__init__() - - self.grammarFileName = recognizer.getGrammarFileName() - - # Almost certainly the recognizer will have adaptor set, but - # we don't know how to cast it (Parser or TreeParser) to get - # the adaptor field. Must be set with a constructor. :( - self.adaptor = adaptor - - self.port = port or self.DEFAULT_DEBUGGER_PORT - - self.debug = debug - - self.socket = None - self.connection = None - self.input = None - self.output = None - - - def log(self, msg): - if self.debug is not None: - self.debug.write(msg + '\n') - - - def handshake(self): - if self.socket is None: - # create listening socket - self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.socket.bind(('', self.port)) - self.socket.listen(1) - self.log("Waiting for incoming connection on port %d" % self.port) - - # wait for an incoming connection - self.connection, addr = self.socket.accept() - self.log("Accepted connection from %s:%d" % addr) - - self.connection.setblocking(1) - self.connection.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) - - # FIXME(pink): wrap into utf8 encoding stream - self.output = self.connection.makefile('w', 0) - self.input = self.connection.makefile('r', 0) - - self.write("ANTLR %s" % self.PROTOCOL_VERSION) - self.write("grammar \"%s" % self.grammarFileName) - self.ack() - - - def write(self, msg): - self.log("> %s" % msg) - self.output.write("%s\n" % msg) - self.output.flush() - - - def ack(self): - t = self.input.readline() - self.log("< %s" % t.rstrip()) - - - def transmit(self, event): - self.write(event); - self.ack(); - - - def commence(self): - # don't bother sending event; listener will trigger upon connection - pass - - - def terminate(self): - self.transmit("terminate") - self.output.close() - self.input.close() - self.connection.close() - self.socket.close() - - - def enterRule(self, grammarFileName, ruleName): - self.transmit("enterRule\t%s\t%s" % (grammarFileName, ruleName)) - - - def enterAlt(self, alt): - self.transmit("enterAlt\t%d" % alt) - - - def exitRule(self, grammarFileName, ruleName): - self.transmit("exitRule\t%s\t%s" % (grammarFileName, ruleName)) - - - def enterSubRule(self, decisionNumber): - self.transmit("enterSubRule\t%d" % decisionNumber) - - - def exitSubRule(self, decisionNumber): - self.transmit("exitSubRule\t%d" % decisionNumber) - - - def enterDecision(self, decisionNumber, couldBacktrack): - self.transmit( - "enterDecision\t%d\t%d" % (decisionNumber, couldBacktrack)) - - - def exitDecision(self, decisionNumber): - self.transmit("exitDecision\t%d" % decisionNumber) - - - def consumeToken(self, t): - self.transmit("consumeToken\t%s" % self.serializeToken(t)) - - - def consumeHiddenToken(self, t): - self.transmit("consumeHiddenToken\t%s" % self.serializeToken(t)) - - - def LT(self, i, o): - if isinstance(o, Tree): - return self.LT_tree(i, o) - return self.LT_token(i, o) - - - def LT_token(self, i, t): - if t is not None: - self.transmit("LT\t%d\t%s" % (i, self.serializeToken(t))) - - - def mark(self, i): - self.transmit("mark\t%d" % i) - - - def rewind(self, i=None): - if i is not None: - self.transmit("rewind\t%d" % i) - else: - self.transmit("rewind") - - - def beginBacktrack(self, level): - self.transmit("beginBacktrack\t%d" % level) - - - def endBacktrack(self, level, successful): - self.transmit("endBacktrack\t%d\t%s" 
% ( - level, ['0', '1'][bool(successful)])) - - - def location(self, line, pos): - self.transmit("location\t%d\t%d" % (line, pos)) - - - def recognitionException(self, exc): - self.transmit('\t'.join([ - "exception", - exc.__class__.__name__, - str(int(exc.index)), - str(int(exc.line)), - str(int(exc.charPositionInLine))])) - - - def beginResync(self): - self.transmit("beginResync") - - - def endResync(self): - self.transmit("endResync") - - - def semanticPredicate(self, result, predicate): - self.transmit('\t'.join([ - "semanticPredicate", - str(int(result)), - self.escapeNewlines(predicate)])) - - ## A S T P a r s i n g E v e n t s - - def consumeNode(self, t): - FIXME(31) -# StringBuffer buf = new StringBuffer(50); -# buf.append("consumeNode"); -# serializeNode(buf, t); -# transmit(buf.toString()); - - - def LT_tree(self, i, t): - FIXME(34) -# int ID = adaptor.getUniqueID(t); -# String text = adaptor.getText(t); -# int type = adaptor.getType(t); -# StringBuffer buf = new StringBuffer(50); -# buf.append("LN\t"); // lookahead node; distinguish from LT in protocol -# buf.append(i); -# serializeNode(buf, t); -# transmit(buf.toString()); - - - def serializeNode(self, buf, t): - FIXME(33) -# int ID = adaptor.getUniqueID(t); -# String text = adaptor.getText(t); -# int type = adaptor.getType(t); -# buf.append("\t"); -# buf.append(ID); -# buf.append("\t"); -# buf.append(type); -# Token token = adaptor.getToken(t); -# int line = -1; -# int pos = -1; -# if ( token!=null ) { -# line = token.getLine(); -# pos = token.getCharPositionInLine(); -# } -# buf.append("\t"); -# buf.append(line); -# buf.append("\t"); -# buf.append(pos); -# int tokenIndex = adaptor.getTokenStartIndex(t); -# buf.append("\t"); -# buf.append(tokenIndex); -# serializeText(buf, text); - - - ## A S T E v e n t s - - def nilNode(self, t): - self.transmit("nilNode\t%d" % self.adaptor.getUniqueID(t)) - - - def errorNode(self, t): - self.transmit("errorNode\t%d\t%d\t\"%s" % ( - self.adaptor.getUniqueID(t), - Token.INVALID_TOKEN_TYPE, - self.escapeNewlines(t.toString()))) - - - - def createNode(self, node, token=None): - if token is not None: - self.transmit("createNode\t%d\t%d" % ( - self.adaptor.getUniqueID(node), - token.getTokenIndex())) - - else: - self.transmit("createNodeFromTokenElements\t%d\t%d\t\"%s" % ( - self.adaptor.getUniqueID(node), - self.adaptor.getType(node), - self.adaptor.getText(node))) - - - def becomeRoot(self, newRoot, oldRoot): - self.transmit("becomeRoot\t%d\t%d" % ( - self.adaptor.getUniqueID(newRoot), - self.adaptor.getUniqueID(oldRoot))) - - - def addChild(self, root, child): - self.transmit("addChild\t%d\t%d" % ( - self.adaptor.getUniqueID(root), - self.adaptor.getUniqueID(child))) - - - def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex): - self.transmit("setTokenBoundaries\t%d\t%d\t%d" % ( - self.adaptor.getUniqueID(t), - tokenStartIndex, tokenStopIndex)) - - - - ## support - - def setTreeAdaptor(self, adaptor): - self.adaptor = adaptor - - def getTreeAdaptor(self): - return self.adaptor - - - def serializeToken(self, t): - buf = [str(int(t.getTokenIndex())), - str(int(t.getType())), - str(int(t.getChannel())), - str(int(t.getLine() or 0)), - str(int(t.getCharPositionInLine() or 0)), - '\"' + self.escapeNewlines(t.getText())] - return '\t'.join(buf) - - - def escapeNewlines(self, txt): - if txt is None: - return '' - - txt = txt.replace("%","%25") # escape all escape char ;) - txt = txt.replace("\n","%0A") # escape \n - txt = txt.replace("\r","%0D") # escape \r - return txt diff --git 
a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/dfa.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/dfa.py
deleted file mode 100644
index ff93761a..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/dfa.py
+++ /dev/null
@@ -1,213 +0,0 @@
-"""ANTLR3 runtime package"""
-
-# begin[licence]
-#
-# [The "BSD licence"]
-# Copyright (c) 2005-2008 Terence Parr
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. The name of the author may not be used to endorse or promote products
-#    derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# end[licence]
-
-from antlr3.constants import EOF
-from antlr3.exceptions import NoViableAltException, BacktrackingFailed
-
-
-class DFA(object):
-    """@brief A DFA implemented as a set of transition tables.
-
-    Any state that has a semantic predicate edge is special; those states
-    are generated with if-then-else structures in a specialStateTransition()
-    which is generated by cyclicDFA template.
-
-    """
-
-    def __init__(
-        self,
-        recognizer, decisionNumber,
-        eot, eof, min, max, accept, special, transition
-        ):
-        ## Which recognizer encloses this DFA? Needed to check backtracking
-        self.recognizer = recognizer
-
-        self.decisionNumber = decisionNumber
-        self.eot = eot
-        self.eof = eof
-        self.min = min
-        self.max = max
-        self.accept = accept
-        self.special = special
-        self.transition = transition
-
-
-    def predict(self, input):
-        """
-        From the input stream, predict what alternative will succeed
-        using this DFA (representing the covering regular approximation
-        to the underlying CFL). Return an alternative number 1..n. Throw
-        an exception upon error.
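In generated parsers these tables arrive as packed strings and are decoded by unpack() further down; purely for illustration, here is a hand-built toy decision (alternative 1 begins with 'a', alternative 2 with 'b') with the tables written out as plain lists:

    from antlr3 import ANTLRStringStream
    from antlr3.dfa import DFA

    # s0 branches on 'a' (97) -> s1 or 'b' (98) -> s2; s1 and s2 are
    # accept states for alts 1 and 2. recognizer=None is fine here since
    # the backtracking check is only consulted on a failed prediction.
    toy = DFA(
        recognizer=None, decisionNumber=1,
        eot=[-1, -1, -1],
        eof=[-1, -1, -1],
        min=[97, 0, 0],
        max=[98, 0, 0],
        accept=[-1, 1, 2],
        special=[-1, -1, -1],
        transition=[[1, 2], [], []],
        )

    print toy.predict(ANTLRStringStream(u"apple"))    # -> 1
    print toy.predict(ANTLRStringStream(u"banana"))   # -> 2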
- """ - mark = input.mark() - s = 0 # we always start at s0 - try: - for _ in xrange(50000): - #print "***Current state = %d" % s - - specialState = self.special[s] - if specialState >= 0: - #print "is special" - s = self.specialStateTransition(specialState, input) - if s == -1: - self.noViableAlt(s, input) - return 0 - input.consume() - continue - - if self.accept[s] >= 1: - #print "accept state for alt %d" % self.accept[s] - return self.accept[s] - - # look for a normal char transition - c = input.LA(1) - - #print "LA = %d (%r)" % (c, unichr(c) if c >= 0 else 'EOF') - #print "range = %d..%d" % (self.min[s], self.max[s]) - - if c >= self.min[s] and c <= self.max[s]: - # move to next state - snext = self.transition[s][c-self.min[s]] - #print "in range, next state = %d" % snext - - if snext < 0: - #print "not a normal transition" - # was in range but not a normal transition - # must check EOT, which is like the else clause. - # eot[s]>=0 indicates that an EOT edge goes to another - # state. - if self.eot[s] >= 0: # EOT Transition to accept state? - #print "EOT trans to accept state %d" % self.eot[s] - - s = self.eot[s] - input.consume() - # TODO: I had this as return accept[eot[s]] - # which assumed here that the EOT edge always - # went to an accept...faster to do this, but - # what about predicated edges coming from EOT - # target? - continue - - #print "no viable alt" - self.noViableAlt(s, input) - return 0 - - s = snext - input.consume() - continue - - if self.eot[s] >= 0: - #print "EOT to %d" % self.eot[s] - - s = self.eot[s] - input.consume() - continue - - # EOF Transition to accept state? - if c == EOF and self.eof[s] >= 0: - #print "EOF Transition to accept state %d" \ - # % self.accept[self.eof[s]] - return self.accept[self.eof[s]] - - # not in range and not EOF/EOT, must be invalid symbol - self.noViableAlt(s, input) - return 0 - - else: - raise RuntimeError("DFA bang!") - - finally: - input.rewind(mark) - - - def noViableAlt(self, s, input): - if self.recognizer._state.backtracking > 0: - raise BacktrackingFailed - - nvae = NoViableAltException( - self.getDescription(), - self.decisionNumber, - s, - input - ) - - self.error(nvae) - raise nvae - - - def error(self, nvae): - """A hook for debugging interface""" - pass - - - def specialStateTransition(self, s, input): - return -1 - - - def getDescription(self): - return "n/a" - - -## def specialTransition(self, state, symbol): -## return 0 - - - def unpack(cls, string): - """@brief Unpack the runlength encoded table data. - - Terence implemented packed table initializers, because Java has a - size restriction on .class files and the lookup tables can grow - pretty large. The generated JavaLexer.java of the Java.g example - would be about 15MB with uncompressed array initializers. - - Python does not have any size restrictions, but the compilation of - such large source files seems to be pretty memory hungry. The memory - consumption of the python process grew to >1.5GB when importing a - 15MB lexer, eating all my swap space and I was to impacient to see, - if it could finish at all. With packed initializers that are unpacked - at import time of the lexer module, everything works like a charm. - - """ - - ret = [] - for i in range(len(string) / 2): - (n, v) = ord(string[i*2]), ord(string[i*2+1]) - - # Is there a bitwise operation to do this? 
- if v == 0xFFFF: - v = -1 - - ret += [v] * n - - return ret - - unpack = classmethod(unpack) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/dottreegen.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/dottreegen.py deleted file mode 100644 index 41415b13..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/dottreegen.py +++ /dev/null @@ -1,210 +0,0 @@ -""" @package antlr3.dottreegenerator -@brief ANTLR3 runtime package, tree module - -This module contains all support classes for AST construction and tree parsers. - -""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2008 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -# lot's of docstrings are missing, don't complain for now... -# pylint: disable-msg=C0111 - -from antlr3.tree import CommonTreeAdaptor -import stringtemplate3 - -class DOTTreeGenerator(object): - """ - A utility class to generate DOT diagrams (graphviz) from - arbitrary trees. You can pass in your own templates and - can pass in any kind of tree or use Tree interface method. 
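A hedged usage sketch of this class, building a tiny tree by hand rather than via a parser. It needs the stringtemplate3 package imported above, and the token types 10 and 11 are arbitrary placeholders for real grammar types:

    from antlr3.tree import CommonTreeAdaptor
    from antlr3.dottreegen import DOTTreeGenerator

    adaptor = CommonTreeAdaptor()
    plus = adaptor.createFromType(10, "+")
    adaptor.addChild(plus, adaptor.createFromType(11, "3"))
    adaptor.addChild(plus, adaptor.createFromType(11, "4"))

    gen = DOTTreeGenerator()
    # toDOT returns a StringTemplate; printing it renders the digraph source.
    print gen.toDOT(plus, adaptor)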
- """ - - _treeST = stringtemplate3.StringTemplate( - template=( - "digraph {\n" + - " ordering=out;\n" + - " ranksep=.4;\n" + - " node [shape=plaintext, fixedsize=true, fontsize=11, fontname=\"Courier\",\n" + - " width=.25, height=.25];\n" + - " edge [arrowsize=.5]\n" + - " $nodes$\n" + - " $edges$\n" + - "}\n") - ) - - _nodeST = stringtemplate3.StringTemplate( - template="$name$ [label=\"$text$\"];\n" - ) - - _edgeST = stringtemplate3.StringTemplate( - template="$parent$ -> $child$ // \"$parentText$\" -> \"$childText$\"\n" - ) - - def __init__(self): - ## Track node to number mapping so we can get proper node name back - self.nodeToNumberMap = {} - - ## Track node number so we can get unique node names - self.nodeNumber = 0 - - - def toDOT(self, tree, adaptor=None, treeST=_treeST, edgeST=_edgeST): - if adaptor is None: - adaptor = CommonTreeAdaptor() - - treeST = treeST.getInstanceOf() - - self.nodeNumber = 0 - self.toDOTDefineNodes(tree, adaptor, treeST) - - self.nodeNumber = 0 - self.toDOTDefineEdges(tree, adaptor, treeST, edgeST) - return treeST - - - def toDOTDefineNodes(self, tree, adaptor, treeST, knownNodes=None): - if knownNodes is None: - knownNodes = set() - - if tree is None: - return - - n = adaptor.getChildCount(tree) - if n == 0: - # must have already dumped as child from previous - # invocation; do nothing - return - - # define parent node - number = self.getNodeNumber(tree) - if number not in knownNodes: - parentNodeST = self.getNodeST(adaptor, tree) - treeST.setAttribute("nodes", parentNodeST) - knownNodes.add(number) - - # for each child, do a " [label=text]" node def - for i in range(n): - child = adaptor.getChild(tree, i) - - number = self.getNodeNumber(child) - if number not in knownNodes: - nodeST = self.getNodeST(adaptor, child) - treeST.setAttribute("nodes", nodeST) - knownNodes.add(number) - - self.toDOTDefineNodes(child, adaptor, treeST, knownNodes) - - - def toDOTDefineEdges(self, tree, adaptor, treeST, edgeST): - if tree is None: - return - - n = adaptor.getChildCount(tree) - if n == 0: - # must have already dumped as child from previous - # invocation; do nothing - return - - parentName = "n%d" % self.getNodeNumber(tree) - - # for each child, do a parent -> child edge using unique node names - parentText = adaptor.getText(tree) - for i in range(n): - child = adaptor.getChild(tree, i) - childText = adaptor.getText(child) - childName = "n%d" % self.getNodeNumber(child) - edgeST = edgeST.getInstanceOf() - edgeST.setAttribute("parent", parentName) - edgeST.setAttribute("child", childName) - edgeST.setAttribute("parentText", parentText) - edgeST.setAttribute("childText", childText) - treeST.setAttribute("edges", edgeST) - self.toDOTDefineEdges(child, adaptor, treeST, edgeST) - - - def getNodeST(self, adaptor, t): - text = adaptor.getText(t) - nodeST = self._nodeST.getInstanceOf() - uniqueName = "n%d" % self.getNodeNumber(t) - nodeST.setAttribute("name", uniqueName) - if text is not None: - text = text.replace('"', r'\"') - nodeST.setAttribute("text", text) - return nodeST - - - def getNodeNumber(self, t): - try: - return self.nodeToNumberMap[t] - except KeyError: - self.nodeToNumberMap[t] = self.nodeNumber - self.nodeNumber += 1 - return self.nodeNumber - 1 - - -def toDOT(tree, adaptor=None, treeST=DOTTreeGenerator._treeST, edgeST=DOTTreeGenerator._edgeST): - """ - Generate DOT (graphviz) for a whole tree not just a node. 
-    For example, 3+4*5 should generate:
-
-    digraph {
-        node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier",
-              width=.4, height=.2];
-        edge [arrowsize=.7]
-        "+"->3
-        "+"->"*"
-        "*"->4
-        "*"->5
-    }
-
-    Return the ST not a string in case people want to alter it.
-
-    Takes a Tree interface object.
-
-    Example of invocation:
-
-        import sys
-        import antlr3
-        import antlr3.extras
-
-        input = antlr3.ANTLRInputStream(sys.stdin)
-        lex = TLexer(input)
-        tokens = antlr3.CommonTokenStream(lex)
-        parser = TParser(tokens)
-        tree = parser.e().tree
-        print tree.toStringTree()
-        st = antlr3.extras.toDOT(tree)
-        print st
-
-    """
-
-    gen = DOTTreeGenerator()
-    return gen.toDOT(tree, adaptor, treeST, edgeST)
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/exceptions.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/exceptions.py
deleted file mode 100644
index 97b10743..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/exceptions.py
+++ /dev/null
@@ -1,364 +0,0 @@
-"""ANTLR3 exception hierarchy"""
-
-# begin[licence]
-#
-# [The "BSD licence"]
-# Copyright (c) 2005-2008 Terence Parr
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. The name of the author may not be used to endorse or promote products
-#    derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# end[licence]
-
-from antlr3.constants import INVALID_TOKEN_TYPE
-
-
-class BacktrackingFailed(Exception):
-    """@brief Raised to signal failed backtrack attempt"""
-
-    pass
-
-
-class RecognitionException(Exception):
-    """@brief The root of the ANTLR exception hierarchy.
-
-    To avoid English-only error messages and to generally make things
-    as flexible as possible, these exceptions are not created with strings,
-    but rather the information necessary to generate an error. Then
-    the various reporting methods in Parser and Lexer can be overridden
-    to generate a localized error message. For example, MismatchedToken
-    exceptions are built with the expected token type.
-    So, don't expect getMessage() to return anything.
-
-    Note that as of Java 1.4, you can access the stack trace, which means
-    that you can compute the complete trace of rules from the start symbol.
-    This gives you considerable context information with which to generate
-    useful error messages.
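For instance, a sketch of such an override. TParser stands for any generated parser class, and the message format is invented for the example; only getErrorMessage() and the exception fields are taken from the runtime:

    from antlr3 import BaseRecognizer, MismatchedTokenException

    class MyParser(TParser):
        def getErrorMessage(self, e, tokenNames):
            # Build a message from the exception's structured fields
            # instead of relying on the default English rendering.
            if isinstance(e, MismatchedTokenException):
                return "expected %s, but saw %r" % (
                    tokenNames[e.expecting], e.token.text)
            return BaseRecognizer.getErrorMessage(self, e, tokenNames)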
-
-    ANTLR generates code that throws exceptions upon recognition error and
-    also generates code to catch these exceptions in each rule. If you
-    want to quit upon first error, you can turn off the automatic error
-    handling mechanism using rulecatch action, but you still need to
-    override methods mismatch and recoverFromMismatchSet.
-
-    In general, the recognition exceptions can track where in a grammar a
-    problem occurred and/or what was the expected input. While the parser
-    knows its state (such as current input symbol and line info) that
-    state can change before the exception is reported, so the current token
-    index is computed and stored at exception time. From this info, you can
-    perhaps print an entire line of input not just a single token, for example.
-    Better to just say the recognizer had a problem and then let the parser
-    figure out a fancy report.
-
-    """
-
-    def __init__(self, input=None):
-        Exception.__init__(self)
-
-        # What input stream did the error occur in?
-        self.input = None
-
-        # What is the index of the token/char we were looking at when the
-        # error occurred?
-        self.index = None
-
-        # The current Token when an error occurred. Since not all streams
-        # can retrieve the ith Token, we have to track the Token object.
-        # For parsers. Even when it's a tree parser, token might be set.
-        self.token = None
-
-        # If this is a tree parser exception, node is set to the node with
-        # the problem.
-        self.node = None
-
-        # The current char when an error occurred. For lexers.
-        self.c = None
-
-        # Track the line at which the error occurred in case this is
-        # generated from a lexer. We need to track this since the
-        # unexpected char doesn't carry the line info.
-        self.line = None
-
-        self.charPositionInLine = None
-
-        # If you are parsing a tree node stream, you will encounter some
-        # imaginary nodes w/o line/col info. We now search backwards looking
-        # for the most recent token with line/col info, but notify
-        # getErrorHeader() that info is approximate.
- self.approximateLineInfo = False - - - if input is not None: - self.input = input - self.index = input.index() - - # late import to avoid cyclic dependencies - from antlr3.streams import TokenStream, CharStream - from antlr3.tree import TreeNodeStream - - if isinstance(self.input, TokenStream): - self.token = self.input.LT(1) - self.line = self.token.line - self.charPositionInLine = self.token.charPositionInLine - - if isinstance(self.input, TreeNodeStream): - self.extractInformationFromTreeNodeStream(self.input) - - else: - if isinstance(self.input, CharStream): - self.c = self.input.LT(1) - self.line = self.input.line - self.charPositionInLine = self.input.charPositionInLine - - else: - self.c = self.input.LA(1) - - def extractInformationFromTreeNodeStream(self, nodes): - from antlr3.tree import Tree, CommonTree - from antlr3.tokens import CommonToken - - self.node = nodes.LT(1) - adaptor = nodes.adaptor - payload = adaptor.getToken(self.node) - if payload is not None: - self.token = payload - if payload.line <= 0: - # imaginary node; no line/pos info; scan backwards - i = -1 - priorNode = nodes.LT(i) - while priorNode is not None: - priorPayload = adaptor.getToken(priorNode) - if priorPayload is not None and priorPayload.line > 0: - # we found the most recent real line / pos info - self.line = priorPayload.line - self.charPositionInLine = priorPayload.charPositionInLine - self.approximateLineInfo = True - break - - i -= 1 - priorNode = nodes.LT(i) - - else: # node created from real token - self.line = payload.line - self.charPositionInLine = payload.charPositionInLine - - elif isinstance(self.node, Tree): - self.line = self.node.line - self.charPositionInLine = self.node.charPositionInLine - if isinstance(self.node, CommonTree): - self.token = self.node.token - - else: - type = adaptor.getType(self.node) - text = adaptor.getText(self.node) - self.token = CommonToken(type=type, text=text) - - - def getUnexpectedType(self): - """Return the token type or char of the unexpected input element""" - - from antlr3.streams import TokenStream - from antlr3.tree import TreeNodeStream - - if isinstance(self.input, TokenStream): - return self.token.type - - elif isinstance(self.input, TreeNodeStream): - adaptor = self.input.treeAdaptor - return adaptor.getType(self.node) - - else: - return self.c - - unexpectedType = property(getUnexpectedType) - - -class MismatchedTokenException(RecognitionException): - """@brief A mismatched char or Token or tree node.""" - - def __init__(self, expecting, input): - RecognitionException.__init__(self, input) - self.expecting = expecting - - - def __str__(self): - #return "MismatchedTokenException("+self.expecting+")" - return "MismatchedTokenException(%r!=%r)" % ( - self.getUnexpectedType(), self.expecting - ) - __repr__ = __str__ - - -class UnwantedTokenException(MismatchedTokenException): - """An extra token while parsing a TokenStream""" - - def getUnexpectedToken(self): - return self.token - - - def __str__(self): - exp = ", expected %s" % self.expecting - if self.expecting == INVALID_TOKEN_TYPE: - exp = "" - - if self.token is None: - return "UnwantedTokenException(found=%s%s)" % (None, exp) - - return "UnwantedTokenException(found=%s%s)" % (self.token.text, exp) - __repr__ = __str__ - - -class MissingTokenException(MismatchedTokenException): - """ - We were expecting a token but it's not found. The current token - is actually what we wanted next. 
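Since the constructors take structured data rather than message strings, the exception classes are easy to exercise directly. A small sketch, in which the token type 5 is an arbitrary placeholder and input=None skips the stream introspection above:

    from antlr3.exceptions import (
        MismatchedTokenException, MissingTokenException)

    exc = MismatchedTokenException(5, input=None)
    print exc        # MismatchedTokenException(None!=5)

    missing = MissingTokenException(5, None, inserted=None)
    print missing    # MissingTokenException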
- """ - - def __init__(self, expecting, input, inserted): - MismatchedTokenException.__init__(self, expecting, input) - - self.inserted = inserted - - - def getMissingType(self): - return self.expecting - - - def __str__(self): - if self.inserted is not None and self.token is not None: - return "MissingTokenException(inserted %r at %r)" % ( - self.inserted, self.token.text) - - if self.token is not None: - return "MissingTokenException(at %r)" % self.token.text - - return "MissingTokenException" - __repr__ = __str__ - - -class MismatchedRangeException(RecognitionException): - """@brief The next token does not match a range of expected types.""" - - def __init__(self, a, b, input): - RecognitionException.__init__(self, input) - - self.a = a - self.b = b - - - def __str__(self): - return "MismatchedRangeException(%r not in [%r..%r])" % ( - self.getUnexpectedType(), self.a, self.b - ) - __repr__ = __str__ - - -class MismatchedSetException(RecognitionException): - """@brief The next token does not match a set of expected types.""" - - def __init__(self, expecting, input): - RecognitionException.__init__(self, input) - - self.expecting = expecting - - - def __str__(self): - return "MismatchedSetException(%r not in %r)" % ( - self.getUnexpectedType(), self.expecting - ) - __repr__ = __str__ - - -class MismatchedNotSetException(MismatchedSetException): - """@brief Used for remote debugger deserialization""" - - def __str__(self): - return "MismatchedNotSetException(%r!=%r)" % ( - self.getUnexpectedType(), self.expecting - ) - __repr__ = __str__ - - -class NoViableAltException(RecognitionException): - """@brief Unable to decide which alternative to choose.""" - - def __init__( - self, grammarDecisionDescription, decisionNumber, stateNumber, input - ): - RecognitionException.__init__(self, input) - - self.grammarDecisionDescription = grammarDecisionDescription - self.decisionNumber = decisionNumber - self.stateNumber = stateNumber - - - def __str__(self): - return "NoViableAltException(%r!=[%r])" % ( - self.unexpectedType, self.grammarDecisionDescription - ) - __repr__ = __str__ - - -class EarlyExitException(RecognitionException): - """@brief The recognizer did not match anything for a (..)+ loop.""" - - def __init__(self, decisionNumber, input): - RecognitionException.__init__(self, input) - - self.decisionNumber = decisionNumber - - -class FailedPredicateException(RecognitionException): - """@brief A semantic predicate failed during validation. - - Validation of predicates - occurs when normally parsing the alternative just like matching a token. - Disambiguating predicate evaluation occurs when we hoist a predicate into - a prediction decision. 
- """ - - def __init__(self, input, ruleName, predicateText): - RecognitionException.__init__(self, input) - - self.ruleName = ruleName - self.predicateText = predicateText - - - def __str__(self): - return "FailedPredicateException("+self.ruleName+",{"+self.predicateText+"}?)" - __repr__ = __str__ - - -class MismatchedTreeNodeException(RecognitionException): - """@brief The next tree mode does not match the expected type.""" - - def __init__(self, expecting, input): - RecognitionException.__init__(self, input) - - self.expecting = expecting - - def __str__(self): - return "MismatchedTreeNodeException(%r!=%r)" % ( - self.getUnexpectedType(), self.expecting - ) - __repr__ = __str__ diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/extras.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/extras.py deleted file mode 100644 index 9155cda9..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/extras.py +++ /dev/null @@ -1,47 +0,0 @@ -""" @package antlr3.dottreegenerator -@brief ANTLR3 runtime package, tree module - -This module contains all support classes for AST construction and tree parsers. - -""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2008 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -# lot's of docstrings are missing, don't complain for now... -# pylint: disable-msg=C0111 - -from treewizard import TreeWizard - -try: - from antlr3.dottreegen import toDOT -except ImportError, exc: - def toDOT(*args, **kwargs): - raise exc diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/main.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/main.py deleted file mode 100644 index 9dcfa369..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/main.py +++ /dev/null @@ -1,305 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2008 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. 
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. The name of the author may not be used to endorse or promote products
-#    derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# end[licence]
-
-
-import sys
-import optparse
-
-import antlr3
-
-
-class _Main(object):
-    def __init__(self):
-        self.stdin = sys.stdin
-        self.stdout = sys.stdout
-        self.stderr = sys.stderr
-
-
-    def parseOptions(self, argv):
-        optParser = optparse.OptionParser()
-        optParser.add_option(
-            "--encoding",
-            action="store",
-            type="string",
-            dest="encoding"
-            )
-        optParser.add_option(
-            "--input",
-            action="store",
-            type="string",
-            dest="input"
-            )
-        optParser.add_option(
-            "--interactive", "-i",
-            action="store_true",
-            dest="interactive"
-            )
-        optParser.add_option(
-            "--no-output",
-            action="store_true",
-            dest="no_output"
-            )
-        optParser.add_option(
-            "--profile",
-            action="store_true",
-            dest="profile"
-            )
-        optParser.add_option(
-            "--hotshot",
-            action="store_true",
-            dest="hotshot"
-            )
-        optParser.add_option(
-            "--port",
-            type="int",
-            dest="port",
-            default=None
-            )
-        optParser.add_option(
-            "--debug-socket",
-            action='store_true',
-            dest="debug_socket",
-            default=None
-            )
-
-        self.setupOptions(optParser)
-
-        return optParser.parse_args(argv[1:])
-
-
-    def setupOptions(self, optParser):
-        pass
-
-
-    def execute(self, argv):
-        options, args = self.parseOptions(argv)
-
-        self.setUp(options)
-
-        if options.interactive:
-            while True:
-                try:
-                    input = raw_input(">>> ")
-                except (EOFError, KeyboardInterrupt):
-                    self.stdout.write("\nBye.\n")
-                    break
-
-                inStream = antlr3.ANTLRStringStream(input)
-                self.parseStream(options, inStream)
-
-        else:
-            if options.input is not None:
-                inStream = antlr3.ANTLRStringStream(options.input)
-
-            elif len(args) == 1 and args[0] != '-':
-                inStream = antlr3.ANTLRFileStream(
-                    args[0], encoding=options.encoding
-                    )
-
-            else:
-                inStream = antlr3.ANTLRInputStream(
-                    self.stdin, encoding=options.encoding
-                    )
-
-            if options.profile:
-                try:
-                    import cProfile as profile
-                except ImportError:
-                    import profile
-
-                profile.runctx(
-                    'self.parseStream(options, inStream)',
-                    globals(),
-                    locals(),
-                    'profile.dat'
-                    )
-
-                import pstats
-                stats = pstats.Stats('profile.dat')
-                stats.strip_dirs()
-                stats.sort_stats('time')
-                stats.print_stats(100)
-
-            elif options.hotshot:
-                import hotshot
-
-                profiler = hotshot.Profile('hotshot.dat')
-                profiler.runctx(
-                    'self.parseStream(options, inStream)',
-                    globals(),
-                    locals()
-                    )
-
-            else:
-                self.parseStream(options,
inStream) - - - def setUp(self, options): - pass - - - def parseStream(self, options, inStream): - raise NotImplementedError - - - def write(self, options, text): - if not options.no_output: - self.stdout.write(text) - - - def writeln(self, options, text): - self.write(options, text + '\n') - - -class LexerMain(_Main): - def __init__(self, lexerClass): - _Main.__init__(self) - - self.lexerClass = lexerClass - - - def parseStream(self, options, inStream): - lexer = self.lexerClass(inStream) - for token in lexer: - self.writeln(options, str(token)) - - -class ParserMain(_Main): - def __init__(self, lexerClassName, parserClass): - _Main.__init__(self) - - self.lexerClassName = lexerClassName - self.lexerClass = None - self.parserClass = parserClass - - - def setupOptions(self, optParser): - optParser.add_option( - "--lexer", - action="store", - type="string", - dest="lexerClass", - default=self.lexerClassName - ) - optParser.add_option( - "--rule", - action="store", - type="string", - dest="parserRule" - ) - - - def setUp(self, options): - lexerMod = __import__(options.lexerClass) - self.lexerClass = getattr(lexerMod, options.lexerClass) - - - def parseStream(self, options, inStream): - kwargs = {} - if options.port is not None: - kwargs['port'] = options.port - if options.debug_socket is not None: - kwargs['debug_socket'] = sys.stderr - - lexer = self.lexerClass(inStream) - tokenStream = antlr3.CommonTokenStream(lexer) - parser = self.parserClass(tokenStream, **kwargs) - result = getattr(parser, options.parserRule)() - if result is not None: - if hasattr(result, 'tree') and result.tree is not None: - self.writeln(options, result.tree.toStringTree()) - else: - self.writeln(options, repr(result)) - - -class WalkerMain(_Main): - def __init__(self, walkerClass): - _Main.__init__(self) - - self.lexerClass = None - self.parserClass = None - self.walkerClass = walkerClass - - - def setupOptions(self, optParser): - optParser.add_option( - "--lexer", - action="store", - type="string", - dest="lexerClass", - default=None - ) - optParser.add_option( - "--parser", - action="store", - type="string", - dest="parserClass", - default=None - ) - optParser.add_option( - "--parser-rule", - action="store", - type="string", - dest="parserRule", - default=None - ) - optParser.add_option( - "--rule", - action="store", - type="string", - dest="walkerRule" - ) - - - def setUp(self, options): - lexerMod = __import__(options.lexerClass) - self.lexerClass = getattr(lexerMod, options.lexerClass) - parserMod = __import__(options.parserClass) - self.parserClass = getattr(parserMod, options.parserClass) - - - def parseStream(self, options, inStream): - lexer = self.lexerClass(inStream) - tokenStream = antlr3.CommonTokenStream(lexer) - parser = self.parserClass(tokenStream) - result = getattr(parser, options.parserRule)() - if result is not None: - assert hasattr(result, 'tree'), "Parser did not return an AST" - nodeStream = antlr3.tree.CommonTreeNodeStream(result.tree) - nodeStream.setTokenStream(tokenStream) - walker = self.walkerClass(nodeStream) - result = getattr(walker, options.walkerRule)() - if result is not None: - if hasattr(result, 'tree'): - self.writeln(options, result.tree.toStringTree()) - else: - self.writeln(options, repr(result)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/recognizers.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/recognizers.py deleted file mode 100644 index d48280a5..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/recognizers.py +++ /dev/null 
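These helpers back the command-line hook ANTLR emits at the bottom of generated modules. A generated MyParser.py (placeholder name) typically ends with the equivalent of:

if __name__ == '__main__':
    import sys
    from antlr3.main import ParserMain

    main = ParserMain("MyLexer", MyParser)   # lexer module name, parser class
    main.execute(sys.argv)
    # e.g.:  python MyParser.py --rule prog input.txt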
@@ -1,1485 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2008 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -import sys -import inspect - -from antlr3 import compatible_api_versions -from antlr3.constants import DEFAULT_CHANNEL, HIDDEN_CHANNEL, EOF, \ - EOR_TOKEN_TYPE, INVALID_TOKEN_TYPE -from antlr3.exceptions import RecognitionException, MismatchedTokenException, \ - MismatchedRangeException, MismatchedTreeNodeException, \ - NoViableAltException, EarlyExitException, MismatchedSetException, \ - MismatchedNotSetException, FailedPredicateException, \ - BacktrackingFailed, UnwantedTokenException, MissingTokenException -from antlr3.tokens import CommonToken, SKIP_TOKEN -from antlr3.compat import set, frozenset, reversed - - -class RecognizerSharedState(object): - """ - The set of fields needed by an abstract recognizer to recognize input - and recover from errors etc... As a separate state object, it can be - shared among multiple grammars; e.g., when one grammar imports another. - - These fields are publically visible but the actual state pointer per - parser is protected. - """ - - def __init__(self): - # Track the set of token types that can follow any rule invocation. - # Stack grows upwards. - self.following = [] - - # This is true when we see an error and before having successfully - # matched a token. Prevents generation of more than one error message - # per error. - self.errorRecovery = False - - # The index into the input stream where the last error occurred. - # This is used to prevent infinite loops where an error is found - # but no token is consumed during recovery...another error is found, - # ad naseum. This is a failsafe mechanism to guarantee that at least - # one token/tree node is consumed for two errors. - self.lastErrorIndex = -1 - - # If 0, no backtracking is going on. Safe to exec actions etc... - # If >0 then it's the level of backtracking. - self.backtracking = 0 - - # An array[size num rules] of Map that tracks - # the stop token index for each rule. ruleMemo[ruleIndex] is - # the memoization table for ruleIndex. 
For key ruleStartIndex, you - # get back the stop token for associated rule or MEMO_RULE_FAILED. - # - # This is only used if rule memoization is on (which it is by default). - self.ruleMemo = None - - ## Did the recognizer encounter a syntax error? Track how many. - self.syntaxErrors = 0 - - - # LEXER FIELDS (must be in same state object to avoid casting - # constantly in generated code and Lexer object) :( - - - ## The goal of all lexer rules/methods is to create a token object. - # This is an instance variable as multiple rules may collaborate to - # create a single token. nextToken will return this object after - # matching lexer rule(s). If you subclass to allow multiple token - # emissions, then set this to the last token to be matched or - # something nonnull so that the auto token emit mechanism will not - # emit another token. - self.token = None - - ## What character index in the stream did the current token start at? - # Needed, for example, to get the text for current token. Set at - # the start of nextToken. - self.tokenStartCharIndex = -1 - - ## The line on which the first character of the token resides - self.tokenStartLine = None - - ## The character position of first character within the line - self.tokenStartCharPositionInLine = None - - ## The channel number for the current token - self.channel = None - - ## The token type for the current token - self.type = None - - ## You can set the text for the current token to override what is in - # the input char buffer. Use setText() or can set this instance var. - self.text = None - - -class BaseRecognizer(object): - """ - @brief Common recognizer functionality. - - A generic recognizer that can handle recognizers generated from - lexer, parser, and tree grammars. This is all the parsing - support code essentially; most of it is error recovery stuff and - backtracking. - """ - - MEMO_RULE_FAILED = -2 - MEMO_RULE_UNKNOWN = -1 - - # copies from Token object for convenience in actions - DEFAULT_TOKEN_CHANNEL = DEFAULT_CHANNEL - - # for convenience in actions - HIDDEN = HIDDEN_CHANNEL - - # overridden by generated subclasses - tokenNames = None - - # The api_version attribute has been introduced in 3.3. If it is not - # overwritten in the generated recognizer, we assume a default of v0. - api_version = 0 - - def __init__(self, state=None): - # Input stream of the recognizer. Must be initialized by a subclass. - self.input = None - - ## State of a lexer, parser, or tree parser are collected into a state - # object so the state can be shared. This sharing is needed to - # have one grammar import others and share same error variables - # and other state variables. It's a kind of explicit multiple - # inheritance via delegation of methods and shared state. 
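Concretely, the sharing is done by handing one RecognizerSharedState to every recognizer in the grammar family; a sketch with invented generated classes:

from antlr3.recognizers import RecognizerSharedState

shared = RecognizerSharedState()
outer = OuterParser(tokens, state=shared)    # hypothetical delegating parser
# Delegates created by OuterParser reuse `shared`, so errorRecovery,
# backtracking depth, and ruleMemo are common to the whole grammar family.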
- if state is None: - state = RecognizerSharedState() - self._state = state - - if self.api_version not in compatible_api_versions: - raise RuntimeError( - ("ANTLR version mismatch: " - "The recognizer has been generated with API V%s, " - "but this runtime does not support this.") - % self.api_version) - - # this one only exists to shut up pylint :( - def setInput(self, input): - self.input = input - - - def reset(self): - """ - reset the parser's state; subclasses must rewinds the input stream - """ - - # wack everything related to error recovery - if self._state is None: - # no shared state work to do - return - - self._state.following = [] - self._state.errorRecovery = False - self._state.lastErrorIndex = -1 - self._state.syntaxErrors = 0 - # wack everything related to backtracking and memoization - self._state.backtracking = 0 - if self._state.ruleMemo is not None: - self._state.ruleMemo = {} - - - def match(self, input, ttype, follow): - """ - Match current input symbol against ttype. Attempt - single token insertion or deletion error recovery. If - that fails, throw MismatchedTokenException. - - To turn off single token insertion or deletion error - recovery, override recoverFromMismatchedToken() and have it - throw an exception. See TreeParser.recoverFromMismatchedToken(). - This way any error in a rule will cause an exception and - immediate exit from rule. Rule would recover by resynchronizing - to the set of symbols that can follow rule ref. - """ - - matchedSymbol = self.getCurrentInputSymbol(input) - if self.input.LA(1) == ttype: - self.input.consume() - self._state.errorRecovery = False - return matchedSymbol - - if self._state.backtracking > 0: - # FIXME: need to return matchedSymbol here as well. damn!! - raise BacktrackingFailed - - matchedSymbol = self.recoverFromMismatchedToken(input, ttype, follow) - return matchedSymbol - - - def matchAny(self, input): - """Match the wildcard: in a symbol""" - - self._state.errorRecovery = False - self.input.consume() - - - def mismatchIsUnwantedToken(self, input, ttype): - return input.LA(2) == ttype - - - def mismatchIsMissingToken(self, input, follow): - if follow is None: - # we have no information about the follow; we can only consume - # a single token and hope for the best - return False - - # compute what can follow this grammar element reference - if EOR_TOKEN_TYPE in follow: - viableTokensFollowingThisRule = self.computeContextSensitiveRuleFOLLOW() - follow = follow | viableTokensFollowingThisRule - - if len(self._state.following) > 0: - # remove EOR if we're not the start symbol - follow = follow - set([EOR_TOKEN_TYPE]) - - # if current token is consistent with what could come after set - # then we know we're missing a token; error recovery is free to - # "insert" the missing token - if input.LA(1) in follow or EOR_TOKEN_TYPE in follow: - return True - - return False - - - def reportError(self, e): - """Report a recognition problem. - - This method sets errorRecovery to indicate the parser is recovering - not parsing. Once in recovery mode, no errors are generated. - To get out of recovery mode, the parser must successfully match - a token (after a resync). So it will go: - - 1. error occurs - 2. enter recovery mode, report error - 3. consume until token found in resynch set - 4. try to resume parsing - 5. next match() will reset errorRecovery mode - - If you override, make sure to update syntaxErrors if you care about - that. 
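Condensed to plain Python, the five-step cycle drives the state flags like this (a sketch of the flow, not the method body that follows):

def report_error(recognizer, e):
    state = recognizer._state
    if state.errorRecovery:                # still recovering: suppress message
        return
    state.syntaxErrors += 1                # count only the first report
    state.errorRecovery = True             # steps 1-2: enter recovery mode
    recognizer.displayRecognitionError(recognizer.tokenNames, e)
    # Step 3 happens in recover(); a later successful match() resets
    # errorRecovery again (steps 4-5).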
- - """ - - # if we've already reported an error and have not matched a token - # yet successfully, don't report any errors. - if self._state.errorRecovery: - return - - self._state.syntaxErrors += 1 # don't count spurious - self._state.errorRecovery = True - - self.displayRecognitionError(self.tokenNames, e) - - - def displayRecognitionError(self, tokenNames, e): - hdr = self.getErrorHeader(e) - msg = self.getErrorMessage(e, tokenNames) - self.emitErrorMessage(hdr+" "+msg) - - - def getErrorMessage(self, e, tokenNames): - """ - What error message should be generated for the various - exception types? - - Not very object-oriented code, but I like having all error message - generation within one method rather than spread among all of the - exception classes. This also makes it much easier for the exception - handling because the exception classes do not have to have pointers back - to this object to access utility routines and so on. Also, changing - the message for an exception type would be difficult because you - would have to subclassing exception, but then somehow get ANTLR - to make those kinds of exception objects instead of the default. - This looks weird, but trust me--it makes the most sense in terms - of flexibility. - - For grammar debugging, you will want to override this to add - more information such as the stack frame with - getRuleInvocationStack(e, this.getClass().getName()) and, - for no viable alts, the decision description and state etc... - - Override this to change the message generated for one or more - exception types. - """ - - if isinstance(e, UnwantedTokenException): - tokenName = "" - if e.expecting == EOF: - tokenName = "EOF" - - else: - tokenName = self.tokenNames[e.expecting] - - msg = "extraneous input %s expecting %s" % ( - self.getTokenErrorDisplay(e.getUnexpectedToken()), - tokenName - ) - - elif isinstance(e, MissingTokenException): - tokenName = "" - if e.expecting == EOF: - tokenName = "EOF" - - else: - tokenName = self.tokenNames[e.expecting] - - msg = "missing %s at %s" % ( - tokenName, self.getTokenErrorDisplay(e.token) - ) - - elif isinstance(e, MismatchedTokenException): - tokenName = "" - if e.expecting == EOF: - tokenName = "EOF" - else: - tokenName = self.tokenNames[e.expecting] - - msg = "mismatched input " \ - + self.getTokenErrorDisplay(e.token) \ - + " expecting " \ - + tokenName - - elif isinstance(e, MismatchedTreeNodeException): - tokenName = "" - if e.expecting == EOF: - tokenName = "EOF" - else: - tokenName = self.tokenNames[e.expecting] - - msg = "mismatched tree node: %s expecting %s" \ - % (e.node, tokenName) - - elif isinstance(e, NoViableAltException): - msg = "no viable alternative at input " \ - + self.getTokenErrorDisplay(e.token) - - elif isinstance(e, EarlyExitException): - msg = "required (...)+ loop did not match anything at input " \ - + self.getTokenErrorDisplay(e.token) - - elif isinstance(e, MismatchedSetException): - msg = "mismatched input " \ - + self.getTokenErrorDisplay(e.token) \ - + " expecting set " \ - + repr(e.expecting) - - elif isinstance(e, MismatchedNotSetException): - msg = "mismatched input " \ - + self.getTokenErrorDisplay(e.token) \ - + " expecting set " \ - + repr(e.expecting) - - elif isinstance(e, FailedPredicateException): - msg = "rule " \ - + e.ruleName \ - + " failed predicate: {" \ - + e.predicateText \ - + "}?" - - else: - msg = str(e) - - return msg - - - def getNumberOfSyntaxErrors(self): - """ - Get number of recognition errors (lexer, parser, tree parser). 
Each - recognizer tracks its own number. So parser and lexer each have - separate count. Does not count the spurious errors found between - an error and next valid token match - - See also reportError() - """ - return self._state.syntaxErrors - - - def getErrorHeader(self, e): - """ - What is the error header, normally line/character position information? - """ - - source_name = self.getSourceName() - if source_name is not None: - return "%s line %d:%d" % (source_name, e.line, e.charPositionInLine) - return "line %d:%d" % (e.line, e.charPositionInLine) - - - def getTokenErrorDisplay(self, t): - """ - How should a token be displayed in an error message? The default - is to display just the text, but during development you might - want to have a lot of information spit out. Override in that case - to use t.toString() (which, for CommonToken, dumps everything about - the token). This is better than forcing you to override a method in - your token objects because you don't have to go modify your lexer - so that it creates a new Java type. - """ - - s = t.text - if s is None: - if t.type == EOF: - s = "" - else: - s = "<"+t.type+">" - - return repr(s) - - - def emitErrorMessage(self, msg): - """Override this method to change where error messages go""" - sys.stderr.write(msg + '\n') - - - def recover(self, input, re): - """ - Recover from an error found on the input stream. This is - for NoViableAlt and mismatched symbol exceptions. If you enable - single token insertion and deletion, this will usually not - handle mismatched symbol exceptions but there could be a mismatched - token that the match() routine could not recover from. - """ - - # PROBLEM? what if input stream is not the same as last time - # perhaps make lastErrorIndex a member of input - if self._state.lastErrorIndex == input.index(): - # uh oh, another error at same token index; must be a case - # where LT(1) is in the recovery token set so nothing is - # consumed; consume a single token so at least to prevent - # an infinite loop; this is a failsafe. - input.consume() - - self._state.lastErrorIndex = input.index() - followSet = self.computeErrorRecoverySet() - - self.beginResync() - self.consumeUntil(input, followSet) - self.endResync() - - - def beginResync(self): - """ - A hook to listen in on the token consumption during error recovery. - The DebugParser subclasses this to fire events to the listenter. - """ - - pass - - - def endResync(self): - """ - A hook to listen in on the token consumption during error recovery. - The DebugParser subclasses this to fire events to the listenter. - """ - - pass - - - def computeErrorRecoverySet(self): - """ - Compute the error recovery set for the current rule. During - rule invocation, the parser pushes the set of tokens that can - follow that rule reference on the stack; this amounts to - computing FIRST of what follows the rule reference in the - enclosing rule. This local follow set only includes tokens - from within the rule; i.e., the FIRST computation done by - ANTLR stops at the end of a rule. - - EXAMPLE - - When you find a "no viable alt exception", the input is not - consistent with any of the alternatives for rule r. The best - thing to do is to consume tokens until you see something that - can legally follow a call to r *or* any rule that called r. - You don't want the exact set of viable next tokens because the - input might just be missing a token--you might consume the - rest of the input looking for one of the missing tokens. 
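The recover() path above, stripped to its essentials, is one consume loop over that combined follow set (a sketch; EOF is from antlr3.constants):

followSet = self.computeErrorRecoverySet()
self.beginResync()
while self.input.LA(1) != EOF and self.input.LA(1) not in followSet:
    self.input.consume()                   # throw tokens away until resynced
self.endResync()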
- - Consider grammar: - - a : '[' b ']' - | '(' b ')' - ; - b : c '^' INT ; - c : ID - | INT - ; - - At each rule invocation, the set of tokens that could follow - that rule is pushed on a stack. Here are the various "local" - follow sets: - - FOLLOW(b1_in_a) = FIRST(']') = ']' - FOLLOW(b2_in_a) = FIRST(')') = ')' - FOLLOW(c_in_b) = FIRST('^') = '^' - - Upon erroneous input "[]", the call chain is - - a -> b -> c - - and, hence, the follow context stack is: - - depth local follow set after call to rule - 0 \ a (from main()) - 1 ']' b - 3 '^' c - - Notice that ')' is not included, because b would have to have - been called from a different context in rule a for ')' to be - included. - - For error recovery, we cannot consider FOLLOW(c) - (context-sensitive or otherwise). We need the combined set of - all context-sensitive FOLLOW sets--the set of all tokens that - could follow any reference in the call chain. We need to - resync to one of those tokens. Note that FOLLOW(c)='^' and if - we resync'd to that token, we'd consume until EOF. We need to - sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. - In this case, for input "[]", LA(1) is in this set so we would - not consume anything and after printing an error rule c would - return normally. It would not find the required '^' though. - At this point, it gets a mismatched token error and throws an - exception (since LA(1) is not in the viable following token - set). The rule exception handler tries to recover, but finds - the same recovery set and doesn't consume anything. Rule b - exits normally returning to rule a. Now it finds the ']' (and - with the successful match exits errorRecovery mode). - - So, you cna see that the parser walks up call chain looking - for the token that was a member of the recovery set. - - Errors are not generated in errorRecovery mode. - - ANTLR's error recovery mechanism is based upon original ideas: - - "Algorithms + Data Structures = Programs" by Niklaus Wirth - - and - - "A note on error recovery in recursive descent parsers": - http://portal.acm.org/citation.cfm?id=947902.947905 - - Later, Josef Grosch had some good ideas: - - "Efficient and Comfortable Error Recovery in Recursive Descent - Parsers": - ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip - - Like Grosch I implemented local FOLLOW sets that are combined - at run-time upon error to avoid overhead during parsing. - """ - - return self.combineFollows(False) - - - def computeContextSensitiveRuleFOLLOW(self): - """ - Compute the context-sensitive FOLLOW set for current rule. - This is set of token types that can follow a specific rule - reference given a specific call chain. You get the set of - viable tokens that can possibly come next (lookahead depth 1) - given the current call chain. Contrast this with the - definition of plain FOLLOW for rule r: - - FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)} - - where x in T* and alpha, beta in V*; T is set of terminals and - V is the set of terminals and nonterminals. In other words, - FOLLOW(r) is the set of all tokens that can possibly follow - references to r in *any* sentential form (context). At - runtime, however, we know precisely which context applies as - we have the call chain. We may compute the exact (rather - than covering superset) set of following tokens. - - For example, consider grammar: - - stat : ID '=' expr ';' // FOLLOW(stat)=={EOF} - | "return" expr '.' 
- ; - expr : atom ('+' atom)* ; // FOLLOW(expr)=={';','.',')'} - atom : INT // FOLLOW(atom)=={'+',')',';','.'} - | '(' expr ')' - ; - - The FOLLOW sets are all inclusive whereas context-sensitive - FOLLOW sets are precisely what could follow a rule reference. - For input input "i=(3);", here is the derivation: - - stat => ID '=' expr ';' - => ID '=' atom ('+' atom)* ';' - => ID '=' '(' expr ')' ('+' atom)* ';' - => ID '=' '(' atom ')' ('+' atom)* ';' - => ID '=' '(' INT ')' ('+' atom)* ';' - => ID '=' '(' INT ')' ';' - - At the "3" token, you'd have a call chain of - - stat -> expr -> atom -> expr -> atom - - What can follow that specific nested ref to atom? Exactly ')' - as you can see by looking at the derivation of this specific - input. Contrast this with the FOLLOW(atom)={'+',')',';','.'}. - - You want the exact viable token set when recovering from a - token mismatch. Upon token mismatch, if LA(1) is member of - the viable next token set, then you know there is most likely - a missing token in the input stream. "Insert" one by just not - throwing an exception. - """ - - return self.combineFollows(True) - - - def combineFollows(self, exact): - followSet = set() - for idx, localFollowSet in reversed(list(enumerate(self._state.following))): - followSet |= localFollowSet - if exact: - # can we see end of rule? - if EOR_TOKEN_TYPE in localFollowSet: - # Only leave EOR in set if at top (start rule); this lets - # us know if have to include follow(start rule); i.e., EOF - if idx > 0: - followSet.remove(EOR_TOKEN_TYPE) - - else: - # can't see end of rule, quit - break - - return followSet - - - def recoverFromMismatchedToken(self, input, ttype, follow): - """Attempt to recover from a single missing or extra token. - - EXTRA TOKEN - - LA(1) is not what we are looking for. If LA(2) has the right token, - however, then assume LA(1) is some extra spurious token. Delete it - and LA(2) as if we were doing a normal match(), which advances the - input. - - MISSING TOKEN - - If current token is consistent with what could come after - ttype then it is ok to 'insert' the missing token, else throw - exception For example, Input 'i=(3;' is clearly missing the - ')'. When the parser returns from the nested call to expr, it - will have call chain: - - stat -> expr -> atom - - and it will be trying to match the ')' at this point in the - derivation: - - => ID '=' '(' INT ')' ('+' atom)* ';' - ^ - match() will see that ';' doesn't match ')' and report a - mismatched token error. To recover, it sees that LA(1)==';' - is in the set of tokens that can follow the ')' token - reference in rule atom. It can assume that you forgot the ')'. 
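For the "i=(3;" example this boils down to the insertion branch sketched here (ttype is the ')' token type; the full method body follows):

# LA(1) is ';' and ';' is in follow(')'): conjure the ')' up instead of failing.
if self.mismatchIsMissingToken(input, follow):
    inserted = self.getMissingSymbol(input, None, ttype, follow)
    self.reportError(MissingTokenException(ttype, input, inserted))
    return inserted          # parse continues as if ')' had been matched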
- """ - - e = None - - # if next token is what we are looking for then "delete" this token - if self.mismatchIsUnwantedToken(input, ttype): - e = UnwantedTokenException(ttype, input) - - self.beginResync() - input.consume() # simply delete extra token - self.endResync() - - # report after consuming so AW sees the token in the exception - self.reportError(e) - - # we want to return the token we're actually matching - matchedSymbol = self.getCurrentInputSymbol(input) - - # move past ttype token as if all were ok - input.consume() - return matchedSymbol - - # can't recover with single token deletion, try insertion - if self.mismatchIsMissingToken(input, follow): - inserted = self.getMissingSymbol(input, e, ttype, follow) - e = MissingTokenException(ttype, input, inserted) - - # report after inserting so AW sees the token in the exception - self.reportError(e) - return inserted - - # even that didn't work; must throw the exception - e = MismatchedTokenException(ttype, input) - raise e - - - def recoverFromMismatchedSet(self, input, e, follow): - """Not currently used""" - - if self.mismatchIsMissingToken(input, follow): - self.reportError(e) - # we don't know how to conjure up a token for sets yet - return self.getMissingSymbol(input, e, INVALID_TOKEN_TYPE, follow) - - # TODO do single token deletion like above for Token mismatch - raise e - - - def getCurrentInputSymbol(self, input): - """ - Match needs to return the current input symbol, which gets put - into the label for the associated token ref; e.g., x=ID. Token - and tree parsers need to return different objects. Rather than test - for input stream type or change the IntStream interface, I use - a simple method to ask the recognizer to tell me what the current - input symbol is. - - This is ignored for lexers. - """ - - return None - - - def getMissingSymbol(self, input, e, expectedTokenType, follow): - """Conjure up a missing token during error recovery. - - The recognizer attempts to recover from single missing - symbols. But, actions might refer to that missing symbol. - For example, x=ID {f($x);}. The action clearly assumes - that there has been an identifier matched previously and that - $x points at that token. If that token is missing, but - the next token in the stream is what we want we assume that - this token is missing and we keep going. Because we - have to return some token to replace the missing token, - we have to conjure one up. This method gives the user control - over the tokens returned for missing tokens. Mostly, - you will want to create something special for identifier - tokens. For literals such as '{' and ',', the default - action in the parser or tree parser works. It simply creates - a CommonToken of the appropriate type. The text will be the token. - If you change what tokens must be created by the lexer, - override this method to create the appropriate tokens. - """ - - return None - - -## def recoverFromMissingElement(self, input, e, follow): -## """ -## This code is factored out from mismatched token and mismatched set -## recovery. It handles "single token insertion" error recovery for -## both. No tokens are consumed to recover from insertions. Return -## true if recovery was possible else return false. 
-## """ - -## if self.mismatchIsMissingToken(input, follow): -## self.reportError(e) -## return True - -## # nothing to do; throw exception -## return False - - - def consumeUntil(self, input, tokenTypes): - """ - Consume tokens until one matches the given token or token set - - tokenTypes can be a single token type or a set of token types - - """ - - if not isinstance(tokenTypes, (set, frozenset)): - tokenTypes = frozenset([tokenTypes]) - - ttype = input.LA(1) - while ttype != EOF and ttype not in tokenTypes: - input.consume() - ttype = input.LA(1) - - - def getRuleInvocationStack(self): - """ - Return List of the rules in your parser instance - leading up to a call to this method. You could override if - you want more details such as the file/line info of where - in the parser java code a rule is invoked. - - This is very useful for error messages and for context-sensitive - error recovery. - - You must be careful, if you subclass a generated recognizers. - The default implementation will only search the module of self - for rules, but the subclass will not contain any rules. - You probably want to override this method to look like - - def getRuleInvocationStack(self): - return self._getRuleInvocationStack(.__module__) - - where is the class of the generated recognizer, e.g. - the superclass of self. - """ - - return self._getRuleInvocationStack(self.__module__) - - - def _getRuleInvocationStack(cls, module): - """ - A more general version of getRuleInvocationStack where you can - pass in, for example, a RecognitionException to get it's rule - stack trace. This routine is shared with all recognizers, hence, - static. - - TODO: move to a utility class or something; weird having lexer call - this - """ - - # mmmhhh,... perhaps look at the first argument - # (f_locals[co_varnames[0]]?) and test if it's a (sub)class of - # requested recognizer... - - rules = [] - for frame in reversed(inspect.stack()): - code = frame[0].f_code - codeMod = inspect.getmodule(code) - if codeMod is None: - continue - - # skip frames not in requested module - if codeMod.__name__ != module: - continue - - # skip some unwanted names - if code.co_name in ('nextToken', ''): - continue - - rules.append(code.co_name) - - return rules - - _getRuleInvocationStack = classmethod(_getRuleInvocationStack) - - - def getBacktrackingLevel(self): - return self._state.backtracking - - def setBacktrackingLevel(self, n): - self._state.backtracking = n - - - def getGrammarFileName(self): - """For debugging and other purposes, might want the grammar name. - - Have ANTLR generate an implementation for this method. - """ - - return self.grammarFileName - - - def getSourceName(self): - raise NotImplementedError - - - def toStrings(self, tokens): - """A convenience method for use most often with template rewrites. - - Convert a List to List - """ - - if tokens is None: - return None - - return [token.text for token in tokens] - - - def getRuleMemoization(self, ruleIndex, ruleStartIndex): - """ - Given a rule number and a start token index number, return - MEMO_RULE_UNKNOWN if the rule has not parsed input starting from - start index. If this rule has parsed input starting from the - start index before, then return where the rule stopped parsing. - It returns the index of the last token matched by the rule. 
- """ - - if ruleIndex not in self._state.ruleMemo: - self._state.ruleMemo[ruleIndex] = {} - - return self._state.ruleMemo[ruleIndex].get( - ruleStartIndex, self.MEMO_RULE_UNKNOWN - ) - - - def alreadyParsedRule(self, input, ruleIndex): - """ - Has this rule already parsed input at the current index in the - input stream? Return the stop token index or MEMO_RULE_UNKNOWN. - If we attempted but failed to parse properly before, return - MEMO_RULE_FAILED. - - This method has a side-effect: if we have seen this input for - this rule and successfully parsed before, then seek ahead to - 1 past the stop token matched for this rule last time. - """ - - stopIndex = self.getRuleMemoization(ruleIndex, input.index()) - if stopIndex == self.MEMO_RULE_UNKNOWN: - return False - - if stopIndex == self.MEMO_RULE_FAILED: - raise BacktrackingFailed - - else: - input.seek(stopIndex + 1) - - return True - - - def memoize(self, input, ruleIndex, ruleStartIndex, success): - """ - Record whether or not this rule parsed the input at this position - successfully. - """ - - if success: - stopTokenIndex = input.index() - 1 - else: - stopTokenIndex = self.MEMO_RULE_FAILED - - if ruleIndex in self._state.ruleMemo: - self._state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex - - - def traceIn(self, ruleName, ruleIndex, inputSymbol): - sys.stdout.write("enter %s %s" % (ruleName, inputSymbol)) - - if self._state.backtracking > 0: - sys.stdout.write(" backtracking=%s" % self._state.backtracking) - - sys.stdout.write('\n') - - - def traceOut(self, ruleName, ruleIndex, inputSymbol): - sys.stdout.write("exit %s %s" % (ruleName, inputSymbol)) - - if self._state.backtracking > 0: - sys.stdout.write(" backtracking=%s" % self._state.backtracking) - - # mmmm... we use BacktrackingFailed exceptions now. So how could we - # get that information here? - #if self._state.failed: - # sys.stdout.write(" failed") - #else: - # sys.stdout.write(" succeeded") - - sys.stdout.write('\n') - - -class TokenSource(object): - """ - @brief Abstract baseclass for token producers. - - A source of tokens must provide a sequence of tokens via nextToken() - and also must reveal it's source of characters; CommonToken's text is - computed from a CharStream; it only store indices into the char stream. - - Errors from the lexer are never passed to the parser. Either you want - to keep going or you do not upon token recognition error. If you do not - want to continue lexing then you do not want to continue parsing. Just - throw an exception not under RecognitionException and Java will naturally - toss you all the way out of the recognizers. If you want to continue - lexing then you should not throw an exception to the parser--it has already - requested a token. Keep lexing until you get a valid one. Just report - errors and keep going, looking for a valid token. - """ - - def nextToken(self): - """Return a Token object from your input stream (usually a CharStream). - - Do not fail/return upon lexing error; keep chewing on the characters - until you get a good one; errors are not passed through to the parser. - """ - - raise NotImplementedError - - - def __iter__(self): - """The TokenSource is an interator. - - The iteration will not include the final EOF token, see also the note - for the next() method. - - """ - - return self - - - def next(self): - """Return next token or raise StopIteration. - - Note that this will raise StopIteration when hitting the EOF token, - so EOF will not be part of the iteration. 
- - """ - - token = self.nextToken() - if token is None or token.type == EOF: - raise StopIteration - return token - - -class Lexer(BaseRecognizer, TokenSource): - """ - @brief Baseclass for generated lexer classes. - - A lexer is recognizer that draws input symbols from a character stream. - lexer grammars result in a subclass of this object. A Lexer object - uses simplified match() and error recovery mechanisms in the interest - of speed. - """ - - def __init__(self, input, state=None): - BaseRecognizer.__init__(self, state) - TokenSource.__init__(self) - - # Where is the lexer drawing characters from? - self.input = input - - - def reset(self): - BaseRecognizer.reset(self) # reset all recognizer state variables - - if self.input is not None: - # rewind the input - self.input.seek(0) - - if self._state is None: - # no shared state work to do - return - - # wack Lexer state variables - self._state.token = None - self._state.type = INVALID_TOKEN_TYPE - self._state.channel = DEFAULT_CHANNEL - self._state.tokenStartCharIndex = -1 - self._state.tokenStartLine = -1 - self._state.tokenStartCharPositionInLine = -1 - self._state.text = None - - - def makeEOFToken(self): - eof = CommonToken( - type=EOF, channel=DEFAULT_CHANNEL, - input=self.input, - start=self.input.index(), stop=self.input.index()) - eof.line = self.input.line - eof.charPositionInLine = self.input.charPositionInLine - return eof - - def nextToken(self): - """ - Return a token from this source; i.e., match a token on the char - stream. - """ - - while 1: - self._state.token = None - self._state.channel = DEFAULT_CHANNEL - self._state.tokenStartCharIndex = self.input.index() - self._state.tokenStartCharPositionInLine = self.input.charPositionInLine - self._state.tokenStartLine = self.input.line - self._state.text = None - if self.input.LA(1) == EOF: - return self.makeEOFToken() - - try: - self.mTokens() - - if self._state.token is None: - self.emit() - - elif self._state.token == SKIP_TOKEN: - continue - - return self._state.token - - except NoViableAltException, re: - self.reportError(re) - self.recover(re) # throw out current char and try again - - except RecognitionException, re: - self.reportError(re) - # match() routine has already called recover() - - - def skip(self): - """ - Instruct the lexer to skip creating a token for current lexer rule - and look for another token. nextToken() knows to keep looking when - a lexer rule finishes with token set to SKIP_TOKEN. Recall that - if token==null at end of any token rule, it creates one for you - and emits it. - """ - - self._state.token = SKIP_TOKEN - - - def mTokens(self): - """This is the lexer entry point that sets instance var 'token'""" - - # abstract method - raise NotImplementedError - - - def setCharStream(self, input): - """Set the char stream and reset the lexer""" - self.input = None - self.reset() - self.input = input - - - def getSourceName(self): - return self.input.getSourceName() - - - def emit(self, token=None): - """ - The standard method called to automatically emit a token at the - outermost lexical rule. The token object should point into the - char buffer start..stop. If there is a text override in 'text', - use that to set the token's text. Override this method to emit - custom Token objects. - - If you are building trees, then you should also override - Parser or TreeParser.getMissingSymbol(). 
- """ - - if token is None: - token = CommonToken( - input=self.input, - type=self._state.type, - channel=self._state.channel, - start=self._state.tokenStartCharIndex, - stop=self.getCharIndex()-1 - ) - token.line = self._state.tokenStartLine - token.text = self._state.text - token.charPositionInLine = self._state.tokenStartCharPositionInLine - - self._state.token = token - - return token - - - def match(self, s): - if isinstance(s, basestring): - for c in s: - if self.input.LA(1) != ord(c): - if self._state.backtracking > 0: - raise BacktrackingFailed - - mte = MismatchedTokenException(c, self.input) - self.recover(mte) - raise mte - - self.input.consume() - - else: - if self.input.LA(1) != s: - if self._state.backtracking > 0: - raise BacktrackingFailed - - mte = MismatchedTokenException(unichr(s), self.input) - self.recover(mte) # don't really recover; just consume in lexer - raise mte - - self.input.consume() - - - def matchAny(self): - self.input.consume() - - - def matchRange(self, a, b): - if self.input.LA(1) < a or self.input.LA(1) > b: - if self._state.backtracking > 0: - raise BacktrackingFailed - - mre = MismatchedRangeException(unichr(a), unichr(b), self.input) - self.recover(mre) - raise mre - - self.input.consume() - - - def getLine(self): - return self.input.line - - - def getCharPositionInLine(self): - return self.input.charPositionInLine - - - def getCharIndex(self): - """What is the index of the current character of lookahead?""" - - return self.input.index() - - - def getText(self): - """ - Return the text matched so far for the current token or any - text override. - """ - if self._state.text is not None: - return self._state.text - - return self.input.substring( - self._state.tokenStartCharIndex, - self.getCharIndex()-1 - ) - - - def setText(self, text): - """ - Set the complete text of this token; it wipes any previous - changes to the text. - """ - self._state.text = text - - - text = property(getText, setText) - - - def reportError(self, e): - ## TODO: not thought about recovery in lexer yet. - - ## # if we've already reported an error and have not matched a token - ## # yet successfully, don't report any errors. - ## if self.errorRecovery: - ## #System.err.print("[SPURIOUS] "); - ## return; - ## - ## self.errorRecovery = True - - self.displayRecognitionError(self.tokenNames, e) - - - def getErrorMessage(self, e, tokenNames): - msg = None - - if isinstance(e, MismatchedTokenException): - msg = "mismatched character " \ - + self.getCharErrorDisplay(e.c) \ - + " expecting " \ - + self.getCharErrorDisplay(e.expecting) - - elif isinstance(e, NoViableAltException): - msg = "no viable alternative at character " \ - + self.getCharErrorDisplay(e.c) - - elif isinstance(e, EarlyExitException): - msg = "required (...)+ loop did not match anything at character " \ - + self.getCharErrorDisplay(e.c) - - elif isinstance(e, MismatchedNotSetException): - msg = "mismatched character " \ - + self.getCharErrorDisplay(e.c) \ - + " expecting set " \ - + repr(e.expecting) - - elif isinstance(e, MismatchedSetException): - msg = "mismatched character " \ - + self.getCharErrorDisplay(e.c) \ - + " expecting set " \ - + repr(e.expecting) - - elif isinstance(e, MismatchedRangeException): - msg = "mismatched character " \ - + self.getCharErrorDisplay(e.c) \ - + " expecting set " \ - + self.getCharErrorDisplay(e.a) \ - + ".." 
\ - + self.getCharErrorDisplay(e.b) - - else: - msg = BaseRecognizer.getErrorMessage(self, e, tokenNames) - - return msg - - - def getCharErrorDisplay(self, c): - if c == EOF: - c = '' - return repr(c) - - - def recover(self, re): - """ - Lexers can normally match any char in it's vocabulary after matching - a token, so do the easy thing and just kill a character and hope - it all works out. You can instead use the rule invocation stack - to do sophisticated error recovery if you are in a fragment rule. - """ - - self.input.consume() - - - def traceIn(self, ruleName, ruleIndex): - inputSymbol = "%s line=%d:%s" % (self.input.LT(1), - self.getLine(), - self.getCharPositionInLine() - ) - - BaseRecognizer.traceIn(self, ruleName, ruleIndex, inputSymbol) - - - def traceOut(self, ruleName, ruleIndex): - inputSymbol = "%s line=%d:%s" % (self.input.LT(1), - self.getLine(), - self.getCharPositionInLine() - ) - - BaseRecognizer.traceOut(self, ruleName, ruleIndex, inputSymbol) - - - -class Parser(BaseRecognizer): - """ - @brief Baseclass for generated parser classes. - """ - - def __init__(self, lexer, state=None): - BaseRecognizer.__init__(self, state) - - self.input = lexer - - - def reset(self): - BaseRecognizer.reset(self) # reset all recognizer state variables - if self.input is not None: - self.input.seek(0) # rewind the input - - - def getCurrentInputSymbol(self, input): - return input.LT(1) - - - def getMissingSymbol(self, input, e, expectedTokenType, follow): - if expectedTokenType == EOF: - tokenText = "" - else: - tokenText = "" - t = CommonToken(type=expectedTokenType, text=tokenText) - current = input.LT(1) - if current.type == EOF: - current = input.LT(-1) - - if current is not None: - t.line = current.line - t.charPositionInLine = current.charPositionInLine - t.channel = DEFAULT_CHANNEL - return t - - - def setTokenStream(self, input): - """Set the token stream and reset the parser""" - - self.input = None - self.reset() - self.input = input - - - def getTokenStream(self): - return self.input - - - def getSourceName(self): - return self.input.getSourceName() - - - def traceIn(self, ruleName, ruleIndex): - BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1)) - - - def traceOut(self, ruleName, ruleIndex): - BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1)) - - -class RuleReturnScope(object): - """ - Rules can return start/stop info as well as possible trees and templates. - """ - - def getStart(self): - """Return the start token or tree.""" - return None - - - def getStop(self): - """Return the stop token or tree.""" - return None - - - def getTree(self): - """Has a value potentially if output=AST.""" - return None - - - def getTemplate(self): - """Has a value potentially if output=template.""" - return None - - -class ParserRuleReturnScope(RuleReturnScope): - """ - Rules that return more than a single value must return an object - containing all the values. Besides the properties defined in - RuleLabelScope.predefinedRulePropertiesScope there may be user-defined - return values. This class simply defines the minimum properties that - are always defined and methods to access the others that might be - available depending on output option such as template and tree. - - Note text is not an actual property of the return value, it is computed - from start and stop using the input stream's toString() method. I - could add a ctor to this so that we can pass in and store the input - stream, but I'm not sure we want to do that. 
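In practice a rule's return scope is consumed like this (prog is an invented rule; compare ParserMain.parseStream above):

result = parser.prog()                    # any generated rule method
first, last = result.start, result.stop   # boundary tokens of the match
if result.tree is not None:               # present when output=AST
    print result.tree.toStringTree()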
It would seem to be undefined - to get the .text property anyway if the rule matches tokens from multiple - input streams. - - I do not use getters for fields of objects that are used simply to - group values such as this aggregate. The getters/setters are there to - satisfy the superclass interface. - """ - - def __init__(self): - self.start = None - self.stop = None - self.tree = None # only used when output=AST - - - def getStart(self): - return self.start - - - def getStop(self): - return self.stop - - - def getTree(self): - return self.tree diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/streams.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/streams.py deleted file mode 100644 index 84016bd5..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/streams.py +++ /dev/null @@ -1,1522 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2008 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -import codecs -from StringIO import StringIO - -from antlr3.constants import DEFAULT_CHANNEL, EOF -from antlr3.tokens import Token, CommonToken - - -############################################################################ -# -# basic interfaces -# IntStream -# +- CharStream -# \- TokenStream -# -# subclasses must implemented all methods -# -############################################################################ - -class IntStream(object): - """ - @brief Base interface for streams of integer values. - - A simple stream of integers used when all I care about is the char - or token type sequence (such as interpretation). - """ - - def consume(self): - raise NotImplementedError - - - def LA(self, i): - """Get int at current input pointer + i ahead where i=1 is next int. - - Negative indexes are allowed. LA(-1) is previous token (token - just matched). LA(-i) where i is before first token should - yield -1, invalid char / EOF. - """ - - raise NotImplementedError - - - def mark(self): - """ - Tell the stream to start buffering if it hasn't already. 
Return - current input position, index(), or some other marker so that - when passed to rewind() you get back to the same spot. - rewind(mark()) should not affect the input cursor. The Lexer - track line/col info as well as input index so its markers are - not pure input indexes. Same for tree node streams. - """ - - raise NotImplementedError - - - def index(self): - """ - Return the current input symbol index 0..n where n indicates the - last symbol has been read. The index is the symbol about to be - read not the most recently read symbol. - """ - - raise NotImplementedError - - - def rewind(self, marker=None): - """ - Reset the stream so that next call to index would return marker. - The marker will usually be index() but it doesn't have to be. It's - just a marker to indicate what state the stream was in. This is - essentially calling release() and seek(). If there are markers - created after this marker argument, this routine must unroll them - like a stack. Assume the state the stream was in when this marker - was created. - - If marker is None: - Rewind to the input position of the last marker. - Used currently only after a cyclic DFA and just - before starting a sem/syn predicate to get the - input position back to the start of the decision. - Do not "pop" the marker off the state. mark(i) - and rewind(i) should balance still. It is - like invoking rewind(last marker) but it should not "pop" - the marker off. It's like seek(last marker's input position). - """ - - raise NotImplementedError - - - def release(self, marker=None): - """ - You may want to commit to a backtrack but don't want to force the - stream to keep bookkeeping objects around for a marker that is - no longer necessary. This will have the same behavior as - rewind() except it releases resources without the backward seek. - This must throw away resources for all markers back to the marker - argument. So if you're nested 5 levels of mark(), and then release(2) - you have to release resources for depths 2..5. - """ - - raise NotImplementedError - - - def seek(self, index): - """ - Set the input cursor to the position indicated by index. This is - normally used to seek ahead in the input stream. No buffering is - required to do this unless you know your stream will use seek to - move backwards such as when backtracking. - - This is different from rewind in its multi-directional - requirement and in that its argument is strictly an input cursor - (index). - - For char streams, seeking forward must update the stream state such - as line number. For seeking backwards, you will be presumably - backtracking using the mark/rewind mechanism that restores state and - so this method does not need to update state when seeking backwards. - - Currently, this method is only used for efficient backtracking using - memoization, but in the future it may be used for incremental parsing. - - The index is 0..n-1. A seek to position i means that LA(1) will - return the ith symbol. So, seeking to 0 means LA(1) will return the - first element in the stream. - """ - - raise NotImplementedError - - - def size(self): - """ - Only makes sense for streams that buffer everything up probably, but - might be useful to display the entire stream or for testing. This - value includes a single EOF. - """ - - raise NotImplementedError - - - def getSourceName(self): - """ - Where are you getting symbols from? Normally, implementations will - pass the buck all the way to the lexer who can ask its input stream - for the file name or whatever. 
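Taken together, the mark/rewind/release contract above amounts to this usage pattern (a sketch against any concrete IntStream, here called `stream`):

    marker = stream.mark()   # start buffering; remember state (incl. line info)
    # ... speculatively consume() / LA() while evaluating an alternative ...
    stream.rewind(marker)    # alternative failed: back to the marked state
    # on success, commit instead and drop the bookkeeping without seeking:
    # stream.release(marker)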
- """ - - raise NotImplementedError - - -class CharStream(IntStream): - """ - @brief A source of characters for an ANTLR lexer. - - This is an abstract class that must be implemented by a subclass. - - """ - - # pylint does not realize that this is an interface, too - #pylint: disable-msg=W0223 - - EOF = -1 - - - def substring(self, start, stop): - """ - For infinite streams, you don't need this; primarily I'm providing - a useful interface for action code. Just make sure actions don't - use this on streams that don't support it. - """ - - raise NotImplementedError - - - def LT(self, i): - """ - Get the ith character of lookahead. This is the same usually as - LA(i). This will be used for labels in the generated - lexer code. I'd prefer to return a char here type-wise, but it's - probably better to be 32-bit clean and be consistent with LA. - """ - - raise NotImplementedError - - - def getLine(self): - """ANTLR tracks the line information automatically""" - - raise NotImplementedError - - - def setLine(self, line): - """ - Because this stream can rewind, we need to be able to reset the line - """ - - raise NotImplementedError - - - def getCharPositionInLine(self): - """ - The index of the character relative to the beginning of the line 0..n-1 - """ - - raise NotImplementedError - - - def setCharPositionInLine(self, pos): - raise NotImplementedError - - -class TokenStream(IntStream): - """ - - @brief A stream of tokens accessing tokens from a TokenSource - - This is an abstract class that must be implemented by a subclass. - - """ - - # pylint does not realize that this is an interface, too - #pylint: disable-msg=W0223 - - def LT(self, k): - """ - Get Token at current input pointer + i ahead where i=1 is next Token. - i<0 indicates tokens in the past. So -1 is previous token and -2 is - two tokens ago. LT(0) is undefined. For i>=n, return Token.EOFToken. - Return null for LT(0) and any index that results in an absolute address - that is negative. - """ - - raise NotImplementedError - - - def range(self): - """ - How far ahead has the stream been asked to look? The return - value is a valid index from 0..n-1. - """ - - raise NotImplementedError - - - def get(self, i): - """ - Get a token at an absolute index i; 0..n-1. This is really only - needed for profiling and debugging and token stream rewriting. - If you don't want to buffer up tokens, then this method makes no - sense for you. Naturally you can't use the rewrite stream feature. - I believe DebugTokenStream can easily be altered to not use - this method, removing the dependency. - """ - - raise NotImplementedError - - - def getTokenSource(self): - """ - Where is this stream pulling tokens from? This is not the name, but - the object that provides Token objects. - """ - - raise NotImplementedError - - - def toString(self, start=None, stop=None): - """ - Return the text of all tokens from start to stop, inclusive. - If the stream does not buffer all the tokens then it can just - return "" or null; Users should not access $ruleLabel.text in - an action of course in that case. - - Because the user is not required to use a token with an index stored - in it, we must provide a means for two token objects themselves to - indicate the start/end location. Most often this will just delegate - to the other toString(int,int). This is also parallel with - the TreeNodeStream.toString(Object,Object). 
- """ - - raise NotImplementedError - - -############################################################################ -# -# character streams for use in lexers -# CharStream -# \- ANTLRStringStream -# -############################################################################ - - -class ANTLRStringStream(CharStream): - """ - @brief CharStream that pull data from a unicode string. - - A pretty quick CharStream that pulls all data from an array - directly. Every method call counts in the lexer. - - """ - - - def __init__(self, data): - """ - @param data This should be a unicode string holding the data you want - to parse. If you pass in a byte string, the Lexer will choke on - non-ascii data. - - """ - - CharStream.__init__(self) - - # The data being scanned - self.strdata = unicode(data) - self.data = [ord(c) for c in self.strdata] - - # How many characters are actually in the buffer - self.n = len(data) - - # 0..n-1 index into string of next char - self.p = 0 - - # line number 1..n within the input - self.line = 1 - - # The index of the character relative to the beginning of the - # line 0..n-1 - self.charPositionInLine = 0 - - # A list of CharStreamState objects that tracks the stream state - # values line, charPositionInLine, and p that can change as you - # move through the input stream. Indexed from 0..markDepth-1. - self._markers = [ ] - self.lastMarker = None - self.markDepth = 0 - - # What is name or source of this char stream? - self.name = None - - - def reset(self): - """ - Reset the stream so that it's in the same state it was - when the object was created *except* the data array is not - touched. - """ - - self.p = 0 - self.line = 1 - self.charPositionInLine = 0 - self._markers = [ ] - - - def consume(self): - try: - if self.data[self.p] == 10: # \n - self.line += 1 - self.charPositionInLine = 0 - else: - self.charPositionInLine += 1 - - self.p += 1 - - except IndexError: - # happend when we reached EOF and self.data[self.p] fails - # just do nothing - pass - - - - def LA(self, i): - if i == 0: - return 0 # undefined - - if i < 0: - i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1] - - try: - return self.data[self.p+i-1] - except IndexError: - return EOF - - - - def LT(self, i): - if i == 0: - return 0 # undefined - - if i < 0: - i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1] - - try: - return self.strdata[self.p+i-1] - except IndexError: - return EOF - - - def index(self): - """ - Return the current input symbol index 0..n where n indicates the - last symbol has been read. The index is the index of char to - be returned from LA(1). - """ - - return self.p - - - def size(self): - return self.n - - - def mark(self): - state = (self.p, self.line, self.charPositionInLine) - try: - self._markers[self.markDepth] = state - except IndexError: - self._markers.append(state) - self.markDepth += 1 - - self.lastMarker = self.markDepth - - return self.lastMarker - - - def rewind(self, marker=None): - if marker is None: - marker = self.lastMarker - - p, line, charPositionInLine = self._markers[marker-1] - - self.seek(p) - self.line = line - self.charPositionInLine = charPositionInLine - self.release(marker) - - - def release(self, marker=None): - if marker is None: - marker = self.lastMarker - - self.markDepth = marker-1 - - - def seek(self, index): - """ - consume() ahead until p==index; can't just set p=index as we must - update line and charPositionInLine. 
- """ - - if index <= self.p: - self.p = index # just jump; don't update stream state (line, ...) - return - - # seek forward, consume until p hits index - while self.p < index: - self.consume() - - - def substring(self, start, stop): - return self.strdata[start:stop+1] - - - def getLine(self): - """Using setter/getter methods is deprecated. Use o.line instead.""" - return self.line - - - def getCharPositionInLine(self): - """ - Using setter/getter methods is deprecated. Use o.charPositionInLine - instead. - """ - return self.charPositionInLine - - - def setLine(self, line): - """Using setter/getter methods is deprecated. Use o.line instead.""" - self.line = line - - - def setCharPositionInLine(self, pos): - """ - Using setter/getter methods is deprecated. Use o.charPositionInLine - instead. - """ - self.charPositionInLine = pos - - - def getSourceName(self): - return self.name - - -class ANTLRFileStream(ANTLRStringStream): - """ - @brief CharStream that opens a file to read the data. - - This is a char buffer stream that is loaded from a file - all at once when you construct the object. - """ - - def __init__(self, fileName, encoding=None): - """ - @param fileName The path to the file to be opened. The file will be - opened with mode 'rb'. - - @param encoding If you set the optional encoding argument, then the - data will be decoded on the fly. - - """ - - self.fileName = fileName - - fp = codecs.open(fileName, 'rb', encoding) - try: - data = fp.read() - finally: - fp.close() - - ANTLRStringStream.__init__(self, data) - - - def getSourceName(self): - """Deprecated, access o.fileName directly.""" - - return self.fileName - - -class ANTLRInputStream(ANTLRStringStream): - """ - @brief CharStream that reads data from a file-like object. - - This is a char buffer stream that is loaded from a file like object - all at once when you construct the object. - - All input is consumed from the file, but it is not closed. - """ - - def __init__(self, file, encoding=None): - """ - @param file A file-like object holding your input. Only the read() - method must be implemented. - - @param encoding If you set the optional encoding argument, then the - data will be decoded on the fly. - - """ - - if encoding is not None: - # wrap input in a decoding reader - reader = codecs.lookup(encoding)[2] - file = reader(file) - - data = file.read() - - ANTLRStringStream.__init__(self, data) - - -# I guess the ANTLR prefix exists only to avoid a name clash with some Java -# mumbojumbo. A plain "StringStream" looks better to me, which should be -# the preferred name in Python. -StringStream = ANTLRStringStream -FileStream = ANTLRFileStream -InputStream = ANTLRInputStream - - -############################################################################ -# -# Token streams -# TokenStream -# +- CommonTokenStream -# \- TokenRewriteStream -# -############################################################################ - - -class CommonTokenStream(TokenStream): - """ - @brief The most common stream of tokens - - The most common stream of tokens is one where every token is buffered up - and tokens are prefiltered for a certain channel (the parser will only - see these tokens and cannot change the filter channel number during the - parse). - """ - - def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL): - """ - @param tokenSource A TokenSource instance (usually a Lexer) to pull - the tokens from. - - @param channel Skip tokens on any channel but this one; this is how we - skip whitespace... 
- - """ - - TokenStream.__init__(self) - - self.tokenSource = tokenSource - - # Record every single token pulled from the source so we can reproduce - # chunks of it later. - self.tokens = [] - - # Map to override some Tokens' channel numbers - self.channelOverrideMap = {} - - # Set; discard any tokens with this type - self.discardSet = set() - - # Skip tokens on any channel but this one; this is how we skip - # whitespace... - self.channel = channel - - # By default, track all incoming tokens - self.discardOffChannelTokens = False - - # The index into the tokens list of the current token (next token - # to consume). p==-1 indicates that the tokens list is empty - self.p = -1 - - # Remember last marked position - self.lastMarker = None - - # how deep have we gone? - self._range = -1 - - - def makeEOFToken(self): - return self.tokenSource.makeEOFToken() - - - def setTokenSource(self, tokenSource): - """Reset this token stream by setting its token source.""" - - self.tokenSource = tokenSource - self.tokens = [] - self.p = -1 - self.channel = DEFAULT_CHANNEL - - - def reset(self): - self.p = 0 - self.lastMarker = None - - - def fillBuffer(self): - """ - Load all tokens from the token source and put in tokens. - This is done upon first LT request because you might want to - set some token type / channel overrides before filling buffer. - """ - - - index = 0 - t = self.tokenSource.nextToken() - while t is not None and t.type != EOF: - discard = False - - if self.discardSet is not None and t.type in self.discardSet: - discard = True - - elif self.discardOffChannelTokens and t.channel != self.channel: - discard = True - - # is there a channel override for token type? - try: - overrideChannel = self.channelOverrideMap[t.type] - - except KeyError: - # no override for this type - pass - - else: - if overrideChannel == self.channel: - t.channel = overrideChannel - else: - discard = True - - if not discard: - t.index = index - self.tokens.append(t) - index += 1 - - t = self.tokenSource.nextToken() - - # leave p pointing at first token on channel - self.p = 0 - self.p = self.skipOffTokenChannels(self.p) - - - def consume(self): - """ - Move the input pointer to the next incoming token. The stream - must become active with LT(1) available. consume() simply - moves the input pointer so that LT(1) points at the next - input symbol. Consume at least one token. - - Walk past any token not on the channel the parser is listening to. - """ - - if self.p < len(self.tokens): - self.p += 1 - - self.p = self.skipOffTokenChannels(self.p) # leave p on valid token - - - def skipOffTokenChannels(self, i): - """ - Given a starting index, return the index of the first on-channel - token. - """ - - try: - while self.tokens[i].channel != self.channel: - i += 1 - except IndexError: - # hit the end of token stream - pass - - return i - - - def skipOffTokenChannelsReverse(self, i): - while i >= 0 and self.tokens[i].channel != self.channel: - i -= 1 - - return i - - - def setTokenTypeChannel(self, ttype, channel): - """ - A simple filter mechanism whereby you can tell this token stream - to force all tokens of type ttype to be on channel. For example, - when interpreting, we cannot exec actions so we need to tell - the stream to force all WS and NEWLINE to be a different, ignored - channel. 
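Because buffering is deferred until the first lookahead, overrides registered on the stream still take effect; in this sketch WS is a hypothetical whitespace token type and HIDDEN_CHANNEL comes from antlr3.constants:

    tokens = CommonTokenStream(lexer)
    tokens.setTokenTypeChannel(WS, HIDDEN_CHANNEL)  # WS dropped from the buffer
    t = tokens.LT(1)   # first LT() triggers fillBuffer(), filters applied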
- """ - - self.channelOverrideMap[ttype] = channel - - - def discardTokenType(self, ttype): - self.discardSet.add(ttype) - - - def getTokens(self, start=None, stop=None, types=None): - """ - Given a start and stop index, return a list of all tokens in - the token type set. Return None if no tokens were found. This - method looks at both on and off channel tokens. - """ - - if self.p == -1: - self.fillBuffer() - - if stop is None or stop > len(self.tokens): - stop = len(self.tokens) - - if start is None or stop < 0: - start = 0 - - if start > stop: - return None - - if isinstance(types, (int, long)): - # called with a single type, wrap into set - types = set([types]) - - filteredTokens = [ - token for token in self.tokens[start:stop] - if types is None or token.type in types - ] - - if len(filteredTokens) == 0: - return None - - return filteredTokens - - - def LT(self, k): - """ - Get the ith token from the current position 1..n where k=1 is the - first symbol of lookahead. - """ - - if self.p == -1: - self.fillBuffer() - - if k == 0: - return None - - if k < 0: - return self.LB(-k) - - i = self.p - n = 1 - # find k good tokens - while n < k: - # skip off-channel tokens - i = self.skipOffTokenChannels(i+1) # leave p on valid token - n += 1 - - if i > self._range: - self._range = i - - try: - return self.tokens[i] - except IndexError: - return self.makeEOFToken() - - - def LB(self, k): - """Look backwards k tokens on-channel tokens""" - - if self.p == -1: - self.fillBuffer() - - if k == 0: - return None - - if self.p - k < 0: - return None - - i = self.p - n = 1 - # find k good tokens looking backwards - while n <= k: - # skip off-channel tokens - i = self.skipOffTokenChannelsReverse(i-1) # leave p on valid token - n += 1 - - if i < 0: - return None - - return self.tokens[i] - - - def get(self, i): - """ - Return absolute token i; ignore which channel the tokens are on; - that is, count all tokens not just on-channel tokens. - """ - - return self.tokens[i] - - - def slice(self, start, stop): - if self.p == -1: - self.fillBuffer() - - if start < 0 or stop < 0: - return None - - return self.tokens[start:stop+1] - - - def LA(self, i): - return self.LT(i).type - - - def mark(self): - self.lastMarker = self.index() - return self.lastMarker - - - def release(self, marker=None): - # no resources to release - pass - - - def size(self): - return len(self.tokens) - - - def range(self): - return self._range - - - def index(self): - return self.p - - - def rewind(self, marker=None): - if marker is None: - marker = self.lastMarker - - self.seek(marker) - - - def seek(self, index): - self.p = index - - - def getTokenSource(self): - return self.tokenSource - - - def getSourceName(self): - return self.tokenSource.getSourceName() - - - def toString(self, start=None, stop=None): - if self.p == -1: - self.fillBuffer() - - if start is None: - start = 0 - elif not isinstance(start, int): - start = start.index - - if stop is None: - stop = len(self.tokens) - 1 - elif not isinstance(stop, int): - stop = stop.index - - if stop >= len(self.tokens): - stop = len(self.tokens) - 1 - - return ''.join([t.text for t in self.tokens[start:stop+1]]) - - -class RewriteOperation(object): - """@brief Internal helper class.""" - - def __init__(self, stream, index, text): - self.stream = stream - - # What index into rewrites List are we? - self.instructionIndex = None - - # Token buffer index. - self.index = index - self.text = text - - def execute(self, buf): - """Execute the rewrite operation by possibly adding to the buffer. 
-        Return the index of the next token to operate on.
-        """
-
-        return self.index
-
-    def toString(self):
-        opName = self.__class__.__name__
-        return '<%s@%d:"%s">' % (
-            opName, self.index, self.text)
-
-    __str__ = toString
-    __repr__ = toString
-
-
-class InsertBeforeOp(RewriteOperation):
-    """@brief Internal helper class."""
-
-    def execute(self, buf):
-        buf.write(self.text)
-        if self.stream.tokens[self.index].type != EOF:
-            buf.write(self.stream.tokens[self.index].text)
-        return self.index + 1
-
-
-class ReplaceOp(RewriteOperation):
-    """
-    @brief Internal helper class.
-
-    I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
-    instructions.
-    """
-
-    def __init__(self, stream, first, last, text):
-        RewriteOperation.__init__(self, stream, first, text)
-        self.lastIndex = last
-
-
-    def execute(self, buf):
-        if self.text is not None:
-            buf.write(self.text)
-
-        return self.lastIndex + 1
-
-
-    def toString(self):
-        if self.text is None:
-            return '<DeleteOp@%d..%d>' % (self.index, self.lastIndex)
-
-        return '<ReplaceOp@%d..%d:"%s">' % (
-            self.index, self.lastIndex, self.text)
-
-    __str__ = toString
-    __repr__ = toString
-
-
-class TokenRewriteStream(CommonTokenStream):
-    """@brief CommonTokenStream that can be modified.
-
-    Useful for dumping out the input stream after doing some
-    augmentation or other manipulations.
-
-    You can insert stuff, replace, and delete chunks. Note that the
-    operations are done lazily--only if you convert the buffer to a
-    String. This is very efficient because you are not moving data around
-    all the time. As the buffer of tokens is converted to strings, the
-    toString() method(s) check to see if there is an operation at the
-    current index. If so, the operation is done and then normal String
-    rendering continues on the buffer. This is like having multiple Turing
-    machine instruction streams (programs) operating on a single input tape. :)
-
-    Since the operations are done lazily at toString-time, operations do not
-    screw up the token index values. That is, an insert operation at token
-    index i does not change the index values for tokens i+1..n-1.
-
-    Because operations never actually alter the buffer, you may always get
-    the original token stream back without undoing anything. Since
-    the instructions are queued up, you can easily simulate transactions and
-    roll back any changes if there is an error just by removing instructions.
-    For example,
-
-        CharStream input = new ANTLRFileStream("input");
-        TLexer lex = new TLexer(input);
-        TokenRewriteStream tokens = new TokenRewriteStream(lex);
-        T parser = new T(tokens);
-        parser.startRule();
-
-    Then in the rules, you can execute
-
-        Token t, u;
-        ...
-        input.insertAfter(t, "text to put after t");
-        input.insertAfter(u, "text after u");
-        System.out.println(tokens.toString());
-
-    Actually, you have to cast the 'input' to a TokenRewriteStream. :(
-
-    You can also have multiple "instruction streams" and get multiple
-    rewrites from a single pass over the input. Just name the instruction
-    streams and use that name again when printing the buffer. This could be
-    useful for generating a C file and also its header file--all from the
-    same buffer:
-
-        tokens.insertAfter("pass1", t, "text to put after t");
-        tokens.insertAfter("pass2", u, "text after u");
-        System.out.println(tokens.toString("pass1"));
-        System.out.println(tokens.toString("pass2"));
-
-    If you don't use named rewrite streams, a "default" stream is used as
-    the first example shows.
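The Java examples above translate to this Python runtime roughly as follows; TLexer and T stand for hypothetical generated classes, and t/u for tokens captured in rule actions:

    lex = TLexer(ANTLRFileStream('input'))
    tokens = TokenRewriteStream(lex)
    parser = T(tokens)
    parser.startRule()
    tokens.insertAfter(t, 'text to put after t')
    tokens.insertAfter(u, 'text after u')
    print tokens.toString()   # original tokens plus the queued edits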
- """ - - DEFAULT_PROGRAM_NAME = "default" - MIN_TOKEN_INDEX = 0 - - def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL): - CommonTokenStream.__init__(self, tokenSource, channel) - - # You may have multiple, named streams of rewrite operations. - # I'm calling these things "programs." - # Maps String (name) -> rewrite (List) - self.programs = {} - self.programs[self.DEFAULT_PROGRAM_NAME] = [] - - # Map String (program name) -> Integer index - self.lastRewriteTokenIndexes = {} - - - def rollback(self, *args): - """ - Rollback the instruction stream for a program so that - the indicated instruction (via instructionIndex) is no - longer in the stream. UNTESTED! - """ - - if len(args) == 2: - programName = args[0] - instructionIndex = args[1] - elif len(args) == 1: - programName = self.DEFAULT_PROGRAM_NAME - instructionIndex = args[0] - else: - raise TypeError("Invalid arguments") - - p = self.programs.get(programName, None) - if p is not None: - self.programs[programName] = ( - p[self.MIN_TOKEN_INDEX:instructionIndex]) - - - def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME): - """Reset the program so that no instructions exist""" - - self.rollback(programName, self.MIN_TOKEN_INDEX) - - - def insertAfter(self, *args): - if len(args) == 2: - programName = self.DEFAULT_PROGRAM_NAME - index = args[0] - text = args[1] - - elif len(args) == 3: - programName = args[0] - index = args[1] - text = args[2] - - else: - raise TypeError("Invalid arguments") - - if isinstance(index, Token): - # index is a Token, grap the stream index from it - index = index.index - - # to insert after, just insert before next index (even if past end) - self.insertBefore(programName, index+1, text) - - - def insertBefore(self, *args): - if len(args) == 2: - programName = self.DEFAULT_PROGRAM_NAME - index = args[0] - text = args[1] - - elif len(args) == 3: - programName = args[0] - index = args[1] - text = args[2] - - else: - raise TypeError("Invalid arguments") - - if isinstance(index, Token): - # index is a Token, grap the stream index from it - index = index.index - - op = InsertBeforeOp(self, index, text) - rewrites = self.getProgram(programName) - op.instructionIndex = len(rewrites) - rewrites.append(op) - - - def replace(self, *args): - if len(args) == 2: - programName = self.DEFAULT_PROGRAM_NAME - first = args[0] - last = args[0] - text = args[1] - - elif len(args) == 3: - programName = self.DEFAULT_PROGRAM_NAME - first = args[0] - last = args[1] - text = args[2] - - elif len(args) == 4: - programName = args[0] - first = args[1] - last = args[2] - text = args[3] - - else: - raise TypeError("Invalid arguments") - - if isinstance(first, Token): - # first is a Token, grap the stream index from it - first = first.index - - if isinstance(last, Token): - # last is a Token, grap the stream index from it - last = last.index - - if first > last or first < 0 or last < 0 or last >= len(self.tokens): - raise ValueError( - "replace: range invalid: %d..%d (size=%d)" - % (first, last, len(self.tokens))) - - op = ReplaceOp(self, first, last, text) - rewrites = self.getProgram(programName) - op.instructionIndex = len(rewrites) - rewrites.append(op) - - - def delete(self, *args): - self.replace(*(list(args) + [None])) - - - def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME): - return self.lastRewriteTokenIndexes.get(programName, -1) - - - def setLastRewriteTokenIndex(self, programName, i): - self.lastRewriteTokenIndexes[programName] = i - - - def getProgram(self, name): - p = self.programs.get(name, 
None)
-        if p is None:
-            p = self.initializeProgram(name)
-
-        return p
-
-
-    def initializeProgram(self, name):
-        p = []
-        self.programs[name] = p
-        return p
-
-
-    def toOriginalString(self, start=None, end=None):
-        if self.p == -1:
-            self.fillBuffer()
-
-        if start is None:
-            start = self.MIN_TOKEN_INDEX
-        if end is None:
-            end = self.size() - 1
-
-        buf = StringIO()
-        i = start
-        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
-            if self.get(i).type != EOF:
-                buf.write(self.get(i).text)
-            i += 1
-
-        return buf.getvalue()
-
-
-    def toString(self, *args):
-        if self.p == -1:
-            self.fillBuffer()
-
-        if len(args) == 0:
-            programName = self.DEFAULT_PROGRAM_NAME
-            start = self.MIN_TOKEN_INDEX
-            end = self.size() - 1
-
-        elif len(args) == 1:
-            programName = args[0]
-            start = self.MIN_TOKEN_INDEX
-            end = self.size() - 1
-
-        elif len(args) == 2:
-            programName = self.DEFAULT_PROGRAM_NAME
-            start = args[0]
-            end = args[1]
-
-        if start is None:
-            start = self.MIN_TOKEN_INDEX
-        elif not isinstance(start, int):
-            start = start.index
-
-        if end is None:
-            end = len(self.tokens) - 1
-        elif not isinstance(end, int):
-            end = end.index
-
-        # ensure start/end are in range
-        if end >= len(self.tokens):
-            end = len(self.tokens) - 1
-
-        if start < 0:
-            start = 0
-
-        rewrites = self.programs.get(programName)
-        if rewrites is None or len(rewrites) == 0:
-            # no instructions to execute
-            return self.toOriginalString(start, end)
-
-        buf = StringIO()
-
-        # First, optimize instruction stream
-        indexToOp = self.reduceToSingleOperationPerIndex(rewrites)
-
-        # Walk buffer, executing instructions and emitting tokens
-        i = start
-        while i <= end and i < len(self.tokens):
-            op = indexToOp.get(i)
-            # remove so any left have index size-1
-            try:
-                del indexToOp[i]
-            except KeyError:
-                pass
-
-            t = self.tokens[i]
-            if op is None:
-                # no operation at that index, just dump token
-                if t.type != EOF:
-                    buf.write(t.text)
-                i += 1  # move to next token
-
-            else:
-                i = op.execute(buf)  # execute operation and skip
-
-        # include stuff after end if it's last index in buffer
-        # So, if they did an insertAfter(lastValidIndex, "foo"), include
-        # foo if end==lastValidIndex.
-        if end == len(self.tokens) - 1:
-            # Scan any remaining operations after last token
-            # should be included (they will be inserts).
-            for i in sorted(indexToOp.keys()):
-                op = indexToOp[i]
-                if op.index >= len(self.tokens) - 1:
-                    buf.write(op.text)
-
-        return buf.getvalue()
-
-    __str__ = toString
-
-
-    def reduceToSingleOperationPerIndex(self, rewrites):
-        """
-        We need to combine operations and report invalid operations (like
-        overlapping replaces that are not completely nested). Inserts to
-        same index need to be combined etc... Here are the cases:
-
-        I.i.u I.j.v                           leave alone, nonoverlapping
-        I.i.u I.i.v                           combine: Iivu
-
-        R.i-j.u R.x-y.v | i-j in x-y          delete first R
-        R.i-j.u R.i-j.v                       delete first R
-        R.i-j.u R.x-y.v | x-y in i-j          ERROR
-        R.i-j.u R.x-y.v | boundaries overlap  ERROR
-
-        Delete special case of replace (text==null):
-        D.i-j.u D.x-y.v | boundaries overlap  combine to max(min)..max(right)
-
-        I.i.u R.x-y.v   | i in (x+1)-y        delete I (since insert before,
-                                              we're not deleting i)
-        I.i.u R.x-y.v   | i not in (x+1)-y    leave alone, nonoverlapping
-
-        R.x-y.v I.i.u   | i in x-y            ERROR
-        R.x-y.v I.x.u                         R.x-y.uv (combine, delete I)
-        R.x-y.v I.i.u   | i not in x-y        leave alone, nonoverlapping
-
-        I.i.u = insert u before op @ index i
-        R.x-y.u = replace x-y indexed tokens with u
-
-        First we need to examine replaces. For any replace op:
-
-          1.
wipe out any insertions before op within that range. - 2. Drop any replace op before that is contained completely within - that range. - 3. Throw exception upon boundary overlap with any previous replace. - - Then we can deal with inserts: - - 1. for any inserts to same index, combine even if not adjacent. - 2. for any prior replace with same left boundary, combine this - insert with replace and delete this replace. - 3. throw exception if index in same range as previous replace - - Don't actually delete; make op null in list. Easier to walk list. - Later we can throw as we add to index -> op map. - - Note that I.2 R.2-2 will wipe out I.2 even though, technically, the - inserted stuff would be before the replace range. But, if you - add tokens in front of a method body '{' and then delete the method - body, I think the stuff before the '{' you added should disappear too. - - Return a map from token index to operation. - """ - - # WALK REPLACES - for i, rop in enumerate(rewrites): - if rop is None: - continue - - if not isinstance(rop, ReplaceOp): - continue - - # Wipe prior inserts within range - for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i): - if iop.index == rop.index: - # E.g., insert before 2, delete 2..2; update replace - # text to include insert before, kill insert - rewrites[iop.instructionIndex] = None - rop.text = self.catOpText(iop.text, rop.text) - - elif iop.index > rop.index and iop.index <= rop.lastIndex: - # delete insert as it's a no-op. - rewrites[j] = None - - # Drop any prior replaces contained within - for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i): - if (prevRop.index >= rop.index - and prevRop.lastIndex <= rop.lastIndex): - # delete replace as it's a no-op. - rewrites[j] = None - continue - - # throw exception unless disjoint or identical - disjoint = (prevRop.lastIndex < rop.index - or prevRop.index > rop.lastIndex) - same = (prevRop.index == rop.index - and prevRop.lastIndex == rop.lastIndex) - - # Delete special case of replace (text==null): - # D.i-j.u D.x-y.v| boundaries overlapcombine to - # max(min)..max(right) - if prevRop.text is None and rop.text is None and not disjoint: - # kill first delete - rewrites[prevRop.instructionIndex] = None - - rop.index = min(prevRop.index, rop.index) - rop.lastIndex = max(prevRop.lastIndex, rop.lastIndex) - - elif not disjoint and not same: - raise ValueError( - "replace op boundaries of %s overlap with previous %s" - % (rop, prevRop)) - - # WALK INSERTS - for i, iop in enumerate(rewrites): - if iop is None: - continue - - if not isinstance(iop, InsertBeforeOp): - continue - - # combine current insert with prior if any at same index - for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i): - if prevIop.index == iop.index: # combine objects - # convert to strings...we're in process of toString'ing - # whole token buffer so no lazy eval issue with any - # templates - iop.text = self.catOpText(iop.text, prevIop.text) - # delete redundant prior insert - rewrites[j] = None - - # look for replaces where iop.index is in range; error - for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i): - if iop.index == rop.index: - rop.text = self.catOpText(iop.text, rop.text) - # delete current insert - rewrites[i] = None - continue - - if iop.index >= rop.index and iop.index <= rop.lastIndex: - raise ValueError( - "insert op %s within boundaries of previous %s" - % (iop, rop)) - - m = {} - for i, op in enumerate(rewrites): - if op is None: - # ignore deleted ops - continue - - assert op.index not in m, 
"should only be one op per index" - m[op.index] = op - - return m - - - def catOpText(self, a, b): - x = "" - y = "" - if a is not None: - x = a - if b is not None: - y = b - return x + y - - - def getKindOfOps(self, rewrites, kind, before=None): - """Get all operations before an index of a particular kind.""" - - if before is None: - before = len(rewrites) - elif before > len(rewrites): - before = len(rewrites) - - for i, op in enumerate(rewrites[:before]): - if op is None: - # ignore deleted - continue - if op.__class__ == kind: - yield i, op - - - def toDebugString(self, start=None, end=None): - if start is None: - start = self.MIN_TOKEN_INDEX - if end is None: - end = self.size() - 1 - - buf = StringIO() - i = start - while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens): - buf.write(self.get(i)) - i += 1 - - return buf.getvalue() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/tokens.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/tokens.py deleted file mode 100644 index d3f39b8e..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/tokens.py +++ /dev/null @@ -1,418 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2008 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -from antlr3.constants import EOF, DEFAULT_CHANNEL, INVALID_TOKEN_TYPE - -############################################################################ -# -# basic token interface -# -############################################################################ - -class Token(object): - """@brief Abstract token baseclass.""" - - def getText(self): - """@brief Get the text of the token. - - Using setter/getter methods is deprecated. Use o.text instead. - """ - raise NotImplementedError - - def setText(self, text): - """@brief Set the text of the token. - - Using setter/getter methods is deprecated. Use o.text instead. - """ - raise NotImplementedError - - - def getType(self): - """@brief Get the type of the token. - - Using setter/getter methods is deprecated. 
Use o.type instead.""" - - raise NotImplementedError - - def setType(self, ttype): - """@brief Get the type of the token. - - Using setter/getter methods is deprecated. Use o.type instead.""" - - raise NotImplementedError - - - def getLine(self): - """@brief Get the line number on which this token was matched - - Lines are numbered 1..n - - Using setter/getter methods is deprecated. Use o.line instead.""" - - raise NotImplementedError - - def setLine(self, line): - """@brief Set the line number on which this token was matched - - Using setter/getter methods is deprecated. Use o.line instead.""" - - raise NotImplementedError - - - def getCharPositionInLine(self): - """@brief Get the column of the tokens first character, - - Columns are numbered 0..n-1 - - Using setter/getter methods is deprecated. Use o.charPositionInLine instead.""" - - raise NotImplementedError - - def setCharPositionInLine(self, pos): - """@brief Set the column of the tokens first character, - - Using setter/getter methods is deprecated. Use o.charPositionInLine instead.""" - - raise NotImplementedError - - - def getChannel(self): - """@brief Get the channel of the token - - Using setter/getter methods is deprecated. Use o.channel instead.""" - - raise NotImplementedError - - def setChannel(self, channel): - """@brief Set the channel of the token - - Using setter/getter methods is deprecated. Use o.channel instead.""" - - raise NotImplementedError - - - def getTokenIndex(self): - """@brief Get the index in the input stream. - - An index from 0..n-1 of the token object in the input stream. - This must be valid in order to use the ANTLRWorks debugger. - - Using setter/getter methods is deprecated. Use o.index instead.""" - - raise NotImplementedError - - def setTokenIndex(self, index): - """@brief Set the index in the input stream. - - Using setter/getter methods is deprecated. Use o.index instead.""" - - raise NotImplementedError - - - def getInputStream(self): - """@brief From what character stream was this token created. - - You don't have to implement but it's nice to know where a Token - comes from if you have include files etc... on the input.""" - - raise NotImplementedError - - def setInputStream(self, input): - """@brief From what character stream was this token created. - - You don't have to implement but it's nice to know where a Token - comes from if you have include files etc... on the input.""" - - raise NotImplementedError - - -############################################################################ -# -# token implementations -# -# Token -# +- CommonToken -# \- ClassicToken -# -############################################################################ - -class CommonToken(Token): - """@brief Basic token implementation. - - This implementation does not copy the text from the input stream upon - creation, but keeps start/stop pointers into the stream to avoid - unnecessary copy operations. 
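The start/stop design described above means the text is fetched on demand through the text property defined just below; ID and char_stream are hypothetical here, and the stream must be long enough for the indexes:

    tok = CommonToken(type=ID, input=char_stream, start=4, stop=9)
    tok.text            # pulled lazily: char_stream.substring(4, 9)
    tok.text = 'other'  # overrides the text; start/stop stay untouched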
-
-    """
-
-    def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
-                 input=None, start=None, stop=None, oldToken=None):
-        Token.__init__(self)
-
-        if oldToken is not None:
-            self.type = oldToken.type
-            self.line = oldToken.line
-            self.charPositionInLine = oldToken.charPositionInLine
-            self.channel = oldToken.channel
-            self.index = oldToken.index
-            self._text = oldToken._text
-            self.input = oldToken.input
-            if isinstance(oldToken, CommonToken):
-                self.start = oldToken.start
-                self.stop = oldToken.stop
-
-        else:
-            self.type = type
-            self.input = input
-            self.charPositionInLine = -1  # set to invalid position
-            self.line = 0
-            self.channel = channel
-
-            # What token number is this from 0..n-1 tokens;
-            # < 0 implies invalid index
-            self.index = -1
-
-            # We need to be able to change the text once in a while. If
-            # this is non-null, then getText should return this. Note that
-            # start/stop are not affected by changing this.
-            self._text = text
-
-            # The char position into the input buffer where this token starts
-            self.start = start
-
-            # The char position into the input buffer where this token stops
-            # This is the index of the last char, *not* the index after it!
-            self.stop = stop
-
-
-    def getText(self):
-        if self._text is not None:
-            return self._text
-
-        if self.input is None:
-            return None
-
-        if self.start < self.input.size() and self.stop < self.input.size():
-            return self.input.substring(self.start, self.stop)
-
-        return '<EOF>'
-
-
-    def setText(self, text):
-        """
-        Override the text for this token. getText() will return this text
-        rather than pulling from the buffer. Note that this does not mean
-        that start/stop indexes are not valid. It means that the input
-        was converted to a new string in the token object.
-        """
-        self._text = text
-
-    text = property(getText, setText)
-
-
-    def getType(self):
-        return self.type
-
-    def setType(self, ttype):
-        self.type = ttype
-
-    def getTypeName(self):
-        return str(self.type)
-
-    typeName = property(lambda s: s.getTypeName())
-
-    def getLine(self):
-        return self.line
-
-    def setLine(self, line):
-        self.line = line
-
-
-    def getCharPositionInLine(self):
-        return self.charPositionInLine
-
-    def setCharPositionInLine(self, pos):
-        self.charPositionInLine = pos
-
-
-    def getChannel(self):
-        return self.channel
-
-    def setChannel(self, channel):
-        self.channel = channel
-
-
-    def getTokenIndex(self):
-        return self.index
-
-    def setTokenIndex(self, index):
-        self.index = index
-
-
-    def getInputStream(self):
-        return self.input
-
-    def setInputStream(self, input):
-        self.input = input
-
-
-    def __str__(self):
-        if self.type == EOF:
-            return "<EOF>"
-
-        channelStr = ""
-        if self.channel > 0:
-            channelStr = ",channel=" + str(self.channel)
-
-        txt = self.text
-        if txt is not None:
-            txt = txt.replace("\n", "\\\\n")
-            txt = txt.replace("\r", "\\\\r")
-            txt = txt.replace("\t", "\\\\t")
-        else:
-            txt = "<no text>"
-
-        return "[@%d,%d:%d=%r,<%s>%s,%d:%d]" % (
-            self.index,
-            self.start, self.stop,
-            txt,
-            self.typeName, channelStr,
-            self.line, self.charPositionInLine
-            )
-
-
-class ClassicToken(Token):
-    """@brief Alternative token implementation.
-
-    A Token object like we'd use in ANTLR 2.x; has an actual string created
-    and associated with this object. These objects are needed for imaginary
-    tree nodes that have payload objects. We need to create a Token object
-    that has a string; the tree node will point at this token. CommonToken
-    has indexes into a char stream and hence cannot be used to introduce
-    new strings.
- """ - - def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL, - oldToken=None - ): - Token.__init__(self) - - if oldToken is not None: - self.text = oldToken.text - self.type = oldToken.type - self.line = oldToken.line - self.charPositionInLine = oldToken.charPositionInLine - self.channel = oldToken.channel - - self.text = text - self.type = type - self.line = None - self.charPositionInLine = None - self.channel = channel - self.index = None - - - def getText(self): - return self.text - - def setText(self, text): - self.text = text - - - def getType(self): - return self.type - - def setType(self, ttype): - self.type = ttype - - - def getLine(self): - return self.line - - def setLine(self, line): - self.line = line - - - def getCharPositionInLine(self): - return self.charPositionInLine - - def setCharPositionInLine(self, pos): - self.charPositionInLine = pos - - - def getChannel(self): - return self.channel - - def setChannel(self, channel): - self.channel = channel - - - def getTokenIndex(self): - return self.index - - def setTokenIndex(self, index): - self.index = index - - - def getInputStream(self): - return None - - def setInputStream(self, input): - pass - - - def toString(self): - channelStr = "" - if self.channel > 0: - channelStr = ",channel=" + str(self.channel) - - txt = self.text - if txt is None: - txt = "" - - return "[@%r,%r,<%r>%s,%r:%r]" % (self.index, - txt, - self.type, - channelStr, - self.line, - self.charPositionInLine - ) - - - __str__ = toString - __repr__ = toString - - -INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE) - -# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR -# will avoid creating a token for this symbol and try to fetch another. -SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/tree.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/tree.py deleted file mode 100644 index 7bc84467..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/tree.py +++ /dev/null @@ -1,2843 +0,0 @@ -""" @package antlr3.tree -@brief ANTLR3 runtime package, tree module - -This module contains all support classes for AST construction and tree parsers. - -""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2008 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -# lot's of docstrings are missing, don't complain for now... -# pylint: disable-msg=C0111 - -import re - -from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE -from antlr3.recognizers import BaseRecognizer, RuleReturnScope -from antlr3.streams import IntStream -from antlr3.tokens import CommonToken, Token, INVALID_TOKEN -from antlr3.exceptions import MismatchedTreeNodeException, \ - MissingTokenException, UnwantedTokenException, MismatchedTokenException, \ - NoViableAltException - - -############################################################################ -# -# tree related exceptions -# -############################################################################ - - -class RewriteCardinalityException(RuntimeError): - """ - @brief Base class for all exceptions thrown during AST rewrite construction. - - This signifies a case where the cardinality of two or more elements - in a subrule are different: (ID INT)+ where |ID|!=|INT| - """ - - def __init__(self, elementDescription): - RuntimeError.__init__(self, elementDescription) - - self.elementDescription = elementDescription - - - def getMessage(self): - return self.elementDescription - - -class RewriteEarlyExitException(RewriteCardinalityException): - """@brief No elements within a (...)+ in a rewrite rule""" - - def __init__(self, elementDescription=None): - RewriteCardinalityException.__init__(self, elementDescription) - - -class RewriteEmptyStreamException(RewriteCardinalityException): - """ - @brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream - """ - - pass - - -############################################################################ -# -# basic Tree and TreeAdaptor interfaces -# -############################################################################ - -class Tree(object): - """ - @brief Abstract baseclass for tree nodes. - - What does a tree look like? ANTLR has a number of support classes - such as CommonTreeNodeStream that work on these kinds of trees. You - don't have to make your trees implement this interface, but if you do, - you'll be able to use more support code. - - NOTE: When constructing trees, ANTLR can build any kind of tree; it can - even use Token objects as trees if you add a child list to your tokens. - - This is a tree node without any payload; just navigation and factory stuff. 
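A sketch of the navigation half of this interface, using the methods declared just below; `node` is any concrete Tree (e.g. the CommonTree defined later in this module) and RULE a hypothetical token type:

    children = [node.getChild(i) for i in range(node.getChildCount())]
    if node.hasAncestor(RULE):            # walk upwards by token type
        enclosing = node.getAncestor(RULE)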
- """ - - - def getChild(self, i): - raise NotImplementedError - - - def getChildCount(self): - raise NotImplementedError - - - def getParent(self): - """Tree tracks parent and child index now > 3.0""" - - raise NotImplementedError - - def setParent(self, t): - """Tree tracks parent and child index now > 3.0""" - - raise NotImplementedError - - - def hasAncestor(self, ttype): - """Walk upwards looking for ancestor with this token type.""" - - raise NotImplementedError - - def getAncestor(self, ttype): - """Walk upwards and get first ancestor with this token type.""" - - raise NotImplementedError - - def getAncestors(self): - """Return a list of all ancestors of this node. - - The first node of list is the root and the last is the parent of - this node. - """ - - raise NotImplementedError - - - def getChildIndex(self): - """This node is what child index? 0..n-1""" - - raise NotImplementedError - - def setChildIndex(self, index): - """This node is what child index? 0..n-1""" - - raise NotImplementedError - - - def freshenParentAndChildIndexes(self): - """Set the parent and child index values for all children""" - - raise NotImplementedError - - - def addChild(self, t): - """ - Add t as a child to this node. If t is null, do nothing. If t - is nil, add all children of t to this' children. - """ - - raise NotImplementedError - - - def setChild(self, i, t): - """Set ith child (0..n-1) to t; t must be non-null and non-nil node""" - - raise NotImplementedError - - - def deleteChild(self, i): - raise NotImplementedError - - - def replaceChildren(self, startChildIndex, stopChildIndex, t): - """ - Delete children from start to stop and replace with t even if t is - a list (nil-root tree). num of children can increase or decrease. - For huge child lists, inserting children can force walking rest of - children to set their childindex; could be slow. - """ - - raise NotImplementedError - - - def isNil(self): - """ - Indicates the node is a nil node but may still have children, meaning - the tree is a flat list. - """ - - raise NotImplementedError - - - def getTokenStartIndex(self): - """ - What is the smallest token index (indexing from 0) for this node - and its children? - """ - - raise NotImplementedError - - - def setTokenStartIndex(self, index): - raise NotImplementedError - - - def getTokenStopIndex(self): - """ - What is the largest token index (indexing from 0) for this node - and its children? - """ - - raise NotImplementedError - - - def setTokenStopIndex(self, index): - raise NotImplementedError - - - def dupNode(self): - raise NotImplementedError - - - def getType(self): - """Return a token type; needed for tree parsing.""" - - raise NotImplementedError - - - def getText(self): - raise NotImplementedError - - - def getLine(self): - """ - In case we don't have a token payload, what is the line for errors? - """ - - raise NotImplementedError - - - def getCharPositionInLine(self): - raise NotImplementedError - - - def toStringTree(self): - raise NotImplementedError - - - def toString(self): - raise NotImplementedError - - - -class TreeAdaptor(object): - """ - @brief Abstract baseclass for tree adaptors. - - How to create and navigate trees. Rather than have a separate factory - and adaptor, I've merged them. Makes sense to encapsulate. - - This takes the place of the tree construction code generated in the - generated code in 2.x and the ASTFactory. - - I do not need to know the type of a tree at all so they are all - generic Objects. This may increase the amount of typecasting needed. 
:( - """ - - # C o n s t r u c t i o n - - def createWithPayload(self, payload): - """ - Create a tree node from Token object; for CommonTree type trees, - then the token just becomes the payload. This is the most - common create call. - - Override if you want another kind of node to be built. - """ - - raise NotImplementedError - - - def dupNode(self, treeNode): - """Duplicate a single tree node. - - Override if you want another kind of node to be built.""" - - raise NotImplementedError - - - def dupTree(self, tree): - """Duplicate tree recursively, using dupNode() for each node""" - - raise NotImplementedError - - - def nil(self): - """ - Return a nil node (an empty but non-null node) that can hold - a list of element as the children. If you want a flat tree (a list) - use "t=adaptor.nil(); t.addChild(x); t.addChild(y);" - """ - - raise NotImplementedError - - - def errorNode(self, input, start, stop, exc): - """ - Return a tree node representing an error. This node records the - tokens consumed during error recovery. The start token indicates the - input symbol at which the error was detected. The stop token indicates - the last symbol consumed during recovery. - - You must specify the input stream so that the erroneous text can - be packaged up in the error node. The exception could be useful - to some applications; default implementation stores ptr to it in - the CommonErrorNode. - - This only makes sense during token parsing, not tree parsing. - Tree parsing should happen only when parsing and tree construction - succeed. - """ - - raise NotImplementedError - - - def isNil(self, tree): - """Is tree considered a nil node used to make lists of child nodes?""" - - raise NotImplementedError - - - def addChild(self, t, child): - """ - Add a child to the tree t. If child is a flat tree (a list), make all - in list children of t. Warning: if t has no children, but child does - and child isNil then you can decide it is ok to move children to t via - t.children = child.children; i.e., without copying the array. Just - make sure that this is consistent with have the user will build - ASTs. Do nothing if t or child is null. - """ - - raise NotImplementedError - - - def becomeRoot(self, newRoot, oldRoot): - """ - If oldRoot is a nil root, just copy or move the children to newRoot. - If not a nil root, make oldRoot a child of newRoot. - - old=^(nil a b c), new=r yields ^(r a b c) - old=^(a b c), new=r yields ^(r ^(a b c)) - - If newRoot is a nil-rooted single child tree, use the single - child as the new root node. - - old=^(nil a b c), new=^(nil r) yields ^(r a b c) - old=^(a b c), new=^(nil r) yields ^(r ^(a b c)) - - If oldRoot was null, it's ok, just return newRoot (even if isNil). - - old=null, new=r yields r - old=null, new=^(nil r) yields ^(nil r) - - Return newRoot. Throw an exception if newRoot is not a - simple node or nil root with a single child node--it must be a root - node. If newRoot is ^(nil x) return x as newRoot. - - Be advised that it's ok for newRoot to point at oldRoot's - children; i.e., you don't have to copy the list. We are - constructing these nodes so we should have this control for - efficiency. - """ - - raise NotImplementedError - - - def rulePostProcessing(self, root): - """ - Given the root of the subtree created for this rule, post process - it to do any simplifications or whatever you want. A required - behavior is to convert ^(nil singleSubtree) to singleSubtree - as the setting of start/stop indexes relies on a single non-nil root - for non-flat trees. 
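The nil/becomeRoot contract above, exercised through the CommonTreeAdaptor defined later in this module (the token objects a, b and r are hypothetical):

    adaptor = CommonTreeAdaptor()
    root = adaptor.nil()                                  # flat list holder
    adaptor.addChild(root, adaptor.createWithPayload(a))
    adaptor.addChild(root, adaptor.createWithPayload(b))
    root = adaptor.becomeRoot(adaptor.createWithPayload(r), root)  # ^(r a b)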
- - Flat trees such as for lists like "idlist : ID+ ;" are left alone - unless there is only one ID. For a list, the start/stop indexes - are set in the nil node. - - This method is executed after all rule tree construction and right - before setTokenBoundaries(). - """ - - raise NotImplementedError - - - def getUniqueID(self, node): - """For identifying trees. - - How to identify nodes so we can say "add node to a prior node"? - Even becomeRoot is an issue. Use System.identityHashCode(node) - usually. - """ - - raise NotImplementedError - - - # R e w r i t e R u l e s - - def createFromToken(self, tokenType, fromToken, text=None): - """ - Create a new node derived from a token, with a new token type and - (optionally) new text. - - This is invoked from an imaginary node ref on right side of a - rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"]. - - This should invoke createToken(Token). - """ - - raise NotImplementedError - - - def createFromType(self, tokenType, text): - """Create a new node derived from a token, with a new token type. - - This is invoked from an imaginary node ref on right side of a - rewrite rule as IMAG["IMAG"]. - - This should invoke createToken(int,String). - """ - - raise NotImplementedError - - - # C o n t e n t - - def getType(self, t): - """For tree parsing, I need to know the token type of a node""" - - raise NotImplementedError - - - def setType(self, t, type): - """Node constructors can set the type of a node""" - - raise NotImplementedError - - - def getText(self, t): - raise NotImplementedError - - def setText(self, t, text): - """Node constructors can set the text of a node""" - - raise NotImplementedError - - - def getToken(self, t): - """Return the token object from which this node was created. - - Currently used only for printing an error message. - The error display routine in BaseRecognizer needs to - display where the input the error occurred. If your - tree of limitation does not store information that can - lead you to the token, you can create a token filled with - the appropriate information and pass that back. See - BaseRecognizer.getErrorMessage(). - """ - - raise NotImplementedError - - - def setTokenBoundaries(self, t, startToken, stopToken): - """ - Where are the bounds in the input token stream for this node and - all children? Each rule that creates AST nodes will call this - method right before returning. Flat trees (i.e., lists) will - still usually have a nil root node just to hold the children list. - That node would contain the start/stop indexes then. - """ - - raise NotImplementedError - - - def getTokenStartIndex(self, t): - """ - Get the token start index for this subtree; return -1 if no such index - """ - - raise NotImplementedError - - - def getTokenStopIndex(self, t): - """ - Get the token stop index for this subtree; return -1 if no such index - """ - - raise NotImplementedError - - - # N a v i g a t i o n / T r e e P a r s i n g - - def getChild(self, t, i): - """Get a child 0..n-1 node""" - - raise NotImplementedError - - - def setChild(self, t, i, child): - """Set ith child (0..n-1) to t; t must be non-null and non-nil node""" - - raise NotImplementedError - - - def deleteChild(self, t, i): - """Remove ith child and shift children down from right.""" - - raise NotImplementedError - - - def getChildCount(self, t): - """How many children? If 0, then this is a leaf node""" - - raise NotImplementedError - - - def getParent(self, t): - """ - Who is the parent node of this node; if null, implies node is root. 
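Sticking with the navigation contract spelled out above, a minimal sketch of deleteChild() and the childIndex bookkeeping it maintains (again illustrative, assuming the concrete CommonTreeAdaptor below and arbitrary token types):

from antlr3.tree import CommonTreeAdaptor

adaptor = CommonTreeAdaptor()
root = adaptor.createFromType(4, "root")
for text in ("a", "b", "c"):
    adaptor.addChild(root, adaptor.createFromType(5, text))

adaptor.deleteChild(root, 0)        # drop "a"; later siblings shift left
assert adaptor.getText(adaptor.getChild(root, 0)) == "b"
assert adaptor.getChildIndex(adaptor.getChild(root, 0)) == 0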
- If your node type doesn't handle this, it's ok but the tree rewrites - in tree parsers need this functionality. - """ - - raise NotImplementedError - - - def setParent(self, t, parent): - """ - Who is the parent node of this node; if null, implies node is root. - If your node type doesn't handle this, it's ok but the tree rewrites - in tree parsers need this functionality. - """ - - raise NotImplementedError - - - def getChildIndex(self, t): - """ - What index is this node in the child list? Range: 0..n-1 - If your node type doesn't handle this, it's ok but the tree rewrites - in tree parsers need this functionality. - """ - - raise NotImplementedError - - - def setChildIndex(self, t, index): - """ - What index is this node in the child list? Range: 0..n-1 - If your node type doesn't handle this, it's ok but the tree rewrites - in tree parsers need this functionality. - """ - - raise NotImplementedError - - - def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): - """ - Replace from start to stop child index of parent with t, which might - be a list. Number of children may be different - after this call. - - If parent is null, don't do anything; must be at root of overall tree. - Can't replace whatever points to the parent externally. Do nothing. - """ - - raise NotImplementedError - - - # Misc - - def create(self, *args): - """ - Deprecated, use createWithPayload, createFromToken or createFromType. - - This method only exists to mimic the Java interface of TreeAdaptor. - - """ - - if len(args) == 1 and isinstance(args[0], Token): - # Object create(Token payload); -## warnings.warn( -## "Using create() is deprecated, use createWithPayload()", -## DeprecationWarning, -## stacklevel=2 -## ) - return self.createWithPayload(args[0]) - - if (len(args) == 2 - and isinstance(args[0], (int, long)) - and isinstance(args[1], Token) - ): - # Object create(int tokenType, Token fromToken); -## warnings.warn( -## "Using create() is deprecated, use createFromToken()", -## DeprecationWarning, -## stacklevel=2 -## ) - return self.createFromToken(args[0], args[1]) - - if (len(args) == 3 - and isinstance(args[0], (int, long)) - and isinstance(args[1], Token) - and isinstance(args[2], basestring) - ): - # Object create(int tokenType, Token fromToken, String text); -## warnings.warn( -## "Using create() is deprecated, use createFromToken()", -## DeprecationWarning, -## stacklevel=2 -## ) - return self.createFromToken(args[0], args[1], args[2]) - - if (len(args) == 2 - and isinstance(args[0], (int, long)) - and isinstance(args[1], basestring) - ): - # Object create(int tokenType, String text); -## warnings.warn( -## "Using create() is deprecated, use createFromType()", -## DeprecationWarning, -## stacklevel=2 -## ) - return self.createFromType(args[0], args[1]) - - raise TypeError( - "No create method with this signature found: %s" - % (', '.join(type(v).__name__ for v in args)) - ) - - -############################################################################ -# -# base implementation of Tree and TreeAdaptor -# -# Tree -# \- BaseTree -# -# TreeAdaptor -# \- BaseTreeAdaptor -# -############################################################################ - - -class BaseTree(Tree): - """ - @brief A generic tree implementation with no payload. - - You must subclass to - actually have any user data. ANTLR v3 uses a list of children approach - instead of the child-sibling approach in v2. A flat tree (a list) is - an empty node whose children represent the list. 
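The Java-style create() dispatch described above maps each argument signature onto one of the explicit factory methods. A hedged sketch; IMAG is a hypothetical imaginary token type, not a constant from the runtime:

from antlr3.tokens import CommonToken
from antlr3.tree import CommonTreeAdaptor

adaptor = CommonTreeAdaptor()
tok = CommonToken(type=7, text="x")
IMAG = 20                                  # hypothetical imaginary type
n1 = adaptor.create(tok)                   # -> createWithPayload(tok)
n2 = adaptor.create(IMAG, tok)             # -> createFromToken(IMAG, tok)
n3 = adaptor.create(IMAG, tok, "IMAG")     # -> createFromToken(IMAG, tok, "IMAG")
n4 = adaptor.create(IMAG, "IMAG")          # -> createFromType(IMAG, "IMAG")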
An empty, but - non-null node is called "nil". - """ - - # BaseTree is abstract, no need to complain about not implemented abstract - # methods - # pylint: disable-msg=W0223 - - def __init__(self, node=None): - """ - Create a new node from an existing node does nothing for BaseTree - as there are no fields other than the children list, which cannot - be copied as the children are not considered part of this node. - """ - - Tree.__init__(self) - self.children = [] - self.parent = None - self.childIndex = 0 - - - def getChild(self, i): - try: - return self.children[i] - except IndexError: - return None - - - def getChildren(self): - """@brief Get the children internal List - - Note that if you directly mess with - the list, do so at your own risk. - """ - - # FIXME: mark as deprecated - return self.children - - - def getFirstChildWithType(self, treeType): - for child in self.children: - if child.getType() == treeType: - return child - - return None - - - def getChildCount(self): - return len(self.children) - - - def addChild(self, childTree): - """Add t as child of this node. - - Warning: if t has no children, but child does - and child isNil then this routine moves children to t via - t.children = child.children; i.e., without copying the array. - """ - - # this implementation is much simpler and probably less efficient - # than the mumbo-jumbo that Ter did for the Java runtime. - - if childTree is None: - return - - if childTree.isNil(): - # t is an empty node possibly with children - - if self.children is childTree.children: - raise ValueError("attempt to add child list to itself") - - # fix parent pointer and childIndex for new children - for idx, child in enumerate(childTree.children): - child.parent = self - child.childIndex = len(self.children) + idx - - self.children += childTree.children - - else: - # child is not nil (don't care about children) - self.children.append(childTree) - childTree.parent = self - childTree.childIndex = len(self.children) - 1 - - - def addChildren(self, children): - """Add all elements of kids list as children of this node""" - - self.children += children - - - def setChild(self, i, t): - if t is None: - return - - if t.isNil(): - raise ValueError("Can't set single child to a list") - - self.children[i] = t - t.parent = self - t.childIndex = i - - - def deleteChild(self, i): - killed = self.children[i] - - del self.children[i] - - # walk rest and decrement their child indexes - for idx, child in enumerate(self.children[i:]): - child.childIndex = i + idx - - return killed - - - def replaceChildren(self, startChildIndex, stopChildIndex, newTree): - """ - Delete children from start to stop and replace with t even if t is - a list (nil-root tree). num of children can increase or decrease. - For huge child lists, inserting children can force walking rest of - children to set their childindex; could be slow. 
- """ - - if (startChildIndex >= len(self.children) - or stopChildIndex >= len(self.children) - ): - raise IndexError("indexes invalid") - - replacingHowMany = stopChildIndex - startChildIndex + 1 - - # normalize to a list of children to add: newChildren - if newTree.isNil(): - newChildren = newTree.children - - else: - newChildren = [newTree] - - replacingWithHowMany = len(newChildren) - delta = replacingHowMany - replacingWithHowMany - - - if delta == 0: - # if same number of nodes, do direct replace - for idx, child in enumerate(newChildren): - self.children[idx + startChildIndex] = child - child.parent = self - child.childIndex = idx + startChildIndex - - else: - # length of children changes... - - # ...delete replaced segment... - del self.children[startChildIndex:stopChildIndex+1] - - # ...insert new segment... - self.children[startChildIndex:startChildIndex] = newChildren - - # ...and fix indeces - self.freshenParentAndChildIndexes(startChildIndex) - - - def isNil(self): - return False - - - def freshenParentAndChildIndexes(self, offset=0): - for idx, child in enumerate(self.children[offset:]): - child.childIndex = idx + offset - child.parent = self - - - def sanityCheckParentAndChildIndexes(self, parent=None, i=-1): - if parent != self.parent: - raise ValueError( - "parents don't match; expected %r found %r" - % (parent, self.parent) - ) - - if i != self.childIndex: - raise ValueError( - "child indexes don't match; expected %d found %d" - % (i, self.childIndex) - ) - - for idx, child in enumerate(self.children): - child.sanityCheckParentAndChildIndexes(self, idx) - - - def getChildIndex(self): - """BaseTree doesn't track child indexes.""" - - return 0 - - - def setChildIndex(self, index): - """BaseTree doesn't track child indexes.""" - - pass - - - def getParent(self): - """BaseTree doesn't track parent pointers.""" - - return None - - def setParent(self, t): - """BaseTree doesn't track parent pointers.""" - - pass - - - def hasAncestor(self, ttype): - """Walk upwards looking for ancestor with this token type.""" - return self.getAncestor(ttype) is not None - - def getAncestor(self, ttype): - """Walk upwards and get first ancestor with this token type.""" - t = self.getParent() - while t is not None: - if t.getType() == ttype: - return t - t = t.getParent() - - return None - - def getAncestors(self): - """Return a list of all ancestors of this node. - - The first node of list is the root and the last is the parent of - this node. - """ - if selfgetParent() is None: - return None - - ancestors = [] - t = self.getParent() - while t is not None: - ancestors.insert(0, t) # insert at start - t = t.getParent() - - return ancestors - - - def toStringTree(self): - """Print out a whole tree not just a node""" - - if len(self.children) == 0: - return self.toString() - - buf = [] - if not self.isNil(): - buf.append('(') - buf.append(self.toString()) - buf.append(' ') - - for i, child in enumerate(self.children): - if i > 0: - buf.append(' ') - buf.append(child.toStringTree()) - - if not self.isNil(): - buf.append(')') - - return ''.join(buf) - - - def getLine(self): - return 0 - - - def getCharPositionInLine(self): - return 0 - - - def toString(self): - """Override to say how a node (not a tree) should look as text""" - - raise NotImplementedError - - - -class BaseTreeAdaptor(TreeAdaptor): - """ - @brief A TreeAdaptor that works with any Tree implementation. 
- """ - - # BaseTreeAdaptor is abstract, no need to complain about not implemented - # abstract methods - # pylint: disable-msg=W0223 - - def nil(self): - return self.createWithPayload(None) - - - def errorNode(self, input, start, stop, exc): - """ - create tree node that holds the start and stop tokens associated - with an error. - - If you specify your own kind of tree nodes, you will likely have to - override this method. CommonTree returns Token.INVALID_TOKEN_TYPE - if no token payload but you might have to set token type for diff - node type. - - You don't have to subclass CommonErrorNode; you will likely need to - subclass your own tree node class to avoid class cast exception. - """ - - return CommonErrorNode(input, start, stop, exc) - - - def isNil(self, tree): - return tree.isNil() - - - def dupTree(self, t, parent=None): - """ - This is generic in the sense that it will work with any kind of - tree (not just Tree interface). It invokes the adaptor routines - not the tree node routines to do the construction. - """ - - if t is None: - return None - - newTree = self.dupNode(t) - - # ensure new subtree root has parent/child index set - - # same index in new tree - self.setChildIndex(newTree, self.getChildIndex(t)) - - self.setParent(newTree, parent) - - for i in range(self.getChildCount(t)): - child = self.getChild(t, i) - newSubTree = self.dupTree(child, t) - self.addChild(newTree, newSubTree) - - return newTree - - - def addChild(self, tree, child): - """ - Add a child to the tree t. If child is a flat tree (a list), make all - in list children of t. Warning: if t has no children, but child does - and child isNil then you can decide it is ok to move children to t via - t.children = child.children; i.e., without copying the array. Just - make sure that this is consistent with have the user will build - ASTs. - """ - - #if isinstance(child, Token): - # child = self.createWithPayload(child) - - if tree is not None and child is not None: - tree.addChild(child) - - - def becomeRoot(self, newRoot, oldRoot): - """ - If oldRoot is a nil root, just copy or move the children to newRoot. - If not a nil root, make oldRoot a child of newRoot. - - old=^(nil a b c), new=r yields ^(r a b c) - old=^(a b c), new=r yields ^(r ^(a b c)) - - If newRoot is a nil-rooted single child tree, use the single - child as the new root node. - - old=^(nil a b c), new=^(nil r) yields ^(r a b c) - old=^(a b c), new=^(nil r) yields ^(r ^(a b c)) - - If oldRoot was null, it's ok, just return newRoot (even if isNil). - - old=null, new=r yields r - old=null, new=^(nil r) yields ^(nil r) - - Return newRoot. Throw an exception if newRoot is not a - simple node or nil root with a single child node--it must be a root - node. If newRoot is ^(nil x) return x as newRoot. - - Be advised that it's ok for newRoot to point at oldRoot's - children; i.e., you don't have to copy the list. We are - constructing these nodes so we should have this control for - efficiency. - """ - - if isinstance(newRoot, Token): - newRoot = self.create(newRoot) - - if oldRoot is None: - return newRoot - - if not isinstance(newRoot, CommonTree): - newRoot = self.createWithPayload(newRoot) - - # handle ^(nil real-node) - if newRoot.isNil(): - nc = newRoot.getChildCount() - if nc == 1: - newRoot = newRoot.getChild(0) - - elif nc > 1: - # TODO: make tree run time exceptions hierarchy - raise RuntimeError("more than one node as root") - - # add oldRoot to newRoot; addChild takes care of case where oldRoot - # is a flat list (i.e., nil-rooted tree). 
All children of oldRoot - # are added to newRoot. - newRoot.addChild(oldRoot) - return newRoot - - - def rulePostProcessing(self, root): - """Transform ^(nil x) to x and nil to null""" - - if root is not None and root.isNil(): - if root.getChildCount() == 0: - root = None - - elif root.getChildCount() == 1: - root = root.getChild(0) - # whoever invokes rule will set parent and child index - root.setParent(None) - root.setChildIndex(-1) - - return root - - - def createFromToken(self, tokenType, fromToken, text=None): - if fromToken is None: - return self.createFromType(tokenType, text) - - assert isinstance(tokenType, (int, long)), type(tokenType).__name__ - assert isinstance(fromToken, Token), type(fromToken).__name__ - assert text is None or isinstance(text, basestring), type(text).__name__ - - fromToken = self.createToken(fromToken) - fromToken.type = tokenType - if text is not None: - fromToken.text = text - t = self.createWithPayload(fromToken) - return t - - - def createFromType(self, tokenType, text): - assert isinstance(tokenType, (int, long)), type(tokenType).__name__ - assert isinstance(text, basestring) or text is None, type(text).__name__ - - fromToken = self.createToken(tokenType=tokenType, text=text) - t = self.createWithPayload(fromToken) - return t - - - def getType(self, t): - return t.getType() - - - def setType(self, t, type): - raise RuntimeError("don't know enough about Tree node") - - - def getText(self, t): - return t.getText() - - - def setText(self, t, text): - raise RuntimeError("don't know enough about Tree node") - - - def getChild(self, t, i): - return t.getChild(i) - - - def setChild(self, t, i, child): - t.setChild(i, child) - - - def deleteChild(self, t, i): - return t.deleteChild(i) - - - def getChildCount(self, t): - return t.getChildCount() - - - def getUniqueID(self, node): - return hash(node) - - - def createToken(self, fromToken=None, tokenType=None, text=None): - """ - Tell me how to create a token for use with imaginary token nodes. - For example, there is probably no input symbol associated with imaginary - token DECL, but you need to create it as a payload or whatever for - the DECL node as in ^(DECL type ID). - - If you care what the token payload objects' type is, you should - override this method and any other createToken variant. - """ - - raise NotImplementedError - - -############################################################################ -# -# common tree implementation -# -# Tree -# \- BaseTree -# \- CommonTree -# \- CommonErrorNode -# -# TreeAdaptor -# \- BaseTreeAdaptor -# \- CommonTreeAdaptor -# -############################################################################ - - -class CommonTree(BaseTree): - """@brief A tree node that is wrapper for a Token object. - - After 3.0 release - while building tree rewrite stuff, it became clear that computing - parent and child index is very difficult and cumbersome. Better to - spend the space in every tree node. If you don't want these extra - fields, it's easy to cut them out in your own BaseTree subclass. - - """ - - def __init__(self, payload): - BaseTree.__init__(self) - - # What token indexes bracket all tokens associated with this node - # and below? - self.startIndex = -1 - self.stopIndex = -1 - - # Who is the parent node of this node; if null, implies node is root - self.parent = None - - # What index is this node in the child list? 
Range: 0..n-1 - self.childIndex = -1 - - # A single token is the payload - if payload is None: - self.token = None - - elif isinstance(payload, CommonTree): - self.token = payload.token - self.startIndex = payload.startIndex - self.stopIndex = payload.stopIndex - - elif payload is None or isinstance(payload, Token): - self.token = payload - - else: - raise TypeError(type(payload).__name__) - - - - def getToken(self): - return self.token - - - def dupNode(self): - return CommonTree(self) - - - def isNil(self): - return self.token is None - - - def getType(self): - if self.token is None: - return INVALID_TOKEN_TYPE - - return self.token.getType() - - type = property(getType) - - - def getText(self): - if self.token is None: - return None - - return self.token.text - - text = property(getText) - - - def getLine(self): - if self.token is None or self.token.getLine() == 0: - if self.getChildCount(): - return self.getChild(0).getLine() - else: - return 0 - - return self.token.getLine() - - line = property(getLine) - - - def getCharPositionInLine(self): - if self.token is None or self.token.getCharPositionInLine() == -1: - if self.getChildCount(): - return self.getChild(0).getCharPositionInLine() - else: - return 0 - - else: - return self.token.getCharPositionInLine() - - charPositionInLine = property(getCharPositionInLine) - - - def getTokenStartIndex(self): - if self.startIndex == -1 and self.token is not None: - return self.token.getTokenIndex() - - return self.startIndex - - def setTokenStartIndex(self, index): - self.startIndex = index - - tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex) - - - def getTokenStopIndex(self): - if self.stopIndex == -1 and self.token is not None: - return self.token.getTokenIndex() - - return self.stopIndex - - def setTokenStopIndex(self, index): - self.stopIndex = index - - tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex) - - - def setUnknownTokenBoundaries(self): - """For every node in this subtree, make sure it's start/stop token's - are set. Walk depth first, visit bottom up. Only updates nodes - with at least one token index < 0. 
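A quick sketch of the payload behaviour described above (illustrative only; token type 7 is arbitrary):

from antlr3.tokens import CommonToken
from antlr3.tree import CommonTree

node = CommonTree(CommonToken(type=7, text="ID"))
assert node.getType() == 7 and node.getText() == "ID"
assert not node.isNil()            # only a missing payload makes a nil node
assert CommonTree(None).isNil()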
- """ - - if self.children is None: - if self.startIndex < 0 or self.stopIndex < 0: - self.startIndex = self.stopIndex = self.token.getTokenIndex() - - return - - for child in self.children: - child.setUnknownTokenBoundaries() - - if self.startIndex >= 0 and self.stopIndex >= 0: - # already set - return - - if self.children: - firstChild = self.children[0] - lastChild = self.children[-1] - self.startIndex = firstChild.getTokenStartIndex() - self.stopIndex = lastChild.getTokenStopIndex() - - - def getChildIndex(self): - #FIXME: mark as deprecated - return self.childIndex - - - def setChildIndex(self, idx): - #FIXME: mark as deprecated - self.childIndex = idx - - - def getParent(self): - #FIXME: mark as deprecated - return self.parent - - - def setParent(self, t): - #FIXME: mark as deprecated - self.parent = t - - - def toString(self): - if self.isNil(): - return "nil" - - if self.getType() == INVALID_TOKEN_TYPE: - return "" - - return self.token.text - - __str__ = toString - - - - def toStringTree(self): - if not self.children: - return self.toString() - - ret = '' - if not self.isNil(): - ret += '(%s ' % (self.toString()) - - ret += ' '.join([child.toStringTree() for child in self.children]) - - if not self.isNil(): - ret += ')' - - return ret - - -INVALID_NODE = CommonTree(INVALID_TOKEN) - - -class CommonErrorNode(CommonTree): - """A node representing erroneous token range in token stream""" - - def __init__(self, input, start, stop, exc): - CommonTree.__init__(self, None) - - if (stop is None or - (stop.getTokenIndex() < start.getTokenIndex() and - stop.getType() != EOF - ) - ): - # sometimes resync does not consume a token (when LT(1) is - # in follow set. So, stop will be 1 to left to start. adjust. - # Also handle case where start is the first token and no token - # is consumed during recovery; LT(-1) will return null. - stop = start - - self.input = input - self.start = start - self.stop = stop - self.trappedException = exc - - - def isNil(self): - return False - - - def getType(self): - return INVALID_TOKEN_TYPE - - - def getText(self): - if isinstance(self.start, Token): - i = self.start.getTokenIndex() - j = self.stop.getTokenIndex() - if self.stop.getType() == EOF: - j = self.input.size() - - badText = self.input.toString(i, j) - - elif isinstance(self.start, Tree): - badText = self.input.toString(self.start, self.stop) - - else: - # people should subclass if they alter the tree type so this - # next one is for sure correct. - badText = "" - - return badText - - - def toString(self): - if isinstance(self.trappedException, MissingTokenException): - return ("") - - elif isinstance(self.trappedException, UnwantedTokenException): - return ("") - - elif isinstance(self.trappedException, MismatchedTokenException): - return ("") - - elif isinstance(self.trappedException, NoViableAltException): - return ("") - - return "" - - -class CommonTreeAdaptor(BaseTreeAdaptor): - """ - @brief A TreeAdaptor that works with any Tree implementation. - - It provides - really just factory methods; all the work is done by BaseTreeAdaptor. - If you would like to have different tokens created than ClassicToken - objects, you need to override this and then set the parser tree adaptor to - use your subclass. - - To get your parser to build nodes of a different type, override - create(Token), errorNode(), and to be safe, YourTreeClass.dupNode(). - dupNode is called to duplicate nodes during rewrite operations. - """ - - def dupNode(self, treeNode): - """ - Duplicate a node. 
This is part of the factory; - override if you want another kind of node to be built. - - I could use reflection to prevent having to override this - but reflection is slow. - """ - - if treeNode is None: - return None - - return treeNode.dupNode() - - - def createWithPayload(self, payload): - return CommonTree(payload) - - - def createToken(self, fromToken=None, tokenType=None, text=None): - """ - Tell me how to create a token for use with imaginary token nodes. - For example, there is probably no input symbol associated with imaginary - token DECL, but you need to create it as a payload or whatever for - the DECL node as in ^(DECL type ID). - - If you care what the token payload objects' type is, you should - override this method and any other createToken variant. - """ - - if fromToken is not None: - return CommonToken(oldToken=fromToken) - - return CommonToken(type=tokenType, text=text) - - - def setTokenBoundaries(self, t, startToken, stopToken): - """ - Track start/stop token for subtree root created for a rule. - Only works with Tree nodes. For rules that match nothing, - seems like this will yield start=i and stop=i-1 in a nil node. - Might be useful info so I'll not force to be i..i. - """ - - if t is None: - return - - start = 0 - stop = 0 - - if startToken is not None: - start = startToken.index - - if stopToken is not None: - stop = stopToken.index - - t.setTokenStartIndex(start) - t.setTokenStopIndex(stop) - - - def getTokenStartIndex(self, t): - if t is None: - return -1 - return t.getTokenStartIndex() - - - def getTokenStopIndex(self, t): - if t is None: - return -1 - return t.getTokenStopIndex() - - - def getText(self, t): - if t is None: - return None - return t.getText() - - - def getType(self, t): - if t is None: - return INVALID_TOKEN_TYPE - - return t.getType() - - - def getToken(self, t): - """ - What is the Token associated with this node? If - you are not using CommonTree, then you must - override this in your own adaptor. - """ - - if isinstance(t, CommonTree): - return t.getToken() - - return None # no idea what to do - - - def getChild(self, t, i): - if t is None: - return None - return t.getChild(i) - - - def getChildCount(self, t): - if t is None: - return 0 - return t.getChildCount() - - - def getParent(self, t): - return t.getParent() - - - def setParent(self, t, parent): - t.setParent(parent) - - - def getChildIndex(self, t): - if t is None: - return 0 - return t.getChildIndex() - - - def setChildIndex(self, t, index): - t.setChildIndex(index) - - - def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): - if parent is not None: - parent.replaceChildren(startChildIndex, stopChildIndex, t) - - -############################################################################ -# -# streams -# -# TreeNodeStream -# \- BaseTree -# \- CommonTree -# -# TreeAdaptor -# \- BaseTreeAdaptor -# \- CommonTreeAdaptor -# -############################################################################ - - - -class TreeNodeStream(IntStream): - """@brief A stream of tree nodes - - It accessing nodes from a tree of some kind. - """ - - # TreeNodeStream is abstract, no need to complain about not implemented - # abstract methods - # pylint: disable-msg=W0223 - - def get(self, i): - """Get a tree node at an absolute index i; 0..n-1. - If you don't want to buffer up nodes, then this method makes no - sense for you. - """ - - raise NotImplementedError - - - def LT(self, k): - """ - Get tree node at current input pointer + i ahead where i=1 is next node. 
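Unlike the abstract base, CommonTreeAdaptor is deliberately tolerant of None arguments; a small illustrative sketch:

from antlr3.constants import INVALID_TOKEN_TYPE
from antlr3.tree import CommonTreeAdaptor

adaptor = CommonTreeAdaptor()
assert adaptor.getChildCount(None) == 0
assert adaptor.getType(None) == INVALID_TOKEN_TYPE
assert adaptor.getTokenStartIndex(None) == -1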
- i<0 indicates nodes in the past. So LT(-1) is previous node, but - implementations are not required to provide results for k < -1. - LT(0) is undefined. For i>=n, return null. - Return null for LT(0) and any index that results in an absolute address - that is negative. - - This is analogus to the LT() method of the TokenStream, but this - returns a tree node instead of a token. Makes code gen identical - for both parser and tree grammars. :) - """ - - raise NotImplementedError - - - def getTreeSource(self): - """ - Where is this stream pulling nodes from? This is not the name, but - the object that provides node objects. - """ - - raise NotImplementedError - - - def getTokenStream(self): - """ - If the tree associated with this stream was created from a TokenStream, - you can specify it here. Used to do rule $text attribute in tree - parser. Optional unless you use tree parser rule text attribute - or output=template and rewrite=true options. - """ - - raise NotImplementedError - - - def getTreeAdaptor(self): - """ - What adaptor can tell me how to interpret/navigate nodes and - trees. E.g., get text of a node. - """ - - raise NotImplementedError - - - def setUniqueNavigationNodes(self, uniqueNavigationNodes): - """ - As we flatten the tree, we use UP, DOWN nodes to represent - the tree structure. When debugging we need unique nodes - so we have to instantiate new ones. When doing normal tree - parsing, it's slow and a waste of memory to create unique - navigation nodes. Default should be false; - """ - - raise NotImplementedError - - - def reset(self): - """ - Reset the tree node stream in such a way that it acts like - a freshly constructed stream. - """ - - raise NotImplementedError - - - def toString(self, start, stop): - """ - Return the text of all nodes from start to stop, inclusive. - If the stream does not buffer all the nodes then it can still - walk recursively from start until stop. You can always return - null or "" too, but users should not access $ruleLabel.text in - an action of course in that case. - """ - - raise NotImplementedError - - - # REWRITING TREES (used by tree parser) - def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): - """ - Replace from start to stop child index of parent with t, which might - be a list. Number of children may be different - after this call. The stream is notified because it is walking the - tree and might need to know you are monkeying with the underlying - tree. Also, it might be able to modify the node stream to avoid - restreaming for future phases. - - If parent is null, don't do anything; must be at root of overall tree. - Can't replace whatever points to the parent externally. Do nothing. - """ - - raise NotImplementedError - - -class CommonTreeNodeStream(TreeNodeStream): - """@brief A buffered stream of tree nodes. - - Nodes can be from a tree of ANY kind. - - This node stream sucks all nodes out of the tree specified in - the constructor during construction and makes pointers into - the tree using an array of Object pointers. The stream necessarily - includes pointers to DOWN and UP and EOF nodes. - - This stream knows how to mark/release for backtracking. - - This stream is most suitable for tree interpreters that need to - jump around a lot or for tree parsers requiring speed (at cost of memory). - There is some duplicated functionality here with UnBufferedTreeNodeStream - but just in bookkeeping, not tree walking etc... 
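How the flattening plays out in practice (illustrative sketch; token types 4-6 are arbitrary, while DOWN and UP are the runtime constants 2 and 3):

from antlr3.tree import CommonTreeAdaptor, CommonTreeNodeStream

adaptor = CommonTreeAdaptor()
plus = adaptor.createFromType(4, "+")
adaptor.addChild(plus, adaptor.createFromType(5, "A"))
adaptor.addChild(plus, adaptor.createFromType(6, "B"))

stream = CommonTreeNodeStream(adaptor, plus)
print(str(stream))      # "4 2 5 6 3", i.e. + DOWN A B UP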
- - @see UnBufferedTreeNodeStream - """ - - def __init__(self, *args): - TreeNodeStream.__init__(self) - - if len(args) == 1: - adaptor = CommonTreeAdaptor() - tree = args[0] - - nodes = None - down = None - up = None - eof = None - - elif len(args) == 2: - adaptor = args[0] - tree = args[1] - - nodes = None - down = None - up = None - eof = None - - elif len(args) == 3: - parent = args[0] - start = args[1] - stop = args[2] - - adaptor = parent.adaptor - tree = parent.root - - nodes = parent.nodes[start:stop] - down = parent.down - up = parent.up - eof = parent.eof - - else: - raise TypeError("Invalid arguments") - - # all these navigation nodes are shared and hence they - # cannot contain any line/column info - if down is not None: - self.down = down - else: - self.down = adaptor.createFromType(DOWN, "DOWN") - - if up is not None: - self.up = up - else: - self.up = adaptor.createFromType(UP, "UP") - - if eof is not None: - self.eof = eof - else: - self.eof = adaptor.createFromType(EOF, "EOF") - - # The complete mapping from stream index to tree node. - # This buffer includes pointers to DOWN, UP, and EOF nodes. - # It is built upon ctor invocation. The elements are type - # Object as we don't what the trees look like. - - # Load upon first need of the buffer so we can set token types - # of interest for reverseIndexing. Slows us down a wee bit to - # do all of the if p==-1 testing everywhere though. - if nodes is not None: - self.nodes = nodes - else: - self.nodes = [] - - # Pull nodes from which tree? - self.root = tree - - # IF this tree (root) was created from a token stream, track it. - self.tokens = None - - # What tree adaptor was used to build these trees - self.adaptor = adaptor - - # Reuse same DOWN, UP navigation nodes unless this is true - self.uniqueNavigationNodes = False - - # The index into the nodes list of the current node (next node - # to consume). If -1, nodes array not filled yet. - self.p = -1 - - # Track the last mark() call result value for use in rewind(). - self.lastMarker = None - - # Stack of indexes used for push/pop calls - self.calls = [] - - - def __iter__(self): - return TreeIterator(self.root, self.adaptor) - - - def fillBuffer(self): - """Walk tree with depth-first-search and fill nodes buffer. - Don't do DOWN, UP nodes if its a list (t is isNil). - """ - - self._fillBuffer(self.root) - self.p = 0 # buffer of nodes intialized now - - - def _fillBuffer(self, t): - nil = self.adaptor.isNil(t) - - if not nil: - self.nodes.append(t) # add this node - - # add DOWN node if t has children - n = self.adaptor.getChildCount(t) - if not nil and n > 0: - self.addNavigationNode(DOWN) - - # and now add all its children - for c in range(n): - self._fillBuffer(self.adaptor.getChild(t, c)) - - # add UP node if t has children - if not nil and n > 0: - self.addNavigationNode(UP) - - - def getNodeIndex(self, node): - """What is the stream index for node? 0..n-1 - Return -1 if node not found. - """ - - if self.p == -1: - self.fillBuffer() - - for i, t in enumerate(self.nodes): - if t == node: - return i - - return -1 - - - def addNavigationNode(self, ttype): - """ - As we flatten the tree, we use UP, DOWN nodes to represent - the tree structure. When debugging we need unique nodes - so instantiate new ones when uniqueNavigationNodes is true. 
- """ - - navNode = None - - if ttype == DOWN: - if self.hasUniqueNavigationNodes(): - navNode = self.adaptor.createFromType(DOWN, "DOWN") - - else: - navNode = self.down - - else: - if self.hasUniqueNavigationNodes(): - navNode = self.adaptor.createFromType(UP, "UP") - - else: - navNode = self.up - - self.nodes.append(navNode) - - - def get(self, i): - if self.p == -1: - self.fillBuffer() - - return self.nodes[i] - - - def LT(self, k): - if self.p == -1: - self.fillBuffer() - - if k == 0: - return None - - if k < 0: - return self.LB(-k) - - if self.p + k - 1 >= len(self.nodes): - return self.eof - - return self.nodes[self.p + k - 1] - - - def getCurrentSymbol(self): - return self.LT(1) - - - def LB(self, k): - """Look backwards k nodes""" - - if k == 0: - return None - - if self.p - k < 0: - return None - - return self.nodes[self.p - k] - - - def isEOF(self, obj): - return self.adaptor.getType(obj) == EOF - - - def getTreeSource(self): - return self.root - - - def getSourceName(self): - return self.getTokenStream().getSourceName() - - - def getTokenStream(self): - return self.tokens - - - def setTokenStream(self, tokens): - self.tokens = tokens - - - def getTreeAdaptor(self): - return self.adaptor - - - def hasUniqueNavigationNodes(self): - return self.uniqueNavigationNodes - - - def setUniqueNavigationNodes(self, uniqueNavigationNodes): - self.uniqueNavigationNodes = uniqueNavigationNodes - - - def consume(self): - if self.p == -1: - self.fillBuffer() - - self.p += 1 - - - def LA(self, i): - return self.adaptor.getType(self.LT(i)) - - - def mark(self): - if self.p == -1: - self.fillBuffer() - - - self.lastMarker = self.index() - return self.lastMarker - - - def release(self, marker=None): - # no resources to release - - pass - - - def index(self): - return self.p - - - def rewind(self, marker=None): - if marker is None: - marker = self.lastMarker - - self.seek(marker) - - - def seek(self, index): - if self.p == -1: - self.fillBuffer() - - self.p = index - - - def push(self, index): - """ - Make stream jump to a new location, saving old location. - Switch back with pop(). - """ - - self.calls.append(self.p) # save current index - self.seek(index) - - - def pop(self): - """ - Seek back to previous index saved during last push() call. - Return top of stack (return index). 
- """ - - ret = self.calls.pop(-1) - self.seek(ret) - return ret - - - def reset(self): - self.p = 0 - self.lastMarker = 0 - self.calls = [] - - - def size(self): - if self.p == -1: - self.fillBuffer() - - return len(self.nodes) - - - # TREE REWRITE INTERFACE - - def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): - if parent is not None: - self.adaptor.replaceChildren( - parent, startChildIndex, stopChildIndex, t - ) - - - def __str__(self): - """Used for testing, just return the token type stream""" - - if self.p == -1: - self.fillBuffer() - - return ' '.join([str(self.adaptor.getType(node)) - for node in self.nodes - ]) - - - def toString(self, start, stop): - if start is None or stop is None: - return None - - if self.p == -1: - self.fillBuffer() - - #System.out.println("stop: "+stop); - #if ( start instanceof CommonTree ) - # System.out.print("toString: "+((CommonTree)start).getToken()+", "); - #else - # System.out.println(start); - #if ( stop instanceof CommonTree ) - # System.out.println(((CommonTree)stop).getToken()); - #else - # System.out.println(stop); - - # if we have the token stream, use that to dump text in order - if self.tokens is not None: - beginTokenIndex = self.adaptor.getTokenStartIndex(start) - endTokenIndex = self.adaptor.getTokenStopIndex(stop) - - # if it's a tree, use start/stop index from start node - # else use token range from start/stop nodes - if self.adaptor.getType(stop) == UP: - endTokenIndex = self.adaptor.getTokenStopIndex(start) - - elif self.adaptor.getType(stop) == EOF: - endTokenIndex = self.size() -2 # don't use EOF - - return self.tokens.toString(beginTokenIndex, endTokenIndex) - - # walk nodes looking for start - i, t = 0, None - for i, t in enumerate(self.nodes): - if t == start: - break - - # now walk until we see stop, filling string buffer with text - buf = [] - t = self.nodes[i] - while t != stop: - text = self.adaptor.getText(t) - if text is None: - text = " " + self.adaptor.getType(t) - - buf.append(text) - i += 1 - t = self.nodes[i] - - # include stop node too - text = self.adaptor.getText(stop) - if text is None: - text = " " +self.adaptor.getType(stop) - - buf.append(text) - - return ''.join(buf) - - - ## iterator interface - def __iter__(self): - if self.p == -1: - self.fillBuffer() - - for node in self.nodes: - yield node - - -############################################################################# -# -# tree parser -# -############################################################################# - -class TreeParser(BaseRecognizer): - """@brief Baseclass for generated tree parsers. - - A parser for a stream of tree nodes. "tree grammars" result in a subclass - of this. All the error reporting and recovery is shared with Parser via - the BaseRecognizer superclass. 
- """ - - def __init__(self, input, state=None): - BaseRecognizer.__init__(self, state) - - self.input = None - self.setTreeNodeStream(input) - - - def reset(self): - BaseRecognizer.reset(self) # reset all recognizer state variables - if self.input is not None: - self.input.seek(0) # rewind the input - - - def setTreeNodeStream(self, input): - """Set the input stream""" - - self.input = input - - - def getTreeNodeStream(self): - return self.input - - - def getSourceName(self): - return self.input.getSourceName() - - - def getCurrentInputSymbol(self, input): - return input.LT(1) - - - def getMissingSymbol(self, input, e, expectedTokenType, follow): - tokenText = "" - adaptor = input.adaptor - return adaptor.createToken( - CommonToken(type=expectedTokenType, text=tokenText)) - - - # precompiled regex used by inContext - dotdot = ".*[^.]\\.\\.[^.].*" - doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*" - dotdotPattern = re.compile(dotdot) - doubleEtcPattern = re.compile(doubleEtc) - - def inContext(self, context, adaptor=None, tokenName=None, t=None): - """Check if current node in input has a context. - - Context means sequence of nodes towards root of tree. For example, - you might say context is "MULT" which means my parent must be MULT. - "CLASS VARDEF" says current node must be child of a VARDEF and whose - parent is a CLASS node. You can use "..." to mean zero-or-more nodes. - "METHOD ... VARDEF" means my parent is VARDEF and somewhere above - that is a METHOD node. The first node in the context is not - necessarily the root. The context matcher stops matching and returns - true when it runs out of context. There is no way to force the first - node to be the root. - """ - - return _inContext( - self.input.getTreeAdaptor(), self.getTokenNames(), - self.input.LT(1), context) - - @classmethod - def _inContext(cls, adaptor, tokenNames, t, context): - """The worker for inContext. - - It's static and full of parameters for testing purposes. - """ - - if cls.dotdotPattern.match(context): - # don't allow "..", must be "..." - raise ValueError("invalid syntax: ..") - - if cls.doubleEtcPattern.match(context): - # don't allow double "..." - raise ValueError("invalid syntax: ... ...") - - # ensure spaces around ... - context = context.replace("...", " ... ") - context = context.strip() - nodes = context.split() - - ni = len(nodes) - 1 - t = adaptor.getParent(t) - while ni >= 0 and t is not None: - if nodes[ni] == "...": - # walk upwards until we see nodes[ni-1] then continue walking - if ni == 0: - # ... at start is no-op - return True - goal = nodes[ni-1] - ancestor = cls._getAncestor(adaptor, tokenNames, t, goal) - if ancestor is None: - return False - t = ancestor - ni -= 1 - - name = tokenNames[adaptor.getType(t)] - if name != nodes[ni]: - return False - - # advance to parent and to previous element in context node list - ni -= 1 - t = adaptor.getParent(t) - - # at root but more nodes to match - if t is None and ni >= 0: - return False - - return True - - @staticmethod - def _getAncestor(adaptor, tokenNames, t, goal): - """Helper for static inContext.""" - while t is not None: - name = tokenNames[adaptor.getType(t)] - if name == goal: - return t - t = adaptor.getParent(t) - - return None - - - def matchAny(self, ignore): # ignore stream, copy of this.input - """ - Match '.' in tree parser has special meaning. Skip node or - entire tree if node has children. If children, scan until - corresponding UP node. 
- """ - - self._state.errorRecovery = False - - look = self.input.LT(1) - if self.input.getTreeAdaptor().getChildCount(look) == 0: - self.input.consume() # not subtree, consume 1 node and return - return - - # current node is a subtree, skip to corresponding UP. - # must count nesting level to get right UP - level = 0 - tokenType = self.input.getTreeAdaptor().getType(look) - while tokenType != EOF and not (tokenType == UP and level==0): - self.input.consume() - look = self.input.LT(1) - tokenType = self.input.getTreeAdaptor().getType(look) - if tokenType == DOWN: - level += 1 - - elif tokenType == UP: - level -= 1 - - self.input.consume() # consume UP - - - def mismatch(self, input, ttype, follow): - """ - We have DOWN/UP nodes in the stream that have no line info; override. - plus we want to alter the exception type. Don't try to recover - from tree parser errors inline... - """ - - raise MismatchedTreeNodeException(ttype, input) - - - def getErrorHeader(self, e): - """ - Prefix error message with the grammar name because message is - always intended for the programmer because the parser built - the input tree not the user. - """ - - return (self.getGrammarFileName() + - ": node from %sline %s:%s" - % (['', "after "][e.approximateLineInfo], - e.line, - e.charPositionInLine - ) - ) - - def getErrorMessage(self, e, tokenNames): - """ - Tree parsers parse nodes they usually have a token object as - payload. Set the exception token and do the default behavior. - """ - - if isinstance(self, TreeParser): - adaptor = e.input.getTreeAdaptor() - e.token = adaptor.getToken(e.node) - if e.token is not None: # could be an UP/DOWN node - e.token = CommonToken( - type=adaptor.getType(e.node), - text=adaptor.getText(e.node) - ) - - return BaseRecognizer.getErrorMessage(self, e, tokenNames) - - - def traceIn(self, ruleName, ruleIndex): - BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1)) - - - def traceOut(self, ruleName, ruleIndex): - BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1)) - - -############################################################################# -# -# tree visitor -# -############################################################################# - -class TreeVisitor(object): - """Do a depth first walk of a tree, applying pre() and post() actions - we go. - """ - - def __init__(self, adaptor=None): - if adaptor is not None: - self.adaptor = adaptor - else: - self.adaptor = CommonTreeAdaptor() - - def visit(self, t, pre_action=None, post_action=None): - """Visit every node in tree t and trigger an action for each node - before/after having visited all of its children. Bottom up walk. - Execute both actions even if t has no children. Ignore return - results from transforming children since they will have altered - the child list of this node (their parent). Return result of - applying post action to this node. - - The Python version differs from the Java version by taking two - callables 'pre_action' and 'post_action' instead of a class instance - that wraps those methods. Those callables must accept a TreeNode as - their single argument and return the (potentially transformed or - replaced) TreeNode. 
- """ - - isNil = self.adaptor.isNil(t) - if pre_action is not None and not isNil: - # if rewritten, walk children of new t - t = pre_action(t) - - idx = 0 - while idx < self.adaptor.getChildCount(t): - child = self.adaptor.getChild(t, idx) - self.visit(child, pre_action, post_action) - idx += 1 - - if post_action is not None and not isNil: - t = post_action(t) - - return t - -############################################################################# -# -# tree iterator -# -############################################################################# - -class TreeIterator(object): - """ - Return a node stream from a doubly-linked tree whose nodes - know what child index they are. - - Emit navigation nodes (DOWN, UP, and EOF) to let show tree structure. - """ - - def __init__(self, tree, adaptor=None): - if adaptor is None: - adaptor = CommonTreeAdaptor() - - self.root = tree - self.adaptor = adaptor - - self.first_time = True - self.tree = tree - - # If we emit UP/DOWN nodes, we need to spit out multiple nodes per - # next() call. - self.nodes = [] - - # navigation nodes to return during walk and at end - self.down = adaptor.createFromType(DOWN, "DOWN") - self.up = adaptor.createFromType(UP, "UP") - self.eof = adaptor.createFromType(EOF, "EOF") - - - def reset(self): - self.first_time = True - self.tree = self.root - self.nodes = [] - - - def __iter__(self): - return self - - - def has_next(self): - if self.first_time: - return self.root is not None - - if len(self.nodes) > 0: - return True - - if self.tree is None: - return False - - if self.adaptor.getChildCount(self.tree) > 0: - return True - - # back at root? - return self.adaptor.getParent(self.tree) is not None - - - def next(self): - if not self.has_next(): - raise StopIteration - - if self.first_time: - # initial condition - self.first_time = False - if self.adaptor.getChildCount(self.tree) == 0: - # single node tree (special) - self.nodes.append(self.eof) - return self.tree - - return self.tree - - # if any queued up, use those first - if len(self.nodes) > 0: - return self.nodes.pop(0) - - # no nodes left? - if self.tree is None: - return self.eof - - # next node will be child 0 if any children - if self.adaptor.getChildCount(self.tree) > 0: - self.tree = self.adaptor.getChild(self.tree, 0) - # real node is next after DOWN - self.nodes.append(self.tree) - return self.down - - # if no children, look for next sibling of tree or ancestor - parent = self.adaptor.getParent(self.tree) - # while we're out of siblings, keep popping back up towards root - while (parent is not None - and self.adaptor.getChildIndex(self.tree)+1 >= self.adaptor.getChildCount(parent)): - # we're moving back up - self.nodes.append(self.up) - self.tree = parent - parent = self.adaptor.getParent(self.tree) - - # no nodes left? - if parent is None: - self.tree = None # back at root? 
nothing left then - self.nodes.append(self.eof) # add to queue, might have UP nodes in there - return self.nodes.pop(0) - - # must have found a node with an unvisited sibling - # move to it and return it - nextSiblingIndex = self.adaptor.getChildIndex(self.tree) + 1 - self.tree = self.adaptor.getChild(parent, nextSiblingIndex) - self.nodes.append(self.tree) # add to queue, might have UP nodes in there - return self.nodes.pop(0) - - - -############################################################################# -# -# streams for rule rewriting -# -############################################################################# - -class RewriteRuleElementStream(object): - """@brief Internal helper class. - - A generic list of elements tracked in an alternative to be used in - a -> rewrite rule. We need to subclass to fill in the next() method, - which returns either an AST node wrapped around a token payload or - an existing subtree. - - Once you start next()ing, do not try to add more elements. It will - break the cursor tracking I believe. - - @see org.antlr.runtime.tree.RewriteRuleSubtreeStream - @see org.antlr.runtime.tree.RewriteRuleTokenStream - - TODO: add mechanism to detect/puke on modification after reading from - stream - """ - - def __init__(self, adaptor, elementDescription, elements=None): - # Cursor 0..n-1. If singleElement!=null, cursor is 0 until you next(), - # which bumps it to 1 meaning no more elements. - self.cursor = 0 - - # Track single elements w/o creating a list. Upon 2nd add, alloc list - self.singleElement = None - - # The list of tokens or subtrees we are tracking - self.elements = None - - # Once a node / subtree has been used in a stream, it must be dup'd - # from then on. Streams are reset after subrules so that the streams - # can be reused in future subrules. So, reset must set a dirty bit. - # If dirty, then next() always returns a dup. - self.dirty = False - - # The element or stream description; usually has name of the token or - # rule reference that this list tracks. Can include rulename too, but - # the exception would track that info. - self.elementDescription = elementDescription - - self.adaptor = adaptor - - if isinstance(elements, (list, tuple)): - # Create a stream, but feed off an existing list - self.singleElement = None - self.elements = elements - - else: - # Create a stream with one element - self.add(elements) - - - def reset(self): - """ - Reset the condition of this stream so that it appears we have - not consumed any of its elements. Elements themselves are untouched. - Once we reset the stream, any future use will need duplicates. Set - the dirty bit. - """ - - self.cursor = 0 - self.dirty = True - - - def add(self, el): - if el is None: - return - - if self.elements is not None: # if in list, just add - self.elements.append(el) - return - - if self.singleElement is None: # no elements yet, track w/o list - self.singleElement = el - return - - # adding 2nd element, move to list - self.elements = [] - self.elements.append(self.singleElement) - self.singleElement = None - self.elements.append(el) - - - def nextTree(self): - """ - Return the next element in the stream. If out of elements, throw - an exception unless size()==1. If size is 1, then return elements[0]. - - Return a duplicate node/subtree if stream is out of elements and - size==1. If we've already used the element, dup (dirty bit set). 
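In generated code the pattern looks roughly like this (illustrative sketch; the description string and token type are arbitrary):

from antlr3.tokens import CommonToken
from antlr3.tree import CommonTreeAdaptor, RewriteRuleTokenStream

adaptor = CommonTreeAdaptor()
stream = RewriteRuleTokenStream(adaptor, "token ID")
stream.add(CommonToken(type=5, text="x"))   # filled while matching
node = stream.nextNode()                    # token wrapped in a CommonTree
assert not stream.hasNext()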
- """ - - if (self.dirty - or (self.cursor >= len(self) and len(self) == 1) - ): - # if out of elements and size is 1, dup - el = self._next() - return self.dup(el) - - # test size above then fetch - el = self._next() - return el - - - def _next(self): - """ - do the work of getting the next element, making sure that it's - a tree node or subtree. Deal with the optimization of single- - element list versus list of size > 1. Throw an exception - if the stream is empty or we're out of elements and size>1. - protected so you can override in a subclass if necessary. - """ - - if len(self) == 0: - raise RewriteEmptyStreamException(self.elementDescription) - - if self.cursor >= len(self): # out of elements? - if len(self) == 1: # if size is 1, it's ok; return and we'll dup - return self.toTree(self.singleElement) - - # out of elements and size was not 1, so we can't dup - raise RewriteCardinalityException(self.elementDescription) - - # we have elements - if self.singleElement is not None: - self.cursor += 1 # move cursor even for single element list - return self.toTree(self.singleElement) - - # must have more than one in list, pull from elements - o = self.toTree(self.elements[self.cursor]) - self.cursor += 1 - return o - - - def dup(self, el): - """ - When constructing trees, sometimes we need to dup a token or AST - subtree. Dup'ing a token means just creating another AST node - around it. For trees, you must call the adaptor.dupTree() unless - the element is for a tree root; then it must be a node dup. - """ - - raise NotImplementedError - - - def toTree(self, el): - """ - Ensure stream emits trees; tokens must be converted to AST nodes. - AST nodes can be passed through unmolested. - """ - - return el - - - def hasNext(self): - return ( (self.singleElement is not None and self.cursor < 1) - or (self.elements is not None - and self.cursor < len(self.elements) - ) - ) - - - def size(self): - if self.singleElement is not None: - return 1 - - if self.elements is not None: - return len(self.elements) - - return 0 - - __len__ = size - - - def getDescription(self): - """Deprecated. Directly access elementDescription attribute""" - - return self.elementDescription - - -class RewriteRuleTokenStream(RewriteRuleElementStream): - """@brief Internal helper class.""" - - def toTree(self, el): - # Don't convert to a tree unless they explicitly call nextTree. - # This way we can do hetero tree nodes in rewrite. - return el - - - def nextNode(self): - t = self._next() - return self.adaptor.createWithPayload(t) - - - def nextToken(self): - return self._next() - - - def dup(self, el): - raise TypeError("dup can't be called for a token stream.") - - -class RewriteRuleSubtreeStream(RewriteRuleElementStream): - """@brief Internal helper class.""" - - def nextNode(self): - """ - Treat next element as a single node even if it's a subtree. - This is used instead of next() when the result has to be a - tree root node. Also prevents us from duplicating recently-added - children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration - must dup the type node, but ID has been added. - - Referencing a rule result twice is ok; dup entire tree as - we can't be adding trees as root; e.g., expr expr. - - Hideous code duplication here with super.next(). Can't think of - a proper way to refactor. This needs to always call dup node - and super.next() doesn't know which to call: dup node or dup tree. 
- """ - - if (self.dirty - or (self.cursor >= len(self) and len(self) == 1) - ): - # if out of elements and size is 1, dup (at most a single node - # since this is for making root nodes). - el = self._next() - return self.adaptor.dupNode(el) - - # test size above then fetch - el = self._next() - while self.adaptor.isNil(el) and self.adaptor.getChildCount(el) == 1: - el = self.adaptor.getChild(el, 0) - - # dup just the root (want node here) - return self.adaptor.dupNode(el) - - - def dup(self, el): - return self.adaptor.dupTree(el) - - - -class RewriteRuleNodeStream(RewriteRuleElementStream): - """ - Queues up nodes matched on left side of -> in a tree parser. This is - the analog of RewriteRuleTokenStream for normal parsers. - """ - - def nextNode(self): - return self._next() - - - def toTree(self, el): - return self.adaptor.dupNode(el) - - - def dup(self, el): - # we dup every node, so don't have to worry about calling dup; short- - #circuited next() so it doesn't call. - raise TypeError("dup can't be called for a node stream.") - - -class TreeRuleReturnScope(RuleReturnScope): - """ - This is identical to the ParserRuleReturnScope except that - the start property is a tree nodes not Token object - when you are parsing trees. To be generic the tree node types - have to be Object. - """ - - def __init__(self): - self.start = None - self.tree = None - - - def getStart(self): - return self.start - - - def getTree(self): - return self.tree diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/treewizard.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/treewizard.py deleted file mode 100644 index d96ce780..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/antlr3/treewizard.py +++ /dev/null @@ -1,619 +0,0 @@ -""" @package antlr3.tree -@brief ANTLR3 runtime package, treewizard module - -A utility module to create ASTs at runtime. -See for an overview. Note that the API of the Python implementation is slightly different. - -""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2008 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# -# end[licence] - -from antlr3.constants import INVALID_TOKEN_TYPE -from antlr3.tokens import CommonToken -from antlr3.tree import CommonTree, CommonTreeAdaptor - - -def computeTokenTypes(tokenNames): - """ - Compute a dict that is an inverted index of - tokenNames (which maps int token types to names). - """ - - if tokenNames is None: - return {} - - return dict((name, type) for type, name in enumerate(tokenNames)) - - -## token types for pattern parser -EOF = -1 -BEGIN = 1 -END = 2 -ID = 3 -ARG = 4 -PERCENT = 5 -COLON = 6 -DOT = 7 - -class TreePatternLexer(object): - def __init__(self, pattern): - ## The tree pattern to lex like "(A B C)" - self.pattern = pattern - - ## Index into input string - self.p = -1 - - ## Current char - self.c = None - - ## How long is the pattern in char? - self.n = len(pattern) - - ## Set when token type is ID or ARG - self.sval = None - - self.error = False - - self.consume() - - - __idStartChar = frozenset( - 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_' - ) - __idChar = __idStartChar | frozenset('0123456789') - - def nextToken(self): - self.sval = "" - while self.c != EOF: - if self.c in (' ', '\n', '\r', '\t'): - self.consume() - continue - - if self.c in self.__idStartChar: - self.sval += self.c - self.consume() - while self.c in self.__idChar: - self.sval += self.c - self.consume() - - return ID - - if self.c == '(': - self.consume() - return BEGIN - - if self.c == ')': - self.consume() - return END - - if self.c == '%': - self.consume() - return PERCENT - - if self.c == ':': - self.consume() - return COLON - - if self.c == '.': - self.consume() - return DOT - - if self.c == '[': # grab [x] as a string, returning x - self.consume() - while self.c != ']': - if self.c == '\\': - self.consume() - if self.c != ']': - self.sval += '\\' - - self.sval += self.c - - else: - self.sval += self.c - - self.consume() - - self.consume() - return ARG - - self.consume() - self.error = True - return EOF - - return EOF - - - def consume(self): - self.p += 1 - if self.p >= self.n: - self.c = EOF - - else: - self.c = self.pattern[self.p] - - -class TreePatternParser(object): - def __init__(self, tokenizer, wizard, adaptor): - self.tokenizer = tokenizer - self.wizard = wizard - self.adaptor = adaptor - self.ttype = tokenizer.nextToken() # kickstart - - - def pattern(self): - if self.ttype == BEGIN: - return self.parseTree() - - elif self.ttype == ID: - node = self.parseNode() - if self.ttype == EOF: - return node - - return None # extra junk on end - - return None - - - def parseTree(self): - if self.ttype != BEGIN: - return None - - self.ttype = self.tokenizer.nextToken() - root = self.parseNode() - if root is None: - return None - - while self.ttype in (BEGIN, ID, PERCENT, DOT): - if self.ttype == BEGIN: - subtree = self.parseTree() - self.adaptor.addChild(root, subtree) - - else: - child = self.parseNode() - if child is None: - return None - - self.adaptor.addChild(root, child) - - if self.ttype != END: - return None - - self.ttype = self.tokenizer.nextToken() - return root - - - def parseNode(self): - # "%label:" prefix - label = None - - if self.ttype == PERCENT: - self.ttype = self.tokenizer.nextToken() - if self.ttype != ID: - return None - - label = self.tokenizer.sval - self.ttype = self.tokenizer.nextToken() - if self.ttype != COLON: - return None - - self.ttype = self.tokenizer.nextToken() # move to ID following colon - - # Wildcard? 
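        # (A lone '.' in a pattern, e.g. the %rhs:. in "(ASSIGN %lhs:ID %rhs:.)",
        # becomes a WildcardTreePattern, which _parse() below accepts in place
        # of any node type or text.)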
- if self.ttype == DOT: - self.ttype = self.tokenizer.nextToken() - wildcardPayload = CommonToken(0, ".") - node = WildcardTreePattern(wildcardPayload) - if label is not None: - node.label = label - return node - - # "ID" or "ID[arg]" - if self.ttype != ID: - return None - - tokenName = self.tokenizer.sval - self.ttype = self.tokenizer.nextToken() - - if tokenName == "nil": - return self.adaptor.nil() - - text = tokenName - # check for arg - arg = None - if self.ttype == ARG: - arg = self.tokenizer.sval - text = arg - self.ttype = self.tokenizer.nextToken() - - # create node - treeNodeType = self.wizard.getTokenType(tokenName) - if treeNodeType == INVALID_TOKEN_TYPE: - return None - - node = self.adaptor.createFromType(treeNodeType, text) - if label is not None and isinstance(node, TreePattern): - node.label = label - - if arg is not None and isinstance(node, TreePattern): - node.hasTextArg = True - - return node - - -class TreePattern(CommonTree): - """ - When using %label:TOKENNAME in a tree for parse(), we must - track the label. - """ - - def __init__(self, payload): - CommonTree.__init__(self, payload) - - self.label = None - self.hasTextArg = None - - - def toString(self): - if self.label is not None: - return '%' + self.label + ':' + CommonTree.toString(self) - - else: - return CommonTree.toString(self) - - -class WildcardTreePattern(TreePattern): - pass - - -class TreePatternTreeAdaptor(CommonTreeAdaptor): - """This adaptor creates TreePattern objects for use during scan()""" - - def createWithPayload(self, payload): - return TreePattern(payload) - - -class TreeWizard(object): - """ - Build and navigate trees with this object. Must know about the names - of tokens so you have to pass in a map or array of token names (from which - this class can build the map). I.e., Token DECL means nothing unless the - class can translate it to a token type. - - In order to create nodes and navigate, this class needs a TreeAdaptor. - - This class can build a token type -> node index for repeated use or for - iterating over the various nodes with a particular type. - - This class works in conjunction with the TreeAdaptor rather than moving - all this functionality into the adaptor. An adaptor helps build and - navigate trees using methods. This class helps you do it with string - patterns like "(A B C)". You can create a tree from that pattern or - match subtrees against it. - """ - - def __init__(self, adaptor=None, tokenNames=None, typeMap=None): - if adaptor is None: - self.adaptor = CommonTreeAdaptor() - - else: - self.adaptor = adaptor - - if typeMap is None: - self.tokenNameToTypeMap = computeTokenTypes(tokenNames) - - else: - if tokenNames is not None: - raise ValueError("Can't have both tokenNames and typeMap") - - self.tokenNameToTypeMap = typeMap - - - def getTokenType(self, tokenName): - """Using the map of token names to token types, return the type.""" - - try: - return self.tokenNameToTypeMap[tokenName] - except KeyError: - return INVALID_TOKEN_TYPE - - - def create(self, pattern): - """ - Create a tree or node from the indicated tree pattern that closely - follows ANTLR tree grammar tree element syntax: - - (root child1 ... child2). - - You can also just pass in a node: ID - - Any node can have a text argument: ID[foo] - (notice there are no quotes around foo--it's clear it's a string). - - nil is a special name meaning "give me a nil node". Useful for - making lists: (nil A B C) is a list of A B C. 
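        A minimal sketch (the token names below are hypothetical, and the
        first four entries stand in for ANTLR's reserved token types 0-3):

            from antlr3.treewizard import TreeWizard

            wiz = TreeWizard(tokenNames=[
                '<invalid>', '<EOR>', '<DOWN>', '<UP>',
                'ASSIGN', 'ID', 'INT'])
            t = wiz.create("(ASSIGN ID[x] INT[42])")
            print(t.toStringTree())   # prints: (ASSIGN x 42)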
- """ - - tokenizer = TreePatternLexer(pattern) - parser = TreePatternParser(tokenizer, self, self.adaptor) - return parser.pattern() - - - def index(self, tree): - """Walk the entire tree and make a node name to nodes mapping. - - For now, use recursion but later nonrecursive version may be - more efficient. Returns a dict int -> list where the list is - of your AST node type. The int is the token type of the node. - """ - - m = {} - self._index(tree, m) - return m - - - def _index(self, t, m): - """Do the work for index""" - - if t is None: - return - - ttype = self.adaptor.getType(t) - elements = m.get(ttype) - if elements is None: - m[ttype] = elements = [] - - elements.append(t) - for i in range(self.adaptor.getChildCount(t)): - child = self.adaptor.getChild(t, i) - self._index(child, m) - - - def find(self, tree, what): - """Return a list of matching token. - - what may either be an integer specifzing the token type to find or - a string with a pattern that must be matched. - - """ - - if isinstance(what, (int, long)): - return self._findTokenType(tree, what) - - elif isinstance(what, basestring): - return self._findPattern(tree, what) - - else: - raise TypeError("'what' must be string or integer") - - - def _findTokenType(self, t, ttype): - """Return a List of tree nodes with token type ttype""" - - nodes = [] - - def visitor(tree, parent, childIndex, labels): - nodes.append(tree) - - self.visit(t, ttype, visitor) - - return nodes - - - def _findPattern(self, t, pattern): - """Return a List of subtrees matching pattern.""" - - subtrees = [] - - # Create a TreePattern from the pattern - tokenizer = TreePatternLexer(pattern) - parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor()) - tpattern = parser.pattern() - - # don't allow invalid patterns - if (tpattern is None or tpattern.isNil() - or isinstance(tpattern, WildcardTreePattern)): - return None - - rootTokenType = tpattern.getType() - - def visitor(tree, parent, childIndex, label): - if self._parse(tree, tpattern, None): - subtrees.append(tree) - - self.visit(t, rootTokenType, visitor) - - return subtrees - - - def visit(self, tree, what, visitor): - """Visit every node in tree matching what, invoking the visitor. - - If what is a string, it is parsed as a pattern and only matching - subtrees will be visited. - The implementation uses the root node of the pattern in combination - with visit(t, ttype, visitor) so nil-rooted patterns are not allowed. - Patterns with wildcard roots are also not allowed. - - If what is an integer, it is used as a token type and visit will match - all nodes of that type (this is faster than the pattern match). - The labels arg of the visitor action method is never set (it's None) - since using a token type rather than a pattern doesn't let us set a - label. - """ - - if isinstance(what, (int, long)): - self._visitType(tree, None, 0, what, visitor) - - elif isinstance(what, basestring): - self._visitPattern(tree, what, visitor) - - else: - raise TypeError("'what' must be string or integer") - - - def _visitType(self, t, parent, childIndex, ttype, visitor): - """Do the recursive work for visit""" - - if t is None: - return - - if self.adaptor.getType(t) == ttype: - visitor(t, parent, childIndex, None) - - for i in range(self.adaptor.getChildCount(t)): - child = self.adaptor.getChild(t, i) - self._visitType(child, t, i, ttype, visitor) - - - def _visitPattern(self, tree, pattern, visitor): - """ - For all subtrees that match the pattern, execute the visit action. 
- """ - - # Create a TreePattern from the pattern - tokenizer = TreePatternLexer(pattern) - parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor()) - tpattern = parser.pattern() - - # don't allow invalid patterns - if (tpattern is None or tpattern.isNil() - or isinstance(tpattern, WildcardTreePattern)): - return - - rootTokenType = tpattern.getType() - - def rootvisitor(tree, parent, childIndex, labels): - labels = {} - if self._parse(tree, tpattern, labels): - visitor(tree, parent, childIndex, labels) - - self.visit(tree, rootTokenType, rootvisitor) - - - def parse(self, t, pattern, labels=None): - """ - Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels - on the various nodes and '.' (dot) as the node/subtree wildcard, - return true if the pattern matches and fill the labels Map with - the labels pointing at the appropriate nodes. Return false if - the pattern is malformed or the tree does not match. - - If a node specifies a text arg in pattern, then that must match - for that node in t. - """ - - tokenizer = TreePatternLexer(pattern) - parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor()) - tpattern = parser.pattern() - - return self._parse(t, tpattern, labels) - - - def _parse(self, t1, tpattern, labels): - """ - Do the work for parse. Check to see if the tpattern fits the - structure and token types in t1. Check text if the pattern has - text arguments on nodes. Fill labels map with pointers to nodes - in tree matched against nodes in pattern with labels. - """ - - # make sure both are non-null - if t1 is None or tpattern is None: - return False - - # check roots (wildcard matches anything) - if not isinstance(tpattern, WildcardTreePattern): - if self.adaptor.getType(t1) != tpattern.getType(): - return False - - # if pattern has text, check node text - if (tpattern.hasTextArg - and self.adaptor.getText(t1) != tpattern.getText()): - return False - - if tpattern.label is not None and labels is not None: - # map label in pattern to node in t1 - labels[tpattern.label] = t1 - - # check children - n1 = self.adaptor.getChildCount(t1) - n2 = tpattern.getChildCount() - if n1 != n2: - return False - - for i in range(n1): - child1 = self.adaptor.getChild(t1, i) - child2 = tpattern.getChild(i) - if not self._parse(child1, child2, labels): - return False - - return True - - - def equals(self, t1, t2, adaptor=None): - """ - Compare t1 and t2; return true if token types/text, structure match - exactly. - The trees are examined in their entirety so that (A B) does not match - (A B C) nor (A (B C)). 
- """ - - if adaptor is None: - adaptor = self.adaptor - - return self._equals(t1, t2, adaptor) - - - def _equals(self, t1, t2, adaptor): - # make sure both are non-null - if t1 is None or t2 is None: - return False - - # check roots - if adaptor.getType(t1) != adaptor.getType(t2): - return False - - if adaptor.getText(t1) != adaptor.getText(t2): - return False - - # check children - n1 = adaptor.getChildCount(t1) - n2 = adaptor.getChildCount(t2) - if n1 != n2: - return False - - for i in range(n1): - child1 = adaptor.getChild(t1, i) - child2 = adaptor.getChild(t2, i) - if not self._equals(child1, child2, adaptor): - return False - - return True diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/doxyfile b/thirdparty/antlr3-antlr-3.5/runtime/Python/doxyfile deleted file mode 100644 index ce356de1..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/doxyfile +++ /dev/null @@ -1,270 +0,0 @@ -# -*- mode: doxymacs -*- - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- -DOXYFILE_ENCODING = UTF-8 -PROJECT_NAME = "ANTLR Python API" -PROJECT_NUMBER = 3.3 -OUTPUT_DIRECTORY = api -CREATE_SUBDIRS = NO -OUTPUT_LANGUAGE = English -BRIEF_MEMBER_DESC = YES -REPEAT_BRIEF = YES -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the -ALWAYS_DETAILED_SEC = YES -INLINE_INHERITED_MEMB = NO -FULL_PATH_NAMES = YES -STRIP_FROM_PATH = build/doc/ -STRIP_FROM_INC_PATH = -SHORT_NAMES = NO -JAVADOC_AUTOBRIEF = NO -MULTILINE_CPP_IS_BRIEF = NO -DETAILS_AT_TOP = NO -INHERIT_DOCS = YES -SEPARATE_MEMBER_PAGES = NO -TAB_SIZE = 8 -ALIASES = -OPTIMIZE_OUTPUT_FOR_C = NO -OPTIMIZE_OUTPUT_JAVA = YES -BUILTIN_STL_SUPPORT = NO -CPP_CLI_SUPPORT = NO -DISTRIBUTE_GROUP_DOC = NO -SUBGROUPING = YES -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- -EXTRACT_ALL = YES -EXTRACT_PRIVATE = YES -EXTRACT_STATIC = YES -EXTRACT_LOCAL_CLASSES = YES -EXTRACT_LOCAL_METHODS = NO -HIDE_UNDOC_MEMBERS = NO -HIDE_UNDOC_CLASSES = NO -HIDE_FRIEND_COMPOUNDS = NO -HIDE_IN_BODY_DOCS = NO -INTERNAL_DOCS = NO -CASE_SENSE_NAMES = NO -HIDE_SCOPE_NAMES = NO -SHOW_INCLUDE_FILES = YES -INLINE_INFO = YES -SORT_MEMBER_DOCS = YES -SORT_BRIEF_DOCS = NO -SORT_BY_SCOPE_NAME = NO -GENERATE_TODOLIST = YES -GENERATE_TESTLIST = NO -GENERATE_BUGLIST = NO -GENERATE_DEPRECATEDLIST= NO -ENABLED_SECTIONS = -MAX_INITIALIZER_LINES = 30 -SHOW_USED_FILES = YES -SHOW_DIRECTORIES = NO -FILE_VERSION_FILTER = -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- -QUIET = NO -WARNINGS = YES -WARN_IF_UNDOCUMENTED = YES -WARN_IF_DOC_ERROR = YES -WARN_NO_PARAMDOC = NO -WARN_FORMAT = "$file:$line: $text" -WARN_LOGFILE = -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- -INPUT = build/doc -INPUT_ENCODING = UTF-8 -FILE_PATTERNS = *.c \ - *.cc \ - *.cxx \ - *.cpp \ - *.c++ \ - *.d \ - *.java \ - *.ii \ - *.ixx \ - *.ipp \ - *.i++ \ - *.inl \ - *.h \ - *.hh \ - *.hxx \ 
- *.hpp \ - *.h++ \ - *.idl \ - *.odl \ - *.cs \ - *.php \ - *.php3 \ - *.inc \ - *.m \ - *.mm \ - *.dox \ - *.py -RECURSIVE = YES -EXCLUDE = build/doc/antlr3/__init__.py -EXCLUDE_SYMLINKS = NO -EXCLUDE_PATTERNS = -EXCLUDE_SYMBOLS = dfa exceptions recognizers streams tokens constants -EXAMPLE_PATH = -EXAMPLE_PATTERNS = * -EXAMPLE_RECURSIVE = NO -IMAGE_PATH = -INPUT_FILTER = -FILTER_PATTERNS = -FILTER_SOURCE_FILES = NO -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- -SOURCE_BROWSER = YES -INLINE_SOURCES = NO -STRIP_CODE_COMMENTS = YES -REFERENCED_BY_RELATION = NO -REFERENCES_RELATION = NO -REFERENCES_LINK_SOURCE = YES -USE_HTAGS = NO -VERBATIM_HEADERS = YES -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- -ALPHABETICAL_INDEX = NO -COLS_IN_ALPHA_INDEX = 5 -IGNORE_PREFIX = -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- -GENERATE_HTML = YES -HTML_OUTPUT = . -HTML_FILE_EXTENSION = .html -HTML_HEADER = -HTML_FOOTER = -HTML_STYLESHEET = -HTML_ALIGN_MEMBERS = YES -GENERATE_HTMLHELP = NO -CHM_FILE = -HHC_LOCATION = -GENERATE_CHI = NO -BINARY_TOC = NO -TOC_EXPAND = NO -DISABLE_INDEX = NO -ENUM_VALUES_PER_LINE = 4 -GENERATE_TREEVIEW = NO -TREEVIEW_WIDTH = 250 -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- -GENERATE_LATEX = NO -LATEX_OUTPUT = latex -LATEX_CMD_NAME = latex -MAKEINDEX_CMD_NAME = makeindex -COMPACT_LATEX = NO -PAPER_TYPE = a4wide -EXTRA_PACKAGES = -LATEX_HEADER = -PDF_HYPERLINKS = NO -USE_PDFLATEX = YES -LATEX_BATCHMODE = NO -LATEX_HIDE_INDICES = NO -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- -GENERATE_RTF = NO -RTF_OUTPUT = rtf -COMPACT_RTF = NO -RTF_HYPERLINKS = NO -RTF_STYLESHEET_FILE = -RTF_EXTENSIONS_FILE = -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- -GENERATE_MAN = NO -MAN_OUTPUT = man -MAN_EXTENSION = .3 -MAN_LINKS = NO -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- -GENERATE_XML = NO -XML_OUTPUT = xml -XML_SCHEMA = -XML_DTD = -XML_PROGRAMLISTING = YES -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- -GENERATE_AUTOGEN_DEF = NO -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- -GENERATE_PERLMOD = NO -PERLMOD_LATEX = NO 
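# (Note: the "doxypy integration" block at the end of this file overrides
#  FILTER_SOURCE_FILES and INPUT_FILTER so that every source file is piped
#  through doxypy.py, which rewrites Python docstrings into doxygen comments.)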
-PERLMOD_PRETTY = YES -PERLMOD_MAKEVAR_PREFIX = -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- -ENABLE_PREPROCESSING = YES -MACRO_EXPANSION = YES -EXPAND_ONLY_PREDEF = NO -SEARCH_INCLUDES = YES -INCLUDE_PATH = -INCLUDE_FILE_PATTERNS = -PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS -EXPAND_AS_DEFINED = -SKIP_FUNCTION_MACROS = YES -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- -TAGFILES = -GENERATE_TAGFILE = -ALLEXTERNALS = NO -EXTERNAL_GROUPS = YES -PERL_PATH = /usr/bin/perl -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- -CLASS_DIAGRAMS = NO -MSCGEN_PATH = -HIDE_UNDOC_RELATIONS = YES -HAVE_DOT = YES -CLASS_GRAPH = YES -COLLABORATION_GRAPH = YES -GROUP_GRAPHS = YES -UML_LOOK = NO -TEMPLATE_RELATIONS = NO -INCLUDE_GRAPH = YES -INCLUDED_BY_GRAPH = YES -CALL_GRAPH = NO -CALLER_GRAPH = NO -GRAPHICAL_HIERARCHY = YES -DIRECTORY_GRAPH = YES -DOT_IMAGE_FORMAT = png -DOT_PATH = -DOTFILE_DIRS = -DOT_GRAPH_MAX_NODES = 50 -DOT_TRANSPARENT = NO -DOT_MULTI_TARGETS = NO -GENERATE_LEGEND = YES -DOT_CLEANUP = YES -#--------------------------------------------------------------------------- -# Configuration::additions related to the search engine -#--------------------------------------------------------------------------- -SEARCHENGINE = NO - - -#--------------------------------------------------------------------------- -# doxypy integration -#--------------------------------------------------------------------------- -FILTER_SOURCE_FILES = YES -INPUT_FILTER = "python doxypy.py" diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/ez_setup.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/ez_setup.py deleted file mode 100644 index 38c09c62..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/ez_setup.py +++ /dev/null @@ -1,228 +0,0 @@ -#!python -"""Bootstrap setuptools installation - -If you want to use setuptools in your package's setup.py, just include this -file in the same directory with it, and add this to the top of your setup.py:: - - from ez_setup import use_setuptools - use_setuptools() - -If you want to require a specific version of setuptools, set a download -mirror, or use an alternate download directory, you can do so by supplying -the appropriate options to ``use_setuptools()``. - -This file can also be run as a script to install or upgrade setuptools. 
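For example (the flag below is handled by main() at the bottom of this file;
the egg paths are illustrative):

    python ez_setup.py                         # install or upgrade setuptools
    python ez_setup.py --md5update dist/*.egg  # refresh the built-in md5 table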
-""" -import sys -DEFAULT_VERSION = "0.6c5" -DEFAULT_URL = "http://cheeseshop.python.org/packages/%s/s/setuptools/" % sys.version[:3] - -md5_data = { - 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca', - 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb', - 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b', - 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a', - 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618', - 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac', - 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5', - 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4', - 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c', - 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b', - 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27', - 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277', - 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa', - 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e', - 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e', - 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f', - 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2', - 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc', - 'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167', - 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64', - 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d', -} - -import sys, os - -def _validate_md5(egg_name, data): - if egg_name in md5_data: - from md5 import md5 - digest = md5(data).hexdigest() - if digest != md5_data[egg_name]: - print >>sys.stderr, ( - "md5 validation of %s failed! (Possible download problem?)" - % egg_name - ) - sys.exit(2) - return data - - -def use_setuptools( - version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, - download_delay=15 -): - """Automatically find/download setuptools and make it available on sys.path - - `version` should be a valid setuptools version number that is available - as an egg for download under the `download_base` URL (which should end with - a '/'). `to_dir` is the directory where setuptools will be downloaded, if - it is not already available. If `download_delay` is specified, it should - be the number of seconds that will be paused before initiating a download, - should one be required. If an older version of setuptools is installed, - this routine will print a message to ``sys.stderr`` and raise SystemExit in - an attempt to abort the calling script. - """ - try: - import setuptools - if setuptools.__version__ == '0.0.1': - print >>sys.stderr, ( - "You have an obsolete version of setuptools installed. Please\n" - "remove it from your system entirely before rerunning this script." - ) - sys.exit(2) - except ImportError: - egg = download_setuptools(version, download_base, to_dir, download_delay) - sys.path.insert(0, egg) - import setuptools; setuptools.bootstrap_install_from = egg - - import pkg_resources - try: - pkg_resources.require("setuptools>="+version) - - except pkg_resources.VersionConflict, e: - # XXX could we install in a subprocess here? - print >>sys.stderr, ( - "The required version of setuptools (>=%s) is not available, and\n" - "can't be installed while this script is running. 
Please install\n" - " a more recent version first.\n\n(Currently using %r)" - ) % (version, e.args[0]) - sys.exit(2) - -def download_setuptools( - version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, - delay = 15 -): - """Download setuptools from a specified location and return its filename - - `version` should be a valid setuptools version number that is available - as an egg for download under the `download_base` URL (which should end - with a '/'). `to_dir` is the directory where the egg will be downloaded. - `delay` is the number of seconds to pause before an actual download attempt. - """ - import urllib2, shutil - egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3]) - url = download_base + egg_name - saveto = os.path.join(to_dir, egg_name) - src = dst = None - if not os.path.exists(saveto): # Avoid repeated downloads - try: - from distutils import log - if delay: - log.warn(""" ---------------------------------------------------------------------------- -This script requires setuptools version %s to run (even to display -help). I will attempt to download it for you (from -%s), but -you may need to enable firewall access for this script first. -I will start the download in %d seconds. - -(Note: if this machine does not have network access, please obtain the file - - %s - -and place it in this directory before rerunning this script.) ----------------------------------------------------------------------------""", - version, download_base, delay, url - ); from time import sleep; sleep(delay) - log.warn("Downloading %s", url) - src = urllib2.urlopen(url) - # Read/write all in one block, so we don't create a corrupt file - # if the download is interrupted. - data = _validate_md5(egg_name, src.read()) - dst = open(saveto,"wb"); dst.write(data) - finally: - if src: src.close() - if dst: dst.close() - return os.path.realpath(saveto) - -def main(argv, version=DEFAULT_VERSION): - """Install or upgrade setuptools and EasyInstall""" - - try: - import setuptools - except ImportError: - egg = None - try: - egg = download_setuptools(version, delay=0) - sys.path.insert(0,egg) - from setuptools.command.easy_install import main - return main(list(argv)+[egg]) # we're done here - finally: - if egg and os.path.exists(egg): - os.unlink(egg) - else: - if setuptools.__version__ == '0.0.1': - # tell the user to uninstall obsolete version - use_setuptools(version) - - req = "setuptools>="+version - import pkg_resources - try: - pkg_resources.require(req) - except pkg_resources.VersionConflict: - try: - from setuptools.command.easy_install import main - except ImportError: - from easy_install import main - main(list(argv)+[download_setuptools(delay=0)]) - sys.exit(0) # try to force an exit - else: - if argv: - from setuptools.command.easy_install import main - main(argv) - else: - print "Setuptools version",version,"or greater has been installed." 
- print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)' - - - -def update_md5(filenames): - """Update our built-in md5 registry""" - - import re - from md5 import md5 - - for name in filenames: - base = os.path.basename(name) - f = open(name,'rb') - md5_data[base] = md5(f.read()).hexdigest() - f.close() - - data = [" %r: %r,\n" % it for it in md5_data.items()] - data.sort() - repl = "".join(data) - - import inspect - srcfile = inspect.getsourcefile(sys.modules[__name__]) - f = open(srcfile, 'rb'); src = f.read(); f.close() - - match = re.search("\nmd5_data = {\n([^}]+)}", src) - if not match: - print >>sys.stderr, "Internal error!" - sys.exit(2) - - src = src[:match.start(1)] + repl + src[match.end(1):] - f = open(srcfile,'w') - f.write(src) - f.close() - - -if __name__=='__main__': - if len(sys.argv)>2 and sys.argv[1]=='--md5update': - update_md5(sys.argv[2:]) - else: - main(sys.argv[1:]) - - - - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/hudson-build.sh b/thirdparty/antlr3-antlr-3.5/runtime/Python/hudson-build.sh deleted file mode 100755 index fbe6f842..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/hudson-build.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash - -ANTLR_JOB=${1:-ANTLR_Tool} -ST_VERSION=3.1 -ANTLR2_VERSION=2.7.7 - -# find the antlr.jar from the upstream project -JAR=$(ls $WORKSPACE/../../$ANTLR_JOB/lastSuccessful/org.antlr\$antlr/archive/org.antlr/antlr/*/antlr-*-jar-with-dependencies.jar) -echo "antlr.jar=$JAR" - -if [ ! -f "$JAR" ]; then - echo "Could not find antlr.jar" - exit 1 -fi - - -echo "************************************************************************" -echo "Setting up dependencies" -echo - -rm -fr $WORKSPACE/tmp -mkdir -p $WORKSPACE/tmp -cd $WORKSPACE - -# stringtemplate3 -if [ ! -f stringtemplate3-$ST_VERSION.tar.gz ]; then - wget http://pypi.python.org/packages/source/s/stringtemplate3/stringtemplate3-$ST_VERSION.tar.gz -fi -(cd tmp; tar xzf ../stringtemplate3-$ST_VERSION.tar.gz) -(cd tmp/stringtemplate3-$ST_VERSION; python setup.py install --install-lib=$WORKSPACE) - -# antlr2 -if [ ! -f antlr-$ANTLR2_VERSION.tar.gz ]; then - wget http://www.antlr2.org/download/antlr-$ANTLR2_VERSION.tar.gz -fi -(cd tmp; tar xzf ../antlr-$ANTLR2_VERSION.tar.gz) -(cd tmp/antlr-$ANTLR2_VERSION/lib/python; python setup.py install --install-lib=$WORKSPACE) - - -export CLASSPATH=$JAR - -echo "************************************************************************" -echo "Running the testsuite" -echo - -cd $WORKSPACE -rm -fr testout/ -mkdir -p testout/ -python setup.py unittest --xml-output=testout/ -python setup.py functest --xml-output=testout/ --antlr-jar="$JAR" - - -echo "************************************************************************" -echo "Running pylint" -echo - -cd $WORKSPACE -pylint --rcfile=pylintrc --output-format=parseable --include-ids=yes antlr3 | tee pylint-report.txt - - -echo "************************************************************************" -echo "Building dist files" -echo - -cd $WORKSPACE -rm -f dist/* -cp -f $JAR dist/ -python setup.py sdist --formats=gztar,zip -for PYTHON in /usr/bin/python2.?; do - $PYTHON setup.py bdist_egg -done diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/mkdoxy.sh b/thirdparty/antlr3-antlr-3.5/runtime/Python/mkdoxy.sh deleted file mode 100755 index 36fffff3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/mkdoxy.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -if [ -e doxygen.sh ]; then - . 
doxygen.sh -fi - -rm -fr build/doc -mkdir -p build/doc/antlr3 - -for f in __init__ exceptions constants dfa tokens streams recognizers; do - sed -e '/begin\[licence\]/,/end\[licence\]/d' antlr3/$f.py \ - >>build/doc/antlr3.py -done - -touch build/doc/antlr3/__init__.py - -cp -f antlr3/tree.py build/doc/antlr3 -cp -f antlr3/treewizard.py build/doc/antlr3 - -doxygen doxyfile diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/pylintrc b/thirdparty/antlr3-antlr-3.5/runtime/Python/pylintrc deleted file mode 100644 index 35072577..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/pylintrc +++ /dev/null @@ -1,321 +0,0 @@ -# lint Python modules using external checkers. -# -# This is the main checker controling the other ones and the reports -# generation. It is itself both a raw checker and an astng checker in order -# to: -# * handle message activation / deactivation at the module level -# * handle some basic but necessary stats'data (number of classes, methods...) -# -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Profiled execution. -profile=no - -# Add to the black list. It should be a base name, not a -# path. You may set this option multiple times. -ignore=CVS - -# Pickle collected data for later comparisons. -persistent=yes - -# Set the cache size for astng objects. -cache-size=500 - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - - -[COMMANDS] - -# Display a help message for the given message id and exit. The value may be a -# comma separated list of message ids. -#help-msg= - - -[MESSAGES CONTROL] - -# Enable only checker(s) with the given id(s). This option conflict with the -# disable-checker option -#enable-checker= - -# Enable all checker(s) except those with the given id(s). This option conflict -# with the disable-checker option -#disable-checker= - -# Enable all messages in the listed categories. -#enable-msg-cat= - -# Disable all messages in the listed categories. -#disable-msg-cat= - -# Enable the message(s) with the given id(s). -#enable-msg= - -# Disable the message(s) with the given id(s). -# W0622: Redefining built-in '...' -# C0103: Invalid name -# R0904: Too many public methods -# R0201: Method could be a function -# C0302: Too many lines in module -# R0902: Too many instance attributes -# R0913: Too many arguments -# R0912: Too many branches -# R0903: To few public methods -# C0111: Missing docstring -# W0403: Relative import -# W0401: Wildcard import -# W0142: */** magic -disable-msg=W0622, C0103, R0904, R0201, C0302, R0902, R0913, R0912, R0903, C0111, W0403, W0401, W0142 - - -[REPORTS] - -# set the output format. Available formats are text, parseable, colorized and -# html -output-format=text - -# Include message's id in output -include-ids=yes - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells wether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note).You have access to the variables errors warning, statement which -# respectivly contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (R0004). 
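# Worked example: with the expression below, a run that reports error=1,
# warning=5, refactor=2 and convention=8 across statement=200 scores
# 10.0 - ((5*1 + 5 + 2 + 8) / 200.0) * 10 = 10.0 - 1.0 = 9.0.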
-evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Add a comment according to your evaluation note. This is used by the global -# evaluation report (R0004). -comment=no - -# Enable the report(s) with the given id(s). -#enable-report= - -# Disable the report(s) with the given id(s). -#disable-report= - - -# try to find bugs in the code using type inference -# -[TYPECHECK] - -# Tells wether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# When zope mode is activated, consider the acquired-members option to ignore -# access to some undefined attributes. -zope=no - -# List of members which are usually get through zope's acquisition mecanism and -# so shouldn't trigger E0201 when accessed (need zope=yes to be considered). -acquired-members=REQUEST,acl_users,aq_parent - - -# checks for -# * unused variables / imports -# * undefined variables -# * redefinition of variable from builtins or from an outer scope -# * use of variable before assigment -# -[VARIABLES] - -# Tells wether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching names used for dummy variables (i.e. not used). -dummy-variables-rgx=_|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - - -# checks for : -# * doc strings -# * modules / classes / functions / methods / arguments / variables name -# * number of arguments, local variables, branchs, returns and statements in -# functions, methods -# * required module attributes -# * dangerous default values as arguments -# * redefinition of function / method / class -# * uses of the global statement -# -[BASIC] - -# Required attributes for module, separated by a comma -required-attributes= - -# Regular expression which should only match functions or classes name which do -# not require a docstring -no-docstring-rgx=__.*__ - -# Regular expression which should only match correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression which should only match correct module level names -const-rgx=(([A-Z_][A-Z1-9_]*)|(__.*__))$ - -# Regular expression which should only match correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression which should only match correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct instance attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct list comprehension / -# generator expression variable names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,apply,input - - -# checks for sign of poor/misdesign: -# * number of methods, attributes, local 
variables... -# * size, complexity of functions, methods -# -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branchs=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - - -# checks for : -# * methods without self as first argument -# * overridden methods signature -# * access only to existant members via self -# * attributes not defined in the __init__ method -# * supported interfaces implementation -# * unreachable code -# -[CLASSES] - -# List of interface methods to ignore, separated by a comma. This is used for -# instance to not check methods defines in Zope's Interface base class. -ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - - -# checks for -# * external modules dependencies -# * relative / wildcard imports -# * cyclic imports -# * uses of deprecated modules -# -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,string,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report R0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report R0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report R0402 must -# not be disabled) -int-import-graph= - - -# checks for similarities and duplicated code. This computation may be -# memory / CPU intensive, so you should disable it if you experiments some -# problems. -# -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - - -# checks for : -# * unauthorized constructions -# * strict indentation -# * line length -# * use of <> instead of != -# -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=80 - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - - -# checks for: -# * warning notes in the code like FIXME, XXX -# * PEP 263: source code with non ascii character but no encoding declaration -# -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. 
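# (For example, a project that also flags HACK markers would use:
#  notes=FIXME,XXX,TODO,HACK)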
-notes=FIXME,XXX,TODO diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/setup.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/setup.py deleted file mode 100644 index 17ef6d47..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/setup.py +++ /dev/null @@ -1,300 +0,0 @@ -# bootstrapping setuptools -import ez_setup -ez_setup.use_setuptools() - -import os -import sys -import textwrap -from distutils.errors import * -from distutils.command.clean import clean as _clean -from distutils.cmd import Command -from setuptools import setup -from distutils import log - -from distutils.core import setup - - -class clean(_clean): - """Also cleanup local temp files.""" - - def run(self): - _clean.run(self) - - import fnmatch - - # kill temporary files - patterns = [ - # generic tempfiles - '*~', '*.bak', '*.pyc', - - # tempfiles generated by ANTLR runs - 't[0-9]*Lexer.py', 't[0-9]*Parser.py', - '*.tokens', '*__.g', - ] - - for path in ('antlr3', 'unittests', 'tests'): - path = os.path.join(os.path.dirname(__file__), path) - if os.path.isdir(path): - for root, dirs, files in os.walk(path, topdown=True): - graveyard = [] - for pat in patterns: - graveyard.extend(fnmatch.filter(files, pat)) - - for name in graveyard: - filePath = os.path.join(root, name) - - try: - log.info("removing '%s'", filePath) - os.unlink(filePath) - except OSError, exc: - log.warn( - "Failed to delete '%s': %s", - filePath, exc - ) - - -class TestError(DistutilsError): - pass - - -# grml.. the class name appears in the --help output: -# ... -# Options for 'CmdUnitTest' command -# ... -# so I have to use a rather ugly name... -class unittest(Command): - """Run unit tests for package""" - - description = "run unit tests for package" - - user_options = [ - ('xml-output=', None, - "Directory for JUnit compatible XML files."), - ] - boolean_options = [] - - def initialize_options(self): - self.xml_output = None - - def finalize_options(self): - pass - - def run(self): - testDir = os.path.join(os.path.dirname(__file__), 'unittests') - if not os.path.isdir(testDir): - raise DistutilsFileError( - "There is not 'unittests' directory. 
Did you fetch the " - "development version?", - ) - - import glob - import imp - import unittest - import traceback - import StringIO - - suite = unittest.TestSuite() - loadFailures = [] - - # collect tests from all unittests/test*.py files - testFiles = [] - for testPath in glob.glob(os.path.join(testDir, 'test*.py')): - testFiles.append(testPath) - - testFiles.sort() - for testPath in testFiles: - testID = os.path.basename(testPath)[:-3] - - try: - modFile, modPathname, modDescription \ - = imp.find_module(testID, [testDir]) - - testMod = imp.load_module( - testID, modFile, modPathname, modDescription - ) - - suite.addTests( - unittest.defaultTestLoader.loadTestsFromModule(testMod) - ) - - except Exception: - buf = StringIO.StringIO() - traceback.print_exc(file=buf) - - loadFailures.append( - (os.path.basename(testPath), buf.getvalue()) - ) - - if self.xml_output: - import xmlrunner - runner = xmlrunner.XMLTestRunner( - stream=open(os.path.join(self.xml_output, 'unittest.xml'), 'w')) - else: - runner = unittest.TextTestRunner(verbosity=2) - result = runner.run(suite) - - for testName, error in loadFailures: - sys.stderr.write('\n' + '='*70 + '\n') - sys.stderr.write( - "Failed to load test module %s\n" % testName - ) - sys.stderr.write(error) - sys.stderr.write('\n') - - if not result.wasSuccessful() or loadFailures: - raise TestError( - "Unit test suite failed!", - ) - - -class functest(Command): - """Run functional tests for package""" - - description = "run functional tests for package" - - user_options = [ - ('testcase=', None, - "testcase to run [default: run all]"), - ('antlr-version=', None, - "ANTLR version to use [default: HEAD (in ../../build)]"), - ('antlr-jar=', None, - "Explicit path to an antlr jar (overrides --antlr-version)"), - ('xml-output=', None, - "Directory for JUnit compatible XML files."), - ] - - boolean_options = [] - - def initialize_options(self): - self.testcase = None - self.antlr_version = 'HEAD' - self.antlr_jar = None - self.xml_output = None - - def finalize_options(self): - pass - - def run(self): - import glob - import imp - import unittest - import traceback - import StringIO - - testDir = os.path.join(os.path.dirname(__file__), 'tests') - if not os.path.isdir(testDir): - raise DistutilsFileError( - "There is not 'tests' directory. 
Did you fetch the " - "development version?", - ) - - # make sure, relative imports from testcases work - sys.path.insert(0, testDir) - - rootDir = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..')) - - if self.antlr_jar is not None: - classpath = [self.antlr_jar] - elif self.antlr_version == 'HEAD': - classpath = [ - os.path.join(rootDir, 'tool', 'target', 'classes'), - os.path.join(rootDir, 'runtime', 'Java', 'target', 'classes') - ] - else: - classpath = [ - os.path.join(rootDir, 'archive', - 'antlr-%s.jar' % self.antlr_version) - ] - - classpath.extend([ - os.path.join(rootDir, 'lib', 'antlr-2.7.7.jar'), - os.path.join(rootDir, 'lib', 'stringtemplate-3.2.1.jar'), - os.path.join(rootDir, 'lib', 'ST-4.0.2.jar'), - os.path.join(rootDir, 'lib', 'junit-4.2.jar') - ]) - os.environ['CLASSPATH'] = ':'.join(classpath) - - os.environ['ANTLRVERSION'] = self.antlr_version - - suite = unittest.TestSuite() - loadFailures = [] - - # collect tests from all tests/t*.py files - testFiles = [] - test_glob = 't[0-9][0-9][0-9]*.py' - for testPath in glob.glob(os.path.join(testDir, test_glob)): - if testPath.endswith('Lexer.py') or testPath.endswith('Parser.py'): - continue - - # if a single testcase has been selected, filter out all other - # tests - if (self.testcase is not None - and not os.path.basename(testPath)[:-3].startswith(self.testcase)): - continue - - testFiles.append(testPath) - - testFiles.sort() - for testPath in testFiles: - testID = os.path.basename(testPath)[:-3] - - try: - modFile, modPathname, modDescription \ - = imp.find_module(testID, [testDir]) - - testMod = imp.load_module( - testID, modFile, modPathname, modDescription) - - suite.addTests( - unittest.defaultTestLoader.loadTestsFromModule(testMod)) - - except Exception: - buf = StringIO.StringIO() - traceback.print_exc(file=buf) - - loadFailures.append( - (os.path.basename(testPath), buf.getvalue())) - - if self.xml_output: - import xmlrunner - runner = xmlrunner.XMLTestRunner( - stream=open(os.path.join(self.xml_output, 'functest.xml'), 'w')) - else: - runner = unittest.TextTestRunner(verbosity=2) - - result = runner.run(suite) - - for testName, error in loadFailures: - sys.stderr.write('\n' + '='*70 + '\n') - sys.stderr.write( - "Failed to load test module %s\n" % testName - ) - sys.stderr.write(error) - sys.stderr.write('\n') - - if not result.wasSuccessful() or loadFailures: - raise TestError( - "Functional test suite failed!", - ) - - -setup(name='antlr_python_runtime', - version='3.4', - packages=['antlr3'], - - author="Benjamin Niemann", - author_email="pink@odahoda.de", - url="http://www.antlr.org/", - download_url="http://www.antlr.org/download.html", - license="BSD", - description="Runtime package for ANTLR3", - long_description=textwrap.dedent('''\ - This is the runtime package for ANTLR3, which is required to use parsers - generated by ANTLR3. 
- '''), - cmdclass={'unittest': unittest, - 'functest': functest, - 'clean': clean - }, - ) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t001lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t001lexer.g deleted file mode 100644 index f92b958f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t001lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t001lexer; -options { - language = Python; -} - -ZERO: '0'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t001lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t001lexer.py deleted file mode 100644 index 32282351..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t001lexer.py +++ /dev/null @@ -1,57 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t001lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! - raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('0') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.failUnlessEqual(token.type, self.lexerModule.ZERO) - - token = lexer.nextToken() - self.failUnlessEqual(token.type, self.lexerModule.EOF) - - - def testIteratorInterface(self): - stream = antlr3.StringStream('0') - lexer = self.getLexer(stream) - - types = [token.type for token in lexer] - - self.failUnlessEqual(types, [self.lexerModule.ZERO]) - - - def testMalformedInput(self): - stream = antlr3.StringStream('1') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.MismatchedTokenException, exc: - self.failUnlessEqual(exc.expecting, '0') - self.failUnlessEqual(exc.unexpectedType, '1') - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t002lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t002lexer.g deleted file mode 100644 index 53b67a93..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t002lexer.g +++ /dev/null @@ -1,7 +0,0 @@ -lexer grammar t002lexer; -options { - language = Python; -} - -ZERO: '0'; -ONE: '1'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t002lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t002lexer.py deleted file mode 100644 index c2c03ba0..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t002lexer.py +++ /dev/null @@ -1,50 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t002lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
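                # (Re-raising here makes every lexing error surface as the
                # RecognitionException itself, which the testMalformedInput
                # case below catches and inspects.)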
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('01') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.failUnlessEqual(token.type, self.lexerModule.ZERO) - - token = lexer.nextToken() - self.failUnlessEqual(token.type, self.lexerModule.ONE) - - token = lexer.nextToken() - self.failUnlessEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('2') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.NoViableAltException, exc: - self.failUnlessEqual(exc.unexpectedType, '2') - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t003lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t003lexer.g deleted file mode 100644 index 0e85e114..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t003lexer.g +++ /dev/null @@ -1,8 +0,0 @@ -lexer grammar t003lexer; -options { - language = Python; -} - -ZERO: '0'; -ONE: '1'; -FOOZE: 'fooze'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t003lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t003lexer.py deleted file mode 100644 index 3a329559..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t003lexer.py +++ /dev/null @@ -1,53 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t003lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! - raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('0fooze1') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.failUnlessEqual(token.type, self.lexerModule.ZERO) - - token = lexer.nextToken() - self.failUnlessEqual(token.type, self.lexerModule.FOOZE) - - token = lexer.nextToken() - self.failUnlessEqual(token.type, self.lexerModule.ONE) - - token = lexer.nextToken() - self.failUnlessEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('2') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.NoViableAltException, exc: - self.failUnlessEqual(exc.unexpectedType, '2') - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t004lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t004lexer.g deleted file mode 100644 index c39d10d6..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t004lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t004lexer; -options { - language = Python; -} - -FOO: 'f' 'o'*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t004lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t004lexer.py deleted file mode 100644 index 52b444c5..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t004lexer.py +++ /dev/null @@ -1,70 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t004lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('ffofoofooo') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 0, token.start - assert token.stop == 0, token.stop - assert token.text == 'f', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 1, token.start - assert token.stop == 2, token.stop - assert token.text == 'fo', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 3, token.start - assert token.stop == 5, token.stop - assert token.text == 'foo', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 6, token.start - assert token.stop == 9, token.stop - assert token.text == 'fooo', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.EOF - - - def testMalformedInput(self): - stream = antlr3.StringStream('2') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.MismatchedTokenException, exc: - self.failUnlessEqual(exc.expecting, 'f') - self.failUnlessEqual(exc.unexpectedType, '2') - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t005lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t005lexer.g deleted file mode 100644 index f9cc681b..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t005lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t005lexer; -options { - language = Python; -} - -FOO: 'f' 'o'+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t005lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t005lexer.py deleted file mode 100644 index 667083e8..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t005lexer.py +++ /dev/null @@ -1,75 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t005lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('fofoofooo') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 0, token.start - assert token.stop == 1, token.stop - assert token.text == 'fo', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 2, token.start - assert token.stop == 4, token.stop - assert token.text == 'foo', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 5, token.start - assert token.stop == 8, token.stop - assert token.text == 'fooo', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.EOF - - - def testMalformedInput1(self): - stream = antlr3.StringStream('2') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - raise AssertionError - - except antlr3.MismatchedTokenException, exc: - assert exc.expecting == 'f', repr(exc.expecting) - assert exc.unexpectedType == '2', repr(exc.unexpectedType) - - - def testMalformedInput2(self): - stream = antlr3.StringStream('f') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - raise AssertionError - - except antlr3.EarlyExitException, exc: - assert exc.unexpectedType == antlr3.EOF, repr(exc.unexpectedType) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t006lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t006lexer.g deleted file mode 100644 index ad93cb48..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t006lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t006lexer; -options { - language = Python; -} - -FOO: 'f' ('o' | 'a')*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t006lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t006lexer.py deleted file mode 100644 index a4f845bd..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t006lexer.py +++ /dev/null @@ -1,61 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t006lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('fofaaooa') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 0, token.start - assert token.stop == 1, token.stop - assert token.text == 'fo', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 2, token.start - assert token.stop == 7, token.stop - assert token.text == 'faaooa', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.EOF - - - def testMalformedInput(self): - stream = antlr3.StringStream('fofoaooaoa2') - lexer = self.getLexer(stream) - - lexer.nextToken() - lexer.nextToken() - try: - token = lexer.nextToken() - raise AssertionError, token - - except antlr3.MismatchedTokenException, exc: - assert exc.expecting == 'f', repr(exc.expecting) - assert exc.unexpectedType == '2', repr(exc.unexpectedType) - assert exc.charPositionInLine == 10, repr(exc.charPositionInLine) - assert exc.line == 1, repr(exc.line) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t007lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t007lexer.g deleted file mode 100644 index b5651d59..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t007lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t007lexer; -options { - language = Python; -} - -FOO: 'f' ('o' | 'a' 'b'+)*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t007lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t007lexer.py deleted file mode 100644 index 440657b3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t007lexer.py +++ /dev/null @@ -1,59 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t007lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('fofababbooabb') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 0, token.start - assert token.stop == 1, token.stop - assert token.text == 'fo', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 2, token.start - assert token.stop == 12, token.stop - assert token.text == 'fababbooabb', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.EOF - - - def testMalformedInput(self): - stream = antlr3.StringStream('foaboao') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - raise AssertionError, token - - except antlr3.EarlyExitException, exc: - assert exc.unexpectedType == 'o', repr(exc.unexpectedType) - assert exc.charPositionInLine == 6, repr(exc.charPositionInLine) - assert exc.line == 1, repr(exc.line) - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t008lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t008lexer.g deleted file mode 100644 index 59498665..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t008lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t008lexer; -options { - language = Python; -} - -FOO: 'f' 'a'?; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t008lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t008lexer.py deleted file mode 100644 index f62c1489..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t008lexer.py +++ /dev/null @@ -1,66 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t008lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('ffaf') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 0, token.start - assert token.stop == 0, token.stop - assert token.text == 'f', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 1, token.start - assert token.stop == 2, token.stop - assert token.text == 'fa', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.FOO - assert token.start == 3, token.start - assert token.stop == 3, token.stop - assert token.text == 'f', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.EOF - - - def testMalformedInput(self): - stream = antlr3.StringStream('fafb') - lexer = self.getLexer(stream) - - lexer.nextToken() - lexer.nextToken() - try: - token = lexer.nextToken() - raise AssertionError, token - - except antlr3.MismatchedTokenException, exc: - assert exc.unexpectedType == 'b', repr(exc.unexpectedType) - assert exc.charPositionInLine == 3, repr(exc.charPositionInLine) - assert exc.line == 1, repr(exc.line) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t009lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t009lexer.g deleted file mode 100644 index 61269086..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t009lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t009lexer; -options { - language = Python; -} - -DIGIT: '0' .. '9'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t009lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t009lexer.py deleted file mode 100644 index c32cbbfb..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t009lexer.py +++ /dev/null @@ -1,67 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t009lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('085') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - assert token.type == self.lexerModule.DIGIT - assert token.start == 0, token.start - assert token.stop == 0, token.stop - assert token.text == '0', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.DIGIT - assert token.start == 1, token.start - assert token.stop == 1, token.stop - assert token.text == '8', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.DIGIT - assert token.start == 2, token.start - assert token.stop == 2, token.stop - assert token.text == '5', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.EOF - - - def testMalformedInput(self): - stream = antlr3.StringStream('2a') - lexer = self.getLexer(stream) - - lexer.nextToken() - try: - token = lexer.nextToken() - raise AssertionError, token - - except antlr3.MismatchedSetException, exc: - # TODO: This should provide more useful information - assert exc.expecting is None - assert exc.unexpectedType == 'a', repr(exc.unexpectedType) - assert exc.charPositionInLine == 1, repr(exc.charPositionInLine) - assert exc.line == 1, repr(exc.line) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t010lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t010lexer.g deleted file mode 100644 index a93636cb..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t010lexer.g +++ /dev/null @@ -1,7 +0,0 @@ -lexer grammar t010lexer; -options { - language = Python; -} - -IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; -WS: (' ' | '\n')+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t010lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t010lexer.py deleted file mode 100644 index 7cd318ce..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t010lexer.py +++ /dev/null @@ -1,78 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t010lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('foobar _Ab98 \n A12sdf') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - assert token.type == self.lexerModule.IDENTIFIER - assert token.start == 0, token.start - assert token.stop == 5, token.stop - assert token.text == 'foobar', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.WS - assert token.start == 6, token.start - assert token.stop == 6, token.stop - assert token.text == ' ', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.IDENTIFIER - assert token.start == 7, token.start - assert token.stop == 11, token.stop - assert token.text == '_Ab98', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.WS - assert token.start == 12, token.start - assert token.stop == 14, token.stop - assert token.text == ' \n ', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.IDENTIFIER - assert token.start == 15, token.start - assert token.stop == 20, token.stop - assert token.text == 'A12sdf', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.EOF - - - def testMalformedInput(self): - stream = antlr3.StringStream('a-b') - lexer = self.getLexer(stream) - - lexer.nextToken() - try: - token = lexer.nextToken() - raise AssertionError, token - - except antlr3.NoViableAltException, exc: - assert exc.unexpectedType == '-', repr(exc.unexpectedType) - assert exc.charPositionInLine == 1, repr(exc.charPositionInLine) - assert exc.line == 1, repr(exc.line) - - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t011lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t011lexer.g deleted file mode 100644 index fde9a3b5..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t011lexer.g +++ /dev/null @@ -1,19 +0,0 @@ -lexer grammar t011lexer; -options { - language = Python; -} - -IDENTIFIER: - ('a'..'z'|'A'..'Z'|'_') - ('a'..'z' - |'A'..'Z' - |'0'..'9' - |'_' - { - print "Underscore" - print "foo" - } - )* - ; - -WS: (' ' | '\n')+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t011lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t011lexer.py deleted file mode 100644 index 7014255f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t011lexer.py +++ /dev/null @@ -1,78 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t011lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('foobar _Ab98 \n A12sdf') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - assert token.type == self.lexerModule.IDENTIFIER - assert token.start == 0, token.start - assert token.stop == 5, token.stop - assert token.text == 'foobar', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.WS - assert token.start == 6, token.start - assert token.stop == 6, token.stop - assert token.text == ' ', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.IDENTIFIER - assert token.start == 7, token.start - assert token.stop == 11, token.stop - assert token.text == '_Ab98', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.WS - assert token.start == 12, token.start - assert token.stop == 14, token.stop - assert token.text == ' \n ', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.IDENTIFIER - assert token.start == 15, token.start - assert token.stop == 20, token.stop - assert token.text == 'A12sdf', token.text - - token = lexer.nextToken() - assert token.type == self.lexerModule.EOF - - - def testMalformedInput(self): - stream = antlr3.StringStream('a-b') - lexer = self.getLexer(stream) - - lexer.nextToken() - try: - token = lexer.nextToken() - raise AssertionError, token - - except antlr3.NoViableAltException, exc: - assert exc.unexpectedType == '-', repr(exc.unexpectedType) - assert exc.charPositionInLine == 1, repr(exc.charPositionInLine) - assert exc.line == 1, repr(exc.line) - - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXML.input b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXML.input deleted file mode 100644 index 1815a9f2..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXML.input +++ /dev/null @@ -1,21 +0,0 @@ - - - - - -]> - - -Text - -öäüß -& -< - - - - \ No newline at end of file diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXML.output b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXML.output deleted file mode 100644 index 825c37fc..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXML.output +++ /dev/null @@ -1,39 +0,0 @@ -XML declaration -Attr: version='1.0' -ROOTELEMENT: component -INTERNAL DTD: [ - - - - -] -Start Tag: component -Attr: attr="val'ue" -Attr: attr2='val"ue' -PCDATA: " -" -Comment: "" -PCDATA: " -Text -" -CDATA: "" -PCDATA: " -öäüß -& -< -" -PI: xtal -Attr: cursor='11' -PCDATA: " -" -Empty Element: sub -PCDATA: " -" -Start Tag: sub -End Tag: sub -PCDATA: " -" -End Tag: component diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXML.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXML.py deleted file mode 100644 index 3e8f8b4e..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXML.py +++ /dev/null @@ -1,189 +0,0 @@ -import antlr3 -import testbase -import unittest -import os -import sys -from cStringIO import StringIO -import difflib -import textwrap - -class t012lexerXML(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar('t012lexerXMLLexer.g') - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - inputPath = os.path.splitext(__file__)[0] + '.input' - stream = antlr3.StringStream(unicode(open(inputPath).read(), 'utf-8')) - lexer = self.getLexer(stream) - - while True: - token = lexer.nextToken() - if token.type == self.lexerModule.EOF: - break - - - output = unicode(lexer.outbuf.getvalue(), 'utf-8') - - outputPath = os.path.splitext(__file__)[0] + '.output' - testOutput = unicode(open(outputPath).read(), 'utf-8') - - success = (output == testOutput) - if not success: - d = difflib.Differ() - r = d.compare(output.splitlines(1), testOutput.splitlines(1)) - self.fail( - ''.join([l.encode('ascii', 'backslashreplace') for l in r]) - ) - - - def testMalformedInput1(self): - input = textwrap.dedent("""\ - - - - """) - - stream = antlr3.StringStream(input) - lexer = self.getLexer(stream) - - try: - while True: - token = lexer.nextToken() - if token.type == antlr3.EOF: - break - - raise AssertionError - - except antlr3.NoViableAltException, exc: - assert exc.unexpectedType == '>', repr(exc.unexpectedType) - assert exc.charPositionInLine == 11, repr(exc.charPositionInLine) - assert exc.line == 2, repr(exc.line) - - - def testMalformedInput2(self): - input = textwrap.dedent("""\ - - - - """) - - stream = antlr3.StringStream(input) - lexer = self.getLexer(stream) - - try: - while True: - token = lexer.nextToken() - if token.type == antlr3.EOF: - break - - raise AssertionError - - except antlr3.MismatchedSetException, exc: - assert exc.unexpectedType == 't', repr(exc.unexpectedType) - assert exc.charPositionInLine == 2, repr(exc.charPositionInLine) - assert exc.line == 1, repr(exc.line) - - - def testMalformedInput3(self): - input = textwrap.dedent("""\ - - - - """) - - stream = antlr3.StringStream(input) - lexer = self.getLexer(stream) - - try: - while True: - token = lexer.nextToken() - if token.type == antlr3.EOF: - break - - raise AssertionError - - except antlr3.NoViableAltException, exc: - assert exc.unexpectedType == 'a', repr(exc.unexpectedType) - assert exc.charPositionInLine == 11, repr(exc.charPositionInLine) - assert exc.line == 2, repr(exc.line) - - - -if __name__ == '__main__': - unittest.main() - - -## # run an infinite loop with randomly mangled input -## while True: -## print "ping" - -## input = """\ -## -## -## -## - -## ]> -## -## -## Text -## -## & -## < -## -## -## -## -## """ - -## import random -## input = list(input) # make it mutable -## for _ in range(3): -## p1 = random.randrange(len(input)) -## p2 = random.randrange(len(input)) - -## c1 = input[p1] -## input[p1] = input[p2] -## input[p2] = c1 -## input = ''.join(input) # back to string - -## stream = antlr3.StringStream(input) -## lexer = Lexer(stream) - -## try: -## while True: -## token = lexer.nextToken() -## if token.type == EOF: -## break - -## except antlr3.RecognitionException, exc: -## print exc -## for l in input.splitlines()[0:exc.line]: -## print l -## print ' '*exc.charPositionInLine + '^' - -## except BaseException, exc: -## print '\n'.join(['%02d: %s' % (idx+1, l) for idx, l in enumerate(input.splitlines())]) -## print "%s at %d:%d" % (exc, stream.line, stream.charPositionInLine) -## print - -## raise - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXMLLexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXMLLexer.g deleted file mode 100644 index 31fa2037..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t012lexerXMLLexer.g +++ /dev/null @@ -1,132 +0,0 @@ -lexer grammar t012lexerXMLLexer; 
-options { - language = Python; -} - -@header { -from cStringIO import StringIO -} - -@lexer::init { -self.outbuf = StringIO() -} - -@lexer::members { -def output(self, line): - self.outbuf.write(line.encode('utf-8') + "\n") -} - -DOCUMENT - : XMLDECL? WS? DOCTYPE? WS? ELEMENT WS? - ; - -fragment DOCTYPE - : - '' - ; - -fragment INTERNAL_DTD : '[' (options {greedy=false;} : .)* ']' ; - -fragment PI : - '' - ; - -fragment XMLDECL : - '' - ; - - -fragment ELEMENT - : ( START_TAG - (ELEMENT - | t=PCDATA - {self.output("PCDATA: \""+$t.text+"\"")} - | t=CDATA - {self.output("CDATA: \""+$t.text+"\"")} - | t=COMMENT - {self.output("Comment: \""+$t.text+"\"")} - | pi=PI - )* - END_TAG - | EMPTY_ELEMENT - ) - ; - -fragment START_TAG - : '<' WS? name=GENERIC_ID WS? - {self.output("Start Tag: "+name.text)} - ( ATTRIBUTE WS? )* '>' - ; - -fragment EMPTY_ELEMENT - : '<' WS? name=GENERIC_ID WS? - {self.output("Empty Element: "+name.text)} - ( ATTRIBUTE WS? )* '/>' - ; - -fragment ATTRIBUTE - : name=GENERIC_ID WS? '=' WS? value=VALUE - {self.output("Attr: "+name.text+"="+value.text)} - ; - -fragment END_TAG - : '' - {self.output("End Tag: "+name.text)} - ; - -fragment COMMENT - : '' - ; - -fragment CDATA - : '' - ; - -fragment PCDATA : (~'<')+ ; - -fragment VALUE : - ( '\"' (~'\"')* '\"' - | '\'' (~'\'')* '\'' - ) - ; - -fragment GENERIC_ID - : ( LETTER | '_' | ':') - ( options {greedy=true;} : LETTER | '0'..'9' | '.' | '-' | '_' | ':' )* - ; - -fragment LETTER - : 'a'..'z' - | 'A'..'Z' - ; - -fragment WS : - ( ' ' - | '\t' - | ( '\n' - | '\r\n' - | '\r' - ) - )+ - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t013parser.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t013parser.g deleted file mode 100644 index c3ab2c9f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t013parser.g +++ /dev/null @@ -1,23 +0,0 @@ -grammar t013parser; -options { - language = Python; -} - -@parser::init { -self.identifiers = [] -self.reportedErrors = [] -} - -@parser::members { -def foundIdentifier(self, name): - self.identifiers.append(name) - -def emitErrorMessage(self, msg): - self.reportedErrors.append(msg) -} - -document: - t=IDENTIFIER {self.foundIdentifier($t.text)} - ; - -IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t013parser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t013parser.py deleted file mode 100644 index 1c21d5e3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t013parser.py +++ /dev/null @@ -1,35 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t013parser(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.document() - - assert len(parser.reportedErrors) == 0, parser.reportedErrors - assert parser.identifiers == ['foobar'] - - - def testMalformedInput1(self): - cStream = antlr3.StringStream('') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.document() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - assert len(parser.reportedErrors) == 1, parser.reportedErrors - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t014parser.g 
b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t014parser.g deleted file mode 100644 index 4c8238f3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t014parser.g +++ /dev/null @@ -1,35 +0,0 @@ -grammar t014parser; -options { - language = Python; -} - -@parser::init { -self.events = [] -self.reportedErrors = [] -} - -@parser::members { -def emitErrorMessage(self, msg): - self.reportedErrors.append(msg) -} - - -document: - ( declaration - | call - )* - EOF - ; - -declaration: - 'var' t=IDENTIFIER ';' - {self.events.append(('decl', $t.text))} - ; - -call: - t=IDENTIFIER '(' ')' ';' - {self.events.append(('call', $t.text))} - ; - -IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; -WS: (' '|'\r'|'\t'|'\n') {$channel=HIDDEN;}; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t014parser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t014parser.py deleted file mode 100644 index e2965a41..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t014parser.py +++ /dev/null @@ -1,74 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t014parser(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - cStream = antlr3.StringStream('var foobar; gnarz(); var blupp; flupp ( ) ;') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.document() - - assert len(parser.reportedErrors) == 0, parser.reportedErrors - assert parser.events == [ - ('decl', 'foobar'), - ('call', 'gnarz'), - ('decl', 'blupp'), - ('call', 'flupp') - ], parser.events - - - def testMalformedInput1(self): - cStream = antlr3.StringStream('var; foo();') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.document() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - assert len(parser.reportedErrors) == 1, parser.reportedErrors - assert parser.events == [], parser.events - - - def testMalformedInput2(self): - cStream = antlr3.StringStream('var foobar(); gnarz();') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.document() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - assert len(parser.reportedErrors) == 1, parser.reportedErrors - assert parser.events == [ - ('call', 'gnarz'), - ], parser.events - - - def testMalformedInput3(self): - cStream = antlr3.StringStream('gnarz(; flupp();') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.document() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - assert len(parser.reportedErrors) == 1, parser.reportedErrors - assert parser.events == [ - ('call', 'gnarz'), - ('call', 'flupp'), - ], parser.events - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t015calc.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t015calc.g deleted file mode 100644 index f08e3ce7..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t015calc.g +++ /dev/null @@ -1,54 +0,0 @@ -grammar t015calc; -options { - language = Python; -} - -@header { -import math -} - -@parser::init { -self.reportedErrors = [] -} - -@parser::members { -def emitErrorMessage(self, msg): - 
self.reportedErrors.append(msg) -} - -evaluate returns [result]: r=expression {result = r}; - -expression returns [result]: r=mult ( - '+' r2=mult {r += r2} - | '-' r2=mult {r -= r2} - )* {result = r}; - -mult returns [result]: r=log ( - '*' r2=log {r *= r2} - | '/' r2=log {r /= r2} -// | '%' r2=log {r %= r2} - )* {result = r}; - -log returns [result]: 'ln' r=exp {result = math.log(r)} - | r=exp {result = r} - ; - -exp returns [result]: r=atom ('^' r2=atom {r = math.pow(r,r2)} )? {result = r} - ; - -atom returns [result]: - n=INTEGER {result = int($n.text)} - | n=DECIMAL {result = float($n.text)} - | '(' r=expression {result = r} ')' - | 'PI' {result = math.pi} - | 'E' {result = math.e} - ; - -INTEGER: DIGIT+; - -DECIMAL: DIGIT+ '.' DIGIT+; - -fragment -DIGIT: '0'..'9'; - -WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN}; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t015calc.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t015calc.py deleted file mode 100644 index 0f1fe8a3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t015calc.py +++ /dev/null @@ -1,46 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t015calc(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def _evaluate(self, expr, expected, errors=[]): - cStream = antlr3.StringStream(expr) - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - result = parser.evaluate() - assert result == expected, "%r != %r" % (result, expected) - assert len(parser.reportedErrors) == len(errors), parser.reportedErrors - - - def testValid01(self): - self._evaluate("1 + 2", 3) - - - def testValid02(self): - self._evaluate("1 + 2 * 3", 7) - - - def testValid03(self): - self._evaluate("10 / 2", 5) - - - def testValid04(self): - self._evaluate("6 + 2*(3+1) - 4", 10) - - - def testMalformedInput(self): - self._evaluate("6 - (2*1", 4, ["mismatched token at pos 8"]) - - # FIXME: most parse errors result in TypeErrors in action code, because - # rules return None, which is then added/multiplied... to integers. 
- # evaluate("6 - foo 2", 4, ["some error"]) - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t016actions.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t016actions.g deleted file mode 100644 index 1b7ac658..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t016actions.g +++ /dev/null @@ -1,31 +0,0 @@ -grammar t016actions; -options { - language = Python; -} - -declaration returns [name] - : functionHeader ';' - {$name = $functionHeader.name} - ; - -functionHeader returns [name] - : type ID - {$name = $ID.text} - ; - -type - : 'int' - | 'char' - | 'void' - ; - -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - -WS : ( ' ' - | '\t' - | '\r' - | '\n' - )+ - {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t016actions.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t016actions.py deleted file mode 100644 index 5e4cad05..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t016actions.py +++ /dev/null @@ -1,20 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t016actions(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - cStream = antlr3.StringStream("int foo;") - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - name = parser.declaration() - assert name == 'foo', name - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t017parser.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t017parser.g deleted file mode 100644 index 84c6b030..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t017parser.g +++ /dev/null @@ -1,91 +0,0 @@ -grammar t017parser; - -options { - language = Python; -} - -program - : declaration+ - ; - -declaration - : variable - | functionHeader ';' - | functionHeader block - ; - -variable - : type declarator ';' - ; - -declarator - : ID - ; - -functionHeader - : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' - ; - -formalParameter - : type declarator - ; - -type - : 'int' - | 'char' - | 'void' - | ID - ; - -block - : '{' - variable* - stat* - '}' - ; - -stat: forStat - | expr ';' - | block - | assignStat ';' - | ';' - ; - -forStat - : 'for' '(' assignStat ';' expr ';' assignStat ')' block - ; - -assignStat - : ID '=' expr - ; - -expr: condExpr - ; - -condExpr - : aexpr ( ('==' | '<') aexpr )? 
- ; - -aexpr - : atom ( '+' atom )* - ; - -atom - : ID - | INT - | '(' expr ')' - ; - -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - -INT : ('0'..'9')+ - ; - -WS : ( ' ' - | '\t' - | '\r' - | '\n' - )+ - {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t017parser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t017parser.py deleted file mode 100644 index 5b4d851a..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t017parser.py +++ /dev/null @@ -1,58 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t017parser(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - def parserClass(self, base): - class TestParser(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self.reportedErrors = [] - - - def emitErrorMessage(self, msg): - self.reportedErrors.append(msg) - - return TestParser - - - def testValid(self): - cStream = antlr3.StringStream("int foo;") - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.program() - - assert len(parser.reportedErrors) == 0, parser.reportedErrors - - - def testMalformedInput1(self): - cStream = antlr3.StringStream('int foo() { 1+2 }') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.program() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - assert len(parser.reportedErrors) == 1, parser.reportedErrors - - - def testMalformedInput2(self): - cStream = antlr3.StringStream('int foo() { 1+; 1+2 }') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.program() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - assert len(parser.reportedErrors) == 2, parser.reportedErrors - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.g deleted file mode 100644 index 388ab92d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.g +++ /dev/null @@ -1,111 +0,0 @@ -grammar t018llstar; - -options { - language = Python; -} - -@header { -from cStringIO import StringIO -} - -@init { -self.output = StringIO() -} - -program - : declaration+ - ; - -/** In this rule, the functionHeader left prefix on the last two - * alternatives is not LL(k) for a fixed k. However, it is - * LL(*). The LL(*) algorithm simply scans ahead until it sees - * either the ';' or the '{' of the block and then it picks - * the appropriate alternative. Lookhead can be arbitrarily - * long in theory, but is <=10 in most cases. Works great. - * Use ANTLRWorks to see the lookahead use (step by Location) - * and look for blue tokens in the input window pane. :) - */ -declaration - : variable - | functionHeader ';' - {self.output.write($functionHeader.name+" is a declaration\n")} - | functionHeader block - {self.output.write($functionHeader.name+" is a definition\n")} - ; - -variable - : type declarator ';' - ; - -declarator - : ID - ; - -functionHeader returns [name] - : type ID '(' ( formalParameter ( ',' formalParameter )* )? 
')' - {$name = $ID.text} - ; - -formalParameter - : type declarator - ; - -type - : 'int' - | 'char' - | 'void' - | ID - ; - -block - : '{' - variable* - stat* - '}' - ; - -stat: forStat - | expr ';' - | block - | assignStat ';' - | ';' - ; - -forStat - : 'for' '(' assignStat ';' expr ';' assignStat ')' block - ; - -assignStat - : ID '=' expr - ; - -expr: condExpr - ; - -condExpr - : aexpr ( ('==' | '<') aexpr )? - ; - -aexpr - : atom ( '+' atom )* - ; - -atom - : ID - | INT - | '(' expr ')' - ; - -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - -INT : ('0'..'9')+ - ; - -WS : ( ' ' - | '\t' - | '\r' - | '\n' - )+ - {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.input b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.input deleted file mode 100644 index 1aa5a0d0..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.input +++ /dev/null @@ -1,12 +0,0 @@ -char c; -int x; - -void bar(int x); - -int foo(int y, char d) { - int i; - for (i=0; i<3; i=i+1) { - x=3; - y=5; - } -} diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.output b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.output deleted file mode 100644 index 757c53aa..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.output +++ /dev/null @@ -1,2 +0,0 @@ -bar is a declaration -foo is a definition diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.py deleted file mode 100644 index fe67fe2b..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t018llstar.py +++ /dev/null @@ -1,76 +0,0 @@ -import antlr3 -import testbase -import unittest -import os -import sys -from cStringIO import StringIO -import difflib - -class t018llstar(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - inputPath = os.path.splitext(__file__)[0] + '.input' - cStream = antlr3.StringStream(open(inputPath).read()) - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.program() - - output = parser.output.getvalue() - - outputPath = os.path.splitext(__file__)[0] + '.output' - testOutput = open(outputPath).read() - - success = (output == testOutput) - if not success: - d = difflib.Differ() - r = d.compare(output.splitlines(1), testOutput.splitlines(1)) - self.fail( - ''.join([l.encode('ascii', 'backslashreplace') for l in r]) - ) - -if __name__ == '__main__': - unittest.main() - - - -## # run an infinite loop with randomly mangled input -## while True: -## print "ping" - -## input = open(inputPath).read() - -## import random -## input = list(input) # make it mutable -## for _ in range(3): -## p1 = random.randrange(len(input)) -## p2 = random.randrange(len(input)) - -## c1 = input[p1] -## input[p1] = input[p2] -## input[p2] = c1 -## input = ''.join(input) # back to string - - -## try: -## cStream = antlr3.StringStream(input) -## lexer = Lexer(cStream) -## tStream = antlr3.CommonTokenStream(lexer) -## parser = TestParser(tStream) -## parser.program() - -## except antlr3.RecognitionException, exc: -## print exc -## for l in input.splitlines()[0:exc.line]: -## print l -## print ' '*exc.charPositionInLine + '^' - -## except BaseException, exc: -## print '\n'.join(['%02d: %s' % (idx+1, l) for idx, l in enumerate(input.splitlines())]) -## print "%s at %d:%d" % (exc, cStream.line, cStream.charPositionInLine) -## print - 
-## raise diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t019lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t019lexer.g deleted file mode 100644 index 36477755..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t019lexer.g +++ /dev/null @@ -1,64 +0,0 @@ -lexer grammar t019lexer; -options { - language=Python; - filter=true; -} - -IMPORT - : 'import' WS name=QIDStar WS? ';' - ; - -/** Avoids having "return foo;" match as a field */ -RETURN - : 'return' (options {greedy=false;}:.)* ';' - ; - -CLASS - : 'class' WS name=ID WS? ('extends' WS QID WS?)? - ('implements' WS QID WS? (',' WS? QID WS?)*)? '{' - ; - -COMMENT - : '/*' (options {greedy=false;} : . )* '*/' - ; - -STRING - : '"' (options {greedy=false;}: ESC | .)* '"' - ; - -CHAR - : '\'' (options {greedy=false;}: ESC | .)* '\'' - ; - -WS : (' '|'\t'|'\n')+ - ; - -fragment -QID : ID ('.' ID)* - ; - -/** QID cannot see beyond end of token so using QID '.*'? somewhere won't - * ever match since k=1 lookahead in the QID loop of '.' will make it loop. - * I made this rule to compensate. - */ -fragment -QIDStar - : ID ('.' ID)* '.*'? - ; - -fragment -TYPE: QID '[]'? - ; - -fragment -ARG : TYPE WS ID - ; - -fragment -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')* - ; - -fragment -ESC : '\\' ('"'|'\''|'\\') - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t019lexer.input b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t019lexer.input deleted file mode 100644 index d01e1c1a..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t019lexer.input +++ /dev/null @@ -1,13 +0,0 @@ -import org.antlr.runtime.*; - -public class Main { - public static void main(String[] args) throws Exception { - for (int i=0; i ID // only visible, if b was called with True - | NUM - ; - - -/* rule scopes, from the book, final beta, p.148 */ - -c returns [res] -scope { - symbols -} -@init { - $c::symbols = set(); -} - : '{' c1* c2+ '}' - { $res = $c::symbols; } - ; - -c1 - : 'int' ID {$c::symbols.add($ID.text)} ';' - ; - -c2 - : ID '=' NUM ';' - { - if $ID.text not in $c::symbols: - raise RuntimeError($ID.text) - } - ; - -/* recursive rule scopes, from the book, final beta, p.150 */ - -d returns [res] -scope { - symbols -} -@init { - $d::symbols = set(); -} - : '{' d1* d2* '}' - { $res = $d::symbols; } - ; - -d1 - : 'int' ID {$d::symbols.add($ID.text)} ';' - ; - -d2 - : ID '=' NUM ';' - { - for s in reversed(range(len($d))): - if $ID.text in $d[s]::symbols: - break - else: - raise RuntimeError($ID.text) - } - | d - ; - -/* recursive rule scopes, access bottom-most scope */ - -e returns [res] -scope { - a -} -@after { - $res = $e::a; -} - : NUM { $e[0]::a = int($NUM.text); } - | '{' e '}' - ; - - -/* recursive rule scopes, access with negative index */ - -f returns [res] -scope { - a -} -@after { - $res = $f::a; -} - : NUM { $f[-2]::a = int($NUM.text); } - | '{' f '}' - ; - - -/* tokens */ - -ID : ('a'..'z')+ - ; - -NUM : ('0'..'9')+ - ; - -WS : (' '|'\n'|'\r')+ {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t022scopes.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t022scopes.py deleted file mode 100644 index 01bc5975..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t022scopes.py +++ /dev/null @@ -1,167 +0,0 @@ -import antlr3 -import testbase -import unittest -import textwrap - - -class t022scopes(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def parserClass(self, base): - class TParser(base): - def 
emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! - raise re - - return TParser - - - def testa1(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.a() - - - def testb1(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - try: - parser.b(False) - self.fail() - except antlr3.RecognitionException: - pass - - - def testb2(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.b(True) - - - def testc1(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { - int i; - int j; - i = 0; - } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - symbols = parser.c() - - self.failUnlessEqual( - symbols, - set(['i', 'j']) - ) - - - def testc2(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { - int i; - int j; - i = 0; - x = 4; - } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - try: - parser.c() - self.fail() - except RuntimeError, exc: - self.failUnlessEqual(exc.args[0], 'x') - - - def testd1(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { - int i; - int j; - i = 0; - { - int i; - int x; - x = 5; - } - } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - symbols = parser.d() - - self.failUnlessEqual( - symbols, - set(['i', 'j']) - ) - - - def teste1(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { { { { 12 } } } } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - res = parser.e() - - self.failUnlessEqual(res, 12) - - - def testf1(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { { { { 12 } } } } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - res = parser.f() - - self.failUnlessEqual(res, None) - - - def testf2(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { { 12 } } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - res = parser.f() - - self.failUnlessEqual(res, None) - - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t023scopes.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t023scopes.g deleted file mode 100644 index 02e69b1a..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t023scopes.g +++ /dev/null @@ -1,18 +0,0 @@ -grammar t023scopes; - -options { - language=Python; -} - -prog -scope { -name -} - : ID {$prog::name=$ID.text;} - ; - -ID : ('a'..'z')+ - ; - -WS : (' '|'\n'|'\r')+ {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t023scopes.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t023scopes.py deleted file mode 100644 index 4c33b8af..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t023scopes.py +++ /dev/null @@ -1,20 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class 
t023scopes(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.prog() - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t024finally.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t024finally.g deleted file mode 100644 index 1cd2527e..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t024finally.g +++ /dev/null @@ -1,19 +0,0 @@ -grammar t024finally; - -options { - language=Python; -} - -prog returns [events] -@init {events = []} -@after {events.append('after')} - : ID {raise RuntimeError} - ; - catch [RuntimeError] {events.append('catch')} - finally {events.append('finally')} - -ID : ('a'..'z')+ - ; - -WS : (' '|'\n'|'\r')+ {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t024finally.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t024finally.py deleted file mode 100644 index 9a269dde..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t024finally.py +++ /dev/null @@ -1,23 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t024finally(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.prog() - - assert events == ['catch', 'finally'], events - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t025lexerRulePropertyRef.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t025lexerRulePropertyRef.g deleted file mode 100644 index b3500ccb..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t025lexerRulePropertyRef.g +++ /dev/null @@ -1,18 +0,0 @@ -lexer grammar t025lexerRulePropertyRef; -options { - language = Python; -} - -@lexer::init { -self.properties = [] -} - -IDENTIFIER: - ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - { -self.properties.append( - ($text, $type, $line, $pos, $index, $channel, $start, $stop) -) - } - ; -WS: (' ' | '\n')+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t025lexerRulePropertyRef.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t025lexerRulePropertyRef.py deleted file mode 100644 index ae4ac794..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t025lexerRulePropertyRef.py +++ /dev/null @@ -1,54 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t025lexerRulePropertyRef(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - stream = antlr3.StringStream('foobar _Ab98 \n A12sdf') - lexer = self.getLexer(stream) - - while True: - token = lexer.nextToken() - if token.type == antlr3.EOF: - break - - assert len(lexer.properties) == 3, lexer.properties - - text, type, line, pos, index, channel, start, stop = lexer.properties[0] - assert text == 'foobar', lexer.properties[0] - assert type == self.lexerModule.IDENTIFIER, lexer.properties[0] - assert line == 1, lexer.properties[0] - assert pos == 0, lexer.properties[0] - assert index == -1, lexer.properties[0] - assert channel == antlr3.DEFAULT_CHANNEL, lexer.properties[0] - assert start == 0, lexer.properties[0] - assert stop == 5, lexer.properties[0] - - text, type, line, pos, index, 
channel, start, stop = lexer.properties[1] - assert text == '_Ab98', lexer.properties[1] - assert type == self.lexerModule.IDENTIFIER, lexer.properties[1] - assert line == 1, lexer.properties[1] - assert pos == 7, lexer.properties[1] - assert index == -1, lexer.properties[1] - assert channel == antlr3.DEFAULT_CHANNEL, lexer.properties[1] - assert start == 7, lexer.properties[1] - assert stop == 11, lexer.properties[1] - - text, type, line, pos, index, channel, start, stop = lexer.properties[2] - assert text == 'A12sdf', lexer.properties[2] - assert type == self.lexerModule.IDENTIFIER, lexer.properties[2] - assert line == 2, lexer.properties[2] - assert pos == 1, lexer.properties[2] - assert index == -1, lexer.properties[2] - assert channel == antlr3.DEFAULT_CHANNEL, lexer.properties[2] - assert start == 15, lexer.properties[2] - assert stop == 20, lexer.properties[2] - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t026actions.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t026actions.g deleted file mode 100644 index e8f9fefe..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t026actions.g +++ /dev/null @@ -1,39 +0,0 @@ -grammar t026actions; -options { - language = Python; -} - -@lexer::init { - self.foobar = 'attribute;' -} - -prog -@init { - self.capture('init;') -} -@after { - self.capture('after;') -} - : IDENTIFIER EOF - ; - catch [ RecognitionException, exc ] { - self.capture('catch;') - raise - } - finally { - self.capture('finally;') - } - - -IDENTIFIER - : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - { - # a comment - self.capture('action;') - self.capture('\%r \%r \%r \%r \%r \%r \%r \%r;' \% ($text, $type, $line, $pos, $index, $channel, $start, $stop)) - if True: - self.capture(self.foobar) - } - ; - -WS: (' ' | '\n')+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t026actions.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t026actions.py deleted file mode 100644 index dd4e5d67..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t026actions.py +++ /dev/null @@ -1,66 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t026actions(testbase.ANTLRTest): - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._errors = [] - self._output = "" - - - def capture(self, t): - self._output += t - - - def emitErrorMessage(self, msg): - self._errors.append(msg) - - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._errors = [] - self._output = "" - - - def capture(self, t): - self._output += t - - - def emitErrorMessage(self, msg): - self._errors.append(msg) - - - return TLexer - - - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foobar _Ab98 \n A12sdf') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.prog() - - self.assertEqual( - parser._output, - 'init;after;finally;') - self.assertEqual( - lexer._output, - 'action;u\'foobar\' 4 1 0 -1 0 0 5;attribute;action;u\'_Ab98\' 4 1 7 -1 0 7 11;attribute;action;u\'A12sdf\' 4 2 1 -1 0 15 20;attribute;') - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t027eof.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t027eof.g 
deleted file mode 100644 index 9cfbb3af..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t027eof.g +++ /dev/null @@ -1,8 +0,0 @@ -lexer grammar t027eof; - -options { - language=Python; -} - -END: EOF; -SPACE: ' '; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t027eof.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t027eof.py deleted file mode 100644 index b6ae18d9..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t027eof.py +++ /dev/null @@ -1,25 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t027eof(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - @testbase.broken("That's not how EOF is supposed to be used", Exception) - def testValid1(self): - cStream = antlr3.StringStream(' ') - lexer = self.getLexer(cStream) - - tok = lexer.nextToken() - assert tok.type == self.lexerModule.SPACE, tok - - tok = lexer.nextToken() - assert tok.type == self.lexerModule.END, tok - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t028labelExpr.g.disabled b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t028labelExpr.g.disabled deleted file mode 100644 index d3ba76cb..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t028labelExpr.g.disabled +++ /dev/null @@ -1,5 +0,0 @@ -lexer grammar t028labelExpr; -ETAGO: (' ' ' '<'; -CDATA: '<'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t029synpredgate.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t029synpredgate.g deleted file mode 100644 index 7900262a..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t029synpredgate.g +++ /dev/null @@ -1,16 +0,0 @@ -lexer grammar t029synpredgate; -options { - language = Python; -} - -FOO - : ('ab')=> A - | ('ac')=> B - ; - -fragment -A: 'a'; - -fragment -B: 'a'; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t029synpredgate.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t029synpredgate.py deleted file mode 100644 index b6586889..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t029synpredgate.py +++ /dev/null @@ -1,21 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t029synpredgate(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - stream = antlr3.StringStream('ac') - lexer = self.getLexer(stream) - token = lexer.nextToken() - - -if __name__ == '__main__': - unittest.main() - - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t030specialStates.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t030specialStates.g deleted file mode 100644 index 7b2e423e..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t030specialStates.g +++ /dev/null @@ -1,26 +0,0 @@ -grammar t030specialStates; -options { - language = Python; -} - -@init { -self.cond = True -} - -@members { -def recover(self, input, re): - # no error recovery yet, just crash! - raise re -} - -r - : ( {self.cond}? NAME - | {not self.cond}? NAME WS+ NAME - ) - ( WS+ NAME )? 
- EOF - ; - -NAME: ('a'..'z') ('a'..'z' | '0'..'9')+; -NUMBER: ('0'..'9')+; -WS: ' '+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t030specialStates.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t030specialStates.py deleted file mode 100644 index 86c4f7cd..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t030specialStates.py +++ /dev/null @@ -1,47 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t030specialStates(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foo') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.r() - - - def testValid2(self): - cStream = antlr3.StringStream('foo name1') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.r() - - - def testValid3(self): - cStream = antlr3.StringStream('bar name1') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.cond = False - events = parser.r() - - - def testValid4(self): - cStream = antlr3.StringStream('bar name1 name2') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.cond = False - events = parser.r() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t031emptyAlt.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t031emptyAlt.g deleted file mode 100644 index 0afa5961..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t031emptyAlt.g +++ /dev/null @@ -1,16 +0,0 @@ -grammar t031emptyAlt; -options { - language = Python; -} - -r - : NAME - ( {self.cond}?=> WS+ NAME - | - ) - EOF - ; - -NAME: ('a'..'z') ('a'..'z' | '0'..'9')+; -NUMBER: ('0'..'9')+; -WS: ' '+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t031emptyAlt.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t031emptyAlt.py deleted file mode 100644 index fcae8e1d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t031emptyAlt.py +++ /dev/null @@ -1,21 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t031emptyAlt(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foo') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.r() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t032subrulePredict.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t032subrulePredict.g deleted file mode 100644 index 3cc23276..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t032subrulePredict.g +++ /dev/null @@ -1,8 +0,0 @@ -grammar t032subrulePredict; -options { - language = Python; -} - -a: 'BEGIN' b WS+ 'END'; -b: ( WS+ 'A' )+; -WS: ' '; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t032subrulePredict.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t032subrulePredict.py deleted file mode 100644 index 7b62add8..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t032subrulePredict.py +++ /dev/null @@ -1,44 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t032subrulePredict(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def 
parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream( - 'BEGIN A END' - ) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.a() - - - @testbase.broken("DFA tries to look beyond end of rule b", Exception) - def testValid2(self): - cStream = antlr3.StringStream( - ' A' - ) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.b() - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t033backtracking.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t033backtracking.g deleted file mode 100644 index 85a4b309..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t033backtracking.g +++ /dev/null @@ -1,534 +0,0 @@ -grammar t033backtracking; -options { - language=Python; - backtrack=true; - memoize=true; - k=2; -} - -scope Symbols { - types; -} - -@header { -# compatibility stuff -try: - set = set - frozenset = frozenset -except NameError: - from sets import Set as set, ImmutableSet as frozenset - - -try: - reversed = reversed -except NameError: - def reversed(l): - l = l[:] - l.reverse() - return l - -} - -@members { - def isTypeName(self, name): - for scope in reversed(self.Symbols_stack): - if name in scope.types: - return True - - return False - -} - -translation_unit -scope Symbols; // entire file is a scope -@init { - $Symbols::types = set() -} - : external_declaration+ - ; - -/** Either a function definition or any other kind of C decl/def. - * The LL(*) analysis algorithm fails to deal with this due to - * recursion in the declarator rules. I'm putting in a - * manual predicate here so that we don't backtrack over - * the entire function. Further, you get a better error - * as errors within the function itself don't make it fail - * to predict that it's a function. Weird errors previously. - * Remember: the goal is to avoid backtrack like the plague - * because it makes debugging, actions, and errors harder. - * - * Note that k=1 results in a much smaller predictor for the - * fixed lookahead; k=2 made a few extra thousand lines. ;) - * I'll have to optimize that in the future. - */ -external_declaration -options {k=1;} - : ( declaration_specifiers? declarator declaration* '{' )=> function_definition - | declaration - ; - -function_definition -scope Symbols; // put parameters and locals into same scope for now -@init { - $Symbols::types = set() -} - : declaration_specifiers? declarator -// ( declaration+ compound_statement // K&R style -// | compound_statement // ANSI style -// ) - ; - -declaration -scope { - isTypedef; -} -@init { - $declaration::isTypedef = False -} - : 'typedef' declaration_specifiers? {$declaration::isTypedef = True} - init_declarator_list ';' // special case, looking for typedef - | declaration_specifiers init_declarator_list? ';' - ; - -declaration_specifiers - : ( storage_class_specifier - | type_specifier - | type_qualifier - )+ - ; - -init_declarator_list - : init_declarator (',' init_declarator)* - ; - -init_declarator - : declarator //('=' initializer)? 
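    // ('=' initializer) stays commented out: most expression/statement rules
    // below are disabled so the grammar remains a minimal C subset that still
    // exercises backtracking and the isTypeName() typedef predicate.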
- ; - -storage_class_specifier - : 'extern' - | 'static' - | 'auto' - | 'register' - ; - -type_specifier - : 'void' - | 'char' - | 'short' - | 'int' - | 'long' - | 'float' - | 'double' - | 'signed' - | 'unsigned' -// | struct_or_union_specifier -// | enum_specifier - | type_id - ; - -type_id - : {self.isTypeName(self.input.LT(1).getText())}? IDENTIFIER -// {System.out.println($IDENTIFIER.text+" is a type");} - ; - -// struct_or_union_specifier -// options {k=3;} -// scope Symbols; // structs are scopes -// @init { -// $Symbols::types = set() -// } -// : struct_or_union IDENTIFIER? '{' struct_declaration_list '}' -// | struct_or_union IDENTIFIER -// ; - -// struct_or_union -// : 'struct' -// | 'union' -// ; - -// struct_declaration_list -// : struct_declaration+ -// ; - -// struct_declaration -// : specifier_qualifier_list struct_declarator_list ';' -// ; - -// specifier_qualifier_list -// : ( type_qualifier | type_specifier )+ -// ; - -// struct_declarator_list -// : struct_declarator (',' struct_declarator)* -// ; - -// struct_declarator -// : declarator (':' constant_expression)? -// | ':' constant_expression -// ; - -// enum_specifier -// options {k=3;} -// : 'enum' '{' enumerator_list '}' -// | 'enum' IDENTIFIER '{' enumerator_list '}' -// | 'enum' IDENTIFIER -// ; - -// enumerator_list -// : enumerator (',' enumerator)* -// ; - -// enumerator -// : IDENTIFIER ('=' constant_expression)? -// ; - -type_qualifier - : 'const' - | 'volatile' - ; - -declarator - : pointer? direct_declarator - | pointer - ; - -direct_declarator - : ( IDENTIFIER - { - if len($declaration)>0 and $declaration::isTypedef: - $Symbols::types.add($IDENTIFIER.text) - print "define type "+$IDENTIFIER.text - } - | '(' declarator ')' - ) - declarator_suffix* - ; - -declarator_suffix - : /*'[' constant_expression ']' - |*/ '[' ']' -// | '(' parameter_type_list ')' -// | '(' identifier_list ')' - | '(' ')' - ; - -pointer - : '*' type_qualifier+ pointer? - | '*' pointer - | '*' - ; - -// parameter_type_list -// : parameter_list (',' '...')? -// ; - -// parameter_list -// : parameter_declaration (',' parameter_declaration)* -// ; - -// parameter_declaration -// : declaration_specifiers (declarator|abstract_declarator)* -// ; - -// identifier_list -// : IDENTIFIER (',' IDENTIFIER)* -// ; - -// type_name -// : specifier_qualifier_list abstract_declarator? -// ; - -// abstract_declarator -// : pointer direct_abstract_declarator? -// | direct_abstract_declarator -// ; - -// direct_abstract_declarator -// : ( '(' abstract_declarator ')' | abstract_declarator_suffix ) abstract_declarator_suffix* -// ; - -// abstract_declarator_suffix -// : '[' ']' -// | '[' constant_expression ']' -// | '(' ')' -// | '(' parameter_type_list ')' -// ; - -// initializer -// : assignment_expression -// | '{' initializer_list ','? 
'}' -// ; - -// initializer_list -// : initializer (',' initializer)* -// ; - -// // E x p r e s s i o n s - -// argument_expression_list -// : assignment_expression (',' assignment_expression)* -// ; - -// additive_expression -// : (multiplicative_expression) ('+' multiplicative_expression | '-' multiplicative_expression)* -// ; - -// multiplicative_expression -// : (cast_expression) ('*' cast_expression | '/' cast_expression | '%' cast_expression)* -// ; - -// cast_expression -// : '(' type_name ')' cast_expression -// | unary_expression -// ; - -// unary_expression -// : postfix_expression -// | '++' unary_expression -// | '--' unary_expression -// | unary_operator cast_expression -// | 'sizeof' unary_expression -// | 'sizeof' '(' type_name ')' -// ; - -// postfix_expression -// : primary_expression -// ( '[' expression ']' -// | '(' ')' -// | '(' argument_expression_list ')' -// | '.' IDENTIFIER -// | '*' IDENTIFIER -// | '->' IDENTIFIER -// | '++' -// | '--' -// )* -// ; - -// unary_operator -// : '&' -// | '*' -// | '+' -// | '-' -// | '~' -// | '!' -// ; - -// primary_expression -// : IDENTIFIER -// | constant -// | '(' expression ')' -// ; - -// constant -// : HEX_LITERAL -// | OCTAL_LITERAL -// | DECIMAL_LITERAL -// | CHARACTER_LITERAL -// | STRING_LITERAL -// | FLOATING_POINT_LITERAL -// ; - -// ///// - -// expression -// : assignment_expression (',' assignment_expression)* -// ; - -// constant_expression -// : conditional_expression -// ; - -// assignment_expression -// : lvalue assignment_operator assignment_expression -// | conditional_expression -// ; - -// lvalue -// : unary_expression -// ; - -// assignment_operator -// : '=' -// | '*=' -// | '/=' -// | '%=' -// | '+=' -// | '-=' -// | '<<=' -// | '>>=' -// | '&=' -// | '^=' -// | '|=' -// ; - -// conditional_expression -// : logical_or_expression ('?' expression ':' conditional_expression)? -// ; - -// logical_or_expression -// : logical_and_expression ('||' logical_and_expression)* -// ; - -// logical_and_expression -// : inclusive_or_expression ('&&' inclusive_or_expression)* -// ; - -// inclusive_or_expression -// : exclusive_or_expression ('|' exclusive_or_expression)* -// ; - -// exclusive_or_expression -// : and_expression ('^' and_expression)* -// ; - -// and_expression -// : equality_expression ('&' equality_expression)* -// ; -// equality_expression -// : relational_expression (('=='|'!=') relational_expression)* -// ; - -// relational_expression -// : shift_expression (('<'|'>'|'<='|'>=') shift_expression)* -// ; - -// shift_expression -// : additive_expression (('<<'|'>>') additive_expression)* -// ; - -// // S t a t e m e n t s - -// statement -// : labeled_statement -// | compound_statement -// | expression_statement -// | selection_statement -// | iteration_statement -// | jump_statement -// ; - -// labeled_statement -// : IDENTIFIER ':' statement -// | 'case' constant_expression ':' statement -// | 'default' ':' statement -// ; - -// compound_statement -// scope Symbols; // blocks have a scope of symbols -// @init { -// $Symbols::types = {} -// } -// : '{' declaration* statement_list? '}' -// ; - -// statement_list -// : statement+ -// ; - -// expression_statement -// : ';' -// | expression ';' -// ; - -// selection_statement -// : 'if' '(' expression ')' statement (options {k=1; backtrack=false;}:'else' statement)? 
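// (the k=1, backtrack=false subrule options above pin 'else' to the nearest
// 'if', the usual ANTLR resolution of the dangling-else ambiguity)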
-// | 'switch' '(' expression ')' statement -// ; - -// iteration_statement -// : 'while' '(' expression ')' statement -// | 'do' statement 'while' '(' expression ')' ';' -// | 'for' '(' expression_statement expression_statement expression? ')' statement -// ; - -// jump_statement -// : 'goto' IDENTIFIER ';' -// | 'continue' ';' -// | 'break' ';' -// | 'return' ';' -// | 'return' expression ';' -// ; - -IDENTIFIER - : LETTER (LETTER|'0'..'9')* - ; - -fragment -LETTER - : '$' - | 'A'..'Z' - | 'a'..'z' - | '_' - ; - -CHARACTER_LITERAL - : '\'' ( EscapeSequence | ~('\''|'\\') ) '\'' - ; - -STRING_LITERAL - : '"' ( EscapeSequence | ~('\\'|'"') )* '"' - ; - -HEX_LITERAL : '0' ('x'|'X') HexDigit+ IntegerTypeSuffix? ; - -DECIMAL_LITERAL : ('0' | '1'..'9' '0'..'9'*) IntegerTypeSuffix? ; - -OCTAL_LITERAL : '0' ('0'..'7')+ IntegerTypeSuffix? ; - -fragment -HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ; - -fragment -IntegerTypeSuffix - : ('u'|'U')? ('l'|'L') - | ('u'|'U') ('l'|'L')? - ; - -FLOATING_POINT_LITERAL - : ('0'..'9')+ '.' ('0'..'9')* Exponent? FloatTypeSuffix? - | '.' ('0'..'9')+ Exponent? FloatTypeSuffix? - | ('0'..'9')+ Exponent FloatTypeSuffix? - | ('0'..'9')+ Exponent? FloatTypeSuffix - ; - -fragment -Exponent : ('e'|'E') ('+'|'-')? ('0'..'9')+ ; - -fragment -FloatTypeSuffix : ('f'|'F'|'d'|'D') ; - -fragment -EscapeSequence - : '\\' ('b'|'t'|'n'|'f'|'r'|'\"'|'\''|'\\') - | OctalEscape - ; - -fragment -OctalEscape - : '\\' ('0'..'3') ('0'..'7') ('0'..'7') - | '\\' ('0'..'7') ('0'..'7') - | '\\' ('0'..'7') - ; - -fragment -UnicodeEscape - : '\\' 'u' HexDigit HexDigit HexDigit HexDigit - ; - -WS : (' '|'\r'|'\t'|'\u000C'|'\n') {$channel=HIDDEN;} - ; - -COMMENT - : '/*' ( options {greedy=false;} : . )* '*/' {$channel=HIDDEN;} - ; - -LINE_COMMENT - : '//' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;} - ; - -// ignore #line info for now -LINE_COMMAND - : '#' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;} - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t033backtracking.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t033backtracking.py deleted file mode 100644 index 8b5c66a3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t033backtracking.py +++ /dev/null @@ -1,31 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t033backtracking(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
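            # re-raising makes the broken-marked test below fail fast
            # instead of letting ANTLR resynchronize and mask the error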
- raise - - return TParser - - - @testbase.broken("Some bug in the tool", SyntaxError) - def testValid1(self): - cStream = antlr3.StringStream('int a;') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.translation_unit() - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t034tokenLabelPropertyRef.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t034tokenLabelPropertyRef.g deleted file mode 100644 index 7311235d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t034tokenLabelPropertyRef.g +++ /dev/null @@ -1,30 +0,0 @@ -grammar t034tokenLabelPropertyRef; -options { - language = Python; -} - -a: t=A - { - print $t.text - print $t.type - print $t.line - print $t.pos - print $t.channel - print $t.index - #print $t.tree - } - ; - -A: 'a'..'z'; - -WS : - ( ' ' - | '\t' - | ( '\n' - | '\r\n' - | '\r' - ) - )+ - { $channel = HIDDEN } - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t034tokenLabelPropertyRef.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t034tokenLabelPropertyRef.py deleted file mode 100644 index b94de131..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t034tokenLabelPropertyRef.py +++ /dev/null @@ -1,40 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t034tokenLabelPropertyRef(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' a') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.a() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t035ruleLabelPropertyRef.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t035ruleLabelPropertyRef.g deleted file mode 100644 index 710a91c3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t035ruleLabelPropertyRef.g +++ /dev/null @@ -1,16 +0,0 @@ -grammar t035ruleLabelPropertyRef; -options { - language = Python; -} - -a returns [bla]: t=b - { - $bla = $t.start, $t.stop, $t.text - } - ; - -b: A+; - -A: 'a'..'z'; - -WS: ' '+ { $channel = HIDDEN }; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t035ruleLabelPropertyRef.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t035ruleLabelPropertyRef.py deleted file mode 100644 index c42dbaae..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t035ruleLabelPropertyRef.py +++ /dev/null @@ -1,47 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t035ruleLabelPropertyRef(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
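            # recovery is disabled because the test asserts exact token
            # indexes ($t.start/$t.stop of rule b), which resync would skew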
- raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' a a a a ') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - start, stop, text = parser.a() - - # first token of rule b is the 2nd token (counting hidden tokens) - assert start.index == 1, start - - # first token of rule b is the 7th token (counting hidden tokens) - assert stop.index == 7, stop - - assert text == "a a a a", text - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t036multipleReturnValues.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t036multipleReturnValues.g deleted file mode 100644 index 04ce14c4..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t036multipleReturnValues.g +++ /dev/null @@ -1,25 +0,0 @@ -grammar t036multipleReturnValues; -options { - language = Python; -} - -a returns [foo, bar]: A - { - $foo = "foo"; - $bar = "bar"; - } - ; - -A: 'a'..'z'; - -WS : - ( ' ' - | '\t' - | ( '\n' - | '\r\n' - | '\r' - ) - )+ - { $channel = HIDDEN } - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t036multipleReturnValues.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t036multipleReturnValues.py deleted file mode 100644 index 97e04e30..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t036multipleReturnValues.py +++ /dev/null @@ -1,43 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t036multipleReturnValues(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' a') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - ret = parser.a() - assert ret.foo == 'foo', ret.foo - assert ret.bar == 'bar', ret.bar - - -if __name__ == '__main__': - unittest.main() - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t037rulePropertyRef.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t037rulePropertyRef.g deleted file mode 100644 index d2ab177d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t037rulePropertyRef.g +++ /dev/null @@ -1,15 +0,0 @@ -grammar t037rulePropertyRef; -options { - language = Python; -} - -a returns [bla] -@after { - $bla = $start, $stop, $text -} - : A+ - ; - -A: 'a'..'z'; - -WS: ' '+ { $channel = HIDDEN }; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t037rulePropertyRef.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t037rulePropertyRef.py deleted file mode 100644 index 998a2ba1..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t037rulePropertyRef.py +++ /dev/null @@ -1,47 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t037rulePropertyRef(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
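            # t037 checks the $start, $stop and $text rule properties
            # captured in the @after block of rule a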
- raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' a a a a ') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - start, stop, text = parser.a().bla - - # first token of rule b is the 2nd token (counting hidden tokens) - assert start.index == 1, start - - # first token of rule b is the 7th token (counting hidden tokens) - assert stop.index == 7, stop - - assert text == "a a a a", text - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t038lexerRuleLabel.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t038lexerRuleLabel.g deleted file mode 100644 index fcc1a61b..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t038lexerRuleLabel.g +++ /dev/null @@ -1,28 +0,0 @@ -lexer grammar t038lexerRuleLabel; -options { - language = Python; -} - -A: 'a'..'z' WS '0'..'9' - { - print $WS - print $WS.type - print $WS.line - print $WS.pos - print $WS.channel - print $WS.index - print $WS.text - } - ; - -fragment WS : - ( ' ' - | '\t' - | ( '\n' - | '\r\n' - | '\r' - ) - )+ - { $channel = HIDDEN } - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t038lexerRuleLabel.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t038lexerRuleLabel.py deleted file mode 100644 index 2af65f98..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t038lexerRuleLabel.py +++ /dev/null @@ -1,33 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t038lexerRuleLabel(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def testValid1(self): - cStream = antlr3.StringStream('a 2') - - lexer = self.getLexer(cStream) - - while True: - t = lexer.nextToken() - if t.type == antlr3.EOF: - break - print t - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t039labels.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t039labels.g deleted file mode 100644 index d9dc2483..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t039labels.g +++ /dev/null @@ -1,18 +0,0 @@ -grammar t039labels; -options { - language = Python; -} - -a returns [l] - : ids+=A ( ',' ids+=(A|B) )* C D w=. ids+=. F EOF - { l = ($ids, $w) } - ; - -A: 'a'..'z'; -B: '0'..'9'; -C: a='A' { print $a }; -D: a='FOOBAR' { print $a }; -E: 'GNU' a=. { print $a }; -F: 'BLARZ' a=EOF { print $a }; - -WS: ' '+ { $channel = HIDDEN }; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t039labels.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t039labels.py deleted file mode 100644 index 8159d6be..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t039labels.py +++ /dev/null @@ -1,53 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t039labels(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
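            # t039 collects tokens into the ids += list labels and the
            # wildcard label w; the assertions below check all of them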
- raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream( - 'a, b, c, 1, 2 A FOOBAR GNU1 A BLARZ' - ) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - ids, w = parser.a() - - assert len(ids) == 6, ids - assert ids[0].text == 'a', ids[0] - assert ids[1].text == 'b', ids[1] - assert ids[2].text == 'c', ids[2] - assert ids[3].text == '1', ids[3] - assert ids[4].text == '2', ids[4] - assert ids[5].text == 'A', ids[5] - - assert w.text == 'GNU1', w - - -if __name__ == '__main__': - unittest.main() - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t040bug80.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t040bug80.g deleted file mode 100644 index bdf610bc..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t040bug80.g +++ /dev/null @@ -1,13 +0,0 @@ -lexer grammar t040bug80; -options { - language = Python; -} - -ID_LIKE - : 'defined' - | {False}? Identifier - | Identifier - ; - -fragment -Identifier: 'a'..'z'+ ; // with just 'a', output compiles diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t040bug80.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t040bug80.py deleted file mode 100644 index c6637e55..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t040bug80.py +++ /dev/null @@ -1,33 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t040bug80(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def testValid1(self): - cStream = antlr3.StringStream('defined') - lexer = self.getLexer(cStream) - while True: - t = lexer.nextToken() - if t.type == antlr3.EOF: - break - print t - - -if __name__ == '__main__': - unittest.main() - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t041parameters.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t041parameters.g deleted file mode 100644 index b9a88924..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t041parameters.g +++ /dev/null @@ -1,16 +0,0 @@ -grammar t041parameters; -options { - language = Python; -} - -a[arg1, arg2] returns [l] - : A+ EOF - { - l = ($arg1, $arg2) - $arg1 = "gnarz" - } - ; - -A: 'a'..'z'; - -WS: ' '+ { $channel = HIDDEN }; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t041parameters.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t041parameters.py deleted file mode 100644 index 1fe4a4fc..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t041parameters.py +++ /dev/null @@ -1,45 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t041parameters(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
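            # t041 passes ('foo', 'bar') into rule a as $arg1/$arg2 and
            # expects them back unchanged via the l return value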
- raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream('a a a') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - r = parser.a('foo', 'bar') - - assert r == ('foo', 'bar'), r - - -if __name__ == '__main__': - unittest.main() - - - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t042ast.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t042ast.g deleted file mode 100644 index f6768351..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t042ast.g +++ /dev/null @@ -1,353 +0,0 @@ -grammar t042ast; -options { - language = Python; - output = AST; -} - -tokens { - VARDEF; - FLOAT; - EXPR; - BLOCK; - VARIABLE; - FIELD; - CALL; - INDEX; - FIELDACCESS; -} - -@init { -self.flag = False -} - -r1 - : INT ('+'^ INT)* - ; - -r2 - : 'assert'^ x=expression (':'! y=expression)? ';'! - ; - -r3 - : 'if'^ expression s1=statement ('else'! s2=statement)? - ; - -r4 - : 'while'^ expression statement - ; - -r5 - : 'return'^ expression? ';'! - ; - -r6 - : (INT|ID)+ - ; - -r7 - : INT -> - ; - -r8 - : 'var' ID ':' type -> ^('var' type ID) - ; - -r9 - : type ID ';' -> ^(VARDEF type ID) - ; - -r10 - : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text + ".0"))} - ; - -r11 - : expression -> ^(EXPR expression) - | -> EXPR - ; - -r12 - : ID (',' ID)* -> ID+ - ; - -r13 - : type ID (',' ID)* ';' -> ^(type ID+) - ; - -r14 - : expression? statement* type+ - -> ^(EXPR expression? statement* type+) - ; - -r15 - : INT -> INT INT - ; - -r16 - : 'int' ID (',' ID)* -> ^('int' ID)+ - ; - -r17 - : 'for' '(' start=statement ';' expression ';' next=statement ')' statement - -> ^('for' $start expression $next statement) - ; - -r18 - : t='for' -> ^(BLOCK) - ; - -r19 - : t='for' -> ^(BLOCK[$t]) - ; - -r20 - : t='for' -> ^(BLOCK[$t,"FOR"]) - ; - -r21 - : t='for' -> BLOCK - ; - -r22 - : t='for' -> BLOCK[$t] - ; - -r23 - : t='for' -> BLOCK[$t,"FOR"] - ; - -r24 - : r=statement expression -> ^($r expression) - ; - -r25 - : r+=statement (',' r+=statement)+ expression -> ^($r expression) - ; - -r26 - : r+=statement (',' r+=statement)+ -> ^(BLOCK $r+) - ; - -r27 - : r=statement expression -> ^($r ^($r expression)) - ; - -r28 - : ('foo28a'|'foo28b') -> - ; - -r29 - : (r+=statement)* -> ^(BLOCK $r+) - ; - -r30 - : statement* -> ^(BLOCK statement?) - ; - -r31 - : modifier type ID ('=' expression)? ';' - -> {self.flag == 0}? ^(VARDEF ID modifier* type expression?) - -> {self.flag == 1}? ^(VARIABLE ID modifier* type expression?) - -> ^(FIELD ID modifier* type expression?) - ; - -r32[which] - : ID INT -> {which==1}? ID - -> {which==2}? INT - -> // yield nothing as else-clause - ; - -r33 - : modifiers! statement - ; - -r34 - : modifiers! r34a[$modifiers.tree] - //| modifiers! r33b[$modifiers.tree] - ; - -r34a[mod] - : 'class' ID ('extends' sup=type)? - ( 'implements' i+=type (',' i+=type)*)? - '{' statement* '}' - -> ^('class' ID {$mod} ^('extends' $sup)? ^('implements' $i+)? statement* ) - ; - -r35 - : '{' 'extends' (sup=type)? '}' - -> ^('extends' $sup)? - ; - -r36 - : 'if' '(' expression ')' s1=statement - ( 'else' s2=statement -> ^('if' ^(EXPR expression) $s1 $s2) - | -> ^('if' ^(EXPR expression) $s1) - ) - ; - -r37 - : (INT -> INT) ('+' i=INT -> ^('+' $r37 $i) )* - ; - -r38 - : INT ('+'^ INT)* - ; - -r39 - : (primary->primary) // set return tree to just primary - ( '(' arg=expression ')' - -> ^(CALL $r39 $arg) - | '[' ie=expression ']' - -> ^(INDEX $r39 $ie) - | '.' 
p=primary - -> ^(FIELDACCESS $r39 $p) - )* - ; - -r40 - : (INT -> INT) ( ('+' i+=INT)* -> ^('+' $r40 $i*) ) ';' - ; - -r41 - : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';' - ; - -r42 - : ids+=ID (','! ids+=ID)* - ; - -r43 returns [res] - : ids+=ID! (','! ids+=ID!)* {$res = [id.text for id in $ids]} - ; - -r44 - : ids+=ID^ (','! ids+=ID^)* - ; - -r45 - : primary^ - ; - -r46 returns [res] - : ids+=primary! (','! ids+=primary!)* {$res = [id.text for id in $ids]} - ; - -r47 - : ids+=primary (','! ids+=primary)* - ; - -r48 - : ids+=. (','! ids+=.)* - ; - -r49 - : .^ ID - ; - -r50 - : ID - -> ^({CommonTree(CommonToken(type=FLOAT, text="1.0"))} ID) - ; - -/** templates tested: - tokenLabelPropertyRef_tree -*/ -r51 returns [res] - : ID t=ID ID - { $res = $t.tree } - ; - -/** templates tested: - rulePropertyRef_tree -*/ -r52 returns [res] -@after { - $res = $tree -} - : ID - ; - -/** templates tested: - ruleLabelPropertyRef_tree -*/ -r53 returns [res] - : t=primary - { $res = $t.tree } - ; - -/** templates tested: - ruleSetPropertyRef_tree -*/ -r54 returns [res] -@after { - $tree = $t.tree; -} - : ID t=expression ID - ; - -/** backtracking */ -r55 -options { backtrack=true; k=1; } - : (modifier+ INT)=> modifier+ expression - | modifier+ statement - ; - - -/** templates tested: - rewriteTokenRef with len(args)>0 -*/ -r56 - : t=ID* -> ID[$t,'foo'] - ; - -/** templates tested: - rewriteTokenRefRoot with len(args)>0 -*/ -r57 - : t=ID* -> ^(ID[$t,'foo']) - ; - -/** templates tested: - ??? -*/ -r58 - : ({CommonTree(CommonToken(type=FLOAT, text="2.0"))})^ - ; - -/** templates tested: - rewriteTokenListLabelRefRoot -*/ -r59 - : (t+=ID)+ statement -> ^($t statement)+ - ; - -primary - : ID - ; - -expression - : r1 - ; - -statement - : 'fooze' - | 'fooze2' - ; - -modifiers - : modifier+ - ; - -modifier - : 'public' - | 'private' - ; - -type - : 'int' - | 'bool' - ; - -ID : 'a'..'z' + ; -INT : '0'..'9' +; -WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;}; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t042ast.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t042ast.py deleted file mode 100644 index e29c0777..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t042ast.py +++ /dev/null @@ -1,688 +0,0 @@ -import unittest -import textwrap -import antlr3 -import testbase - -class t042ast(testbase.ANTLRTest): -## def lexerClass(self, base): -## class TLexer(base): -## def reportError(self, re): -## # no error recovery yet, just crash! -## raise re - -## return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
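            # each testRxx below parses one input through a single rule of
            # t042ast.g and compares the resulting AST via toStringTree()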
- raise - - return TParser - - - def parse(self, text, method, rArgs=[], **kwargs): - self.compileGrammar() #options='-trace') - - cStream = antlr3.StringStream(text) - self.lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(self.lexer) - self.parser = self.getParser(tStream) - - for attr, val in kwargs.items(): - setattr(self.parser, attr, val) - - return getattr(self.parser, method)(*rArgs) - - - def testR1(self): - r = self.parse("1 + 2", 'r1') - self.failUnlessEqual( - r.tree.toStringTree(), - '(+ 1 2)' - ) - - - def testR2a(self): - r = self.parse("assert 2+3;", 'r2') - self.failUnlessEqual( - r.tree.toStringTree(), - '(assert (+ 2 3))' - ) - - - def testR2b(self): - r = self.parse("assert 2+3 : 5;", 'r2') - self.failUnlessEqual( - r.tree.toStringTree(), - '(assert (+ 2 3) 5)' - ) - - - def testR3a(self): - r = self.parse("if 1 fooze", 'r3') - self.failUnlessEqual( - r.tree.toStringTree(), - '(if 1 fooze)' - ) - - - def testR3b(self): - r = self.parse("if 1 fooze else fooze", 'r3') - self.failUnlessEqual( - r.tree.toStringTree(), - '(if 1 fooze fooze)' - ) - - - def testR4a(self): - r = self.parse("while 2 fooze", 'r4') - self.failUnlessEqual( - r.tree.toStringTree(), - '(while 2 fooze)' - ) - - - def testR5a(self): - r = self.parse("return;", 'r5') - self.failUnlessEqual( - r.tree.toStringTree(), - 'return' - ) - - - def testR5b(self): - r = self.parse("return 2+3;", 'r5') - self.failUnlessEqual( - r.tree.toStringTree(), - '(return (+ 2 3))' - ) - - - def testR6a(self): - r = self.parse("3", 'r6') - self.failUnlessEqual( - r.tree.toStringTree(), - '3' - ) - - - def testR6b(self): - r = self.parse("3 a", 'r6') - self.failUnlessEqual( - r.tree.toStringTree(), - '3 a' - ) - - - def testR7(self): - r = self.parse("3", 'r7') - self.failUnless( - r.tree is None - ) - - - def testR8(self): - r = self.parse("var foo:bool", 'r8') - self.failUnlessEqual( - r.tree.toStringTree(), - '(var bool foo)' - ) - - - def testR9(self): - r = self.parse("int foo;", 'r9') - self.failUnlessEqual( - r.tree.toStringTree(), - '(VARDEF int foo)' - ) - - - def testR10(self): - r = self.parse("10", 'r10') - self.failUnlessEqual( - r.tree.toStringTree(), - '10.0' - ) - - - def testR11a(self): - r = self.parse("1+2", 'r11') - self.failUnlessEqual( - r.tree.toStringTree(), - '(EXPR (+ 1 2))' - ) - - - def testR11b(self): - r = self.parse("", 'r11') - self.failUnlessEqual( - r.tree.toStringTree(), - 'EXPR' - ) - - - def testR12a(self): - r = self.parse("foo", 'r12') - self.failUnlessEqual( - r.tree.toStringTree(), - 'foo' - ) - - - def testR12b(self): - r = self.parse("foo, bar, gnurz", 'r12') - self.failUnlessEqual( - r.tree.toStringTree(), - 'foo bar gnurz' - ) - - - def testR13a(self): - r = self.parse("int foo;", 'r13') - self.failUnlessEqual( - r.tree.toStringTree(), - '(int foo)' - ) - - - def testR13b(self): - r = self.parse("bool foo, bar, gnurz;", 'r13') - self.failUnlessEqual( - r.tree.toStringTree(), - '(bool foo bar gnurz)' - ) - - - def testR14a(self): - r = self.parse("1+2 int", 'r14') - self.failUnlessEqual( - r.tree.toStringTree(), - '(EXPR (+ 1 2) int)' - ) - - - def testR14b(self): - r = self.parse("1+2 int bool", 'r14') - self.failUnlessEqual( - r.tree.toStringTree(), - '(EXPR (+ 1 2) int bool)' - ) - - - def testR14c(self): - r = self.parse("int bool", 'r14') - self.failUnlessEqual( - r.tree.toStringTree(), - '(EXPR int bool)' - ) - - - def testR14d(self): - r = self.parse("fooze fooze int bool", 'r14') - self.failUnlessEqual( - r.tree.toStringTree(), - '(EXPR fooze fooze int 
bool)' - ) - - - def testR14e(self): - r = self.parse("7+9 fooze fooze int bool", 'r14') - self.failUnlessEqual( - r.tree.toStringTree(), - '(EXPR (+ 7 9) fooze fooze int bool)' - ) - - - def testR15(self): - r = self.parse("7", 'r15') - self.failUnlessEqual( - r.tree.toStringTree(), - '7 7' - ) - - - def testR16a(self): - r = self.parse("int foo", 'r16') - self.failUnlessEqual( - r.tree.toStringTree(), - '(int foo)' - ) - - - def testR16b(self): - r = self.parse("int foo, bar, gnurz", 'r16') - - self.failUnlessEqual( - r.tree.toStringTree(), - '(int foo) (int bar) (int gnurz)' - ) - - - def testR17a(self): - r = self.parse("for ( fooze ; 1 + 2 ; fooze ) fooze", 'r17') - self.failUnlessEqual( - r.tree.toStringTree(), - '(for fooze (+ 1 2) fooze fooze)' - ) - - - def testR18a(self): - r = self.parse("for", 'r18') - self.failUnlessEqual( - r.tree.toStringTree(), - 'BLOCK' - ) - - - def testR19a(self): - r = self.parse("for", 'r19') - self.failUnlessEqual( - r.tree.toStringTree(), - 'for' - ) - - - def testR20a(self): - r = self.parse("for", 'r20') - self.failUnlessEqual( - r.tree.toStringTree(), - 'FOR' - ) - - - def testR21a(self): - r = self.parse("for", 'r21') - self.failUnlessEqual( - r.tree.toStringTree(), - 'BLOCK' - ) - - - def testR22a(self): - r = self.parse("for", 'r22') - self.failUnlessEqual( - r.tree.toStringTree(), - 'for' - ) - - - def testR23a(self): - r = self.parse("for", 'r23') - self.failUnlessEqual( - r.tree.toStringTree(), - 'FOR' - ) - - - def testR24a(self): - r = self.parse("fooze 1 + 2", 'r24') - self.failUnlessEqual( - r.tree.toStringTree(), - '(fooze (+ 1 2))' - ) - - - def testR25a(self): - r = self.parse("fooze, fooze2 1 + 2", 'r25') - self.failUnlessEqual( - r.tree.toStringTree(), - '(fooze (+ 1 2))' - ) - - - def testR26a(self): - r = self.parse("fooze, fooze2", 'r26') - self.failUnlessEqual( - r.tree.toStringTree(), - '(BLOCK fooze fooze2)' - ) - - - def testR27a(self): - r = self.parse("fooze 1 + 2", 'r27') - self.failUnlessEqual( - r.tree.toStringTree(), - '(fooze (fooze (+ 1 2)))' - ) - - - def testR28(self): - r = self.parse("foo28a", 'r28') - self.failUnless( - r.tree is None - ) - - - def testR29(self): - try: - r = self.parse("", 'r29') - self.fail() - except RuntimeError: - pass - - -# FIXME: broken upstream? 
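# r30 rewrites statement* into ^(BLOCK statement?), which can carry at most
# one statement; the disabled test fed it two and expected a RuntimeError
# that apparently never surfaced, hence the FIXME above.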
-## def testR30(self): -## try: -## r = self.parse("fooze fooze", 'r30') -## self.fail(r.tree.toStringTree()) -## except RuntimeError: -## pass - - - def testR31a(self): - r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=0) - self.failUnlessEqual( - r.tree.toStringTree(), - '(VARDEF gnurz public int (+ 1 2))' - ) - - - def testR31b(self): - r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=1) - self.failUnlessEqual( - r.tree.toStringTree(), - '(VARIABLE gnurz public int (+ 1 2))' - ) - - - def testR31c(self): - r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=2) - self.failUnlessEqual( - r.tree.toStringTree(), - '(FIELD gnurz public int (+ 1 2))' - ) - - - def testR32a(self): - r = self.parse("gnurz 32", 'r32', [1], flag=2) - self.failUnlessEqual( - r.tree.toStringTree(), - 'gnurz' - ) - - - def testR32b(self): - r = self.parse("gnurz 32", 'r32', [2], flag=2) - self.failUnlessEqual( - r.tree.toStringTree(), - '32' - ) - - - def testR32c(self): - r = self.parse("gnurz 32", 'r32', [3], flag=2) - self.failUnless( - r.tree is None - ) - - - def testR33a(self): - r = self.parse("public private fooze", 'r33') - self.failUnlessEqual( - r.tree.toStringTree(), - 'fooze' - ) - - - def testR34a(self): - r = self.parse("public class gnurz { fooze fooze2 }", 'r34') - self.failUnlessEqual( - r.tree.toStringTree(), - '(class gnurz public fooze fooze2)' - ) - - - def testR34b(self): - r = self.parse("public class gnurz extends bool implements int, bool { fooze fooze2 }", 'r34') - self.failUnlessEqual( - r.tree.toStringTree(), - '(class gnurz public (extends bool) (implements int bool) fooze fooze2)' - ) - - - def testR35(self): - try: - r = self.parse("{ extends }", 'r35') - self.fail() - - except RuntimeError: - pass - - - def testR36a(self): - r = self.parse("if ( 1 + 2 ) fooze", 'r36') - self.failUnlessEqual( - r.tree.toStringTree(), - '(if (EXPR (+ 1 2)) fooze)' - ) - - - def testR36b(self): - r = self.parse("if ( 1 + 2 ) fooze else fooze2", 'r36') - self.failUnlessEqual( - r.tree.toStringTree(), - '(if (EXPR (+ 1 2)) fooze fooze2)' - ) - - - def testR37(self): - r = self.parse("1 + 2 + 3", 'r37') - self.failUnlessEqual( - r.tree.toStringTree(), - '(+ (+ 1 2) 3)' - ) - - - def testR38(self): - r = self.parse("1 + 2 + 3", 'r38') - self.failUnlessEqual( - r.tree.toStringTree(), - '(+ (+ 1 2) 3)' - ) - - - def testR39a(self): - r = self.parse("gnurz[1]", 'r39') - self.failUnlessEqual( - r.tree.toStringTree(), - '(INDEX gnurz 1)' - ) - - - def testR39b(self): - r = self.parse("gnurz(2)", 'r39') - self.failUnlessEqual( - r.tree.toStringTree(), - '(CALL gnurz 2)' - ) - - - def testR39c(self): - r = self.parse("gnurz.gnarz", 'r39') - self.failUnlessEqual( - r.tree.toStringTree(), - '(FIELDACCESS gnurz gnarz)' - ) - - - def testR39d(self): - r = self.parse("gnurz.gnarz.gnorz", 'r39') - self.failUnlessEqual( - r.tree.toStringTree(), - '(FIELDACCESS (FIELDACCESS gnurz gnarz) gnorz)' - ) - - - def testR40(self): - r = self.parse("1 + 2 + 3;", 'r40') - self.failUnlessEqual( - r.tree.toStringTree(), - '(+ 1 2 3)' - ) - - - def testR41(self): - r = self.parse("1 + 2 + 3;", 'r41') - self.failUnlessEqual( - r.tree.toStringTree(), - '(3 (2 1))' - ) - - - def testR42(self): - r = self.parse("gnurz, gnarz, gnorz", 'r42') - self.failUnlessEqual( - r.tree.toStringTree(), - 'gnurz gnarz gnorz' - ) - - - def testR43(self): - r = self.parse("gnurz, gnarz, gnorz", 'r43') - self.failUnless( - r.tree is None - ) - self.failUnlessEqual( - r.res, - ['gnurz', 'gnarz', 'gnorz'] - ) - - - def testR44(self): - r = 
self.parse("gnurz, gnarz, gnorz", 'r44') - self.failUnlessEqual( - r.tree.toStringTree(), - '(gnorz (gnarz gnurz))' - ) - - - def testR45(self): - r = self.parse("gnurz", 'r45') - self.failUnlessEqual( - r.tree.toStringTree(), - 'gnurz' - ) - - - def testR46(self): - r = self.parse("gnurz, gnarz, gnorz", 'r46') - self.failUnless( - r.tree is None - ) - self.failUnlessEqual( - r.res, - ['gnurz', 'gnarz', 'gnorz'] - ) - - - def testR47(self): - r = self.parse("gnurz, gnarz, gnorz", 'r47') - self.failUnlessEqual( - r.tree.toStringTree(), - 'gnurz gnarz gnorz' - ) - - - def testR48(self): - r = self.parse("gnurz, gnarz, gnorz", 'r48') - self.failUnlessEqual( - r.tree.toStringTree(), - 'gnurz gnarz gnorz' - ) - - - def testR49(self): - r = self.parse("gnurz gnorz", 'r49') - self.failUnlessEqual( - r.tree.toStringTree(), - '(gnurz gnorz)' - ) - - - def testR50(self): - r = self.parse("gnurz", 'r50') - self.failUnlessEqual( - r.tree.toStringTree(), - '(1.0 gnurz)' - ) - - - def testR51(self): - r = self.parse("gnurza gnurzb gnurzc", 'r51') - self.failUnlessEqual( - r.res.toStringTree(), - 'gnurzb' - ) - - - def testR52(self): - r = self.parse("gnurz", 'r52') - self.failUnlessEqual( - r.res.toStringTree(), - 'gnurz' - ) - - - def testR53(self): - r = self.parse("gnurz", 'r53') - self.failUnlessEqual( - r.res.toStringTree(), - 'gnurz' - ) - - - def testR54(self): - r = self.parse("gnurza 1 + 2 gnurzb", 'r54') - self.failUnlessEqual( - r.tree.toStringTree(), - '(+ 1 2)' - ) - - - def testR55a(self): - r = self.parse("public private 1 + 2", 'r55') - self.failUnlessEqual( - r.tree.toStringTree(), - 'public private (+ 1 2)' - ) - - - def testR55b(self): - r = self.parse("public fooze", 'r55') - self.failUnlessEqual( - r.tree.toStringTree(), - 'public fooze' - ) - - - def testR56(self): - r = self.parse("a b c d", 'r56') - self.failUnlessEqual( - r.tree.toStringTree(), - 'foo' - ) - - - def testR57(self): - r = self.parse("a b c d", 'r57') - self.failUnlessEqual( - r.tree.toStringTree(), - 'foo' - ) - - - def testR59(self): - r = self.parse("a b c fooze", 'r59') - self.failUnlessEqual( - r.tree.toStringTree(), - '(a fooze) (b fooze) (c fooze)' - ) - - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t043synpred.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t043synpred.g deleted file mode 100644 index 7294f23f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t043synpred.g +++ /dev/null @@ -1,14 +0,0 @@ -grammar t043synpred; -options { - language = Python; -} - -a: ((s+ P)=> s+ b)? E; -b: P 'foo'; - -s: S; - - -S: ' '; -P: '+'; -E: '>'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t043synpred.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t043synpred.py deleted file mode 100644 index 9246de27..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t043synpred.py +++ /dev/null @@ -1,39 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t043synpred(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
- raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' +foo>') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.a() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t044trace.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t044trace.g deleted file mode 100644 index 0b7aa71b..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t044trace.g +++ /dev/null @@ -1,20 +0,0 @@ -grammar t044trace; -options { - language = Python; -} - -@init { - self._stack = None -} - -a: '<' ((INT '+')=>b|c) '>'; -b: c ('+' c)*; -c: INT - { - if self._stack is None: - self._stack = self.getRuleInvocationStack() - } - ; - -INT: ('0'..'9')+; -WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;}; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t044trace.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t044trace.py deleted file mode 100644 index 13c9b761..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t044trace.py +++ /dev/null @@ -1,95 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class T(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar(options='-trace') - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self.traces = [] - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self.traces = [] - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
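            # compiled with -trace, the parser calls traceIn/traceOut on
            # every rule; the overrides above record them in self.traces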
- raise - - def getRuleInvocationStack(self): - return self._getRuleInvocationStack(base.__module__) - - return TParser - - - def testTrace(self): - cStream = antlr3.StringStream('< 1 + 2 + 3 >') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.a() - - self.failUnlessEqual( - lexer.traces, - [ '>T__7', 'WS', 'INT', 'WS', 'T__6', 'WS', 'INT', 'WS', 'T__6', 'WS', 'INT', 'WS', 'T__8', 'a', '>synpred1_t044trace_fragment', 'b', '>c', - 'c', 'c', '') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.a() - - self.failUnlessEqual( - parser._stack, - ['a', 'b', 'c'] - ) - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t045dfabug.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t045dfabug.g deleted file mode 100644 index 4ad895bf..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t045dfabug.g +++ /dev/null @@ -1,32 +0,0 @@ -grammar t045dfabug; -options { - language = Python; - output = AST; -} - - -// this rule used to generate an infinite loop in DFA.predict -r -options { backtrack=true; } - : (modifier+ INT)=> modifier+ expression - | modifier+ statement - ; - -expression - : INT '+' INT - ; - -statement - : 'fooze' - | 'fooze2' - ; - -modifier - : 'public' - | 'private' - ; - -ID : 'a'..'z' + ; -INT : '0'..'9' +; -WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;}; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t045dfabug.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t045dfabug.py deleted file mode 100644 index 76be15e2..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t045dfabug.py +++ /dev/null @@ -1,21 +0,0 @@ -import unittest -import textwrap -import antlr3 -import testbase - -class T(testbase.ANTLRTest): - - def testbug(self): - self.compileGrammar() - - cStream = antlr3.StringStream("public fooze") - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.r() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t046rewrite.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t046rewrite.g deleted file mode 100644 index e8dc1dcc..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t046rewrite.g +++ /dev/null @@ -1,54 +0,0 @@ -grammar t046rewrite; -options { - language=Python; -} - -program -@init { - start = self.input.LT(1) -} - : method+ - { - self.input.insertBefore(start,"public class Wrapper {\n") - self.input.insertAfter($method.stop, "\n}\n") - } - ; - -method - : m='method' ID '(' ')' body - {self.input.replace($m, "public void");} - ; - -body -scope { - decls -} -@init { - $body::decls = set() -} - : lcurly='{' stat* '}' - { - for it in $body::decls: - self.input.insertAfter($lcurly, "\nint "+it+";") - } - ; - -stat: ID '=' expr ';' {$body::decls.add($ID.text);} - ; - -expr: mul ('+' mul)* - ; - -mul : atom ('*' atom)* - ; - -atom: ID - | INT - ; - -ID : ('a'..'z'|'A'..'Z')+ ; - -INT : ('0'..'9')+ ; - -WS : (' '|'\t'|'\n')+ {$channel=HIDDEN;} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t046rewrite.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t046rewrite.py deleted file mode 100644 index a61ede44..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t046rewrite.py +++ /dev/null @@ -1,55 +0,0 @@ -import unittest -import textwrap -import antlr3 -import 
testbase - -class T(testbase.ANTLRTest): - def testRewrite(self): - self.compileGrammar() - - input = textwrap.dedent( - '''\ - method foo() { - i = 3; - k = i; - i = k*4; - } - - method bar() { - j = i*2; - } - ''') - - cStream = antlr3.StringStream(input) - lexer = self.getLexer(cStream) - tStream = antlr3.TokenRewriteStream(lexer) - parser = self.getParser(tStream) - parser.program() - - expectedOutput = textwrap.dedent('''\ - public class Wrapper { - public void foo() { - int k; - int i; - i = 3; - k = i; - i = k*4; - } - - public void bar() { - int j; - j = i*2; - } - } - - ''') - - self.failUnlessEqual( - str(tStream), - expectedOutput - ) - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t047treeparser.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t047treeparser.g deleted file mode 100644 index 7e50ac40..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t047treeparser.g +++ /dev/null @@ -1,113 +0,0 @@ -grammar t047treeparser; -options { - language=Python; - output=AST; -} - -tokens { - VAR_DEF; - ARG_DEF; - FUNC_HDR; - FUNC_DECL; - FUNC_DEF; - BLOCK; -} - -program - : declaration+ - ; - -declaration - : variable - | functionHeader ';' -> ^(FUNC_DECL functionHeader) - | functionHeader block -> ^(FUNC_DEF functionHeader block) - ; - -variable - : type declarator ';' -> ^(VAR_DEF type declarator) - ; - -declarator - : ID - ; - -functionHeader - : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' - -> ^(FUNC_HDR type ID formalParameter+) - ; - -formalParameter - : type declarator -> ^(ARG_DEF type declarator) - ; - -type - : 'int' - | 'char' - | 'void' - | ID - ; - -block - : lc='{' - variable* - stat* - '}' - -> ^(BLOCK[$lc,"BLOCK"] variable* stat*) - ; - -stat: forStat - | expr ';'! - | block - | assignStat ';'! - | ';'! - ; - -forStat - : 'for' '(' start=assignStat ';' expr ';' next=assignStat ')' block - -> ^('for' $start expr $next block) - ; - -assignStat - : ID EQ expr -> ^(EQ ID expr) - ; - -expr: condExpr - ; - -condExpr - : aexpr ( ('=='^ | '<'^) aexpr )? - ; - -aexpr - : atom ( '+'^ atom )* - ; - -atom - : ID - | INT - | '(' expr ')' -> expr - ; - -FOR : 'for' ; -INT_TYPE : 'int' ; -CHAR: 'char'; -VOID: 'void'; - -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - -INT : ('0'..'9')+ - ; - -EQ : '=' ; -EQEQ : '==' ; -LT : '<' ; -PLUS : '+' ; - -WS : ( ' ' - | '\t' - | '\r' - | '\n' - )+ - { $channel=HIDDEN } - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t047treeparser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t047treeparser.py deleted file mode 100644 index 1c0cb05d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t047treeparser.py +++ /dev/null @@ -1,122 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase - -class T(testbase.ANTLRTest): - def walkerClass(self, base): - class TWalker(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self.traces = [] - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
- raise - - return TWalker - - - def setUp(self): - self.compileGrammar() - self.compileGrammar('t047treeparserWalker.g', options='-trace') - - - def testWalker(self): - input = textwrap.dedent( - '''\ - char c; - int x; - - void bar(int x); - - int foo(int y, char d) { - int i; - for (i=0; i<3; i=i+1) { - x=3; - y=5; - } - } - ''') - - cStream = antlr3.StringStream(input) - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - r = parser.program() - - self.failUnlessEqual( - r.tree.toStringTree(), - "(VAR_DEF char c) (VAR_DEF int x) (FUNC_DECL (FUNC_HDR void bar (ARG_DEF int x))) (FUNC_DEF (FUNC_HDR int foo (ARG_DEF int y) (ARG_DEF char d)) (BLOCK (VAR_DEF int i) (for (= i 0) (< i 3) (= i (+ i 1)) (BLOCK (= x 3) (= y 5)))))" - ) - - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = self.getWalker(nodes) - walker.program() - - # FIXME: need to crosscheck with Java target (compile walker with - # -trace option), if this is the real list. For now I'm happy that - # it does not crash ;) - self.failUnlessEqual( - walker.traces, - [ '>program', '>declaration', '>variable', '>type', 'declarator', 'declaration', '>variable', '>type', 'declarator', - 'declaration', - '>functionHeader', '>type', 'formalParameter', - '>type', 'declarator', 'declaration', '>functionHeader', '>type', 'formalParameter', '>type', 'declarator', - 'formalParameter', '>type', - 'declarator', 'block', '>variable', '>type', 'declarator', 'stat', '>forStat', - '>expr', '>expr', '>atom', 'expr', - '>expr', '>atom', 'expr', '>atom', 'expr', '>expr', '>expr', '>atom', 'expr', '>atom', 'block', '>stat', '>expr', '>expr', '>atom', 'stat', '>expr', '>expr', '>atom', ' within boundaries of " - "previous " - ) - - def testInsertThenReplaceSameIndex(self): - tokens = self._parse("abc") - tokens.insertBefore(0, "0") - tokens.replace(0, "x") # supercedes insert at 0 - - result = tokens.toString() - expecting = "0xbc" - self.failUnlessEqual(result, expecting) - - - def test2InsertMiddleIndex(self): - tokens = self._parse("abc") - tokens.insertBefore(1, "x") - tokens.insertBefore(1, "y") - - result = tokens.toString() - expecting = "ayxbc" - self.failUnlessEqual(result, expecting) - - - def test2InsertThenReplaceIndex0(self): - tokens = self._parse("abc") - tokens.insertBefore(0, "x") - tokens.insertBefore(0, "y") - tokens.replace(0, "z") - - result = tokens.toString() - expecting = "yxzbc" - self.failUnlessEqual(result, expecting) - - - def testReplaceThenInsertBeforeLastIndex(self): - tokens = self._parse("abc") - tokens.replace(2, "x") - tokens.insertBefore(2, "y") - - result = tokens.toString() - expecting = "abyx" - self.failUnlessEqual(result, expecting) - - - def testInsertThenReplaceLastIndex(self): - tokens = self._parse("abc") - tokens.insertBefore(2, "y") - tokens.replace(2, "x") - - result = tokens.toString() - expecting = "abyx" - self.failUnlessEqual(result, expecting) - - - def testReplaceThenInsertAfterLastIndex(self): - tokens = self._parse("abc") - tokens.replace(2, "x") - tokens.insertAfter(2, "y") - - result = tokens.toString() - expecting = "abxy" - self.failUnlessEqual(result, expecting) - - - def testReplaceRangeThenInsertAtLeftEdge(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "x") - tokens.insertBefore(2, "y") - - result = tokens.toString() - expecting = "abyxba" - self.failUnlessEqual(result, expecting) - - - def testReplaceRangeThenInsertAtRightEdge(self): - tokens = self._parse("abcccba") - 
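        # replacing tokens 2..4 ("ccc") first, then inserting inside that
        # range, must make toString() below raise ValueError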
tokens.replace(2, 4, "x") - tokens.insertBefore(4, "y") # no effect; within range of a replace - - try: - tokens.toString() - self.fail() - except ValueError, exc: - self.failUnlessEqual( - str(exc), - "insert op within boundaries of " - "previous ") - - - def testReplaceRangeThenInsertAfterRightEdge(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "x") - tokens.insertAfter(4, "y") - - result = tokens.toString() - expecting = "abxyba" - self.failUnlessEqual(result, expecting) - - - def testReplaceAll(self): - tokens = self._parse("abcccba") - tokens.replace(0, 6, "x") - - result = tokens.toString() - expecting = "x" - self.failUnlessEqual(result, expecting) - - - def testReplaceSubsetThenFetch(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "xyz") - - result = tokens.toString(0, 6) - expecting = "abxyzba" - self.failUnlessEqual(result, expecting) - - - def testReplaceThenReplaceSuperset(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "xyz") - tokens.replace(3, 5, "foo") # overlaps, error - - try: - tokens.toString() - self.fail() - except ValueError, exc: - self.failUnlessEqual( - str(exc), - "replace op boundaries of overlap " - "with previous ") - - - def testReplaceThenReplaceLowerIndexedSuperset(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "xyz") - tokens.replace(1, 3, "foo") # overlap, error - - try: - tokens.toString() - self.fail() - except ValueError, exc: - self.failUnlessEqual( - str(exc), - "replace op boundaries of overlap " - "with previous ") - - - def testReplaceSingleMiddleThenOverlappingSuperset(self): - tokens = self._parse("abcba") - tokens.replace(2, 2, "xyz") - tokens.replace(0, 3, "foo") - - result = tokens.toString() - expecting = "fooa" - self.failUnlessEqual(result, expecting) - - - def testCombineInserts(self): - tokens = self._parse("abc") - tokens.insertBefore(0, "x") - tokens.insertBefore(0, "y") - result = tokens.toString() - expecting = "yxabc" - self.failUnlessEqual(expecting, result) - - - def testCombine3Inserts(self): - tokens = self._parse("abc") - tokens.insertBefore(1, "x") - tokens.insertBefore(0, "y") - tokens.insertBefore(1, "z") - result = tokens.toString() - expecting = "yazxbc" - self.failUnlessEqual(expecting, result) - - - def testCombineInsertOnLeftWithReplace(self): - tokens = self._parse("abc") - tokens.replace(0, 2, "foo") - tokens.insertBefore(0, "z") # combine with left edge of rewrite - result = tokens.toString() - expecting = "zfoo" - self.failUnlessEqual(expecting, result) - - - def testCombineInsertOnLeftWithDelete(self): - tokens = self._parse("abc") - tokens.delete(0, 2) - tokens.insertBefore(0, "z") # combine with left edge of rewrite - result = tokens.toString() - expecting = "z" # make sure combo is not znull - self.failUnlessEqual(expecting, result) - - - def testDisjointInserts(self): - tokens = self._parse("abc") - tokens.insertBefore(1, "x") - tokens.insertBefore(2, "y") - tokens.insertBefore(0, "z") - result = tokens.toString() - expecting = "zaxbyc" - self.failUnlessEqual(expecting, result) - - - def testOverlappingReplace(self): - tokens = self._parse("abcc") - tokens.replace(1, 2, "foo") - tokens.replace(0, 3, "bar") # wipes prior nested replace - result = tokens.toString() - expecting = "bar" - self.failUnlessEqual(expecting, result) - - - def testOverlappingReplace2(self): - tokens = self._parse("abcc") - tokens.replace(0, 3, "bar") - tokens.replace(1, 2, "foo") # cannot split earlier replace - - try: - tokens.toString() - self.fail() - except ValueError, 
exc: - self.failUnlessEqual( - str(exc), - "replace op boundaries of overlap " - "with previous ") - - - def testOverlappingReplace3(self): - tokens = self._parse("abcc") - tokens.replace(1, 2, "foo") - tokens.replace(0, 2, "bar") # wipes prior nested replace - result = tokens.toString() - expecting = "barc" - self.failUnlessEqual(expecting, result) - - - def testOverlappingReplace4(self): - tokens = self._parse("abcc") - tokens.replace(1, 2, "foo") - tokens.replace(1, 3, "bar") # wipes prior nested replace - result = tokens.toString() - expecting = "abar" - self.failUnlessEqual(expecting, result) - - - def testDropIdenticalReplace(self): - tokens = self._parse("abcc") - tokens.replace(1, 2, "foo") - tokens.replace(1, 2, "foo") # drop previous, identical - result = tokens.toString() - expecting = "afooc" - self.failUnlessEqual(expecting, result) - - - def testDropPrevCoveredInsert(self): - tokens = self._parse("abc") - tokens.insertBefore(1, "foo") - tokens.replace(1, 2, "foo") # kill prev insert - result = tokens.toString() - expecting = "afoofoo" - self.failUnlessEqual(expecting, result) - - - def testLeaveAloneDisjointInsert(self): - tokens = self._parse("abcc") - tokens.insertBefore(1, "x") - tokens.replace(2, 3, "foo") - result = tokens.toString() - expecting = "axbfoo" - self.failUnlessEqual(expecting, result) - - - def testLeaveAloneDisjointInsert2(self): - tokens = self._parse("abcc") - tokens.replace(2, 3, "foo") - tokens.insertBefore(1, "x") - result = tokens.toString() - expecting = "axbfoo" - self.failUnlessEqual(expecting, result) - - - def testInsertBeforeTokenThenDeleteThatToken(self): - tokens = self._parse("abc") - tokens.insertBefore(2, "y") - tokens.delete(2) - result = tokens.toString() - expecting = "aby" - self.failUnlessEqual(expecting, result) - - -class T2(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar('t048rewrite2.g') - - - def _parse(self, input): - cStream = antlr3.StringStream(input) - lexer = self.getLexer(cStream) - tStream = antlr3.TokenRewriteStream(lexer) - tStream.fillBuffer() - - return tStream - - - def testToStringStartStop(self): - # Tokens: 0123456789 - # Input: x = 3 * 0 - tokens = self._parse("x = 3 * 0;") - tokens.replace(4, 8, "0") # replace 3 * 0 with 0 - - result = tokens.toOriginalString() - expecting = "x = 3 * 0;" - self.failUnlessEqual(expecting, result) - - result = tokens.toString() - expecting = "x = 0;" - self.failUnlessEqual(expecting, result) - - result = tokens.toString(0, 9) - expecting = "x = 0;" - self.failUnlessEqual(expecting, result) - - result = tokens.toString(4, 8) - expecting = "0" - self.failUnlessEqual(expecting, result) - - - def testToStringStartStop2(self): - # Tokens: 012345678901234567 - # Input: x = 3 * 0 + 2 * 0 - tokens = self._parse("x = 3 * 0 + 2 * 0;") - - result = tokens.toOriginalString() - expecting = "x = 3 * 0 + 2 * 0;" - self.failUnlessEqual(expecting, result) - - tokens.replace(4, 8, "0") # replace 3 * 0 with 0 - result = tokens.toString() - expecting = "x = 0 + 2 * 0;" - self.failUnlessEqual(expecting, result) - - result = tokens.toString(0, 17) - expecting = "x = 0 + 2 * 0;" - self.failUnlessEqual(expecting, result) - - result = tokens.toString(4, 8) - expecting = "0" - self.failUnlessEqual(expecting, result) - - result = tokens.toString(0, 8) - expecting = "x = 0" - self.failUnlessEqual(expecting, result) - - result = tokens.toString(12, 16) - expecting = "2 * 0" - self.failUnlessEqual(expecting, result) - - tokens.insertAfter(17, "// comment") - result = tokens.toString(12, 18) - 
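        # tokens 12..16 are "2 * 0" and token 17 is ";"; the text inserted
        # after token 17 is picked up as well, hence "2 * 0;// comment"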
expecting = "2 * 0;// comment" - self.failUnlessEqual(expecting, result) - - result = tokens.toString(0, 8) # try again after insert at end - expecting = "x = 0" - self.failUnlessEqual(expecting, result) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t048rewrite2.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t048rewrite2.g deleted file mode 100644 index f98251cf..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t048rewrite2.g +++ /dev/null @@ -1,12 +0,0 @@ -lexer grammar t048rewrite2; -options { - language=Python; -} - -ID : 'a'..'z'+; -INT : '0'..'9'+; -SEMI : ';'; -PLUS : '+'; -MUL : '*'; -ASSIGN : '='; -WS : ' '+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t049treeparser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t049treeparser.py deleted file mode 100644 index 9c7157d1..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t049treeparser.py +++ /dev/null @@ -1,499 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase - -class T(testbase.ANTLRTest): - def walkerClass(self, base): - class TWalker(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TWalker - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - getattr(walker, treeEntry)() - - return walker._output - - - def testFlatList(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=AST; - } - a : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python; - ASTLabelType=CommonTree; - } - a : ID INT - {self.capture("\%s, \%s" \% ($ID, $INT))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("abc, 34", found) - - - - def testSimpleTree(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python; - ASTLabelType=CommonTree; - } - a : ^(ID INT) - {self.capture(str($ID)+", "+str($INT))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("abc, 34", found) - - - def testFlatVsTreeDecision(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=AST; - } - a : b c ; - b : ID INT -> ^(ID INT); - c : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - 
r'''tree grammar TP; - options { - language=Python; - ASTLabelType=CommonTree; - } - a : b b ; - b : ID INT {self.capture(str($ID)+" "+str($INT)+'\n')} - | ^(ID INT) {self.capture("^("+str($ID)+" "+str($INT)+')');} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 1 b 2" - ) - self.failUnlessEqual("^(a 1)b 2\n", found) - - - def testFlatVsTreeDecision2(self): - grammar = textwrap.dedent( - r"""grammar T; - options { - language=Python; - output=AST; - } - a : b c ; - b : ID INT+ -> ^(ID INT+); - c : ID INT+; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python; - ASTLabelType=CommonTree; - } - a : b b ; - b : ID INT+ {self.capture(str($ID)+" "+str($INT)+"\n")} - | ^(x=ID (y=INT)+) {self.capture("^("+str($x)+' '+str($y)+')')} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 1 2 3 b 4 5" - ) - self.failUnlessEqual("^(a 3)b 5\n", found) - - - def testCyclicDFALookahead(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=AST; - } - a : ID INT+ PERIOD; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python; - ASTLabelType=CommonTree; - } - a : ID INT+ PERIOD {self.capture("alt 1")} - | ID INT+ SEMI {self.capture("alt 2")} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 1 2 3." - ) - self.failUnlessEqual("alt 1", found) - - -## def testTemplateOutput(self): -## String grammar = -## "grammar T;\n" + -## "options {output=AST;}\n" + -## "a : ID INT;\n" + -## "ID : 'a'..'z'+ ;\n" + -## "INT : '0'..'9'+;\n" + -## "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n"; - -## String treeGrammar = -## "tree grammar TP;\n" + -## "options {output=template; ASTLabelType=CommonTree;}\n" + -## "s : a {System.out.println($a.st);};\n" + -## "a : ID INT -> {new StringTemplate($INT.text)}\n" + -## " ;\n"; - -## String found = execTreeParser("T.g", grammar, "TParser", "TP.g", -## treeGrammar, "TP", "TLexer", "a", "s", "abc 34"); -## assertEquals("34\n", found); -## } - - - def testNullableChildList(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=AST; - } - a : ID INT? -> ^(ID INT?); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python; - ASTLabelType=CommonTree; - } - a : ^(ID INT?) - {self.capture(str($ID))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc" - ) - self.failUnlessEqual("abc", found) - - - def testNullableChildList2(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=AST; - } - a : ID INT? SEMI -> ^(ID INT?) SEMI ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python; - ASTLabelType=CommonTree; - } - a : ^(ID INT?) SEMI - {self.capture(str($ID))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc;" - ) - self.failUnlessEqual("abc", found) - - - def testNullableChildList3(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=AST; - } - a : x=ID INT? (y=ID)? 
SEMI -> ^($x INT? $y?) SEMI ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python; - ASTLabelType=CommonTree; - } - a : ^(ID INT? b) SEMI - {self.capture(str($ID)+", "+str($b.text))} - ; - b : ID? ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc def;" - ) - self.failUnlessEqual("abc, def", found) - - - def testActionsAfterRoot(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=AST; - } - a : x=ID INT? SEMI -> ^($x INT?) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python; - ASTLabelType=CommonTree; - } - a @init {x=0} : ^(ID {x=1} {x=2} INT?) - {self.capture(str($ID)+", "+str(x))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc;" - ) - self.failUnless("abc, 2\n", found) - - - def testWildcardLookahead(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID '+'^ INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;} - a : ^('+' . INT) { self.capture("alt 1") } - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a + 2") - self.assertEquals("alt 1", found) - - - def testWildcardLookahead2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID '+'^ INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;} - a : ^('+' . INT) { self.capture("alt 1") } - | ^('+' . .) { self.capture("alt 2") } - ; - ''') - - # AMBIG upon '+' DOWN INT UP etc.. but so what. - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a + 2") - self.assertEquals("alt 1", found) - - - def testWildcardLookahead3(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID '+'^ INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;} - a : ^('+' ID INT) { self.capture("alt 1") } - | ^('+' . .) { self.capture("alt 2") } - ; - ''') - - # AMBIG upon '+' DOWN INT UP etc.. but so what. - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a + 2") - self.assertEquals("alt 1", found) - - - def testWildcardPlusLookahead(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID '+'^ INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;} - a : ^('+' INT INT ) { self.capture("alt 1") } - | ^('+' .+) { self.capture("alt 2") } - ; - ''') - - # AMBIG upon '+' DOWN INT UP etc.. but so what. 
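        # (on the AMBIG note above: tree patterns are matched against the
        # node stream's flattened form, in which every subtree is bracketed
        # by imaginary DOWN/UP nodes, so ^('+' a 2) streams as + DOWN a 2 UP.
        # A minimal hand-built sketch using the runtime API, token types
        # chosen arbitrarily:
        #
        #   tok = antlr3.CommonToken(type=5)
        #   tok.text = '+'
        #   root = antlr3.tree.CommonTree(tok)
        #   for ttype, text in ((6, 'a'), (7, '2')):
        #       t = antlr3.CommonToken(type=ttype)
        #       t.text = text
        #       root.addChild(antlr3.tree.CommonTree(t))
        #   root.toStringTree()   # -> '(+ a 2)'
        #
        # both ^('+' INT INT) and ^('+' .+) can start that sequence, which
        # is why the decision is ambiguous on lookahead alone)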
- - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a + 2") - self.assertEquals("alt 2", found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t050decorate.g b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t050decorate.g deleted file mode 100644 index a8b17d1d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t050decorate.g +++ /dev/null @@ -1,29 +0,0 @@ -grammar t050decorate; -options { - language = Python; -} - -@header { - def logme(func): - def decorated(self, *args, **kwargs): - self.events.append('before') - try: - return func(self, *args, **kwargs) - finally: - self.events.append('after') - - return decorated -} - -@parser::init { -self.events = [] -} - -document -@decorate { - @logme -} - : IDENTIFIER - ; - -IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t050decorate.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t050decorate.py deleted file mode 100644 index bb6b85e1..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t050decorate.py +++ /dev/null @@ -1,21 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t013parser(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.document() - - assert parser.events == ['before', 'after'] - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t051treeRewriteAST.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t051treeRewriteAST.py deleted file mode 100644 index 39253b4a..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t051treeRewriteAST.py +++ /dev/null @@ -1,1593 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase - -class T(testbase.ANTLRTest): - def walkerClass(self, base): - class TWalker(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - self.buf = "" - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
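                # (same convention as the other walkers in these tests:
                # re-raise so a recognition error aborts the test run)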
- raise - - return TWalker - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - r = getattr(walker, treeEntry)() - - if r.tree is not None: - return r.tree.toStringTree() - - return "" - - - def testFlatList(self): - grammar = textwrap.dedent( - r''' - grammar T1; - options { - language=Python; - output=AST; - } - a : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP1; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T1; - } - - a : ID INT -> INT ID; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("34 abc", found) - - - def testSimpleTree(self): - grammar = textwrap.dedent( - r''' - grammar T2; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP2; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T2; - } - a : ^(ID INT) -> ^(INT ID); - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("(34 abc)", found) - - - def testCombinedRewriteAndAuto(self): - grammar = textwrap.dedent( - r''' - grammar T3; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT) | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP3; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T3; - } - a : ^(ID INT) -> ^(INT ID) | INT; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("(34 abc)", found) - - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "34" - ) - - self.failUnlessEqual("34", found) - - - def testAvoidDup(self): - grammar = textwrap.dedent( - r''' - grammar T4; - options { - language=Python; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP4; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T4; - } - a : ID -> ^(ID ID); - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc" - ) - - self.failUnlessEqual("(abc abc)", found) - - - def testLoop(self): - grammar = textwrap.dedent( - r''' - grammar T5; - options { - language=Python; - output=AST; - } - a : ID+ INT+ -> (^(ID INT))+ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP5; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T5; - } - a : (^(ID INT))+ -> INT+ ID+; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a b c 3 4 5" - ) - - self.failUnlessEqual("3 4 5 a b c", found) - - - def 
testAutoDup(self): - grammar = textwrap.dedent( - r''' - grammar T6; - options { - language=Python; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP6; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T6; - } - a : ID; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc" - ) - - self.failUnlessEqual("abc", found) - - - def testAutoDupRule(self): - grammar = textwrap.dedent( - r''' - grammar T7; - options { - language=Python; - output=AST; - } - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP7; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T7; - } - a : b c ; - b : ID ; - c : INT ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 1" - ) - - self.failUnlessEqual("a 1", found) - - - def testAutoWildcard(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python;output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - a : ID . - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - self.assertEquals("abc 34", found) - - -# def testNoWildcardAsRootError(self): -# ErrorQueue equeue = new ErrorQueue(); -# ErrorManager.setErrorListener(equeue); -# > -# String treeGrammar = -# "tree grammar TP;\n"+ -# "options {language=Python;output=AST;} -# "a : ^(. INT) -# " ;\n"; -# > -# Grammar g = new Grammar(treeGrammar); -# Tool antlr = newTool(); -# antlr.setOutputDirectory(null); // write to /dev/null -# CodeGenerator generator = new CodeGenerator(antlr, g, "Java"); -# g.setCodeGenerator(generator); -# generator.genRecognizer(); -# > -# assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size()); -# > -# int expectedMsgID = ErrorManager.MSG_WILDCARD_AS_ROOT; -# Object expectedArg = null; -# antlr.RecognitionException expectedExc = null; -# GrammarSyntaxMessage expectedMessage = -# new GrammarSyntaxMessage(expectedMsgID, g, null, expectedArg, expectedExc); -# > -# checkError(equeue, expectedMessage); -# } - - def testAutoWildcard2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python;output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - a : ^(ID .) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - self.assertEquals("(abc 34)", found) - - - def testAutoWildcardWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python;output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - a : ID c=. 
- ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - self.assertEquals("abc 34", found) - - - def testAutoWildcardWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python;output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - a : ID c+=. - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - self.assertEquals("abc 34", found) - - - def testAutoDupMultiple(self): - grammar = textwrap.dedent( - r''' - grammar T8; - options { - language=Python; - output=AST; - } - a : ID ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP8; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T8; - } - a : ID ID INT - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a b 3" - ) - - self.failUnlessEqual("a b 3", found) - - - def testAutoDupTree(self): - grammar = textwrap.dedent( - r''' - grammar T9; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP9; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T9; - } - a : ^(ID INT) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.failUnlessEqual("(a 3)", found) - - - def testAutoDupTreeWithLabels(self): - grammar = textwrap.dedent( - r''' - grammar T10; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP10; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T10; - } - a : ^(x=ID y=INT) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.failUnlessEqual("(a 3)", found) - - - def testAutoDupTreeWithListLabels(self): - grammar = textwrap.dedent( - r''' - grammar T11; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP11; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T11; - } - a : ^(x+=ID y+=INT) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.failUnlessEqual("(a 3)", found) - - - def testAutoDupTreeWithRuleRoot(self): - grammar = textwrap.dedent( - r''' - grammar T12; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP12; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T12; - } - a : ^(b INT) ; - b : ID ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.failUnlessEqual("(a 3)", found) - - - def testAutoDupTreeWithRuleRootAndLabels(self): - grammar = textwrap.dedent( - r''' - grammar T13; - 
options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP13; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T13; - } - a : ^(x=b INT) ; - b : ID ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.failUnlessEqual("(a 3)", found) - - - def testAutoDupTreeWithRuleRootAndListLabels(self): - grammar = textwrap.dedent( - r''' - grammar T14; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP14; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T14; - } - a : ^(x+=b y+=c) ; - b : ID ; - c : INT ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.failUnlessEqual("(a 3)", found) - - - def testAutoDupNestedTree(self): - grammar = textwrap.dedent( - r''' - grammar T15; - options { - language=Python; - output=AST; - } - a : x=ID y=ID INT -> ^($x ^($y INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP15; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T15; - } - a : ^(ID ^(ID INT)) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a b 3" - ) - - self.failUnlessEqual("(a (b 3))", found) - - - def testDelete(self): - grammar = textwrap.dedent( - r''' - grammar T16; - options { - language=Python; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP16; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T16; - } - a : ID -> - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc" - ) - - self.failUnlessEqual("", found) - - def testSetMatchNoRewrite(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : b INT; - b : ID | INT; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("abc 34", found) - - - def testSetOptionalMatchNoRewrite(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : (ID|INT)? 
INT ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - - self.failUnlessEqual("abc 34", found) - - - def testSetMatchNoRewriteLevel2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - a : x=ID INT -> ^($x INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : ^(ID (ID | INT) ) ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("(abc 34)", found) - - - def testSetMatchNoRewriteLevel2Root(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - a : x=ID INT -> ^($x INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : ^((ID | INT) INT) ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("(abc 34)", found) - - - ## REWRITE MODE - - def testRewriteModeCombinedRewriteAndAuto(self): - grammar = textwrap.dedent( - r''' - grammar T17; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID INT) | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP17; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T17; - rewrite=true; - } - a : ^(ID INT) -> ^(ID["ick"] INT) - | INT // leaves it alone, returning $a.start - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("(ick 34)", found) - - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "34" - ) - - self.failUnlessEqual("34", found) - - - def testRewriteModeFlatTree(self): - grammar = textwrap.dedent( - r''' - grammar T18; - options { - language=Python; - output=AST; - } - a : ID INT -> ID INT | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP18; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T18; - rewrite=true; - } - s : ID a ; - a : INT -> INT["1"] - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34" - ) - self.assertEquals("abc 1", found) - - - def testRewriteModeChainRuleFlatTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID INT -> ID INT | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : a ; - a : b ; - b : ID INT -> INT ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEquals("34 abc", found) - - - def testRewriteModeChainRuleTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID INT -> ^(ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - 
r''' - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : a ; - a : b ; // a.tree must become b.tree - b : ^(ID INT) -> INT - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEquals("34", found) - - - def testRewriteModeChainRuleTree2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID INT -> ^(ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - tokens { X; } - s : a* b ; // only b contributes to tree, but it's after a*; s.tree = b.tree - a : X ; - b : ^(ID INT) -> INT - ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEquals("34", found) - - - def testRewriteModeChainRuleTree3(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : 'boo' ID INT -> 'boo' ^(ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - tokens { X; } - s : 'boo' a* b ; // don't reset s.tree to b.tree due to 'boo' - a : X ; - b : ^(ID INT) -> INT - ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "boo abc 34") - self.assertEquals("boo 34", found) - - - def testRewriteModeChainRuleTree4(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python; output=AST;} - a : 'boo' ID INT -> ^('boo' ^(ID INT)) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - tokens { X; } - s : ^('boo' a* b) ; // don't reset s.tree to b.tree due to 'boo' - a : X ; - b : ^(ID INT) -> INT - ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "boo abc 34") - self.assertEquals("(boo 34)", found) - - - def testRewriteModeChainRuleTree5(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python; output=AST;} - a : 'boo' ID INT -> ^('boo' ^(ID INT)) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - tokens { X; } - s : ^(a b) ; // s.tree is a.tree - a : 'boo' ; - b : ^(ID INT) -> INT - ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "boo abc 34") - self.assertEquals("(boo 34)", found) - - - def testRewriteOfRuleRef(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python; output=AST;} - a : ID INT -> ID INT | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : a -> a ; - a : ID INT -> ID INT ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.failUnlessEqual("abc 34", found) - - - def testRewriteOfRuleRefRoot(self): 
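        # s matches ^(a ^(ID INT)) and rewrites to plain "a"; because rule a
        # matched the root node, the rewrite emits the whole original tree
        # (see the comment after the assertion below)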
- grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python; output=AST;} - a : ID INT INT -> ^(INT ^(ID INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(a ^(ID INT)) -> a ; - a : INT ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 12 34") - # emits whole tree when you ref the root since I can't know whether - # you want the children or not. You might be returning a whole new - # tree. Hmm...still seems weird. oh well. - self.failUnlessEqual("(12 (abc 34))", found) - - - def testRewriteOfRuleRefRootLabeled(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python; output=AST;} - a : ID INT INT -> ^(INT ^(ID INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(label=a ^(ID INT)) -> a ; - a : INT ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 12 34") - # emits whole tree when you ref the root since I can't know whether - # you want the children or not. You might be returning a whole new - # tree. Hmm...still seems weird. oh well. - self.failUnlessEqual("(12 (abc 34))", found) - - - def testRewriteOfRuleRefRootListLabeled(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python; output=AST;} - a : ID INT INT -> ^(INT ^(ID INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(label+=a ^(ID INT)) -> a ; - a : INT ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 12 34") - # emits whole tree when you ref the root since I can't know whether - # you want the children or not. You might be returning a whole new - # tree. Hmm...still seems weird. oh well. 
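        # ("emits whole tree", sketched with the runtime's tree adaptor --
        # createFromType/addChild as in the Python runtime's BaseTreeAdaptor,
        # token types arbitrary:
        #
        #   adaptor = antlr3.tree.CommonTreeAdaptor()
        #   root = adaptor.createFromType(4, '12')
        #   kid = adaptor.createFromType(5, 'abc')
        #   adaptor.addChild(kid, adaptor.createFromType(6, '34'))
        #   adaptor.addChild(root, kid)
        #   root.toStringTree()   # -> '(12 (abc 34))'
        #
        # referencing $label or the rule itself hands back that whole
        # subtree, not just its children)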
- self.failUnlessEqual("(12 (abc 34))", found) - - - def testRewriteOfRuleRefChild(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python; output=AST;} - a : ID INT -> ^(ID ^(INT INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(ID a) -> a ; - a : ^(INT INT) ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.failUnlessEqual("(34 34)", found) - - - def testRewriteOfRuleRefLabel(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python; output=AST;} - a : ID INT -> ^(ID ^(INT INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(ID label=a) -> a ; - a : ^(INT INT) ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.failUnlessEqual("(34 34)", found) - - - def testRewriteOfRuleRefListLabel(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python; output=AST;} - a : ID INT -> ^(ID ^(INT INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(ID label+=a) -> a ; - a : ^(INT INT) ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.failUnlessEqual("(34 34)", found) - - - - def testRewriteModeWithPredicatedRewrites(self): - grammar = textwrap.dedent( - r''' - grammar T19; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID["root"] ^(ID INT)) | INT -> ^(ID["root"] INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP19; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T19; - rewrite=true; - } - s : ^(ID a) { self.buf += $s.start.toStringTree() }; - a : ^(ID INT) -> {True}? ^(ID["ick"] INT) - -> INT - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34" - ) - - self.failUnlessEqual("(root (ick 34))", found) - - - def testWildcardSingleNode(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - a : ID INT -> ^(ID["root"] INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - s : ^(ID c=.) -> $c - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34" - ) - - self.failUnlessEqual("34", found) - - def testWildcardUnlabeledSingleNode(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - s : ^(ID .) 
-> ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEquals("abc", found) - - - def testWildcardGrabsSubtree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID x=INT y=INT z=INT -> ^(ID[\"root\"] ^($x $y $z)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - s : ^(ID c=.) -> $c - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 1 2 3") - self.assertEquals("(1 2 3)", found) - - - def testWildcardGrabsSubtree2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : ID x=INT y=INT z=INT -> ID ^($x $y $z); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - s : ID c=. -> $c - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 1 2 3") - self.assertEquals("(1 2 3)", found) - - - def testWildcardListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST;} - a : INT INT INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - s : (c+=.)+ -> $c+ - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "1 2 3") - self.assertEquals("1 2 3", found) - - - def testWildcardListLabel2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python; output=AST; ASTLabelType=CommonTree;} - a : x=INT y=INT z=INT -> ^($x ^($y $z) ^($y $z)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(INT (c+=.)+) -> $c+ - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "1 2 3") - self.assertEquals("(2 3) (2 3)", found) - - - def testRuleResultAsRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - a : ID '=' INT -> ^('=' ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - COLON : ':' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python; - output=AST; - rewrite=true; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : ^(eq e1=ID e2=.) -> ^(eq $e2 $e1) ; - eq : '=' | ':' {pass} ; // bug in set match, doesn't add to tree!! booh. force nonset. 
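          // the rewrite ^(eq $e2 $e1) deliberately swaps the operands, so
          // "abc = 34" below comes out as (= 34 abc)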
- ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc = 34") - self.assertEquals("(= 34 abc)", found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t052import.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t052import.py deleted file mode 100644 index 89244621..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t052import.py +++ /dev/null @@ -1,1203 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys - -class T(testbase.ANTLRTest): - def setUp(self): - self.oldPath = sys.path[:] - sys.path.insert(0, self.baseDir) - - - def tearDown(self): - sys.path = self.oldPath - - - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input): - # no error recovery yet, just crash! - raise - - return TLexer - - - def execParser(self, grammar, grammarEntry, slaves, input): - for slave in slaves: - parserName = self.writeInlineGrammar(slave)[0] - # slave parsers are imported as normal python modules - # to force reloading current version, purge module from sys.modules - try: - del sys.modules[parserName+'Parser'] - except KeyError: - pass - - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - getattr(parser, grammarEntry)() - - return parser._output - - - def execLexer(self, grammar, slaves, input): - for slave in slaves: - parserName = self.writeInlineGrammar(slave)[0] - # slave parsers are imported as normal python modules - # to force reloading current version, purge module from sys.modules - try: - del sys.modules[parserName+'Parser'] - except KeyError: - pass - - lexerCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - - while True: - token = lexer.nextToken() - if token is None or token.type == antlr3.EOF: - break - - lexer._output += token.text - - return lexer._output - - - # @Test public void testWildcardStillWorks() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String grammar = - # "parser grammar S;\n" + - # "a : B . 
C ;\n"; // not qualified ID - # Grammar g = new Grammar(grammar); - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - # } - - - def testDelegatorInvokesDelegateRule(self): - slave = textwrap.dedent( - r''' - parser grammar S1; - options { - language=Python; - } - @members { - def capture(self, t): - self.gM1.capture(t) - - } - - a : B { self.capture("S.a") } ; - ''') - - master = textwrap.dedent( - r''' - grammar M1; - options { - language=Python; - } - import S1; - s : a ; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave], - input="b" - ) - - self.failUnlessEqual("S.a", found) - - - # @Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception { - # // must generate something like: - # // public int a(int x) throws RecognitionException { return gS.a(x); } - # // in M. - # String slave = - # "parser grammar S;\n" + - # "a : B {System.out.print(\"S.a\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "s : a {System.out.println($a.text);} ;\n" + - # "B : 'b' ;" + // defines B from inherited token space - # "WS : (' '|'\\n') {skip();} ;\n" ; - # String found = execParser("M.g", master, "MParser", "MLexer", - # "s", "b", debug); - # assertEquals("S.ab\n", found); - # } - - - def testDelegatorInvokesDelegateRuleWithArgs(self): - slave = textwrap.dedent( - r''' - parser grammar S2; - options { - language=Python; - } - @members { - def capture(self, t): - self.gM2.capture(t) - } - a[x] returns [y] : B {self.capture("S.a"); $y="1000";} ; - ''') - - master = textwrap.dedent( - r''' - grammar M2; - options { - language=Python; - } - import S2; - s : label=a[3] {self.capture($label.y);} ; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave], - input="b" - ) - - self.failUnlessEqual("S.a1000", found) - - - def testDelegatorAccessesDelegateMembers(self): - slave = textwrap.dedent( - r''' - parser grammar S3; - options { - language=Python; - } - @members { - def capture(self, t): - self.gM3.capture(t) - - def foo(self): - self.capture("foo") - } - a : B ; - ''') - - master = textwrap.dedent( - r''' - grammar M3; // uses no rules from the import - options { - language=Python; - } - import S3; - s : 'b' {self.gS3.foo();} ; // gS is import pointer - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave], - input="b" - ) - - self.failUnlessEqual("foo", found) - - - def testDelegatorInvokesFirstVersionOfDelegateRule(self): - slave = textwrap.dedent( - r''' - parser grammar S4; - options { - language=Python; - } - @members { - def capture(self, t): - self.gM4.capture(t) - } - a : b {self.capture("S.a");} ; - b : B ; - ''') - - slave2 = textwrap.dedent( - r''' - parser grammar T4; - options { - language=Python; - } - @members { - def capture(self, t): - self.gM4.capture(t) - } - a : B {self.capture("T.a");} ; // hidden by S.a - ''') - - master = textwrap.dedent( - r''' - grammar M4; - options { - language=Python; - } - import S4,T4; - s : a ; - B : 'b' ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave, slave2], - input="b" - ) - - self.failUnlessEqual("S.a", found) - - - def testDelegatesSeeSameTokenType(self): - slave = textwrap.dedent( - r''' - parser grammar S5; // A, B, C token 
type order - options { - language=Python; - } - tokens { A; B; C; } - @members { - def capture(self, t): - self.gM5.capture(t) - } - x : A {self.capture("S.x ");} ; - ''') - - slave2 = textwrap.dedent( - r''' - parser grammar T5; - options { - language=Python; - } - tokens { C; B; A; } /// reverse order - @members { - def capture(self, t): - self.gM5.capture(t) - } - y : A {self.capture("T.y");} ; - ''') - - master = textwrap.dedent( - r''' - grammar M5; - options { - language=Python; - } - import S5,T5; - s : x y ; // matches AA, which should be "aa" - B : 'b' ; // another order: B, A, C - A : 'a' ; - C : 'c' ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave, slave2], - input="aa" - ) - - self.failUnlessEqual("S.x T.y", found) - - - # @Test public void testDelegatesSeeSameTokenType2() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar S;\n" + // A, B, C token type order - # "tokens { A; B; C; }\n" + - # "x : A {System.out.println(\"S.x\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String slave2 = - # "parser grammar T;\n" + - # "tokens { C; B; A; }\n" + // reverse order - # "y : A {System.out.println(\"T.y\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "T.g", slave2); - - # String master = - # "grammar M;\n" + - # "import S,T;\n" + - # "s : x y ;\n" + // matches AA, which should be "aa" - # "B : 'b' ;\n" + // another order: B, A, C - # "A : 'a' ;\n" + - # "C : 'c' ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - - # String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, WS=7]"; - # String expectedStringLiteralToTypeMap = "{}"; - # String expectedTypeToTokenList = "[A, B, C, WS]"; - - # assertEquals(expectedTokenIDToTypeMap, - # realElements(g.composite.tokenIDToTypeMap).toString()); - # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString()); - # assertEquals(expectedTypeToTokenList, - # realElements(g.composite.typeToTokenList).toString()); - - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - # } - - # @Test public void testCombinedImportsCombined() throws Exception { - # // for now, we don't allow combined to import combined - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "grammar S;\n" + // A, B, C token type order - # "tokens { A; B; C; }\n" + - # "x : 'x' INT {System.out.println(\"S.x\");} ;\n" + - # "INT : '0'..'9'+ ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "s : x INT ;\n"; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - - # assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size()); - # String expectedError = "error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: combined grammar M 
cannot import combined grammar S"; - # assertEquals("unexpected errors: "+equeue, expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+","")); - # } - - # @Test public void testSameStringTwoNames() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar S;\n" + - # "tokens { A='a'; }\n" + - # "x : A {System.out.println(\"S.x\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String slave2 = - # "parser grammar T;\n" + - # "tokens { X='a'; }\n" + - # "y : X {System.out.println(\"T.y\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "T.g", slave2); - - # String master = - # "grammar M;\n" + - # "import S,T;\n" + - # "s : x y ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - - # String expectedTokenIDToTypeMap = "[A=4, WS=6, X=5]"; - # String expectedStringLiteralToTypeMap = "{'a'=4}"; - # String expectedTypeToTokenList = "[A, X, WS]"; - - # assertEquals(expectedTokenIDToTypeMap, - # realElements(g.composite.tokenIDToTypeMap).toString()); - # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString()); - # assertEquals(expectedTypeToTokenList, - # realElements(g.composite.typeToTokenList).toString()); - - # Object expectedArg = "X='a'"; - # Object expectedArg2 = "A"; - # int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_CONFLICT; - # GrammarSemanticsMessage expectedMessage = - # new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2); - # checkGrammarSemanticsError(equeue, expectedMessage); - - # assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size()); - - # String expectedError = - # "error(158): T.g:2:10: cannot alias X='a'; string already assigned to A"; - # assertEquals(expectedError, equeue.errors.get(0).toString()); - # } - - # @Test public void testSameNameTwoStrings() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar S;\n" + - # "tokens { A='a'; }\n" + - # "x : A {System.out.println(\"S.x\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String slave2 = - # "parser grammar T;\n" + - # "tokens { A='x'; }\n" + - # "y : A {System.out.println(\"T.y\");} ;\n"; - - # writeFile(tmpdir, "T.g", slave2); - - # String master = - # "grammar M;\n" + - # "import S,T;\n" + - # "s : x y ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - - # String expectedTokenIDToTypeMap = "[A=4, T__6=6, WS=5]"; - # String expectedStringLiteralToTypeMap = "{'a'=4, 'x'=6}"; - # String expectedTypeToTokenList = "[A, WS, T__6]"; - - # assertEquals(expectedTokenIDToTypeMap, - # realElements(g.composite.tokenIDToTypeMap).toString()); - # assertEquals(expectedStringLiteralToTypeMap, sortMapToString(g.composite.stringLiteralToTypeMap)); - # assertEquals(expectedTypeToTokenList, - # 
realElements(g.composite.typeToTokenList).toString()); - - # Object expectedArg = "A='x'"; - # Object expectedArg2 = "'a'"; - # int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT; - # GrammarSemanticsMessage expectedMessage = - # new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2); - # checkGrammarSemanticsError(equeue, expectedMessage); - - # assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size()); - - # String expectedError = - # "error(159): T.g:2:10: cannot alias A='x'; token name already assigned to 'a'"; - # assertEquals(expectedError, equeue.errors.get(0).toString()); - # } - - # @Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar S;\n" + - # "options {tokenVocab=whatever;}\n" + - # "tokens { A='a'; }\n" + - # "x : A {System.out.println(\"S.x\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "s : x ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - - # Object expectedArg = "S"; - # int expectedMsgID = ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE; - # GrammarSemanticsMessage expectedMessage = - # new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg); - # checkGrammarSemanticsWarning(equeue, expectedMessage); - - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - # assertEquals("unexpected errors: "+equeue, 1, equeue.warnings.size()); - - # String expectedError = - # "warning(160): S.g:2:10: tokenVocab option ignored in imported grammar S"; - # assertEquals(expectedError, equeue.warnings.get(0).toString()); - # } - - # @Test public void testImportedTokenVocabWorksInRoot() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar S;\n" + - # "tokens { A='a'; }\n" + - # "x : A {System.out.println(\"S.x\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - - # String tokens = - # "A=99\n"; - # writeFile(tmpdir, "Test.tokens", tokens); - - # String master = - # "grammar M;\n" + - # "options {tokenVocab=Test;}\n" + - # "import S;\n" + - # "s : x ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - - # String expectedTokenIDToTypeMap = "[A=99, WS=101]"; - # String expectedStringLiteralToTypeMap = "{'a'=100}"; - # String expectedTypeToTokenList = "[A, 'a', WS]"; - - # assertEquals(expectedTokenIDToTypeMap, - # realElements(g.composite.tokenIDToTypeMap).toString()); - # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString()); - # assertEquals(expectedTypeToTokenList, - # realElements(g.composite.typeToTokenList).toString()); - - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - # } - - # @Test public void 
testSyntaxErrorsInImportsNotThrownOut() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar S;\n" + - # "options {toke\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "s : x ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - - # // whole bunch of errors from bad S.g file - # assertEquals("unexpected errors: "+equeue, 5, equeue.errors.size()); - # } - - # @Test public void testSyntaxErrorsInImportsNotThrownOut2() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar S;\n" + - # ": A {System.out.println(\"S.x\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "s : x ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - - # // whole bunch of errors from bad S.g file - # assertEquals("unexpected errors: "+equeue, 3, equeue.errors.size()); - # } - - - def testDelegatorRuleOverridesDelegate(self): - slave = textwrap.dedent( - r''' - parser grammar S6; - options { - language=Python; - } - @members { - def capture(self, t): - self.gM6.capture(t) - } - a : b {self.capture("S.a");} ; - b : B ; - ''') - - master = textwrap.dedent( - r''' - grammar M6; - options { - language=Python; - } - import S6; - b : 'b'|'c' ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 'a', - slaves=[slave], - input="c" - ) - - self.failUnlessEqual("S.a", found) - - - # @Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception { - # String slave = - # "parser grammar JavaDecl;\n" + - # "type : 'int' ;\n" + - # "decl : type ID ';'\n" + - # " | type ID init ';' {System.out.println(\"JavaDecl: \"+$decl.text);}\n" + - # " ;\n" + - # "init : '=' INT ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "JavaDecl.g", slave); - # String master = - # "grammar Java;\n" + - # "import JavaDecl;\n" + - # "prog : decl ;\n" + - # "type : 'int' | 'float' ;\n" + - # "\n" + - # "ID : 'a'..'z'+ ;\n" + - # "INT : '0'..'9'+ ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # // for float to work in decl, type must be overridden - # String found = execParser("Java.g", master, "JavaParser", "JavaLexer", - # "prog", "float x = 3;", debug); - # assertEquals("JavaDecl: floatx=3;\n", found); - # } - - # @Test public void testDelegatorRuleOverridesDelegates() throws Exception { - # String slave = - # "parser grammar S;\n" + - # "a : b {System.out.println(\"S.a\");} ;\n" + - # "b : B ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - - # String slave2 = - # "parser grammar T;\n" + - # "tokens { A='x'; }\n" + - # "b : B {System.out.println(\"T.b\");} ;\n"; - # writeFile(tmpdir, "T.g", slave2); - - # String master = - # "grammar M;\n" + - # "import S, T;\n" 
+ - # "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # String found = execParser("M.g", master, "MParser", "MLexer", - # "a", "c", debug); - # assertEquals("M.b\n" + - # "S.a\n", found); - # } - - # LEXER INHERITANCE - - def testLexerDelegatorInvokesDelegateRule(self): - slave = textwrap.dedent( - r''' - lexer grammar S7; - options { - language=Python; - } - @members { - def capture(self, t): - self.gM7.capture(t) - } - A : 'a' {self.capture("S.A ");} ; - C : 'c' ; - ''') - - master = textwrap.dedent( - r''' - lexer grammar M7; - options { - language=Python; - } - import S7; - B : 'b' ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execLexer( - master, - slaves=[slave], - input="abc" - ) - - self.failUnlessEqual("S.A abc", found) - - - def testLexerDelegatorRuleOverridesDelegate(self): - slave = textwrap.dedent( - r''' - lexer grammar S8; - options { - language=Python; - } - @members { - def capture(self, t): - self.gM8.capture(t) - } - A : 'a' {self.capture("S.A")} ; - ''') - - master = textwrap.dedent( - r''' - lexer grammar M8; - options { - language=Python; - } - import S8; - A : 'a' {self.capture("M.A ");} ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execLexer( - master, - slaves=[slave], - input="a" - ) - - self.failUnlessEqual("M.A a", found) - - # @Test public void testLexerDelegatorRuleOverridesDelegateLeavingNoRules() throws Exception { - # // M.Tokens has nothing to predict tokens from S. Should - # // not include S.Tokens alt in this case? - # String slave = - # "lexer grammar S;\n" + - # "A : 'a' {System.out.println(\"S.A\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String master = - # "lexer grammar M;\n" + - # "import S;\n" + - # "A : 'a' {System.out.println(\"M.A\");} ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # writeFile(tmpdir, "/M.g", master); - - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # composite.assignTokenTypes(); - # composite.defineGrammarSymbols(); - # composite.createNFAs(); - # g.createLookaheadDFAs(false); - - # // predict only alts from M not S - # String expectingDFA = - # ".s0-'a'->.s1\n" + - # ".s0-{'\\n', ' '}->:s3=>2\n" + - # ".s1-->:s2=>1\n"; - # org.antlr.analysis.DFA dfa = g.getLookaheadDFA(1); - # FASerializer serializer = new FASerializer(g); - # String result = serializer.serialize(dfa.startState); - # assertEquals(expectingDFA, result); - - # // must not be a "unreachable alt: Tokens" error - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - # } - - # @Test public void testInvalidImportMechanism() throws Exception { - # // M.Tokens has nothing to predict tokens from S. Should - # // not include S.Tokens alt in this case? 
- # String slave = - # "lexer grammar S;\n" + - # "A : 'a' {System.out.println(\"S.A\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String master = - # "tree grammar M;\n" + - # "import S;\n" + - # "a : A ;"; - # writeFile(tmpdir, "/M.g", master); - - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - - # assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size()); - # assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size()); - - # String expectedError = - # "error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: tree grammar M cannot import lexer grammar S"; - # assertEquals(expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+","")); - # } - - # @Test public void testSyntacticPredicateRulesAreNotInherited() throws Exception { - # // if this compiles, it means that synpred1_S is defined in S.java - # // but not MParser.java. MParser has its own synpred1_M which must - # // be separate to compile. - # String slave = - # "parser grammar S;\n" + - # "a : 'a' {System.out.println(\"S.a1\");}\n" + - # " | 'a' {System.out.println(\"S.a2\");}\n" + - # " ;\n" + - # "b : 'x' | 'y' {;} ;\n"; // preds generated but not need in DFA here - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String master = - # "grammar M;\n" + - # "options {backtrack=true;}\n" + - # "import S;\n" + - # "start : a b ;\n" + - # "nonsense : 'q' | 'q' {;} ;" + // forces def of preds here in M - # "WS : (' '|'\\n') {skip();} ;\n" ; - # String found = execParser("M.g", master, "MParser", "MLexer", - # "start", "ax", debug); - # assertEquals("S.a1\n", found); - # } - - # @Test public void testKeywordVSIDGivesNoWarning() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "lexer grammar S;\n" + - # "A : 'abc' {System.out.println(\"S.A\");} ;\n" + - # "ID : 'a'..'z'+ ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "a : A {System.out.println(\"M.a\");} ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # String found = execParser("M.g", master, "MParser", "MLexer", - # "a", "abc", debug); - - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - # assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size()); - - # assertEquals("S.A\nM.a\n", found); - # } - - # @Test public void testWarningForUndefinedToken() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "lexer grammar S;\n" + - # "A : 'abc' {System.out.println(\"S.A\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "a : ABC A {System.out.println(\"M.a\");} ;\n" + - # "WS : (' '|'\\n') {skip();} ;\n" ; - # // A is defined in S but M should still see it and not give warning. - # // only problem is ABC. 
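# The replaceFirst("\\-[0-9]+","") calls in the expected-error comparisons
# here appear to strip a random numeric suffix from the temporary directory
# name, so the message can be checked against a stable path.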
- - # rawGenerateAndBuildRecognizer("M.g", master, "MParser", "MLexer", debug); - - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - # assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size()); - - # String expectedError = - # "warning(105): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:3:5: no lexer rule corresponding to token: ABC"; - # assertEquals(expectedError, equeue.warnings.get(0).toString().replaceFirst("\\-[0-9]+","")); - # } - - # /** Make sure that M can import S that imports T. */ - # @Test public void test3LevelImport() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar T;\n" + - # "a : T ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "T.g", slave); - # String slave2 = - # "parser grammar S;\n" + // A, B, C token type order - # "import T;\n" + - # "a : S ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave2); - - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "a : M ;\n" ; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - # g.composite.defineGrammarSymbols(); - - # String expectedTokenIDToTypeMap = "[M=6, S=5, T=4]"; - # String expectedStringLiteralToTypeMap = "{}"; - # String expectedTypeToTokenList = "[T, S, M]"; - - # assertEquals(expectedTokenIDToTypeMap, - # realElements(g.composite.tokenIDToTypeMap).toString()); - # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString()); - # assertEquals(expectedTypeToTokenList, - # realElements(g.composite.typeToTokenList).toString()); - - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - # boolean ok = - # rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false); - # boolean expecting = true; // should be ok - # assertEquals(expecting, ok); - # } - - # @Test public void testBigTreeOfImports() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar T;\n" + - # "x : T ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "T.g", slave); - # slave = - # "parser grammar S;\n" + - # "import T;\n" + - # "y : S ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - - # slave = - # "parser grammar C;\n" + - # "i : C ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "C.g", slave); - # slave = - # "parser grammar B;\n" + - # "j : B ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "B.g", slave); - # slave = - # "parser grammar A;\n" + - # "import B,C;\n" + - # "k : A ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "A.g", slave); - - # String master = - # "grammar M;\n" + - # "import S,A;\n" + - # "a : M ;\n" ; - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - # g.composite.defineGrammarSymbols(); - - # String expectedTokenIDToTypeMap = "[A=8, B=6, C=7, M=9, S=5, T=4]"; - # String expectedStringLiteralToTypeMap = "{}"; - # String expectedTypeToTokenList = "[T, S, B, C, A, M]"; - - # 
assertEquals(expectedTokenIDToTypeMap, - # realElements(g.composite.tokenIDToTypeMap).toString()); - # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString()); - # assertEquals(expectedTypeToTokenList, - # realElements(g.composite.typeToTokenList).toString()); - - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - # boolean ok = - # rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false); - # boolean expecting = true; // should be ok - # assertEquals(expecting, ok); - # } - - # @Test public void testRulesVisibleThroughMultilevelImport() throws Exception { - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String slave = - # "parser grammar T;\n" + - # "x : T ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "T.g", slave); - # String slave2 = - # "parser grammar S;\n" + // A, B, C token type order - # "import T;\n" + - # "a : S ;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave2); - - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "a : M x ;\n" ; // x MUST BE VISIBLE TO M - # writeFile(tmpdir, "M.g", master); - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - # g.composite.defineGrammarSymbols(); - - # String expectedTokenIDToTypeMap = "[M=6, S=5, T=4]"; - # String expectedStringLiteralToTypeMap = "{}"; - # String expectedTypeToTokenList = "[T, S, M]"; - - # assertEquals(expectedTokenIDToTypeMap, - # realElements(g.composite.tokenIDToTypeMap).toString()); - # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString()); - # assertEquals(expectedTypeToTokenList, - # realElements(g.composite.typeToTokenList).toString()); - - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - # } - - # @Test public void testNestedComposite() throws Exception { - # // Wasn't compiling. 
http://www.antlr.org/jira/browse/ANTLR-438 - # ErrorQueue equeue = new ErrorQueue(); - # ErrorManager.setErrorListener(equeue); - # String gstr = - # "lexer grammar L;\n" + - # "T1: '1';\n" + - # "T2: '2';\n" + - # "T3: '3';\n" + - # "T4: '4';\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "L.g", gstr); - # gstr = - # "parser grammar G1;\n" + - # "s: a | b;\n" + - # "a: T1;\n" + - # "b: T2;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "G1.g", gstr); - - # gstr = - # "parser grammar G2;\n" + - # "import G1;\n" + - # "a: T3;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "G2.g", gstr); - # String G3str = - # "grammar G3;\n" + - # "import G2;\n" + - # "b: T4;\n" ; - # mkdir(tmpdir); - # writeFile(tmpdir, "G3.g", G3str); - - # Tool antlr = newTool(new String[] {"-lib", tmpdir}); - # CompositeGrammar composite = new CompositeGrammar(); - # Grammar g = new Grammar(antlr,tmpdir+"/G3.g",composite); - # composite.setDelegationRoot(g); - # g.parseAndBuildAST(); - # g.composite.assignTokenTypes(); - # g.composite.defineGrammarSymbols(); - - # String expectedTokenIDToTypeMap = "[T1=4, T2=5, T3=6, T4=7]"; - # String expectedStringLiteralToTypeMap = "{}"; - # String expectedTypeToTokenList = "[T1, T2, T3, T4]"; - - # assertEquals(expectedTokenIDToTypeMap, - # realElements(g.composite.tokenIDToTypeMap).toString()); - # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString()); - # assertEquals(expectedTypeToTokenList, - # realElements(g.composite.typeToTokenList).toString()); - - # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - # boolean ok = - # rawGenerateAndBuildRecognizer("G3.g", G3str, "G3Parser", null, false); - # boolean expecting = true; // should be ok - # assertEquals(expecting, ok); - # } - - # @Test public void testHeadersPropogatedCorrectlyToImportedGrammars() throws Exception { - # String slave = - # "parser grammar S;\n" + - # "a : B {System.out.print(\"S.a\");} ;\n"; - # mkdir(tmpdir); - # writeFile(tmpdir, "S.g", slave); - # String master = - # "grammar M;\n" + - # "import S;\n" + - # "@header{package mypackage;}\n" + - # "@lexer::header{package mypackage;}\n" + - # "s : a ;\n" + - # "B : 'b' ;" + // defines B from inherited token space - # "WS : (' '|'\\n') {skip();} ;\n" ; - # boolean ok = antlr("M.g", "M.g", master, debug); - # boolean expecting = true; // should be ok - # assertEquals(expecting, ok); - # } - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t053hetero.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t053hetero.py deleted file mode 100644 index db3e9dbe..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t053hetero.py +++ /dev/null @@ -1,939 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys - -class T(testbase.ANTLRTest): - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
- raise - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def execParser(self, grammar, grammarEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - - if r is not None: - return r.tree.toStringTree() - - return "" - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - r = getattr(walker, treeEntry)() - - if r is not None: - return r.tree.toStringTree() - - return "" - - - # PARSERS -- AUTO AST - - def testToken(self): - grammar = textwrap.dedent( - r''' - grammar T1; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.failUnlessEqual("a", found) - - - def testTokenCommonTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a") - - self.failUnlessEqual("a", found) - - - def testTokenWithQualifiedType(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - @members { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - } - a : ID ; // TParser.V is qualified name - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.failUnlessEqual("a", found) - - - def testNamedType(self): - grammar = textwrap.dedent( - r""" - grammar $T; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - } - a : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - """) - - found = self.execParser(grammar, 'a', input="a") - self.assertEquals("a", found) - - - def testTokenWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T2; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : x=ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.failUnlessEqual("a", found) - - - def testTokenWithListLabel(self): - grammar = 
textwrap.dedent( - r''' - grammar T3; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : x+=ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.failUnlessEqual("a", found) - - - def testTokenRoot(self): - grammar = textwrap.dedent( - r''' - grammar T4; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID^ ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.failUnlessEqual("a", found) - - - def testTokenRootWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T5; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : x+=ID^ ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.failUnlessEqual("a", found) - - - def testString(self): - grammar = textwrap.dedent( - r''' - grammar T6; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : 'begin' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="begin" - ) - - self.failUnlessEqual("begin", found) - - - def testStringRoot(self): - grammar = textwrap.dedent( - r''' - grammar T7; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : 'begin'^ ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="begin" - ) - - self.failUnlessEqual("begin", found) - - - # PARSERS -- REWRITE AST - - def testRewriteToken(self): - grammar = textwrap.dedent( - r''' - grammar T8; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID -> ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.failUnlessEqual("a", found) - - - def testRewriteTokenWithArgs(self): - grammar = textwrap.dedent( - r''' - grammar T9; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def __init__(self, *args): - if len(args) == 4: - ttype = args[0] - x = args[1] - y = args[2] - z = args[3] - token = CommonToken(type=ttype, text="") - - elif len(args) == 3: - ttype = args[0] - token = args[1] - x = args[2] - y, z = 0, 0 - - else: - raise TypeError("Invalid args \%r" \% (args,)) - - CommonTree.__init__(self, token) - self.x = x - self.y = y - self.z = z - - def toString(self): - txt = "" - if self.token is not None: - txt += self.token.text - txt +=";\%d\%d\%d" \% (self.x, self.y, self.z) - return txt - __str__ = toString - - } - a : ID -> ID[42,19,30] ID[$ID,99]; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.failUnlessEqual(";421930 a;9900", found) - - - def testRewriteTokenRoot(self): - grammar = 
textwrap.dedent( - r''' - grammar T10; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID INT -> ^(ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a 2" - ) - - self.failUnlessEqual("(a 2)", found) - - - def testRewriteString(self): - grammar = textwrap.dedent( - r''' - grammar T11; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : 'begin' -> 'begin' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="begin" - ) - - self.failUnlessEqual("begin", found) - - - def testRewriteStringRoot(self): - grammar = textwrap.dedent( - r''' - grammar T12; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : 'begin' INT -> ^('begin' INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="begin 2" - ) - - self.failUnlessEqual("(begin 2)", found) - - def testRewriteRuleResults(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - tokens {LIST;} - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - class W(CommonTree): - def __init__(self, tokenType, txt): - super(W, self).__init__( - CommonToken(type=tokenType, text=txt)) - - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : id (',' id)* -> ^(LIST["LIST"] id+); - id : ID -> ID; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a,b,c") - - self.failUnlessEqual("(LIST a b c)", found) - - def testCopySemanticsWithHetero(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - @header { - class V(CommonTree): - def dupNode(self): - return V(self) - - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : type ID (',' ID)* ';' -> ^(type ID)+; - type : 'int' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="int a, b, c;") - self.failUnlessEqual("(int a) (int b) (int c)", found) - - # TREE PARSERS -- REWRITE AST - - def testTreeParserRewriteFlatList(self): - grammar = textwrap.dedent( - r''' - grammar T13; - options { - language=Python; - output=AST; - } - a : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP13; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T13; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - class W(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID INT -> INT ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc 34" - ) - - self.failUnlessEqual("34 abc", found) - - - def testTreeParserRewriteTree(self): - grammar = textwrap.dedent( - r''' - grammar T14; - options { - 
language=Python; - output=AST; - } - a : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP14; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T14; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - class W(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID INT -> ^(INT ID) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc 34" - ) - - self.failUnlessEqual("(34 abc)", found) - - - def testTreeParserRewriteImaginary(self): - grammar = textwrap.dedent( - r''' - grammar T15; - options { - language=Python; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP15; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T15; - } - tokens { ROOT; } - @header { - class V(CommonTree): - def __init__(self, tokenType): - CommonTree.__init__(self, CommonToken(tokenType)) - - def toString(self): - return tokenNames[self.token.type] + "" - __str__ = toString - - - } - a : ID -> ROOT ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc" - ) - - self.failUnlessEqual("ROOT abc", found) - - - def testTreeParserRewriteImaginaryWithArgs(self): - grammar = textwrap.dedent( - r''' - grammar T16; - options { - language=Python; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP16; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T16; - } - tokens { ROOT; } - @header { - class V(CommonTree): - def __init__(self, tokenType, x): - CommonTree.__init__(self, CommonToken(tokenType)) - self.x = x - - def toString(self): - return tokenNames[self.token.type] + ";" + str(self.x) - __str__ = toString - - } - a : ID -> ROOT[42] ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc" - ) - - self.failUnlessEqual("ROOT;42 abc", found) - - - def testTreeParserRewriteImaginaryRoot(self): - grammar = textwrap.dedent( - r''' - grammar T17; - options { - language=Python; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP17; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T17; - } - tokens { ROOT; } - @header { - class V(CommonTree): - def __init__(self, tokenType): - CommonTree.__init__(self, CommonToken(tokenType)) - - def toString(self): - return tokenNames[self.token.type] + "" - __str__ = toString - - } - a : ID -> ^(ROOT ID) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc" - ) - - self.failUnlessEqual("(ROOT abc)", found) - - - def testTreeParserRewriteImaginaryFromReal(self): - grammar = textwrap.dedent( - r''' - grammar T18; - options { - language=Python; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP18; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T18; - } - tokens { ROOT; } - @header { - 
class V(CommonTree): - def __init__(self, tokenType, tree=None): - if tree is None: - CommonTree.__init__(self, CommonToken(tokenType)) - else: - CommonTree.__init__(self, tree) - self.token.type = tokenType - - def toString(self): - return tokenNames[self.token.type]+"@"+str(self.token.line) - __str__ = toString - - } - a : ID -> ROOT[$ID] - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc" - ) - - self.failUnlessEqual("ROOT@1", found) - - - def testTreeParserAutoHeteroAST(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - output=AST; - } - a : ID ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - tokens { ROOT; } - @header { - class V(CommonTree): - def toString(self): - return CommonTree.toString(self) + "" - __str__ = toString - - } - - a : ID ';'; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc;" - ) - - self.failUnlessEqual("abc ;", found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t054main.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t054main.py deleted file mode 100644 index bb26510c..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t054main.py +++ /dev/null @@ -1,318 +0,0 @@ -# -*- coding: utf-8 -*- - -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys -from StringIO import StringIO - -class T(testbase.ANTLRTest): - def setUp(self): - self.oldPath = sys.path[:] - sys.path.insert(0, self.baseDir) - - - def tearDown(self): - sys.path = self.oldPath - - - def testOverrideMain(self): - grammar = textwrap.dedent( - r"""lexer grammar T3; - options { - language = Python; - } - - @main { - def main(argv): - raise RuntimeError("no") - } - - ID: ('a'..'z' | '\u00c0'..'\u00ff')+; - WS: ' '+ { $channel = HIDDEN; }; - """) - - - stdout = StringIO() - - lexerMod = self.compileInlineGrammar(grammar, returnModule=True) - try: - lexerMod.main( - ['lexer.py'] - ) - self.fail() - except RuntimeError: - pass - - - def testLexerFromFile(self): - input = "foo bar" - inputPath = self.writeFile("input.txt", input) - - grammar = textwrap.dedent( - r"""lexer grammar T1; - options { - language = Python; - } - - ID: 'a'..'z'+; - WS: ' '+ { $channel = HIDDEN; }; - """) - - - stdout = StringIO() - - lexerMod = self.compileInlineGrammar(grammar, returnModule=True) - lexerMod.main( - ['lexer.py', inputPath], - stdout=stdout - ) - - self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3) - - - def testLexerFromStdIO(self): - input = "foo bar" - - grammar = textwrap.dedent( - r"""lexer grammar T2; - options { - language = Python; - } - - ID: 'a'..'z'+; - WS: ' '+ { $channel = HIDDEN; }; - """) - - - stdout = StringIO() - - lexerMod = self.compileInlineGrammar(grammar, returnModule=True) - lexerMod.main( - ['lexer.py'], - stdin=StringIO(input), - stdout=stdout - ) - - self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3) - - - def testLexerEncoding(self): - input = u"föö bär".encode('utf-8') - - grammar = textwrap.dedent( - r"""lexer grammar T3; - options { - language = Python; - } - - ID: ('a'..'z' | '\u00c0'..'\u00ff')+; - WS: ' '+ { $channel = HIDDEN; }; - """) - - - stdout = StringIO() - - lexerMod = self.compileInlineGrammar(grammar, returnModule=True) 
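# The generated module's main() drives the recognizer from the command line;
# the --encoding flag below presumably wraps the input stream in the named
# codec, which is what lets the UTF-8 bytes prepared above decode correctly.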
- lexerMod.main( - ['lexer.py', '--encoding', 'utf-8'], - stdin=StringIO(input), - stdout=stdout - ) - - self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3) - - - def testCombined(self): - input = "foo bar" - - grammar = textwrap.dedent( - r"""grammar T4; - options { - language = Python; - } - - r returns [res]: (ID)+ EOF { $res = $text; }; - - ID: 'a'..'z'+; - WS: ' '+ { $channel = HIDDEN; }; - """) - - - stdout = StringIO() - - lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True) - parserMod.main( - ['combined.py', '--rule', 'r'], - stdin=StringIO(input), - stdout=stdout - ) - - stdout = stdout.getvalue() - self.failUnlessEqual(len(stdout.splitlines()), 1, stdout) - - - def testCombinedOutputAST(self): - input = "foo + bar" - - grammar = textwrap.dedent( - r"""grammar T5; - options { - language = Python; - output = AST; - } - - r: ID OP^ ID EOF!; - - ID: 'a'..'z'+; - OP: '+'; - WS: ' '+ { $channel = HIDDEN; }; - """) - - - stdout = StringIO() - - lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True) - parserMod.main( - ['combined.py', '--rule', 'r'], - stdin=StringIO(input), - stdout=stdout - ) - - stdout = stdout.getvalue().strip() - self.failUnlessEqual(stdout, "(+ foo bar)") - - - def testTreeParser(self): - grammar = textwrap.dedent( - r'''grammar T6; - options { - language = Python; - output = AST; - } - - r: ID OP^ ID EOF!; - - ID: 'a'..'z'+; - OP: '+'; - WS: ' '+ { $channel = HIDDEN; }; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar T6Walker; - options { - language=Python; - ASTLabelType=CommonTree; - tokenVocab=T6; - } - r returns [res]: ^(OP a=ID b=ID) - { $res = "\%s \%s \%s" \% ($a.text, $OP.text, $b.text) } - ; - ''') - - lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True) - walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True) - - stdout = StringIO() - walkerMod.main( - ['walker.py', '--rule', 'r', '--parser', 'T6Parser', '--parser-rule', 'r', '--lexer', 'T6Lexer'], - stdin=StringIO("a+b"), - stdout=stdout - ) - - stdout = stdout.getvalue().strip() - self.failUnlessEqual(stdout, "u'a + b'") - - - def testTreeParserRewrite(self): - grammar = textwrap.dedent( - r'''grammar T7; - options { - language = Python; - output = AST; - } - - r: ID OP^ ID EOF!; - - ID: 'a'..'z'+; - OP: '+'; - WS: ' '+ { $channel = HIDDEN; }; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar T7Walker; - options { - language=Python; - ASTLabelType=CommonTree; - tokenVocab=T7; - output=AST; - } - tokens { - ARG; - } - r: ^(OP a=ID b=ID) -> ^(OP ^(ARG ID) ^(ARG ID)); - ''') - - lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True) - walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True) - - stdout = StringIO() - walkerMod.main( - ['walker.py', '--rule', 'r', '--parser', 'T7Parser', '--parser-rule', 'r', '--lexer', 'T7Lexer'], - stdin=StringIO("a+b"), - stdout=stdout - ) - - stdout = stdout.getvalue().strip() - self.failUnlessEqual(stdout, "(+ (ARG a) (ARG b))") - - - - def testGrammarImport(self): - slave = textwrap.dedent( - r''' - parser grammar T8S; - options { - language=Python; - } - - a : B; - ''') - - parserName = self.writeInlineGrammar(slave)[0] - # slave parsers are imported as normal python modules - # to force reloading current version, purge module from sys.modules - try: - del sys.modules[parserName+'Parser'] - except KeyError: - pass - - master = textwrap.dedent( - r''' - grammar T8M; - options { - language=Python; - } - import T8S; - s 
returns [res]: a { $res = $a.text }; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') {self.skip()} ; - ''') - - stdout = StringIO() - - lexerMod, parserMod = self.compileInlineGrammar(master, returnModule=True) - parserMod.main( - ['import.py', '--rule', 's'], - stdin=StringIO("b"), - stdout=stdout - ) - - stdout = stdout.getvalue().strip() - self.failUnlessEqual(stdout, "u'b'") - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t055templates.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t055templates.py deleted file mode 100644 index 5090b01b..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t055templates.py +++ /dev/null @@ -1,508 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import stringtemplate3 -import testbase -import sys -import os -from StringIO import StringIO - -class T(testbase.ANTLRTest): - def execParser(self, grammar, grammarEntry, input, group=None): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - if group is not None: - parser.templateLib = group - result = getattr(parser, grammarEntry)() - if result.st is not None: - return result.st.toString() - return None - - - def testInlineTemplate(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=template; - } - a : ID INT - -> template(id={$ID.text}, int={$INT.text}) - "id=, int=" - ; - - ID : 'a'..'z'+; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("id=abc, int=34", found) - - - def testExternalTemplate(self): - templates = textwrap.dedent( - '''\ - group T; - expr(args, op) ::= << - [}>] - >> - ''' - ) - - group = stringtemplate3.StringTemplateGroup( - file=StringIO(templates), - lexer='angle-bracket' - ) - - grammar = textwrap.dedent( - r'''grammar T2; - options { - language=Python; - output=template; - } - a : r+=arg OP r+=arg - -> expr(op={$OP.text}, args={$r}) - ; - arg: ID -> template(t={$ID.text}) ""; - - ID : 'a'..'z'+; - OP: '+'; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "a + b", - group - ) - - self.failUnlessEqual("[a+b]", found) - - - def testEmptyTemplate(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=template; - } - a : ID INT - -> - ; - - ID : 'a'..'z'+; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "abc 34" - ) - - self.failUnless(found is None) - - - def testList(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=template; - } - a: (r+=b)* EOF - -> template(r={$r}) - "" - ; - - b: ID - -> template(t={$ID.text}) "" - ; - - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "abc def ghi" - ) - - self.failUnlessEqual("abc,def,ghi", found) - - - def testAction(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=template; - } - a: ID - -> { stringtemplate3.StringTemplate("hello") } - ; - - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "abc" - ) - - self.failUnlessEqual("hello", found) - - - def 
testTemplateExpressionInAction(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=template; - } - a: ID - { $st = %{"hello"} } - ; - - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "abc" - ) - - self.failUnlessEqual("hello", found) - - - def testTemplateExpressionInAction2(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=template; - } - a: ID - { - res = %{"hello "} - %res.foo = "world"; - } - -> { res } - ; - - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "abc" - ) - - self.failUnlessEqual("hello world", found) - - - def testIndirectTemplateConstructor(self): - templates = textwrap.dedent( - '''\ - group T; - expr(args, op) ::= << - [}>] - >> - ''' - ) - - group = stringtemplate3.StringTemplateGroup( - file=StringIO(templates), - lexer='angle-bracket' - ) - - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python; - output=template; - } - a: ID - { - $st = %({"expr"})(args={[1, 2, 3]}, op={"+"}) - } - ; - - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "abc", - group - ) - - self.failUnlessEqual("[1+2+3]", found) - - - def testPredicates(self): - grammar = textwrap.dedent( - r'''grammar T3; - options { - language=Python; - output=template; - } - a : ID INT - -> {$ID.text=='a'}? template(int={$INT.text}) - "A: " - -> {$ID.text=='b'}? template(int={$INT.text}) - "B: " - -> template(int={$INT.text}) - "C: " - ; - - ID : 'a'..'z'+; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "b 34" - ) - - self.failUnlessEqual("B: 34", found) - - - def testBacktrackingMode(self): - grammar = textwrap.dedent( - r'''grammar T4; - options { - language=Python; - output=template; - backtrack=true; - } - a : (ID INT)=> ID INT - -> template(id={$ID.text}, int={$INT.text}) - "id=, int=" - ; - - ID : 'a'..'z'+; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''' - ) - - found = self.execParser( - grammar, 'a', - "abc 34" - ) - - self.failUnlessEqual("id=abc, int=34", found) - - - def testRewrite(self): - grammar = textwrap.dedent( - r'''grammar T5; - options { - language=Python; - output=template; - rewrite=true; - } - - prog: stat+; - - stat - : 'if' '(' expr ')' stat - | 'return' return_expr ';' - | '{' stat* '}' - | ID '=' expr ';' - ; - - return_expr - : expr - -> template(t={$text}) <)>> - ; - - expr - : ID - | INT - ; - - ID: 'a'..'z'+; - INT: '0'..'9'+; - WS: (' '|'\n')+ {$channel=HIDDEN;} ; - COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ; - ''' - ) - - input = textwrap.dedent( - '''\ - if ( foo ) { - b = /* bla */ 2; - return 1 /* foo */; - } - - /* gnurz */ - return 12; - ''' - ) - - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.TokenRewriteStream(lexer) - parser = parserCls(tStream) - result = parser.prog() - - found = tStream.toString() - - expected = textwrap.dedent( - '''\ - if ( foo ) { - b = /* bla */ 2; - return boom(1) /* foo */; - } - - /* gnurz */ - return boom(12); - ''' - ) - - self.failUnlessEqual(expected, found) - - - def testTreeRewrite(self): - grammar = textwrap.dedent( - r'''grammar T6; - options { - language=Python; - output=AST; - } - - tokens { - BLOCK; - ASSIGN; - } - - prog: stat+; - 
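            // each alternative of stat below rewrites to a tree rooted at a
            // keyword token or an imaginary token (BLOCK, ASSIGN), per output=AST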
- stat - : IF '(' e=expr ')' s=stat - -> ^(IF $e $s) - | RETURN expr ';' - -> ^(RETURN expr) - | '{' stat* '}' - -> ^(BLOCK stat*) - | ID '=' expr ';' - -> ^(ASSIGN ID expr) - ; - - expr - : ID - | INT - ; - - IF: 'if'; - RETURN: 'return'; - ID: 'a'..'z'+; - INT: '0'..'9'+; - WS: (' '|'\n')+ {$channel=HIDDEN;} ; - COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ; - ''' - ) - - treeGrammar = textwrap.dedent( - r'''tree grammar T6Walker; - options { - language=Python; - tokenVocab=T6; - ASTLabelType=CommonTree; - output=template; - rewrite=true; - } - - prog: stat+; - - stat - : ^(IF expr stat) - | ^(RETURN return_expr) - | ^(BLOCK stat*) - | ^(ASSIGN ID expr) - ; - - return_expr - : expr - -> template(t={$text}) <)>> - ; - - expr - : ID - | INT - ; - ''' - ) - - input = textwrap.dedent( - '''\ - if ( foo ) { - b = /* bla */ 2; - return 1 /* foo */; - } - - /* gnurz */ - return 12; - ''' - ) - - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.TokenRewriteStream(lexer) - parser = parserCls(tStream) - tree = parser.prog().tree - nodes = antlr3.tree.CommonTreeNodeStream(tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - walker.prog() - - found = tStream.toString() - - expected = textwrap.dedent( - '''\ - if ( foo ) { - b = /* bla */ 2; - return boom(1) /* foo */; - } - - /* gnurz */ - return boom(12); - ''' - ) - - self.failUnlessEqual(expected, found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t056lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t056lexer.py deleted file mode 100644 index a53f92a3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t056lexer.py +++ /dev/null @@ -1,49 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import stringtemplate3 -import testbase -import sys -import os -from StringIO import StringIO - -# FIXME: port other tests from TestLexer.java - -class T(testbase.ANTLRTest): - def execParser(self, grammar, grammarEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - result = getattr(parser, grammarEntry)() - return result - - - def testRefToRuleDoesNotSetChannel(self): - # this must set channel of A to HIDDEN. $channel is local to rule - # like $type. 
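# Concretely: WS sets $channel=HIDDEN only within its own rule scope, so the
# A token that references it is still emitted on the default channel, which
# is why the expected output below reports channel=0.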
- grammar = textwrap.dedent( - r''' - grammar P; - options { - language=Python; - } - a returns [foo]: A EOF { $foo = '\%s, channel=\%d' \% ($A.text, $A.channel); } ; - A : '-' WS I ; - I : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - "- 34" - ) - - self.failUnlessEqual("- 34, channel=0", found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t057autoAST.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t057autoAST.py deleted file mode 100644 index e5c1d35d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t057autoAST.py +++ /dev/null @@ -1,1005 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys - -class TestAutoAST(testbase.ANTLRTest): - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._errors = [] - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def emitErrorMessage(self, msg): - self._errors.append(msg) - - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def execParser(self, grammar, grammarEntry, input, expectErrors=False): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - - if not expectErrors: - self.assertEquals(len(parser._errors), 0, parser._errors) - - result = "" - - if r is not None: - if hasattr(r, 'result'): - result += r.result - - if r.tree is not None: - result += r.tree.toStringTree() - - if not expectErrors: - return result - - else: - return result, parser._errors - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - r = getattr(walker, treeEntry)() - - if r is not None: - return r.tree.toStringTree() - - return "" - - - def testTokenList(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;}; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEquals("abc 34", found); - - - def testTokenListInSingleAltBlock(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : (ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - 
''') - - found = self.execParser(grammar,"a", "abc 34") - self.assertEquals("abc 34", found) - - - def testSimpleRootAtOuterLevel(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : ID^ INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEquals("(abc 34)", found) - - - def testSimpleRootAtOuterLevelReverse(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : INT ID^ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "34 abc") - self.assertEquals("(abc 34)", found) - - - def testBang(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT! ID! INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc 34 dag 4532") - self.assertEquals("abc 4532", found) - - - def testOptionalThenRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ( ID INT )? ID^ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a 1 b") - self.assertEquals("(b a 1)", found) - - - def testLabeledStringRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : v='void'^ ID ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEquals("(void foo ;)", found) - - - def testWildcard(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : v='void'^ . ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEquals("(void foo ;)", found) - - - def testWildcardRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : v='void' .^ ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEquals("(foo void ;)", found) - - - def testWildcardRootWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : v='void' x=.^ ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEquals("(foo void ;)", found) - - - def testWildcardRootWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : v='void' x=.^ ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEquals("(foo void ;)", found) - - - def testWildcardBangWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : v='void' x=.! 
';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEquals("void ;", found) - - - def testRootRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID^ INT^ ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a 34 c") - self.assertEquals("(34 a c)", found) - - - def testRootRoot2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT^ ID^ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a 34 c") - self.assertEquals("(c (34 a))", found) - - - def testRootThenRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID^ (INT '*'^ ID)+ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a 34 * b 9 * c") - self.assertEquals("(* (* (a 34) b 9) c)", found) - - - def testNestedSubrule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : 'void' (({pass}ID|INT) ID | 'null' ) ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "void a b;") - self.assertEquals("void a b ;", found) - - - def testInvokeRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : type ID ; - type : {pass}'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a") - self.assertEquals("int a", found) - - - def testInvokeRuleAsRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : type^ ID ; - type : {pass}'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a") - self.assertEquals("(int a)", found) - - - def testInvokeRuleAsRootWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : x=type^ ID ; - type : {pass}'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a") - self.assertEquals("(int a)", found) - - - def testInvokeRuleAsRootWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : x+=type^ ID ; - type : {pass}'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a") - self.assertEquals("(int a)", found) - - - def testRuleRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID ('+'^ ID)* ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a+b+c+d") - self.assertEquals("(+ (+ (+ a b) c) d)", found) - - - def testRuleInvocationRuleRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID (op^ ID)* ; - op : {pass}'+' | '-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = 
self.execParser(grammar, "a", "a+b+c-d") - self.assertEquals("(- (+ (+ a b) c) d)", found) - - - def testTailRecursion(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - s : a ; - a : atom ('exp'^ a)? ; - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "s", "3 exp 4 exp 5") - self.assertEquals("(exp 3 (exp 4 5))", found) - - - def testSet(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID|INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("abc", found) - - - def testSetRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ('+' | '-')^ ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "+abc") - self.assertEquals("(+ abc)", found) - - - @testbase.broken( - "FAILS until antlr.g rebuilt in v3", testbase.GrammarCompileError) - def testSetRootWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : x=('+' | '-')^ ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "+abc") - self.assertEquals("(+ abc)", found) - - - def testSetAsRuleRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID (('+'|'-')^ ID)* ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a+b-c") - self.assertEquals("(- (+ a b) c)", found) - - - def testNotSet(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ~ID '+' INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "34+2") - self.assertEquals("34 + 2", found) - - - def testNotSetWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : x=~ID '+' INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "34+2") - self.assertEquals("34 + 2", found) - - - def testNotSetWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : x=~ID '+' INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "34+2") - self.assertEquals("34 + 2", found) - - - def testNotSetRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ~'+'^ INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "34 55") - self.assertEquals("(34 55)", found) - - - def testNotSetRootWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ~'+'^ INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "34 55") - self.assertEquals("(34 55)", found) - - - def testNotSetRootWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ~'+'^ INT ; - 
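- // ~'+' above is a not-set: it matches any single token except '+', and - // the trailing ^ still promotes whatever matched to the root, which is - // why the expected tree for "34 55" is (34 55).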
ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "34 55") - self.assertEquals("(34 55)", found) - - - def testNotSetRuleRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : INT (~INT^ INT)* ; - blort : '+' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "3+4+5") - self.assertEquals("(+ (+ 3 4) 5)", found) - - - @testbase.broken("FIXME: What happened to the semicolon?", AssertionError) - def testTokenLabelReuse(self): - # check for compilation problem due to multiple defines - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a returns [result] : id=ID id=ID {$result = "2nd id="+$id.text+";";} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("2nd id=b;a b", found) - - - def testTokenLabelReuse2(self): - # check for compilation problem due to multiple defines - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a returns [result]: id=ID id=ID^ {$result = "2nd id="+$id.text+',';} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("2nd id=b,(b a)", found) - - - def testTokenListLabelReuse(self): - # check for compilation problem due to multiple defines - # make sure ids has both ID tokens - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a returns [result] : ids+=ID ids+=ID {$result = "id list=["+",".join([t.text for t in $ids])+'],';} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - expecting = "id list=[a,b],a b" - self.assertEquals(expecting, found) - - - def testTokenListLabelReuse2(self): - # check for compilation problem due to multiple defines - # make sure ids has both ID tokens - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a returns [result] : ids+=ID^ ids+=ID {$result = "id list=["+",".join([t.text for t in $ids])+'],';} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - expecting = "id list=[a,b],(a b)" - self.assertEquals(expecting, found) - - - def testTokenListLabelRuleRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : id+=ID^ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEquals("a", found) - - - def testTokenListLabelBang(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : id+=ID! 
; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEquals("", found) - - - def testRuleListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a returns [result]: x+=b x+=b { - t=$x[1] - $result = "2nd x="+t.toStringTree()+','; - }; - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("2nd x=b,a b", found) - - - def testRuleListLabelRuleRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a returns [result] : ( x+=b^ )+ { - $result = "x="+$x[1].toStringTree()+','; - } ; - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("x=(b a),(b a)", found) - - - def testRuleListLabelBang(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a returns [result] : x+=b! x+=b { - $result = "1st x="+$x[0].toStringTree()+','; - } ; - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("1st x=a,b", found) - - - def testComplicatedMelange(self): - # check for compilation problem - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : A b=B b=B c+=C c+=C D {s = $D.text} ; - A : 'a' ; - B : 'b' ; - C : 'c' ; - D : 'd' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b b c c d") - self.assertEquals("a b b c c d", found) - - - def testReturnValueWithAST(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a returns [result] : ID b { $result = str($b.i) + '\n';} ; - b returns [i] : INT {$i=int($INT.text);} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEquals("34\nabc 34", found) - - - def testSetLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { language=Python;output=AST; } - r : (INT|ID)+ ; - ID : 'a'..'z' + ; - INT : '0'..'9' +; - WS: (' ' | '\n' | '\\t')+ {$channel = HIDDEN;}; - ''') - - found = self.execParser(grammar, "r", "abc 34 d") - self.assertEquals("abc 34 d", found) - - - def testExtraTokenInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - decl : type^ ID '='! INT ';'! ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "decl", "int 34 x=1;", - expectErrors=True) - self.assertEquals(["line 1:4 extraneous input u'34' expecting ID"], - errors) - self.assertEquals("(int x 1)", found) # tree gets correct x and 1 tokens - - - def testMissingIDInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - tokens {EXPR;} - decl : type^ ID '='! INT ';'! 
; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "decl", "int =1;", - expectErrors=True) - self.assertEquals(["line 1:4 missing ID at u'='"], errors) - self.assertEquals("(int <missing ID> 1)", found) # tree gets invented ID token - - - def testMissingSetInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - tokens {EXPR;} - decl : type^ ID '='! INT ';'! ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "decl", "x=1;", - expectErrors=True) - self.assertEquals(["line 1:0 mismatched input u'x' expecting set None"], errors) - self.assertEquals("(<error: x> x 1)", found) # tree gets invented ID token - - - def testMissingTokenGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : ID INT ; // follow is EOF - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "abc", expectErrors=True) - self.assertEquals(["line 1:3 missing INT at '<EOF>'"], errors) - self.assertEquals("abc <missing INT>", found) - - - def testMissingTokenGivesErrorNodeInInvokedRule(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : b ; - b : ID INT ; // follow should see EOF - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "abc", expectErrors=True) - self.assertEquals(["line 1:3 mismatched input '<EOF>' expecting INT"], errors) - self.assertEquals("<mismatched token: <EOF>, resync=abc>", found) - - - def testExtraTokenGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : b c ; - b : ID ; - c : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "abc ick 34", - expectErrors=True) - self.assertEquals(["line 1:4 extraneous input u'ick' expecting INT"], - errors) - self.assertEquals("abc 34", found) - - - def testMissingFirstTokenGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "34", expectErrors=True) - self.assertEquals(["line 1:0 missing ID at u'34'"], errors) - self.assertEquals("<missing ID> 34", found) - - - def testMissingFirstTokenGivesErrorNode2(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : b c ; - b : ID ; - c : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "34", expectErrors=True) - - # finds an error at the first token, 34, and re-syncs. - # re-synchronizing does not consume a token because 34 follows - # ref to rule b (start of c). It then matches 34 in c.
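- # The "<missing ...>" text in these expected values comes from the runtime - # conjuring a stand-in token when one is absent; a minimal sketch of that - # rendering (plain Python, independent of the antlr3 APIs): - # - # def missing_token_text(token_name): - # return "<missing %s>" % token_name - # - # missing_token_text("ID") # -> '<missing ID>', cf. '<missing ID> 34' below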
- self.assertEquals(["line 1:0 missing ID at u'34'"], errors) - self.assertEquals(" 34", found) - - - def testNoViableAltGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : b | c ; - b : ID ; - c : INT ; - ID : 'a'..'z'+ ; - S : '*' ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "*", expectErrors=True) - self.assertEquals(["line 1:0 no viable alternative at input u'*'"], - errors) - self.assertEquals(",1:0], resync=*>", - found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t058rewriteAST.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t058rewriteAST.py deleted file mode 100644 index 15036f47..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t058rewriteAST.py +++ /dev/null @@ -1,1517 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys - -class TestRewriteAST(testbase.ANTLRTest): - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._errors = [] - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def emitErrorMessage(self, msg): - self._errors.append(msg) - - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - base.__init__(self, *args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
- raise - - return TLexer - - - def execParser(self, grammar, grammarEntry, input, expectErrors=False): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - - if not expectErrors: - self.assertEquals(len(parser._errors), 0, parser._errors) - - result = "" - - if r is not None: - if hasattr(r, 'result'): - result += r.result - - if r.tree is not None: - result += r.tree.toStringTree() - - if not expectErrors: - return result - - else: - return result, parser._errors - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - r = getattr(walker, treeEntry)() - - if r is not None: - return r.tree.toStringTree() - - return "" - - - def testDelete(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT -> ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEquals("", found) - - - def testSingleToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID -> ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("abc", found) - - - def testSingleTokenToNewNode(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID -> ID["x"]; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("x", found) - - - def testSingleTokenToNewNodeRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID -> ^(ID["x"] INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("(x INT)", found) - - - def testSingleTokenToNewNode2(self): - # Allow creation of new nodes w/o args. 
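- # A hedged note on ID[ ]: with an empty argument list the rewrite conjures - # an imaginary node whose text falls back to the token's name rather than - # any matched input, hence the expected "ID" in this test; a minimal - # sketch of that default (plain Python, names are illustrative): - # - # def new_node_text(token_name, text=None): - # return text if text is not None else token_name - # - # new_node_text("ID") # -> 'ID' (ID[ ] here) - # new_node_text("ID", "x") # -> 'x' (ID["x"] in the previous test)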
- grammar = textwrap.dedent( - r''' - grammar TT; - options {language=Python;output=AST;} - a : ID -> ID[ ]; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("ID", found) - - - def testSingleCharLiteral(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : 'c' -> 'c'; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "c") - self.assertEquals("c", found) - - - def testSingleStringLiteral(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : 'ick' -> 'ick'; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "ick") - self.assertEquals("ick", found) - - - def testSingleRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : b -> b; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("abc", found) - - - def testReorderTokens(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT -> INT ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEquals("34 abc", found) - - - def testReorderTokenAndRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : b INT -> INT b; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEquals("34 abc", found) - - - def testTokenTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT -> ^(INT ID); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEquals("(34 abc)", found) - - - def testTokenTreeAfterOtherStuff(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : 'void' ID INT -> 'void' ^(INT ID); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "void abc 34") - self.assertEquals("void (34 abc)", found) - - - def testNestedTokenTreeWithOuterLoop(self): - # verify that ID and INT both iterate over outer index variable - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {DUH;} - a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )+ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a 1 b 2") - self.assertEquals("(DUH a (DUH 1)) (DUH b (DUH 2))", found) - - - def testOptionalSingleToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID -> ID? 
; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("abc", found) - - - def testClosureSingleToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID ID -> ID* ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testPositiveClosureSingleToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID ID -> ID+ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testOptionalSingleRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : b -> b?; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("abc", found) - - - def testClosureSingleRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : b b -> b*; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testClosureOfLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : x+=b x+=b -> $x*; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testOptionalLabelNoListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : (x=ID)? -> $x?; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEquals("a", found) - - - def testPositiveClosureSingleRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : b b -> b+; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testSinglePredicateT(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID -> {True}? ID -> ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("abc", found) - - - def testSinglePredicateF(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID -> {False}? ID -> ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEquals("", found) - - - def testMultiplePredicate(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT -> {False}? ID - -> {True}? 
INT - -> - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a 2") - self.assertEquals("2", found) - - - def testMultiplePredicateTrees(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID INT -> {False}? ^(ID INT) - -> {True}? ^(INT ID) - -> ID - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a 2") - self.assertEquals("(2 a)", found) - - - def testSimpleTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : op INT -> ^(op INT); - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "-34") - self.assertEquals("(- 34)", found) - - - def testSimpleTree2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : op INT -> ^(INT op); - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "+ 34") - self.assertEquals("(34 +)", found) - - - - def testNestedTrees(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)+) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "var a:int; b:float;") - self.assertEquals("(var (: a int) (: b float))", found) - - - def testImaginaryTokenCopy(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {VAR;} - a : ID (',' ID)*-> ^(VAR ID)+ ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a,b,c") - self.assertEquals("(VAR a) (VAR b) (VAR c)", found) - - - def testTokenUnreferencedOnLeftButDefined(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {VAR;} - a : b -> ID ; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEquals("ID", found) - - - def testImaginaryTokenCopySetText(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {VAR;} - a : ID (',' ID)*-> ^(VAR["var"] ID)+ ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a,b,c") - self.assertEquals("(var a) (var b) (var c)", found) - - - def testImaginaryTokenNoCopyFromToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "{a b c}") - self.assertEquals("({ a b c)", found) - - - def testImaginaryTokenNoCopyFromTokenSetText(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : lc='{' ID+ '}' -> ^(BLOCK[$lc,"block"] ID+) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = 
self.execParser(grammar, "a", "{a b c}") - self.assertEquals("(block a b c)", found) - - - def testMixedRewriteAndAutoAST(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : b b^ ; // 2nd b matches only an INT; can make it root - b : ID INT -> INT ID - | INT - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a 1 2") - self.assertEquals("(2 1 a)", found) - - - def testSubruleWithRewrite(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : b b ; - b : (ID INT -> INT ID | INT INT -> INT+ ) - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a 1 2 3") - self.assertEquals("1 a 2 3", found) - - - def testSubruleWithRewrite2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {TYPE;} - a : b b ; - b : 'int' - ( ID -> ^(TYPE 'int' ID) - | ID '=' INT -> ^(TYPE 'int' ID INT) - ) - ';' - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a; int b=3;") - self.assertEquals("(TYPE int a) (TYPE int b 3)", found) - - - def testNestedRewriteShutsOffAutoAST(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : b b ; - b : ID ( ID (last=ID -> $last)+ ) ';' // get last ID - | INT // should still get auto AST construction - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b c d; 42") - self.assertEquals("d 42", found) - - - def testRewriteActions(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : atom -> ^({self.adaptor.create(INT,"9")} atom) ; - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "3") - self.assertEquals("(9 3)", found) - - - def testRewriteActions2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : atom -> {self.adaptor.create(INT,"9")} atom ; - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "3") - self.assertEquals("9 3", found) - - - def testRefToOldValue(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : (atom -> atom) (op='+' r=atom -> ^($op $a $r) )* ; - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "3+4+5") - self.assertEquals("(+ (+ 3 4) 5)", found) - - - def testCopySemanticsForRules(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : atom -> ^(atom atom) ; // NOT CYCLE! 
(dup atom) - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "3") - self.assertEquals("(3 3)", found) - - - def testCopySemanticsForRules2(self): - # copy type as a root for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : type ID (',' ID)* ';' -> ^(type ID)+ ; - type : 'int' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a,b,c;") - self.assertEquals("(int a) (int b) (int c)", found) - - - def testCopySemanticsForRules3(self): - # copy type *and* modifier even though it's optional - # for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ; - type : 'int' ; - modifier : 'public' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "public int a,b,c;") - self.assertEquals("(int public a) (int public b) (int public c)", found) - - - def testCopySemanticsForRules3Double(self): - # copy type *and* modifier even though it's optional - # for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ^(type modifier? ID)+ ; - type : 'int' ; - modifier : 'public' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "public int a,b,c;") - self.assertEquals("(int public a) (int public b) (int public c) (int public a) (int public b) (int public c)", found) - - - def testCopySemanticsForRules4(self): - # copy type *and* modifier even though it's optional - # for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {MOD;} - a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? 
ID)+ ; - type : 'int' ; - modifier : 'public' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "public int a,b,c;") - self.assertEquals("(int (MOD public) a) (int (MOD public) b) (int (MOD public) c)", found) - - - def testCopySemanticsLists(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {MOD;} - a : ID (',' ID)* ';' -> ID+ ID+ ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a,b,c;") - self.assertEquals("a b c a b c", found) - - - def testCopyRuleLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x=b -> $x $x; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEquals("a a", found) - - - def testCopyRuleLabel2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x=b -> ^($x $x); - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEquals("(a a)", found) - - - def testQueueingOfTokens(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : 'int' ID (',' ID)* ';' -> ^('int' ID+) ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a,b,c;") - self.assertEquals("(int a b c)", found) - - - def testCopyOfTokens(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : 'int' ID ';' -> 'int' ID 'int' ID ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a;") - self.assertEquals("int a int a", found) - - - def testTokenCopyInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : 'int' ID (',' ID)* ';' -> ^('int' ID)+ ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a,b,c;") - self.assertEquals("(int a) (int b) (int c)", found) - - - def testTokenCopyInLoopAgainstTwoOthers(self): - # must smear 'int' copies across as root of multiple trees - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)+ ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "int a:1,b:2,c:3;") - self.assertEquals("(int a 1) (int b 2) (int c 3)", found) - - - def testListRefdOneAtATime(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID+ -> ID ID ID ; // works if 3 input IDs - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b c") - self.assertEquals("a b c", found) - - - def testSplitListWithLabels(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {VAR;} - a : first=ID others+=ID* -> $first VAR $others+ ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = 
self.execParser(grammar, "a", "a b c") - self.assertEquals("a VAR b c", found) - - - def testComplicatedMelange(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : A A b=B B b=B c+=C C c+=C D {s=$D.text} -> A+ B+ C+ D ; - type : 'int' | 'float' ; - A : 'a' ; - B : 'b' ; - C : 'c' ; - D : 'd' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a a b b b c c c d") - self.assertEquals("a a b b b c c c d", found) - - - def testRuleLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x=b -> $x; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEquals("a", found) - - - def testAmbiguousRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID a -> a | INT ; - ID : 'a'..'z'+ ; - INT: '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, - "a", "abc 34") - self.assertEquals("34", found) - - - def testRuleListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x+=b x+=b -> $x+; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testRuleListLabel2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x+=b x+=b -> $x $x*; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testOptional(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x=b (y=b)? -> $x $y?; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEquals("a", found) - - - def testOptional2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x=ID (y=b)? -> $x $y?; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testOptional3(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x=ID (y=b)? -> ($x $y)?; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testOptional4(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x+=ID (y=b)? -> ($x $y)?; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("a b", found) - - - def testOptional5(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : ID -> ID? 
; // match an ID to optional ID - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEquals("a", found) - - - def testArbitraryExprType(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : x+=b x+=b -> {CommonTree(None)}; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEquals("", found) - - - def testSet(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a: (INT|ID)+ -> INT+ ID+ ; - INT: '0'..'9'+; - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "2 a 34 de") - self.assertEquals("2 34 a de", found) - - - def testSet2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a: (INT|ID) -> INT? ID? ; - INT: '0'..'9'+; - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "2") - self.assertEquals("2", found) - - - @testbase.broken("http://www.antlr.org:8888/browse/ANTLR-162", - antlr3.tree.RewriteEmptyStreamException) - def testSetWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : x=(INT|ID) -> $x ; - INT: '0'..'9'+; - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "2") - self.assertEquals("2", found) - - - def testRewriteAction(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens { FLOAT; } - r - : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text+".0"))} - ; - INT : '0'..'9'+; - WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;}; - ''') - - found = self.execParser(grammar, "r", "25") - self.assertEquals("25.0", found) - - - def testOptionalSubruleWithoutRealElements(self): - # copy type *and* modifier even though it's optional - # for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python;output=AST;} - tokens {PARMS;} - - modulo - : 'modulo' ID ('(' parms+ ')')? -> ^('modulo' ID ^(PARMS parms+)?) - ; - parms : '#'|ID; - ID : ('a'..'z' | 'A'..'Z')+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - found = self.execParser(grammar, "modulo", "modulo abc (x y #)") - self.assertEquals("(modulo abc (PARMS x y #))", found) - - - ## C A R D I N A L I T Y I S S U E S - - def testCardinality(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - tokens {BLOCK;} - a : ID ID INT INT INT -> (ID INT)+; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - try: - self.execParser(grammar, "a", "a b 3 4 5") - self.fail() - except antlr3.tree.RewriteCardinalityException: - pass - - - def testCardinality2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID+ -> ID ID ID ; // only 2 input IDs - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - try: - self.execParser(grammar, "a", "a b") - self.fail() - except antlr3.tree.RewriteCardinalityException: - pass - - - def testCardinality3(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID? 
INT -> ID INT ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - try: - self.execParser(grammar, "a", "3") - self.fail() - except antlr3.tree.RewriteEmptyStreamException: - pass - - - def testLoopCardinality(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID? INT -> ID+ INT ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - try: - self.execParser(grammar, "a", "3") - self.fail() - except antlr3.tree.RewriteEarlyExitException: - pass - - - def testWildcard(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python;output=AST;} - a : ID c=. -> $c; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEquals("34", found) - - - # E R R O R S - - def testExtraTokenInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - tokens {EXPR;} - decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "decl", "int 34 x=1;", - expectErrors=True) - self.assertEquals(["line 1:4 extraneous input u'34' expecting ID"], - errors) - self.assertEquals("(EXPR int x 1)", found) # tree gets correct x and 1 tokens - - - #@testbase.broken("FIXME", AssertionError) - def testMissingIDInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - tokens {EXPR;} - decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "decl", "int =1;", - expectErrors=True) - self.assertEquals(["line 1:4 missing ID at u'='"], errors) - self.assertEquals("(EXPR int <missing ID> 1)", found) # tree gets invented ID token - - - def testMissingSetInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - tokens {EXPR;} - decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "decl", "x=1;", - expectErrors=True) - self.assertEquals(["line 1:0 mismatched input u'x' expecting set None"], - errors); - self.assertEquals("(EXPR <error: x> x 1)", found) # tree gets invented ID token - - - def testMissingTokenGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : ID INT -> ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "abc", - expectErrors=True) - self.assertEquals(["line 1:3 missing INT at '<EOF>'"], errors) - # doesn't do in-line recovery for sets (yet?)
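- # "<EOF>" in the message above is only the printable stand-in used for - # the end-of-file token in error displays; a minimal sketch of that - # display rule (plain Python; EOF == -1 as in the antlr3 token constants): - # - # EOF = -1 - # def token_error_display(token_type, text): - # return "'<EOF>'" if token_type == EOF else "'%s'" % text - # - # token_error_display(EOF, None) # -> "'<EOF>'"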
- self.assertEquals("abc ", found) - - - def testExtraTokenGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : b c -> b c; - b : ID -> ID ; - c : INT -> INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "abc ick 34", - expectErrors=True) - self.assertEquals(["line 1:4 extraneous input u'ick' expecting INT"], - errors) - self.assertEquals("abc 34", found) - - - #@testbase.broken("FIXME", AssertionError) - def testMissingFirstTokenGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : ID INT -> ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "34", expectErrors=True) - self.assertEquals(["line 1:0 missing ID at u'34'"], errors) - self.assertEquals(" 34", found) - - - #@testbase.broken("FIXME", AssertionError) - def testMissingFirstTokenGivesErrorNode2(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : b c -> b c; - b : ID -> ID ; - c : INT -> INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "34", expectErrors=True) - # finds an error at the first token, 34, and re-syncs. - # re-synchronizing does not consume a token because 34 follows - # ref to rule b (start of c). It then matches 34 in c. - self.assertEquals(["line 1:0 missing ID at u'34'"], errors) - self.assertEquals(" 34", found) - - - def testNoViableAltGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python;output=AST;} - a : b -> b | c -> c; - b : ID -> ID ; - c : INT -> INT ; - ID : 'a'..'z'+ ; - S : '*' ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found, errors = self.execParser(grammar, "a", "*", expectErrors=True) - # finds an error at the first token, 34, and re-syncs. - # re-synchronizing does not consume a token because 34 follows - # ref to rule b (start of c). It then matches 34 in c. 
- self.assertEquals(["line 1:0 no viable alternative at input u'*'"], - errors); - self.assertEquals(",1:0], resync=*>", - found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t059debug.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t059debug.py deleted file mode 100644 index 1b620d1e..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t059debug.py +++ /dev/null @@ -1,783 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import antlr3.debug -import testbase -import sys -import threading -import socket -import errno -import time - -class Debugger(threading.Thread): - def __init__(self, port): - super(Debugger, self).__init__() - self.events = [] - self.success = False - self.port = port - - def run(self): - # create listening socket - s = None - tstart = time.time() - while time.time() - tstart < 10: - try: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.connect(('127.0.0.1', self.port)) - break - except socket.error, exc: - if exc.args[0] != errno.ECONNREFUSED: - raise - time.sleep(0.1) - - if s is None: - self.events.append(['nosocket']) - return - - s.setblocking(1) - s.settimeout(10.0) - - output = s.makefile('w', 0) - input = s.makefile('r', 0) - - try: - # handshake - l = input.readline().strip() - assert l == 'ANTLR 2' - l = input.readline().strip() - assert l.startswith('grammar "') - - output.write('ACK\n') - output.flush() - - while True: - event = input.readline().strip() - self.events.append(event.split('\t')) - - output.write('ACK\n') - output.flush() - - if event == 'terminate': - self.success = True - break - - except socket.timeout: - self.events.append(['timeout']) - except socket.error, exc: - self.events.append(['socketerror', exc.args]) - - s.close() - - -class T(testbase.ANTLRTest): - def execParser(self, grammar, grammarEntry, input, listener, - parser_args={}): - if listener is None: - port = 49100 - debugger = Debugger(port) - debugger.start() - # TODO(pink): install alarm, so it doesn't hang forever in case of a bug - - else: - port = None - - try: - lexerCls, parserCls = self.compileInlineGrammar( - grammar, options='-debug') - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream, dbg=listener, port=port, **parser_args) - getattr(parser, grammarEntry)() - - finally: - if listener is None: - debugger.join() - return debugger - - def testBasicParser(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ID EOF; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - listener = antlr3.debug.RecordDebugEventListener() - - self.execParser( - grammar, 'a', - input="a", - listener=listener) - - # We only check that some LT events are present. How many is subject - # to change (at the time of writing there are two, which is one too - # many). - lt_events = [event for event in listener.events - if event.startswith("LT ")] - self.assertNotEqual(lt_events, []) - - # For the rest, filter out LT events to get a reliable test. 
- expected = ["enterRule a", - "location 6:1", - "location 6:5", - "location 6:8", - "location 6:11", - "exitRule a"] - found = [event for event in listener.events - if not event.startswith("LT ")] - self.assertListEqual(found, expected) - - def testSocketProxy(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ID EOF; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '6', '8'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['consumeToken', '-1', '-1', '0', '1', '1', '"'], - ['location', '6', '11'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - def testRecognitionException(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ID EOF; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a b", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '5', '99', '1', '1', '"'], - ['location', '6', '8'], - ['LT', '1', '2', '4', '0', '1', '2', '"b'], - ['LT', '1', '2', '4', '0', '1', '2', '"b'], - ['LT', '2', '-1', '-1', '0', '1', '3', '"'], - ['LT', '1', '2', '4', '0', '1', '2', '"b'], - ['LT', '1', '2', '4', '0', '1', '2', '"b'], - ['beginResync'], - ['consumeToken', '2', '4', '0', '1', '2', '"b'], - ['endResync'], - ['exception', 'UnwantedTokenException', '2', '1', '2'], - ['LT', '1', '-1', '-1', '0', '1', '3', '"'], - ['consumeToken', '-1', '-1', '0', '1', '3', '"'], - ['location', '6', '11'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testSemPred(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : {True}? 
ID EOF; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['semanticPredicate', '1', 'True'], - ['location', '6', '13'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '6', '16'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['consumeToken', '-1', '-1', '0', '1', '1', '"'], - ['location', '6', '19'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testPositiveClosureBlock(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ID ( ID | INT )+ EOF; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a 1 b c 3", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'], - ['location', '6', '8'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['consumeToken', '2', '5', '0', '1', '2', '"1'], - ['consumeHiddenToken', '3', '6', '99', '1', '3', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '4', '4', '0', '1', '4', '"b'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '4', '4', '0', '1', '4', '"b'], - ['consumeToken', '4', '4', '0', '1', '4', '"b'], - ['consumeHiddenToken', '5', '6', '99', '1', '5', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '6', '4', '0', '1', '6', '"c'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '6', '4', '0', '1', '6', '"c'], - ['consumeToken', '6', '4', '0', '1', '6', '"c'], - ['consumeHiddenToken', '7', '6', '99', '1', '7', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '8', '5', '0', '1', '8', '"3'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '8', '5', '0', '1', '8', '"3'], - ['consumeToken', '8', '5', '0', '1', '8', '"3'], - ['enterDecision', '1', '0'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['exitDecision', '1'], - ['exitSubRule', '1'], - ['location', '6', '22'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['consumeToken', '-1', '-1', '0', '1', '9', '"'], - ['location', '6', '25'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testClosureBlock(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ID ( ID | INT )* EOF; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a 1 b c 3", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', 
'1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'], - ['location', '6', '8'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['consumeToken', '2', '5', '0', '1', '2', '"1'], - ['consumeHiddenToken', '3', '6', '99', '1', '3', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '4', '4', '0', '1', '4', '"b'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '4', '4', '0', '1', '4', '"b'], - ['consumeToken', '4', '4', '0', '1', '4', '"b'], - ['consumeHiddenToken', '5', '6', '99', '1', '5', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '6', '4', '0', '1', '6', '"c'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '6', '4', '0', '1', '6', '"c'], - ['consumeToken', '6', '4', '0', '1', '6', '"c'], - ['consumeHiddenToken', '7', '6', '99', '1', '7', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '8', '5', '0', '1', '8', '"3'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '8', '5', '0', '1', '8', '"3'], - ['consumeToken', '8', '5', '0', '1', '8', '"3'], - ['enterDecision', '1', '0'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['exitDecision', '1'], - ['exitSubRule', '1'], - ['location', '6', '22'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['consumeToken', '-1', '-1', '0', '1', '9', '"'], - ['location', '6', '25'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testMismatchedSetException(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ID ( ID | INT ) EOF; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '6', '8'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['exception', 'MismatchedSetException', '1', '1', '1'], - ['exception', 'MismatchedSetException', '1', '1', '1'], - ['beginResync'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['endResync'], - ['location', '6', '24'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testBlock(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ID ( b | c ) EOF; - b : ID; - c : INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a 1", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', 
'4', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'], - ['location', '6', '8'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['exitDecision', '1'], - ['enterAlt', '2'], - ['location', '6', '14'], - ['enterRule', 'T.g', 'c'], - ['location', '8', '1'], - ['enterAlt', '1'], - ['location', '8', '5'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['consumeToken', '2', '5', '0', '1', '2', '"1'], - ['location', '8', '8'], - ['exitRule', 'T.g', 'c'], - ['exitSubRule', '1'], - ['location', '6', '18'], - ['LT', '1', '-1', '-1', '0', '1', '3', '"'], - ['LT', '1', '-1', '-1', '0', '1', '3', '"'], - ['consumeToken', '-1', '-1', '0', '1', '3', '"'], - ['location', '6', '21'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testNoViableAlt(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ID ( b | c ) EOF; - b : ID; - c : INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - BANG : '!' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a !", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['consumeToken', '0', '5', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '7', '99', '1', '1', '"'], - ['location', '6', '8'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '2', '4', '0', '1', '2', '"!'], - ['LT', '1', '2', '4', '0', '1', '2', '"!'], - ['LT', '1', '2', '4', '0', '1', '2', '"!'], - ['exception', 'NoViableAltException', '2', '1', '2'], - ['exitDecision', '1'], - ['exitSubRule', '1'], - ['exception', 'NoViableAltException', '2', '1', '2'], - ['beginResync'], - ['LT', '1', '2', '4', '0', '1', '2', '"!'], - ['consumeToken', '2', '4', '0', '1', '2', '"!'], - ['LT', '1', '-1', '-1', '0', '1', '3', '"'], - ['endResync'], - ['location', '6', '21'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testRuleBlock(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : b | c; - b : ID; - c : INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="1", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '0', '5', '0', '1', '0', '"1'], - ['exitDecision', '1'], - ['enterAlt', '2'], - ['location', '6', '9'], - ['enterRule', 'T.g', 'c'], - ['location', '8', '1'], - ['enterAlt', '1'], - ['location', '8', '5'], - ['LT', '1', '0', '5', '0', '1', '0', '"1'], - ['LT', '1', '0', '5', '0', '1', '0', '"1'], - ['consumeToken', '0', '5', '0', '1', '0', '"1'], - ['location', '8', '8'], - ['exitRule', 'T.g', 'c'], - ['location', '6', '10'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testRuleBlockSingleAlt(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : b; - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - 
input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['enterRule', 'T.g', 'b'], - ['location', '7', '1'], - ['enterAlt', '1'], - ['location', '7', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '7', '7'], - ['exitRule', 'T.g', 'b'], - ['location', '6', '6'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testBlockSingleAlt(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ( b ); - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['enterAlt', '1'], - ['location', '6', '7'], - ['enterRule', 'T.g', 'b'], - ['location', '7', '1'], - ['enterAlt', '1'], - ['location', '7', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '7', '7'], - ['exitRule', 'T.g', 'b'], - ['location', '6', '10'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testDFA(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python; - } - a : ( b | c ) EOF; - b : ID* INT; - c : ID+ BANG; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - BANG : '!'; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a!", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['mark', '0'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['consumeToken', '0', '5', '0', '1', '0', '"a'], - ['LT', '1', '1', '4', '0', '1', '1', '"!'], - ['consumeToken', '1', '4', '0', '1', '1', '"!'], - ['rewind', '0'], - ['exitDecision', '1'], - ['enterAlt', '2'], - ['location', '6', '11'], - ['enterRule', 'T.g', 'c'], - ['location', '8', '1'], - ['enterAlt', '1'], - ['location', '8', '5'], - ['enterSubRule', '3'], - ['enterDecision', '3', '0'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['exitDecision', '3'], - ['enterAlt', '1'], - ['location', '8', '5'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['consumeToken', '0', '5', '0', '1', '0', '"a'], - ['enterDecision', '3', '0'], - ['LT', '1', '1', '4', '0', '1', '1', '"!'], - ['exitDecision', '3'], - ['exitSubRule', '3'], - ['location', '8', '9'], - ['LT', '1', '1', '4', '0', '1', '1', '"!'], - ['LT', '1', '1', '4', '0', '1', '1', '"!'], - ['consumeToken', '1', '4', '0', '1', '1', '"!'], - ['location', '8', '13'], - ['exitRule', 'T.g', 'c'], - ['exitSubRule', '1'], - ['location', '6', '15'], - ['LT', '1', '-1', '-1', '0', '1', '2', '"'], - ['LT', '1', '-1', '-1', '0', '1', '2', '"'], - ['consumeToken', '-1', '-1', '0', '1', '2', '"'], - ['location', '6', '18'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testBasicAST(self): - grammar = textwrap.dedent( - r''' - grammar T; - 
options { - language=Python; - output=AST; - } - a : ( b | c ) EOF!; - b : ID* INT -> ^(INT ID*); - c : ID+ BANG -> ^(BANG ID+); - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - BANG : '!'; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - listener = antlr3.debug.RecordDebugEventListener() - - self.execParser( - grammar, 'a', - input="a!", - listener=listener) - - # don't check output for now (too dynamic), I'm satisfied if it - # doesn't crash - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t060leftrecursion.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t060leftrecursion.py deleted file mode 100644 index 0c064b60..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t060leftrecursion.py +++ /dev/null @@ -1,468 +0,0 @@ -import unittest -import re -import textwrap -import antlr3 -import testbase - - -# Left-recursion resolution is not yet enabled in the tool. - -# class TestLeftRecursion(testbase.ANTLRTest): -# def parserClass(self, base): -# class TParser(base): -# def __init__(self, *args, **kwargs): -# base.__init__(self, *args, **kwargs) - -# self._output = "" - - -# def capture(self, t): -# self._output += str(t) - - -# def recover(self, input, re): -# # no error recovery yet, just crash! -# raise - -# return TParser - - -# def execParser(self, grammar, grammarEntry, input): -# lexerCls, parserCls = self.compileInlineGrammar(grammar) - -# cStream = antlr3.StringStream(input) -# lexer = lexerCls(cStream) -# tStream = antlr3.CommonTokenStream(lexer) -# parser = parserCls(tStream) -# getattr(parser, grammarEntry)() -# return parser._output - - -# def runTests(self, grammar, tests, grammarEntry): -# lexerCls, parserCls = self.compileInlineGrammar(grammar) - -# build_ast = re.search(r'output\s*=\s*AST', grammar) - -# for input, expecting in tests: -# cStream = antlr3.StringStream(input) -# lexer = lexerCls(cStream) -# tStream = antlr3.CommonTokenStream(lexer) -# parser = parserCls(tStream) -# r = getattr(parser, grammarEntry)() -# found = parser._output -# if build_ast: -# found += r.tree.toStringTree() - -# self.assertEquals( -# expecting, found, -# "%r != %r (for input %r)" % (expecting, found, input)) - - -# def testSimple(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# } -# s : a { self.capture($a.text) } ; -# a : a ID -# | ID -# ; -# ID : 'a'..'z'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# found = self.execParser(grammar, 's', 'a b c') -# expecting = "abc" -# self.assertEquals(expecting, found) - - -# def testSemPred(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# } -# s : a { self.capture($a.text) } ; -# a : a {True}? ID -# | ID -# ; -# ID : 'a'..'z'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# found = self.execParser(grammar, "s", "a b c") -# expecting = "abc" -# self.assertEquals(expecting, found) - -# def testTernaryExpr(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# output=AST; -# } -# e : e '*'^ e -# | e '+'^ e -# | e '?'^ e ':'! e -# | e '='^ e -# | ID -# ; -# ID : 'a'..'z'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("a+b", "(+ a b)"), -# ("a*b", "(* a b)"), -# ("a?b:c", "(? a b c)"), -# ("a=b=c", "(= a (= b c))"), -# ("a?b+c:d", "(? a (+ b c) d)"), -# ("a?b=c:d", "(? a (= b c) d)"), -# ("a? b?c:d : e", "(? a (? b c d) e)"), -# ("a?b: c?d:e", "(? a b (? 
c d e))"), -# ] -# self.runTests(grammar, tests, "e") - - -# def testDeclarationsUsingASTOperators(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# output=AST; -# } -# declarator -# : declarator '['^ e ']'! -# | declarator '['^ ']'! -# | declarator '('^ ')'! -# | '*'^ declarator // binds less tight than suffixes -# | '('! declarator ')'! -# | ID -# ; -# e : INT ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("*a", "(* a)"), -# ("**a", "(* (* a))"), -# ("a[3]", "([ a 3)"), -# ("b[]", "([ b)"), -# ("(a)", "a"), -# ("a[]()", "(( ([ a))"), -# ("a[][]", "([ ([ a))"), -# ("*a[]", "(* ([ a))"), -# ("(*a)[]", "([ (* a))"), -# ] -# self.runTests(grammar, tests, "declarator") - - -# def testDeclarationsUsingRewriteOperators(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# output=AST; -# } -# declarator -# : declarator '[' e ']' -> ^('[' declarator e) -# | declarator '[' ']' -> ^('[' declarator) -# | declarator '(' ')' -> ^('(' declarator) -# | '*' declarator -> ^('*' declarator) // binds less tight than suffixes -# | '(' declarator ')' -> declarator -# | ID -> ID -# ; -# e : INT ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("*a", "(* a)"), -# ("**a", "(* (* a))"), -# ("a[3]", "([ a 3)"), -# ("b[]", "([ b)"), -# ("(a)", "a"), -# ("a[]()", "(( ([ a))"), -# ("a[][]", "([ ([ a))"), -# ("*a[]", "(* ([ a))"), -# ("(*a)[]", "([ (* a))"), -# ] -# self.runTests(grammar, tests, "declarator") - - -# def testExpressionsUsingASTOperators(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# output=AST; -# } -# e : e '.'^ ID -# | e '.'^ 'this' -# | '-'^ e -# | e '*'^ e -# | e ('+'^|'-'^) e -# | INT -# | ID -# ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("1", "1"), -# ("a+1", "(+ a 1)"), -# ("a*1", "(* a 1)"), -# ("a.b", "(. a b)"), -# ("a.this", "(. a this)"), -# ("a-b+c", "(+ (- a b) c)"), -# ("a+b*c", "(+ a (* b c))"), -# ("a.b+1", "(+ (. a b) 1)"), -# ("-a", "(- a)"), -# ("-a+b", "(+ (- a) b)"), -# ("-a.b", "(- (. a b))"), -# ] -# self.runTests(grammar, tests, "e") - - -# @testbase.broken( -# "Grammar compilation returns errors", testbase.GrammarCompileError) -# def testExpressionsUsingRewriteOperators(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# output=AST; -# } -# e : e '.' ID -> ^('.' e ID) -# | e '.' 'this' -> ^('.' e 'this') -# | '-' e -> ^('-' e) -# | e '*' b=e -> ^('*' e $b) -# | e (op='+'|op='-') b=e -> ^($op e $b) -# | INT -> INT -# | ID -> ID -# ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("1", "1"), -# ("a+1", "(+ a 1)"), -# ("a*1", "(* a 1)"), -# ("a.b", "(. a b)"), -# ("a.this", "(. a this)"), -# ("a+b*c", "(+ a (* b c))"), -# ("a.b+1", "(+ (. a b) 1)"), -# ("-a", "(- a)"), -# ("-a+b", "(+ (- a) b)"), -# ("-a.b", "(- (. 
a b))"), -# ] -# self.runTests(grammar, tests, "e") - - -# def testExpressionAssociativity(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# output=AST; -# } -# e -# : e '.'^ ID -# | '-'^ e -# | e '^'^ e -# | e '*'^ e -# | e ('+'^|'-'^) e -# | e ('='^ |'+='^) e -# | INT -# | ID -# ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("1", "1"), -# ("a+1", "(+ a 1)"), -# ("a*1", "(* a 1)"), -# ("a.b", "(. a b)"), -# ("a-b+c", "(+ (- a b) c)"), -# ("a+b*c", "(+ a (* b c))"), -# ("a.b+1", "(+ (. a b) 1)"), -# ("-a", "(- a)"), -# ("-a+b", "(+ (- a) b)"), -# ("-a.b", "(- (. a b))"), -# ("a^b^c", "(^ a (^ b c))"), -# ("a=b=c", "(= a (= b c))"), -# ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"), -# ] -# self.runTests(grammar, tests, "e") - - -# def testJavaExpressions(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# output=AST; -# } -# expressionList -# : e (','! e)* -# ; -# e : '('! e ')'! -# | 'this' -# | 'super' -# | INT -# | ID -# | type '.'^ 'class' -# | e '.'^ ID -# | e '.'^ 'this' -# | e '.'^ 'super' '('^ expressionList? ')'! -# | e '.'^ 'new'^ ID '('! expressionList? ')'! -# | 'new'^ type ( '(' expressionList? ')'! | (options {k=1;}:'[' e ']'!)+) // ugly; simplified -# | e '['^ e ']'! -# | '('^ type ')'! e -# | e ('++'^ | '--'^) -# | e '('^ expressionList? ')'! -# | ('+'^|'-'^|'++'^|'--'^) e -# | ('~'^|'!'^) e -# | e ('*'^|'/'^|'%'^) e -# | e ('+'^|'-'^) e -# | e ('<'^ '<' | '>'^ '>' '>' | '>'^ '>') e -# | e ('<='^ | '>='^ | '>'^ | '<'^) e -# | e 'instanceof'^ e -# | e ('=='^ | '!='^) e -# | e '&'^ e -# | e '^'^ e -# | e '|'^ e -# | e '&&'^ e -# | e '||'^ e -# | e '?' e ':' e -# | e ('='^ -# |'+='^ -# |'-='^ -# |'*='^ -# |'/='^ -# |'&='^ -# |'|='^ -# |'^='^ -# |'>>='^ -# |'>>>='^ -# |'<<='^ -# |'%='^) e -# ; -# type: ID -# | ID '['^ ']'! -# | 'int' -# | 'int' '['^ ']'! -# ; -# ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("1", "1"), -# ("a+1", "(+ a 1)"), -# ("a*1", "(* a 1)"), -# ("a.b", "(. a b)"), -# ("a-b+c", "(+ (- a b) c)"), -# ("a+b*c", "(+ a (* b c))"), -# ("a.b+1", "(+ (. a b) 1)"), -# ("-a", "(- a)"), -# ("-a+b", "(+ (- a) b)"), -# ("-a.b", "(- (. a b))"), -# ("a^b^c", "(^ a (^ b c))"), -# ("a=b=c", "(= a (= b c))"), -# ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"), -# ("a|b&c", "(| a (& b c))"), -# ("(a|b)&c", "(& (| a b) c)"), -# ("a > b", "(> a b)"), -# ("a >> b", "(> a b)"), # text is from one token -# ("a < b", "(< a b)"), -# ("(T)x", "(( T x)"), -# ("new A().b", "(. (new A () b)"), -# ("(T)t.f()", "(( (( T (. t f)))"), -# ("a.f(x)==T.c", "(== (( (. a f) x) (. T c))"), -# ("a.f().g(x,1)", "(( (. (( (. 
a f)) g) x 1)"), -# ("new T[((n-1) * x) + 1]", "(new T [ (+ (* (- n 1) x) 1))"), -# ] -# self.runTests(grammar, tests, "e") - - -# def testReturnValueAndActions(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# } -# s : e { self.capture($e.v) } ; -# e returns [v, ignored] -# : e '*' b=e {$v *= $b.v;} -# | e '+' b=e {$v += $b.v;} -# | INT {$v = int($INT.text);} -# ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("4", "4"), -# ("1+2", "3") -# ] -# self.runTests(grammar, tests, "s") - - -# def testReturnValueAndActionsAndASTs(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python; -# output=AST; -# } -# s : e { self.capture("v=\%s, " \% $e.v) } ; -# e returns [v, ignored] -# : e '*'^ b=e {$v *= $b.v;} -# | e '+'^ b=e {$v += $b.v;} -# | INT {$v = int($INT.text);} -# ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("4", "v=4, 4"), -# ("1+2", "v=3, (+ 1 2)"), -# ] -# self.runTests(grammar, tests, "s") - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/testbase.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/testbase.py deleted file mode 100644 index 19c7fec7..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/tests/testbase.py +++ /dev/null @@ -1,450 +0,0 @@ -import unittest -import imp -import os -import errno -import sys -import glob -import re -import tempfile -import shutil -import inspect -import hashlib -from distutils.errors import * -import antlr3 - -def unlink(path): - try: - os.unlink(path) - except OSError, exc: - if exc.errno != errno.ENOENT: - raise - - -class GrammarCompileError(Exception): - """Grammar failed to compile.""" - pass - - -# At least on MacOSX tempdir (/tmp) is a symlink. It's sometimes dereferences, -# sometimes not, breaking the inspect.getmodule() function. -testbasedir = os.path.join( - os.path.realpath(tempfile.gettempdir()), - 'antlr3-test') - - -class BrokenTest(unittest.TestCase.failureException): - def __repr__(self): - name, reason = self.args - return '%s: %s: %s works now' % ( - (self.__class__.__name__, name, reason)) - - -def broken(reason, *exceptions): - '''Indicates a failing (or erroneous) test case fails that should succeed. - If the test fails with an exception, list the exception type in args''' - def wrapper(test_method): - def replacement(*args, **kwargs): - try: - test_method(*args, **kwargs) - except exceptions or unittest.TestCase.failureException: - pass - else: - raise BrokenTest(test_method.__name__, reason) - replacement.__doc__ = test_method.__doc__ - replacement.__name__ = 'XXX_' + test_method.__name__ - replacement.todo = reason - return replacement - return wrapper - - -dependencyCache = {} -compileErrorCache = {} - -# setup java CLASSPATH -if 'CLASSPATH' not in os.environ: - cp = [] - - baseDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')) - libDir = os.path.join(baseDir, 'lib') - - jar = os.path.join(libDir, 'ST-4.0.1.jar') - if not os.path.isfile(jar): - raise DistutilsFileError( - "Missing file '%s'. Grap it from a distribution package." - % jar, - ) - cp.append(jar) - - jar = os.path.join(libDir, 'antlr-2.7.7.jar') - if not os.path.isfile(jar): - raise DistutilsFileError( - "Missing file '%s'. Grap it from a distribution package." 
-            % jar,
-            )
-    cp.append(jar)
-
-    jar = os.path.join(libDir, 'junit-4.2.jar')
-    if not os.path.isfile(jar):
-        raise DistutilsFileError(
-            "Missing file '%s'. Grap it from a distribution package."
-            % jar,
-            )
-    cp.append(jar)
-
-    cp.append(os.path.join(baseDir, 'runtime', 'Python', 'build'))
-
-    classpath = '-cp "' + ':'.join([os.path.abspath(p) for p in cp]) + '"'
-
-else:
-    classpath = ''
-
-
-class ANTLRTest(unittest.TestCase):
-    def __init__(self, *args, **kwargs):
-        unittest.TestCase.__init__(self, *args, **kwargs)
-
-        self.moduleName = os.path.splitext(os.path.basename(sys.modules[self.__module__].__file__))[0]
-        self.className = self.__class__.__name__
-        self._baseDir = None
-
-        self.lexerModule = None
-        self.parserModule = None
-
-        self.grammarName = None
-        self.grammarType = None
-
-
-    def assertListEqual(self, a, b):
-        if a == b:
-            return
-
-        import difflib
-        a = [str(l) + '\n' for l in a]
-        b = [str(l) + '\n' for l in b]
-
-        raise AssertionError(''.join(difflib.unified_diff(a, b)))
-
-
-    @property
-    def baseDir(self):
-        if self._baseDir is None:
-            testName = 'unknownTest'
-            for frame in inspect.stack():
-                code = frame[0].f_code
-                codeMod = inspect.getmodule(code)
-                if codeMod is None:
-                    continue
-
-                # skip frames not in requested module
-                if codeMod is not sys.modules[self.__module__]:
-                    continue
-
-                # skip some unwanted names
-                if code.co_name in ('nextToken', '<module>'):
-                    continue
-
-                if code.co_name.startswith('test'):
-                    testName = code.co_name
-                    break
-
-            self._baseDir = os.path.join(
-                testbasedir,
-                self.moduleName, self.className, testName)
-            if not os.path.isdir(self._baseDir):
-                os.makedirs(self._baseDir)
-
-        return self._baseDir
-
-
-    def _invokeantlr(self, dir, file, options, javaOptions=''):
-        cmd = 'cd %s; java %s %s org.antlr.Tool -o . %s %s 2>&1' % (
-            dir, javaOptions, classpath, options, file
-            )
-        fp = os.popen(cmd)
-        output = ''
-        failed = False
-        for line in fp:
-            output += line
-
-            if line.startswith('error('):
-                failed = True
-
-        rc = fp.close()
-        if rc is not None:
-            failed = True
-
-        if failed:
-            raise GrammarCompileError(
-                "Failed to compile grammar '%s':\n%s\n\n" % (file, cmd)
-                + output
-                )
-
-
-    def compileGrammar(self, grammarName=None, options='', javaOptions=''):
-        if grammarName is None:
-            grammarName = self.moduleName + '.g'
-
-        self._baseDir = os.path.join(
-            testbasedir,
-            self.moduleName)
-        if not os.path.isdir(self._baseDir):
-            os.makedirs(self._baseDir)
-
-        if self.grammarName is None:
-            self.grammarName = os.path.splitext(grammarName)[0]
-
-        grammarPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), grammarName)
-
-        # get type and name from first grammar line
-        grammar = open(grammarPath, 'r').read()
-        m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE)
-        assert m is not None, grammar
-        self.grammarType = m.group(2)
-        if self.grammarType is None:
-            self.grammarType = 'combined'
-
-        if self.grammarType is None:
-            assert self.grammarType in ('lexer', 'parser', 'tree', 'combined'), self.grammarType
-
-        # don't try to rebuild grammar, if it already failed
-        if grammarName in compileErrorCache:
-            return
-
-        try:
-            # # get dependencies from antlr
-            # if grammarName in dependencyCache:
-            #     dependencies = dependencyCache[grammarName]
-
-            # else:
-            #     dependencies = []
-            #     cmd = ('cd %s; java %s %s org.antlr.Tool -o . 
-depend %s 2>&1' - # % (self.baseDir, javaOptions, classpath, grammarPath)) - - # output = "" - # failed = False - - # fp = os.popen(cmd) - # for line in fp: - # output += line - - # if line.startswith('error('): - # failed = True - # elif ':' in line: - # a, b = line.strip().split(':', 1) - # dependencies.append( - # (os.path.join(self.baseDir, a.strip()), - # [os.path.join(self.baseDir, b.strip())]) - # ) - - # rc = fp.close() - # if rc is not None: - # failed = True - - # if failed: - # raise GrammarCompileError( - # "antlr -depend failed with code %s on grammar '%s':\n\n" - # % (rc, grammarName) - # + cmd - # + "\n" - # + output - # ) - - # # add dependencies to my .stg files - # templateDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'tool', 'src', 'main', 'resources', 'org', 'antlr', 'codegen', 'templates', 'Python')) - # templates = glob.glob(os.path.join(templateDir, '*.stg')) - - # for dst, src in dependencies: - # src.extend(templates) - - # dependencyCache[grammarName] = dependencies - - # rebuild = False - # for dest, sources in dependencies: - # if not os.path.isfile(dest): - # rebuild = True - # break - - # for source in sources: - # if os.path.getmtime(source) > os.path.getmtime(dest): - # rebuild = True - # break - - - # if rebuild: - # self._invokeantlr(self.baseDir, grammarPath, options, javaOptions) - - self._invokeantlr(self.baseDir, grammarPath, options, javaOptions) - - except: - # mark grammar as broken - compileErrorCache[grammarName] = True - raise - - - def lexerClass(self, base): - """Optionally build a subclass of generated lexer class""" - - return base - - - def parserClass(self, base): - """Optionally build a subclass of generated parser class""" - - return base - - - def walkerClass(self, base): - """Optionally build a subclass of generated walker class""" - - return base - - - def __load_module(self, name): - modFile, modPathname, modDescription \ - = imp.find_module(name, [self.baseDir]) - - return imp.load_module( - name, modFile, modPathname, modDescription - ) - - - def getLexer(self, *args, **kwargs): - """Build lexer instance. Arguments are passed to lexer.__init__().""" - - if self.grammarType == 'lexer': - self.lexerModule = self.__load_module(self.grammarName) - cls = getattr(self.lexerModule, self.grammarName) - else: - self.lexerModule = self.__load_module(self.grammarName + 'Lexer') - cls = getattr(self.lexerModule, self.grammarName + 'Lexer') - - cls = self.lexerClass(cls) - - lexer = cls(*args, **kwargs) - - return lexer - - - def getParser(self, *args, **kwargs): - """Build parser instance. Arguments are passed to parser.__init__().""" - - if self.grammarType == 'parser': - self.lexerModule = self.__load_module(self.grammarName) - cls = getattr(self.lexerModule, self.grammarName) - else: - self.parserModule = self.__load_module(self.grammarName + 'Parser') - cls = getattr(self.parserModule, self.grammarName + 'Parser') - cls = self.parserClass(cls) - - parser = cls(*args, **kwargs) - - return parser - - - def getWalker(self, *args, **kwargs): - """Build walker instance. Arguments are passed to walker.__init__().""" - - self.walkerModule = self.__load_module(self.grammarName + 'Walker') - cls = getattr(self.walkerModule, self.grammarName + 'Walker') - cls = self.walkerClass(cls) - - walker = cls(*args, **kwargs) - - return walker - - - def writeInlineGrammar(self, grammar): - # Create a unique ID for this test and use it as the grammar name, - # to avoid class name reuse. This kinda sucks. 
Need to find a way so - # tests can use the same grammar name without messing up the namespace. - # Well, first I should figure out what the exact problem is... - id = hashlib.md5(self.baseDir).hexdigest()[-8:] - grammar = grammar.replace('$TP', 'TP' + id) - grammar = grammar.replace('$T', 'T' + id) - - # get type and name from first grammar line - m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE) - assert m is not None, grammar - grammarType = m.group(2) - if grammarType is None: - grammarType = 'combined' - grammarName = m.group(3) - - assert grammarType in ('lexer', 'parser', 'tree', 'combined'), grammarType - - grammarPath = os.path.join(self.baseDir, grammarName + '.g') - - # dump temp grammar file - fp = open(grammarPath, 'w') - fp.write(grammar) - fp.close() - - return grammarName, grammarPath, grammarType - - - def writeFile(self, name, contents): - testDir = os.path.dirname(os.path.abspath(__file__)) - path = os.path.join(self.baseDir, name) - - fp = open(path, 'w') - fp.write(contents) - fp.close() - - return path - - - def compileInlineGrammar(self, grammar, options='', javaOptions='', - returnModule=False): - # write grammar file - grammarName, grammarPath, grammarType = self.writeInlineGrammar(grammar) - - # compile it - self._invokeantlr( - os.path.dirname(grammarPath), - os.path.basename(grammarPath), - options, - javaOptions - ) - - if grammarType == 'combined': - lexerMod = self.__load_module(grammarName + 'Lexer') - parserMod = self.__load_module(grammarName + 'Parser') - if returnModule: - return lexerMod, parserMod - - lexerCls = getattr(lexerMod, grammarName + 'Lexer') - lexerCls = self.lexerClass(lexerCls) - parserCls = getattr(parserMod, grammarName + 'Parser') - parserCls = self.parserClass(parserCls) - - return lexerCls, parserCls - - if grammarType == 'lexer': - lexerMod = self.__load_module(grammarName) - if returnModule: - return lexerMod - - lexerCls = getattr(lexerMod, grammarName) - lexerCls = self.lexerClass(lexerCls) - - return lexerCls - - if grammarType == 'parser': - parserMod = self.__load_module(grammarName) - if returnModule: - return parserMod - - parserCls = getattr(parserMod, grammarName) - parserCls = self.parserClass(parserCls) - - return parserCls - - if grammarType == 'tree': - walkerMod = self.__load_module(grammarName) - if returnModule: - return walkerMod - - walkerCls = getattr(walkerMod, grammarName) - walkerCls = self.walkerClass(walkerCls) - - return walkerCls diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testantlr3.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testantlr3.py deleted file mode 100644 index d4c67647..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testantlr3.py +++ /dev/null @@ -1,7 +0,0 @@ - -import unittest - -import antlr3 - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testbase.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testbase.py deleted file mode 100644 index 5c928878..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testbase.py +++ /dev/null @@ -1,27 +0,0 @@ -import unittest - -class BrokenTest(unittest.TestCase.failureException): - def __repr__(self): - name, reason = self.args - return '%s: %s: %s works now' % ( - (self.__class__.__name__, name, reason)) - - -def broken(reason, *exceptions): - '''Indicates a failing (or erroneous) test case fails that should succeed. 
- If the test fails with an exception, list the exception type in args''' - def wrapper(test_method): - def replacement(*args, **kwargs): - try: - test_method(*args, **kwargs) - except exceptions or unittest.TestCase.failureException: - pass - else: - raise BrokenTest(test_method.__name__, reason) - replacement.__doc__ = test_method.__doc__ - replacement.__name__ = 'XXX_' + test_method.__name__ - replacement.todo = reason - return replacement - return wrapper - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testdfa.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testdfa.py deleted file mode 100644 index 7df3fb85..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testdfa.py +++ /dev/null @@ -1,63 +0,0 @@ - -import unittest - -import antlr3 - - -class TestDFA(unittest.TestCase): - """Test case for the DFA class.""" - - def setUp(self): - """Setup test fixure. - - We need a Recognizer in order to instanciate a DFA. - - """ - - class TRecognizer(antlr3.BaseRecognizer): - api_version = 'HEAD' - - self.recog = TRecognizer() - - - def testInit(self): - """DFA.__init__() - - Just a smoke test. - - """ - - dfa = antlr3.DFA( - self.recog, 1, - eot=[], - eof=[], - min=[], - max=[], - accept=[], - special=[], - transition=[] - ) - - - def testUnpack(self): - """DFA.unpack()""" - - self.failUnlessEqual( - antlr3.DFA.unpack( - u"\1\3\1\4\2\uffff\1\5\22\uffff\1\2\31\uffff\1\6\6\uffff" - u"\32\6\4\uffff\1\6\1\uffff\32\6" - ), - [ 3, 4, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 6, -1, -1, -1, -1, -1, -1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, -1, -1, -1, -1, 6, -1, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6 - ] - ) - - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testdottreegen.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testdottreegen.py deleted file mode 100644 index 05b797e5..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testdottreegen.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import unittest -from StringIO import StringIO -import textwrap - -import stringtemplate3 - -from antlr3.dottreegen import toDOT -from antlr3.treewizard import TreeWizard -from antlr3.tree import CommonTreeAdaptor - - -class TestToDOT(unittest.TestCase): - """Test case for the toDOT function.""" - - def setUp(self): - self.adaptor = CommonTreeAdaptor() - self.tokens = [ - "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR" - ] - self.wiz = TreeWizard(self.adaptor, self.tokens) - - - def testNone(self): - """toDOT()""" - - treeST = stringtemplate3.StringTemplate( - template=( - "digraph {\n" + - " $nodes$\n" + - " $edges$\n" + - "}\n") - ) - - edgeST = stringtemplate3.StringTemplate( - template="$parent$ -> $child$\n" - ) - - tree = self.wiz.create("(A B (B C C) (B (C D D)))") - st = toDOT(tree, self.adaptor, treeST, edgeST) - - result = st.toString() - expected = textwrap.dedent( - '''\ - digraph { - n0 [label="A"]; - n1 [label="B"]; - n2 [label="B"]; - n3 [label="C"]; - n4 [label="C"]; - n5 [label="B"]; - n6 [label="C"]; - n7 [label="D"]; - n8 [label="D"]; - - n0 -> n1 - n0 -> n2 - n2 -> n3 - n2 -> n4 - n0 -> n5 - n5 -> n6 - n6 -> n7 - n6 -> n8 - - } - ''' - ) - 
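
Aside: toDOT() works by filling the node template once per tree node and the
edge template once per parent/child pair, then splicing the results into the
$nodes$ and $edges$ slots of the outer template; "$name$" is stringtemplate3's
attribute-reference syntax. A minimal sketch of that mechanism, assuming the
stringtemplate3 API used in this test plus its setAttribute() setter:

    import stringtemplate3

    # Same edge template the test passes to toDOT().
    st = stringtemplate3.StringTemplate(template="$parent$ -> $child$\n")
    st.setAttribute("parent", "n0")
    st.setAttribute("child", "n1")
    assert st.toString() == "n0 -> n1\n"
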
self.assertEqual(result, expected) - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testexceptions.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testexceptions.py deleted file mode 100644 index 2cdef632..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testexceptions.py +++ /dev/null @@ -1,96 +0,0 @@ -import unittest -import antlr3 -import testbase - - -class TestRecognitionException(unittest.TestCase): - """Tests for the antlr3.RecognitionException class""" - - def testInitNone(self): - """RecognitionException.__init__()""" - - exc = antlr3.RecognitionException() - - -class TestEarlyExitException(unittest.TestCase): - """Tests for the antlr3.EarlyExitException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """EarlyExitException.__init__()""" - - exc = antlr3.EarlyExitException() - - -class TestFailedPredicateException(unittest.TestCase): - """Tests for the antlr3.FailedPredicateException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """FailedPredicateException.__init__()""" - - exc = antlr3.FailedPredicateException() - - -class TestMismatchedNotSetException(unittest.TestCase): - """Tests for the antlr3.MismatchedNotSetException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """MismatchedNotSetException.__init__()""" - - exc = antlr3.MismatchedNotSetException() - - -class TestMismatchedRangeException(unittest.TestCase): - """Tests for the antlr3.MismatchedRangeException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """MismatchedRangeException.__init__()""" - - exc = antlr3.MismatchedRangeException() - - -class TestMismatchedSetException(unittest.TestCase): - """Tests for the antlr3.MismatchedSetException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """MismatchedSetException.__init__()""" - - exc = antlr3.MismatchedSetException() - - -class TestMismatchedTokenException(unittest.TestCase): - """Tests for the antlr3.MismatchedTokenException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """MismatchedTokenException.__init__()""" - - exc = antlr3.MismatchedTokenException() - - -class TestMismatchedTreeNodeException(unittest.TestCase): - """Tests for the antlr3.MismatchedTreeNodeException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """MismatchedTreeNodeException.__init__()""" - - exc = antlr3.MismatchedTreeNodeException() - - -class TestNoViableAltException(unittest.TestCase): - """Tests for the antlr3.NoViableAltException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """NoViableAltException.__init__()""" - - exc = antlr3.NoViableAltException() - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testrecognizers.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testrecognizers.py deleted file mode 100644 index 1fd87916..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testrecognizers.py +++ /dev/null @@ -1,67 +0,0 @@ -import sys -import unittest - -import antlr3 - - -class TestBaseRecognizer(unittest.TestCase): - """Tests for BaseRecognizer class""" - - def testGetRuleInvocationStack(self): - """BaseRecognizer._getRuleInvocationStack()""" - - rules = 
antlr3.BaseRecognizer._getRuleInvocationStack(__name__) - self.failUnlessEqual( - rules, - ['testGetRuleInvocationStack'] - ) - - -class TestTokenSource(unittest.TestCase): - """Testcase to the antlr3.TokenSource class""" - - - def testIteratorInterface(self): - """TokenSource.next()""" - - class TrivialToken(object): - def __init__(self, type): - self.type = type - - class TestSource(antlr3.TokenSource): - def __init__(self): - self.tokens = [ - TrivialToken(1), - TrivialToken(2), - TrivialToken(3), - TrivialToken(4), - TrivialToken(antlr3.EOF), - ] - - def nextToken(self): - return self.tokens.pop(0) - - - src = TestSource() - tokens = [] - for token in src: - tokens.append(token.type) - - self.failUnlessEqual(tokens, [1, 2, 3, 4]) - - - -class TestLexer(unittest.TestCase): - - def testInit(self): - """Lexer.__init__()""" - - class TLexer(antlr3.Lexer): - api_version = 'HEAD' - - stream = antlr3.StringStream('foo') - TLexer(stream) - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/teststreams.input1 b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/teststreams.input1 deleted file mode 100644 index a907ec3f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/teststreams.input1 +++ /dev/null @@ -1,2 +0,0 @@ -foo -bar \ No newline at end of file diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/teststreams.input2 b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/teststreams.input2 deleted file mode 100644 index 49dccf4f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/teststreams.input2 +++ /dev/null @@ -1,2 +0,0 @@ -foo -bär \ No newline at end of file diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/teststreams.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/teststreams.py deleted file mode 100644 index f8430ba9..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/teststreams.py +++ /dev/null @@ -1,661 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import unittest -from StringIO import StringIO -import antlr3 - - -class TestStringStream(unittest.TestCase): - """Test case for the StringStream class.""" - - def testSize(self): - """StringStream.size()""" - - stream = antlr3.StringStream('foo') - - self.failUnlessEqual(stream.size(), 3) - - - def testIndex(self): - """StringStream.index()""" - - stream = antlr3.StringStream('foo') - - self.failUnlessEqual(stream.index(), 0) - - - def testConsume(self): - """StringStream.consume()""" - - stream = antlr3.StringStream('foo\nbar') - - stream.consume() # f - self.failUnlessEqual(stream.index(), 1) - self.failUnlessEqual(stream.charPositionInLine, 1) - self.failUnlessEqual(stream.line, 1) - - stream.consume() # o - self.failUnlessEqual(stream.index(), 2) - self.failUnlessEqual(stream.charPositionInLine, 2) - self.failUnlessEqual(stream.line, 1) - - stream.consume() # o - self.failUnlessEqual(stream.index(), 3) - self.failUnlessEqual(stream.charPositionInLine, 3) - self.failUnlessEqual(stream.line, 1) - - stream.consume() # \n - self.failUnlessEqual(stream.index(), 4) - self.failUnlessEqual(stream.charPositionInLine, 0) - self.failUnlessEqual(stream.line, 2) - - stream.consume() # b - self.failUnlessEqual(stream.index(), 5) - self.failUnlessEqual(stream.charPositionInLine, 1) - self.failUnlessEqual(stream.line, 2) - - stream.consume() # a - self.failUnlessEqual(stream.index(), 6) - self.failUnlessEqual(stream.charPositionInLine, 2) - 
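
Aside: condensed, the invariant this test walks through character by
character: consume() advances index() and keeps line/charPositionInLine in
sync, with '\n' bumping the line number and resetting the column. A
self-contained sketch, assuming the antlr3 runtime from this tree is
importable:

    import antlr3

    stream = antlr3.StringStream('a\nb')
    stream.consume()   # past 'a': index 1, column 1, line 1
    stream.consume()   # past '\n': index 2, line bumps to 2, column resets
    assert (stream.index(), stream.line, stream.charPositionInLine) == (2, 2, 0)
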
self.failUnlessEqual(stream.line, 2) - - stream.consume() # r - self.failUnlessEqual(stream.index(), 7) - self.failUnlessEqual(stream.charPositionInLine, 3) - self.failUnlessEqual(stream.line, 2) - - stream.consume() # EOF - self.failUnlessEqual(stream.index(), 7) - self.failUnlessEqual(stream.charPositionInLine, 3) - self.failUnlessEqual(stream.line, 2) - - stream.consume() # EOF - self.failUnlessEqual(stream.index(), 7) - self.failUnlessEqual(stream.charPositionInLine, 3) - self.failUnlessEqual(stream.line, 2) - - - def testReset(self): - """StringStream.reset()""" - - stream = antlr3.StringStream('foo') - - stream.consume() - stream.consume() - - stream.reset() - self.failUnlessEqual(stream.index(), 0) - self.failUnlessEqual(stream.line, 1) - self.failUnlessEqual(stream.charPositionInLine, 0) - self.failUnlessEqual(stream.LT(1), 'f') - - - def testLA(self): - """StringStream.LA()""" - - stream = antlr3.StringStream('foo') - - self.failUnlessEqual(stream.LT(1), 'f') - self.failUnlessEqual(stream.LT(2), 'o') - self.failUnlessEqual(stream.LT(3), 'o') - - stream.consume() - stream.consume() - - self.failUnlessEqual(stream.LT(1), 'o') - self.failUnlessEqual(stream.LT(2), antlr3.EOF) - self.failUnlessEqual(stream.LT(3), antlr3.EOF) - - - def testSubstring(self): - """StringStream.substring()""" - - stream = antlr3.StringStream('foobar') - - self.failUnlessEqual(stream.substring(0, 0), 'f') - self.failUnlessEqual(stream.substring(0, 1), 'fo') - self.failUnlessEqual(stream.substring(0, 5), 'foobar') - self.failUnlessEqual(stream.substring(3, 5), 'bar') - - - def testSeekForward(self): - """StringStream.seek(): forward""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - - self.failUnlessEqual(stream.index(), 4) - self.failUnlessEqual(stream.line, 2) - self.failUnlessEqual(stream.charPositionInLine, 0) - self.failUnlessEqual(stream.LT(1), 'b') - - -## # not yet implemented -## def testSeekBackward(self): -## """StringStream.seek(): backward""" - -## stream = antlr3.StringStream('foo\nbar') - -## stream.seek(4) -## stream.seek(1) - -## self.failUnlessEqual(stream.index(), 1) -## self.failUnlessEqual(stream.line, 1) -## self.failUnlessEqual(stream.charPositionInLine, 1) -## self.failUnlessEqual(stream.LA(1), 'o') - - - def testMark(self): - """StringStream.mark()""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - - marker = stream.mark() - self.failUnlessEqual(marker, 1) - self.failUnlessEqual(stream.markDepth, 1) - - stream.consume() - marker = stream.mark() - self.failUnlessEqual(marker, 2) - self.failUnlessEqual(stream.markDepth, 2) - - - def testReleaseLast(self): - """StringStream.release(): last marker""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.release() - self.failUnlessEqual(stream.markDepth, 1) - - # release same marker again, nothing has changed - stream.release() - self.failUnlessEqual(stream.markDepth, 1) - - - def testReleaseNested(self): - """StringStream.release(): nested""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.release(marker2) - self.failUnlessEqual(stream.markDepth, 1) - - - def testRewindLast(self): - """StringStream.rewind(): last marker""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - - marker = stream.mark() - stream.consume() - stream.consume() - - 
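
Aside: the mark()/rewind() contract pinned down by the next few tests, in one
self-contained sketch (same antlr3 API; rewind() restores the position *and*
the line/column bookkeeping captured by mark()):

    import antlr3

    stream = antlr3.StringStream('foo\nbar')
    stream.seek(4)             # sitting on 'b' of "bar"
    marker = stream.mark()     # snapshot index, line, charPositionInLine
    stream.consume()
    stream.consume()
    stream.rewind(marker)      # back on 'b', bookkeeping restored
    assert stream.LT(1) == 'b' and stream.line == 2
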
stream.rewind() - self.failUnlessEqual(stream.markDepth, 0) - self.failUnlessEqual(stream.index(), 4) - self.failUnlessEqual(stream.line, 2) - self.failUnlessEqual(stream.charPositionInLine, 0) - self.failUnlessEqual(stream.LT(1), 'b') - - - def testRewindNested(self): - """StringStream.rewind(): nested""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.failUnlessEqual(stream.markDepth, 1) - self.failUnlessEqual(stream.index(), 5) - self.failUnlessEqual(stream.line, 2) - self.failUnlessEqual(stream.charPositionInLine, 1) - self.failUnlessEqual(stream.LT(1), 'a') - - -class TestFileStream(unittest.TestCase): - """Test case for the FileStream class.""" - - - def testNoEncoding(self): - path = os.path.join(os.path.dirname(__file__), 'teststreams.input1') - - stream = antlr3.FileStream(path) - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.failUnlessEqual(stream.markDepth, 1) - self.failUnlessEqual(stream.index(), 5) - self.failUnlessEqual(stream.line, 2) - self.failUnlessEqual(stream.charPositionInLine, 1) - self.failUnlessEqual(stream.LT(1), 'a') - self.failUnlessEqual(stream.LA(1), ord('a')) - - - def testEncoded(self): - path = os.path.join(os.path.dirname(__file__), 'teststreams.input2') - - stream = antlr3.FileStream(path, 'utf-8') - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.failUnlessEqual(stream.markDepth, 1) - self.failUnlessEqual(stream.index(), 5) - self.failUnlessEqual(stream.line, 2) - self.failUnlessEqual(stream.charPositionInLine, 1) - self.failUnlessEqual(stream.LT(1), u'ä') - self.failUnlessEqual(stream.LA(1), ord(u'ä')) - - - -class TestInputStream(unittest.TestCase): - """Test case for the InputStream class.""" - - def testNoEncoding(self): - file = StringIO('foo\nbar') - - stream = antlr3.InputStream(file) - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.failUnlessEqual(stream.markDepth, 1) - self.failUnlessEqual(stream.index(), 5) - self.failUnlessEqual(stream.line, 2) - self.failUnlessEqual(stream.charPositionInLine, 1) - self.failUnlessEqual(stream.LT(1), 'a') - self.failUnlessEqual(stream.LA(1), ord('a')) - - - def testEncoded(self): - file = StringIO(u'foo\nbär'.encode('utf-8')) - - stream = antlr3.InputStream(file, 'utf-8') - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.failUnlessEqual(stream.markDepth, 1) - self.failUnlessEqual(stream.index(), 5) - self.failUnlessEqual(stream.line, 2) - self.failUnlessEqual(stream.charPositionInLine, 1) - self.failUnlessEqual(stream.LT(1), u'ä') - self.failUnlessEqual(stream.LA(1), ord(u'ä')) - - -class TestCommonTokenStream(unittest.TestCase): - """Test case for the StringStream class.""" - - def setUp(self): - """Setup test fixure - - The constructor of CommonTokenStream needs a token source. This - is a simple mock class providing just the nextToken() method. 
- - """ - - class MockSource(object): - def __init__(self): - self.tokens = [] - - def makeEOFToken(self): - return antlr3.CommonToken(type=antlr3.EOF) - - def nextToken(self): - try: - return self.tokens.pop(0) - except IndexError: - return None - - self.source = MockSource() - - - def testInit(self): - """CommonTokenStream.__init__()""" - - stream = antlr3.CommonTokenStream(self.source) - self.failUnlessEqual(stream.index(), -1) - - - def testSetTokenSource(self): - """CommonTokenStream.setTokenSource()""" - - stream = antlr3.CommonTokenStream(None) - stream.setTokenSource(self.source) - self.failUnlessEqual(stream.index(), -1) - self.failUnlessEqual(stream.channel, antlr3.DEFAULT_CHANNEL) - - - def testLTEmptySource(self): - """CommonTokenStream.LT(): EOF (empty source)""" - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(1) - self.failUnlessEqual(lt1.type, antlr3.EOF) - - - def testLT1(self): - """CommonTokenStream.LT(1)""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(1) - self.failUnlessEqual(lt1.type, 12) - - - def testLT1WithHidden(self): - """CommonTokenStream.LT(1): with hidden tokens""" - - self.source.tokens.append( - antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(1) - self.failUnlessEqual(lt1.type, 13) - - - def testLT2BeyondEnd(self): - """CommonTokenStream.LT(2): beyond end""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13, channel=antlr3.HIDDEN_CHANNEL) - ) - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(2) - self.failUnlessEqual(lt1.type, antlr3.EOF) - - - # not yet implemented - def testLTNegative(self): - """CommonTokenStream.LT(-1): look back""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - stream.fillBuffer() - stream.consume() - - lt1 = stream.LT(-1) - self.failUnlessEqual(lt1.type, 12) - - - def testLB1(self): - """CommonTokenStream.LB(1)""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - stream.fillBuffer() - stream.consume() - - self.failUnlessEqual(stream.LB(1).type, 12) - - - def testLTZero(self): - """CommonTokenStream.LT(0)""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(0) - self.failUnless(lt1 is None) - - - def testLBBeyondBegin(self): - """CommonTokenStream.LB(-1): beyond begin""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - self.failUnless(stream.LB(1) is None) - - stream.consume() - stream.consume() - self.failUnless(stream.LB(3) is None) - - - def testFillBuffer(self): - """CommonTokenStream.fillBuffer()""" - - self.source.tokens.append( - 
antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=14) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=antlr3.EOF) - ) - - stream = antlr3.CommonTokenStream(self.source) - stream.fillBuffer() - - self.failUnlessEqual(len(stream.tokens), 3) - self.failUnlessEqual(stream.tokens[0].type, 12) - self.failUnlessEqual(stream.tokens[1].type, 13) - self.failUnlessEqual(stream.tokens[2].type, 14) - - - def testConsume(self): - """CommonTokenStream.consume()""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=antlr3.EOF) - ) - - stream = antlr3.CommonTokenStream(self.source) - self.failUnlessEqual(stream.LA(1), 12) - - stream.consume() - self.failUnlessEqual(stream.LA(1), 13) - - stream.consume() - self.failUnlessEqual(stream.LA(1), antlr3.EOF) - - stream.consume() - self.failUnlessEqual(stream.LA(1), antlr3.EOF) - - - def testSeek(self): - """CommonTokenStream.seek()""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=antlr3.EOF) - ) - - stream = antlr3.CommonTokenStream(self.source) - self.failUnlessEqual(stream.LA(1), 12) - - stream.seek(2) - self.failUnlessEqual(stream.LA(1), antlr3.EOF) - - stream.seek(0) - self.failUnlessEqual(stream.LA(1), 12) - - - def testMarkRewind(self): - """CommonTokenStream.mark()/rewind()""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=antlr3.EOF) - ) - - stream = antlr3.CommonTokenStream(self.source) - stream.fillBuffer() - - stream.consume() - marker = stream.mark() - - stream.consume() - stream.rewind(marker) - - self.failUnlessEqual(stream.LA(1), 13) - - - def testToString(self): - """CommonTokenStream.toString()""" - - self.source.tokens.append( - antlr3.CommonToken(type=12, text="foo") - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13, text="bar") - ) - - self.source.tokens.append( - antlr3.CommonToken(type=14, text="gnurz") - ) - - self.source.tokens.append( - antlr3.CommonToken(type=15, text="blarz") - ) - - stream = antlr3.CommonTokenStream(self.source) - - self.failUnlessEqual(stream.toString(), "foobargnurzblarz") - self.failUnlessEqual(stream.toString(1, 2), "bargnurz") - self.failUnlessEqual(stream.toString(stream.tokens[1], stream.tokens[-2]), "bargnurz") - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testtree.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testtree.py deleted file mode 100644 index 1f4e36ff..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testtree.py +++ /dev/null @@ -1,1351 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import unittest -from StringIO import StringIO - -from antlr3.tree import (CommonTreeNodeStream, CommonTree, CommonTreeAdaptor, - TreeParser, TreeVisitor, TreeIterator) -from antlr3 import CommonToken, UP, DOWN, EOF -from antlr3.treewizard import TreeWizard - -class TestTreeNodeStream(unittest.TestCase): - """Test case for the TreeNodeStream class.""" - - def setUp(self): - self.adaptor = CommonTreeAdaptor() - - - def newStream(self, t): - """Build new stream; lets us override
to test other streams.""" - return CommonTreeNodeStream(t) - - - def testSingleNode(self): - t = CommonTree(CommonToken(101)) - - stream = self.newStream(t) - expecting = "101" - found = self.toNodesOnlyString(stream) - self.failUnlessEqual(expecting, found) - - expecting = "101" - found = str(stream) - self.failUnlessEqual(expecting, found) - - - def testTwoChildrenOfNilRoot(self): - class V(CommonTree): - def __init__(self, token=None, ttype=None, x=None): - if x is not None: - self.x = x - - if ttype is not None and token is None: - self.token = CommonToken(type=ttype) - - if token is not None: - self.token = token - - def __str__(self): - if self.token is not None: - txt = self.token.text - else: - txt = "" - - txt += "" - return txt - - root_0 = self.adaptor.nil(); - t = V(ttype=101, x=2) - u = V(token=CommonToken(type=102, text="102")) - self.adaptor.addChild(root_0, t) - self.adaptor.addChild(root_0, u) - self.assert_(root_0.parent is None) - self.assertEquals(-1, root_0.childIndex) - self.assertEquals(0, t.childIndex) - self.assertEquals(1, u.childIndex) - - - def test4Nodes(self): - # ^(101 ^(102 103) 104) - t = CommonTree(CommonToken(101)) - t.addChild(CommonTree(CommonToken(102))) - t.getChild(0).addChild(CommonTree(CommonToken(103))) - t.addChild(CommonTree(CommonToken(104))) - - stream = self.newStream(t) - expecting = "101 102 103 104" - found = self.toNodesOnlyString(stream) - self.failUnlessEqual(expecting, found) - - expecting = "101 2 102 2 103 3 104 3" - found = str(stream) - self.failUnlessEqual(expecting, found) - - - def testList(self): - root = CommonTree(None) - - t = CommonTree(CommonToken(101)) - t.addChild(CommonTree(CommonToken(102))) - t.getChild(0).addChild(CommonTree(CommonToken(103))) - t.addChild(CommonTree(CommonToken(104))) - - u = CommonTree(CommonToken(105)) - - root.addChild(t) - root.addChild(u) - - stream = CommonTreeNodeStream(root) - expecting = "101 102 103 104 105" - found = self.toNodesOnlyString(stream) - self.failUnlessEqual(expecting, found) - - expecting = "101 2 102 2 103 3 104 3 105" - found = str(stream) - self.failUnlessEqual(expecting, found) - - - def testFlatList(self): - root = CommonTree(None) - - root.addChild(CommonTree(CommonToken(101))) - root.addChild(CommonTree(CommonToken(102))) - root.addChild(CommonTree(CommonToken(103))) - - stream = CommonTreeNodeStream(root) - expecting = "101 102 103" - found = self.toNodesOnlyString(stream) - self.failUnlessEqual(expecting, found) - - expecting = "101 102 103" - found = str(stream) - self.failUnlessEqual(expecting, found) - - - def testListWithOneNode(self): - root = CommonTree(None) - - root.addChild(CommonTree(CommonToken(101))) - - stream = CommonTreeNodeStream(root) - expecting = "101" - found = self.toNodesOnlyString(stream) - self.failUnlessEqual(expecting, found) - - expecting = "101" - found = str(stream) - self.failUnlessEqual(expecting, found) - - - def testAoverB(self): - t = CommonTree(CommonToken(101)) - t.addChild(CommonTree(CommonToken(102))) - - stream = self.newStream(t) - expecting = "101 102" - found = self.toNodesOnlyString(stream) - self.failUnlessEqual(expecting, found) - - expecting = "101 2 102 3" - found = str(stream) - self.failUnlessEqual(expecting, found) - - - def testLT(self): - # ^(101 ^(102 103) 104) - t = CommonTree(CommonToken(101)) - t.addChild(CommonTree(CommonToken(102))) - t.getChild(0).addChild(CommonTree(CommonToken(103))) - t.addChild(CommonTree(CommonToken(104))) - - stream = self.newStream(t) - self.failUnlessEqual(101, stream.LT(1).getType()) 
- self.failUnlessEqual(DOWN, stream.LT(2).getType()) - self.failUnlessEqual(102, stream.LT(3).getType()) - self.failUnlessEqual(DOWN, stream.LT(4).getType()) - self.failUnlessEqual(103, stream.LT(5).getType()) - self.failUnlessEqual(UP, stream.LT(6).getType()) - self.failUnlessEqual(104, stream.LT(7).getType()) - self.failUnlessEqual(UP, stream.LT(8).getType()) - self.failUnlessEqual(EOF, stream.LT(9).getType()) - # check way ahead - self.failUnlessEqual(EOF, stream.LT(100).getType()) - - - def testMarkRewindEntire(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - m = stream.mark() # MARK - for _ in range(13): # consume til end - stream.LT(1) - stream.consume() - - self.failUnlessEqual(EOF, stream.LT(1).getType()) - self.failUnlessEqual(UP, stream.LT(-1).getType()) #TODO: remove? - stream.rewind(m) # REWIND - - # consume til end again :) - for _ in range(13): # consume til end - stream.LT(1) - stream.consume() - - self.failUnlessEqual(EOF, stream.LT(1).getType()) - self.failUnlessEqual(UP, stream.LT(-1).getType()) #TODO: remove? - - - def testMarkRewindInMiddle(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - for _ in range(7): # consume til middle - #System.out.println(tream.LT(1).getType()) - stream.consume() - - self.failUnlessEqual(107, stream.LT(1).getType()) - m = stream.mark() # MARK - stream.consume() # consume 107 - stream.consume() # consume UP - stream.consume() # consume UP - stream.consume() # consume 104 - stream.rewind(m) # REWIND - - self.failUnlessEqual(107, stream.LT(1).getType()) - stream.consume() - self.failUnlessEqual(UP, stream.LT(1).getType()) - stream.consume() - self.failUnlessEqual(UP, stream.LT(1).getType()) - stream.consume() - self.failUnlessEqual(104, stream.LT(1).getType()) - stream.consume() - # now we're past rewind position - self.failUnlessEqual(105, stream.LT(1).getType()) - stream.consume() - self.failUnlessEqual(UP, stream.LT(1).getType()) - stream.consume() - self.failUnlessEqual(EOF, stream.LT(1).getType()) - self.failUnlessEqual(UP, stream.LT(-1).getType()) - - - def testMarkRewindNested(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - m = stream.mark() # MARK at start 
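# Editor's note (not from the deleted file): as the steps below show, markers
# taken at different nesting depths stay valid independently -- rewinding to
# the inner marker m2 and later to the outer marker m each restores the exact
# node position, including any pending DOWN/UP navigation nodes.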
- stream.consume() # consume 101 - stream.consume() # consume DN - m2 = stream.mark() # MARK on 102 - stream.consume() # consume 102 - stream.consume() # consume DN - stream.consume() # consume 103 - stream.consume() # consume 106 - stream.rewind(m2) # REWIND to 102 - self.failUnlessEqual(102, stream.LT(1).getType()) - stream.consume() - self.failUnlessEqual(DOWN, stream.LT(1).getType()) - stream.consume() - # stop at 103 and rewind to start - stream.rewind(m) # REWIND to 101 - self.failUnlessEqual(101, stream.LT(1).getType()) - stream.consume() - self.failUnlessEqual(DOWN, stream.LT(1).getType()) - stream.consume() - self.failUnlessEqual(102, stream.LT(1).getType()) - stream.consume() - self.failUnlessEqual(DOWN, stream.LT(1).getType()) - - - def testSeek(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - stream.consume() # consume 101 - stream.consume() # consume DN - stream.consume() # consume 102 - stream.seek(7) # seek to 107 - self.failUnlessEqual(107, stream.LT(1).getType()) - stream.consume() # consume 107 - stream.consume() # consume UP - stream.consume() # consume UP - self.failUnlessEqual(104, stream.LT(1).getType()) - - - def testSeekFromStart(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - stream.seek(7) # seek to 107 - self.failUnlessEqual(107, stream.LT(1).getType()) - stream.consume() # consume 107 - stream.consume() # consume UP - stream.consume() # consume UP - self.failUnlessEqual(104, stream.LT(1).getType()) - - - def testReset(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - v1 = self.toNodesOnlyString(stream) # scan all - stream.reset() - v2 = self.toNodesOnlyString(stream) # scan all - self.assertEquals(v1, v2) - - - def testIterator(self): - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - - expecting = [ - 101, DOWN, 102, DOWN, 103, 106, DOWN, 107, UP, UP, 104, 105, UP] - found = 
[t.type for t in stream] - self.assertEqual(expecting, found) - - - def toNodesOnlyString(self, nodes): - buf = [] - for i in range(nodes.size()): - t = nodes.LT(i+1) - type = nodes.getTreeAdaptor().getType(t) - if not (type==DOWN or type==UP): - buf.append(str(type)) - - return ' '.join(buf) - - -class TestCommonTreeNodeStream(unittest.TestCase): - """Test case for the CommonTreeNodeStream class.""" - - def testPushPop(self): - # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109) - # stream has 9 real + 8 nav nodes - # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r1.addChild(CommonTree(CommonToken(103))) - r0.addChild(r1) - r2 = CommonTree(CommonToken(104)) - r2.addChild(CommonTree(CommonToken(105))) - r0.addChild(r2) - r3 = CommonTree(CommonToken(106)) - r3.addChild(CommonTree(CommonToken(107))) - r0.addChild(r3) - r0.addChild(CommonTree(CommonToken(108))) - r0.addChild(CommonTree(CommonToken(109))) - - stream = CommonTreeNodeStream(r0) - expecting = "101 2 102 2 103 3 104 2 105 3 106 2 107 3 108 109 3" - found = str(stream) - self.failUnlessEqual(expecting, found) - - # Assume we want to hit node 107 and then "call 102" then return - - indexOf102 = 2 - indexOf107 = 12 - for _ in range(indexOf107): # consume til 107 node - stream.consume() - - # CALL 102 - self.failUnlessEqual(107, stream.LT(1).getType()) - stream.push(indexOf102) - self.failUnlessEqual(102, stream.LT(1).getType()) - stream.consume() # consume 102 - self.failUnlessEqual(DOWN, stream.LT(1).getType()) - stream.consume() # consume DN - self.failUnlessEqual(103, stream.LT(1).getType()) - stream.consume() # consume 103 - self.failUnlessEqual(UP, stream.LT(1).getType()) - # RETURN - stream.pop() - self.failUnlessEqual(107, stream.LT(1).getType()) - - - def testNestedPushPop(self): - # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109) - # stream has 9 real + 8 nav nodes - # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r1.addChild(CommonTree(CommonToken(103))) - r0.addChild(r1) - r2 = CommonTree(CommonToken(104)) - r2.addChild(CommonTree(CommonToken(105))) - r0.addChild(r2) - r3 = CommonTree(CommonToken(106)) - r3.addChild(CommonTree(CommonToken(107))) - r0.addChild(r3) - r0.addChild(CommonTree(CommonToken(108))) - r0.addChild(CommonTree(CommonToken(109))) - - stream = CommonTreeNodeStream(r0) - - # Assume we want to hit node 107 and then "call 102", which - # calls 104, then return - - indexOf102 = 2 - indexOf107 = 12 - for _ in range(indexOf107): # consume til 107 node - stream.consume() - - self.failUnlessEqual(107, stream.LT(1).getType()) - # CALL 102 - stream.push(indexOf102) - self.failUnlessEqual(102, stream.LT(1).getType()) - stream.consume() # consume 102 - self.failUnlessEqual(DOWN, stream.LT(1).getType()) - stream.consume() # consume DN - self.failUnlessEqual(103, stream.LT(1).getType()) - stream.consume() # consume 103 - - # CALL 104 - indexOf104 = 6 - stream.push(indexOf104) - self.failUnlessEqual(104, stream.LT(1).getType()) - stream.consume() # consume 104 - self.failUnlessEqual(DOWN, stream.LT(1).getType()) - stream.consume() # consume DN - self.failUnlessEqual(105, stream.LT(1).getType()) - stream.consume() # consume 105 - self.failUnlessEqual(UP, stream.LT(1).getType()) - # RETURN (to UP node in 102 subtree) - stream.pop() - - self.failUnlessEqual(UP, stream.LT(1).getType()) - # RETURN (to empty stack) -
stream.pop() - self.failUnlessEqual(107, stream.LT(1).getType()) - - - def testPushPopFromEOF(self): - # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109) - # stream has 9 real + 8 nav nodes - # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r1.addChild(CommonTree(CommonToken(103))) - r0.addChild(r1) - r2 = CommonTree(CommonToken(104)) - r2.addChild(CommonTree(CommonToken(105))) - r0.addChild(r2) - r3 = CommonTree(CommonToken(106)) - r3.addChild(CommonTree(CommonToken(107))) - r0.addChild(r3) - r0.addChild(CommonTree(CommonToken(108))) - r0.addChild(CommonTree(CommonToken(109))) - - stream = CommonTreeNodeStream(r0) - - while stream.LA(1) != EOF: - stream.consume() - - indexOf102 = 2 - indexOf104 = 6 - self.failUnlessEqual(EOF, stream.LT(1).getType()) - - # CALL 102 - stream.push(indexOf102) - self.failUnlessEqual(102, stream.LT(1).getType()) - stream.consume() # consume 102 - self.failUnlessEqual(DOWN, stream.LT(1).getType()) - stream.consume() # consume DN - self.failUnlessEqual(103, stream.LT(1).getType()) - stream.consume() # consume 103 - self.failUnlessEqual(UP, stream.LT(1).getType()) - # RETURN (to empty stack) - stream.pop() - self.failUnlessEqual(EOF, stream.LT(1).getType()) - - # CALL 104 - stream.push(indexOf104) - self.failUnlessEqual(104, stream.LT(1).getType()) - stream.consume() # consume 104 - self.failUnlessEqual(DOWN, stream.LT(1).getType()) - stream.consume() # consume DN - self.failUnlessEqual(105, stream.LT(1).getType()) - stream.consume() # consume 105 - self.failUnlessEqual(UP, stream.LT(1).getType()) - # RETURN (to empty stack) - stream.pop() - self.failUnlessEqual(EOF, stream.LT(1).getType()) - - -class TestCommonTree(unittest.TestCase): - """Test case for the CommonTree class.""" - - def setUp(self): - """Set up the test fixture""" - - self.adaptor = CommonTreeAdaptor() - - - def testSingleNode(self): - t = CommonTree(CommonToken(101)) - self.failUnless(t.parent is None) - self.failUnlessEqual(-1, t.childIndex) - - - def test4Nodes(self): - # ^(101 ^(102 103) 104) - r0 = CommonTree(CommonToken(101)) - r0.addChild(CommonTree(CommonToken(102))) - r0.getChild(0).addChild(CommonTree(CommonToken(103))) - r0.addChild(CommonTree(CommonToken(104))) - - self.failUnless(r0.parent is None) - self.failUnlessEqual(-1, r0.childIndex) - - - def testList(self): - # ^(nil 101 102 103) - r0 = CommonTree(None) - c0=CommonTree(CommonToken(101)) - r0.addChild(c0) - c1=CommonTree(CommonToken(102)) - r0.addChild(c1) - c2=CommonTree(CommonToken(103)) - r0.addChild(c2) - - self.failUnless(r0.parent is None) - self.failUnlessEqual(-1, r0.childIndex) - self.failUnlessEqual(r0, c0.parent) - self.failUnlessEqual(0, c0.childIndex) - self.failUnlessEqual(r0, c1.parent) - self.failUnlessEqual(1, c1.childIndex) - self.failUnlessEqual(r0, c2.parent) - self.failUnlessEqual(2, c2.childIndex) - - - def testList2(self): - # Add child ^(nil 101 102 103) to root 5 - # should pull 101 102 103 directly to become 5's child list - root = CommonTree(CommonToken(5)) - - # child tree - r0 = CommonTree(None) - c0=CommonTree(CommonToken(101)) - r0.addChild(c0) - c1=CommonTree(CommonToken(102)) - r0.addChild(c1) - c2=CommonTree(CommonToken(103)) - r0.addChild(c2) - - root.addChild(r0) - - self.failUnless(root.parent is None) - self.failUnlessEqual(-1, root.childIndex) - # check children of root all point at root - self.failUnlessEqual(root, c0.parent) - self.failUnlessEqual(0, c0.childIndex) -
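# Editor's note (not from the deleted file): addChild() with a nil-rooted
# tree splices the nil root's children directly into the parent's child list,
# so c0..c2 are reparented onto `root` and renumbered, as the remaining
# assertions verify.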
self.failUnlessEqual(root, c1.parent) - self.failUnlessEqual(1, c1.childIndex) - self.failUnlessEqual(root, c2.parent) - self.failUnlessEqual(2, c2.childIndex) - - - def testAddListToExistChildren(self): - # Add child ^(nil 101 102 103) to root ^(5 6) - # should add 101 102 103 to end of 5's child list - root = CommonTree(CommonToken(5)) - root.addChild(CommonTree(CommonToken(6))) - - # child tree - r0 = CommonTree(None) - c0=CommonTree(CommonToken(101)) - r0.addChild(c0) - c1=CommonTree(CommonToken(102)) - r0.addChild(c1) - c2=CommonTree(CommonToken(103)) - r0.addChild(c2) - - root.addChild(r0) - - self.failUnless(root.parent is None) - self.failUnlessEqual(-1, root.childIndex) - # check children of root all point at root - self.failUnlessEqual(root, c0.parent) - self.failUnlessEqual(1, c0.childIndex) - self.failUnlessEqual(root, c1.parent) - self.failUnlessEqual(2, c1.childIndex) - self.failUnlessEqual(root, c2.parent) - self.failUnlessEqual(3, c2.childIndex) - - - def testDupTree(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - dup = self.adaptor.dupTree(r0) - - self.failUnless(dup.parent is None) - self.failUnlessEqual(-1, dup.childIndex) - dup.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot(self): - # 5 becomes root of ^(nil 101 102 103) - newRoot = CommonTree(CommonToken(5)) - - oldRoot = CommonTree(None) - oldRoot.addChild(CommonTree(CommonToken(101))) - oldRoot.addChild(CommonTree(CommonToken(102))) - oldRoot.addChild(CommonTree(CommonToken(103))) - - self.adaptor.becomeRoot(newRoot, oldRoot) - newRoot.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot2(self): - # 5 becomes root of ^(101 102 103) - newRoot = CommonTree(CommonToken(5)) - - oldRoot = CommonTree(CommonToken(101)) - oldRoot.addChild(CommonTree(CommonToken(102))) - oldRoot.addChild(CommonTree(CommonToken(103))) - - self.adaptor.becomeRoot(newRoot, oldRoot) - newRoot.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot3(self): - # ^(nil 5) becomes root of ^(nil 101 102 103) - newRoot = CommonTree(None) - newRoot.addChild(CommonTree(CommonToken(5))) - - oldRoot = CommonTree(None) - oldRoot.addChild(CommonTree(CommonToken(101))) - oldRoot.addChild(CommonTree(CommonToken(102))) - oldRoot.addChild(CommonTree(CommonToken(103))) - - self.adaptor.becomeRoot(newRoot, oldRoot) - newRoot.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot5(self): - # ^(nil 5) becomes root of ^(101 102 103) - newRoot = CommonTree(None) - newRoot.addChild(CommonTree(CommonToken(5))) - - oldRoot = CommonTree(CommonToken(101)) - oldRoot.addChild(CommonTree(CommonToken(102))) - oldRoot.addChild(CommonTree(CommonToken(103))) - - self.adaptor.becomeRoot(newRoot, oldRoot) - newRoot.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot6(self): - # emulates construction of ^(5 6) - root_0 = self.adaptor.nil() - root_1 = self.adaptor.nil() - root_1 = self.adaptor.becomeRoot(CommonTree(CommonToken(5)), root_1) - - self.adaptor.addChild(root_1, CommonTree(CommonToken(6))) - - self.adaptor.addChild(root_0, root_1) - - root_0.sanityCheckParentAndChildIndexes() - - - # Test replaceChildren - - def testReplaceWithNoChildren(self): - t = CommonTree(CommonToken(101)) - newChild = CommonTree(CommonToken(5))
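# Editor's note (not from the deleted file): replaceChildren() indexes into
# the existing child list, so calling it on a childless node cannot succeed;
# the test below expects an IndexError rather than a silent no-op.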
- error = False - try: - t.replaceChildren(0, 0, newChild) - - except IndexError: - error = True - - self.failUnless(error) - - - def testReplaceWithOneChildren(self): - # assume token type 99 and use text - t = CommonTree(CommonToken(99, text="a")) - c0 = CommonTree(CommonToken(99, text="b")) - t.addChild(c0) - - newChild = CommonTree(CommonToken(99, text="c")) - t.replaceChildren(0, 0, newChild) - expecting = "(a c)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceInMiddle(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) # index 1 - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - t.replaceChildren(1, 1, newChild) - expecting = "(a b x d)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceAtLeft(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) # index 0 - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - t.replaceChildren(0, 0, newChild) - expecting = "(a x c d)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceAtRight(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) # index 2 - - newChild = CommonTree(CommonToken(99, text="x")) - t.replaceChildren(2, 2, newChild) - expecting = "(a b c x)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceOneWithTwoAtLeft(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChildren = self.adaptor.nil() - newChildren.addChild(CommonTree(CommonToken(99, text="x"))) - newChildren.addChild(CommonTree(CommonToken(99, text="y"))) - - t.replaceChildren(0, 0, newChildren) - expecting = "(a x y c d)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceOneWithTwoAtRight(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChildren = self.adaptor.nil() - newChildren.addChild(CommonTree(CommonToken(99, text="x"))) - newChildren.addChild(CommonTree(CommonToken(99, text="y"))) - - t.replaceChildren(2, 2, newChildren) - expecting = "(a b c x y)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceOneWithTwoInMiddle(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChildren = self.adaptor.nil() - newChildren.addChild(CommonTree(CommonToken(99, text="x"))) - newChildren.addChild(CommonTree(CommonToken(99, text="y"))) - - t.replaceChildren(1, 1, newChildren) - expecting = "(a b x y d)" - self.failUnlessEqual(expecting, t.toStringTree()) - 
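# Editor's aside, not part of the deleted file: the contract exercised by
# this block is replaceChildren(start, stop, t) == "replace children
# start..stop (inclusive) with t", where a nil-rooted t contributes its whole
# child list. A small recap sketch under that assumption (_-prefixed names
# are illustrative only):
_p = CommonTree(CommonToken(99, text="a"))
for _txt in ("b", "c", "d"):
    _p.addChild(CommonTree(CommonToken(99, text=_txt)))
_p.replaceChildren(0, 1, CommonTree(CommonToken(99, text="x")))
assert _p.toStringTree() == "(a x d)"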
t.sanityCheckParentAndChildIndexes() - - - def testReplaceTwoWithOneAtLeft(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - - t.replaceChildren(0, 1, newChild) - expecting = "(a x d)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceTwoWithOneAtRight(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - - t.replaceChildren(1, 2, newChild) - expecting = "(a b x)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceAllWithOne(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - - t.replaceChildren(0, 2, newChild) - expecting = "(a x)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceAllWithTwo(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChildren = self.adaptor.nil() - newChildren.addChild(CommonTree(CommonToken(99, text="x"))) - newChildren.addChild(CommonTree(CommonToken(99, text="y"))) - - t.replaceChildren(0, 2, newChildren) - expecting = "(a x y)" - self.failUnlessEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - -class TestTreeContext(unittest.TestCase): - """Test the TreeParser.inContext() method""" - - tokenNames = [ - "", "", "", "", "VEC", "ASSIGN", "PRINT", - "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'" - ] - - def testSimpleParent(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC") - self.assertEquals(expecting, found) - - - def testNoParent(self): - tree = "(PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(%x:PRINT (MULT ID (VEC INT INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC") - self.assertEquals(expecting, found) - - - def testParentWithWildcard(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") 
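# Editor's note (not from the deleted file): _inContext() matches the node's
# ancestor chain against a space-separated list of token names, innermost
# ancestor last; "..." is a wildcard for any run of ancestors. The tests in
# this class cover exact parents, leading/trailing/embedded wildcards, and
# the rejection of ".." and adjacent "..." as invalid syntax.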
- - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ...") - self.assertEquals(expecting, found) - - - def testWildcardAtStartIgnored(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "...VEC") - self.assertEquals(expecting, found) - - - def testWildcardInBetween(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT...VEC") - self.assertEquals(expecting, found) - - - def testLotsOfWildcards(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "... PRINT ... VEC ...") - self.assertEquals(expecting, found) - - - def testDeep(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ...") - self.assertEquals(expecting, found) - - - def testDeepAndFindRoot(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ...") - self.assertEquals(expecting, found) - - - def testDeepAndFindRoot2(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ... 
VEC ...") - self.assertEquals(expecting, found) - - - def testChain(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT MULT VEC MULT") - self.assertEquals(expecting, found) - - - ## TEST INVALID CONTEXTS - - def testNotParent(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC") - self.assertEquals(expecting, found) - - - def testMismatch(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - ## missing MULT - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT VEC MULT") - self.assertEquals(expecting, found) - - - def testMismatch2(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT VEC ...") - self.assertEquals(expecting, found) - - - def testMismatch3(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ... VEC MULT") - self.assertEquals(expecting, found) - - - def testDoubleEtc(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - try: - TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ... ... VEC") - self.fail() - except ValueError, exc: - expecting = "invalid syntax: ... ..." 
- found = str(exc) - self.assertEquals(expecting, found) - - - def testDotDot(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - try: - TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT .. VEC") - self.fail() - except ValueError, exc: - expecting = "invalid syntax: .." - found = str(exc) - self.assertEquals(expecting, found) - - -class TestTreeVisitor(unittest.TestCase): - """Test of the TreeVisitor class.""" - - tokenNames = [ - "", "", "", "", "VEC", "ASSIGN", "PRINT", - "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'" - ] - - def testTreeVisitor(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - found = [] - def pre(t): - found.append("pre(%s)" % t) - return t - def post(t): - found.append("post(%s)" % t) - return t - - visitor = TreeVisitor(adaptor) - visitor.visit(t, pre, post) - - expecting = [ "pre(PRINT)", "pre(MULT)", "pre(x)", "post(x)", - "pre(VEC)", "pre(MULT)", "pre(9)", "post(9)", "pre(1)", - "post(1)", "post(MULT)", "pre(2)", "post(2)", "pre(3)", - "post(3)", "post(VEC)", "post(MULT)", "post(PRINT)" ] - - self.assertEquals(expecting, found) - - -class TestTreeIterator(unittest.TestCase): - tokens = [ - "", "", "", "", - "A", "B", "C", "D", "E", "F", "G" ] - - def testNode(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("A") - it = TreeIterator(t) - expecting = "A EOF" - found = self.toString(it) - self.assertEquals(expecting, found) - - - def testFlatAB(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(nil A B)") - it = TreeIterator(t) - expecting = "nil DOWN A B UP EOF" - found = self.toString(it) - self.assertEquals(expecting, found) - - - def testAB(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A B)") - it = TreeIterator(t) - expecting = "A DOWN B UP EOF" - found = self.toString(it) - self.assertEquals(expecting, found) - - - def testABC(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A B C)") - it = TreeIterator(t) - expecting = "A DOWN B C UP EOF" - found = self.toString(it) - self.assertEquals(expecting, found) - - - def testVerticalList(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A (B C))") - it = TreeIterator(t) - expecting = "A DOWN B DOWN C UP UP EOF" - found = self.toString(it) - self.assertEquals(expecting, found) - - - def testComplex(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A (B (C D E) F) G)") - it = TreeIterator(t) - expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF" - found = self.toString(it) - self.assertEquals(expecting, found) - - - def testReset(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A (B (C D E) F) G)") - it = TreeIterator(t) - expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF" - found = self.toString(it) - self.assertEquals(expecting, found) - - it.reset() - expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF" - found = 
self.toString(it) - self.assertEquals(expecting, found) - - - def toString(self, it): - buf = [] - for n in it: - buf.append(str(n)) - - return ' '.join(buf) - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testtreewizard.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testtreewizard.py deleted file mode 100644 index 2ad99bed..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/unittests/testtreewizard.py +++ /dev/null @@ -1,691 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import unittest -from StringIO import StringIO - -from antlr3.tree import CommonTreeAdaptor, CommonTree, INVALID_TOKEN_TYPE -from antlr3.treewizard import TreeWizard, computeTokenTypes, \ - TreePatternLexer, EOF, ID, BEGIN, END, PERCENT, COLON, DOT, ARG, \ - TreePatternParser, \ - TreePattern, WildcardTreePattern, TreePatternTreeAdaptor - - -class TestComputeTokenTypes(unittest.TestCase): - """Test case for the computeTokenTypes function.""" - - def testNone(self): - """computeTokenTypes(None) -> {}""" - - typeMap = computeTokenTypes(None) - self.failUnless(isinstance(typeMap, dict)) - self.failUnlessEqual(typeMap, {}) - - - def testList(self): - """computeTokenTypes(['a', 'b']) -> { 'a': 0, 'b': 1 }""" - - typeMap = computeTokenTypes(['a', 'b']) - self.failUnless(isinstance(typeMap, dict)) - self.failUnlessEqual(typeMap, { 'a': 0, 'b': 1 }) - - -class TestTreePatternLexer(unittest.TestCase): - """Test case for the TreePatternLexer class.""" - - def testBegin(self): - """TreePatternLexer(): '('""" - - lexer = TreePatternLexer('(') - type = lexer.nextToken() - self.failUnlessEqual(type, BEGIN) - self.failUnlessEqual(lexer.sval, '') - self.failUnlessEqual(lexer.error, False) - - - def testEnd(self): - """TreePatternLexer(): ')'""" - - lexer = TreePatternLexer(')') - type = lexer.nextToken() - self.failUnlessEqual(type, END) - self.failUnlessEqual(lexer.sval, '') - self.failUnlessEqual(lexer.error, False) - - - def testPercent(self): - """TreePatternLexer(): '%'""" - - lexer = TreePatternLexer('%') - type = lexer.nextToken() - self.failUnlessEqual(type, PERCENT) - self.failUnlessEqual(lexer.sval, '') - self.failUnlessEqual(lexer.error, False) - - - def testDot(self): - """TreePatternLexer(): '.'""" - - lexer = TreePatternLexer('.') - type = lexer.nextToken() - self.failUnlessEqual(type, DOT) - self.failUnlessEqual(lexer.sval, '') - self.failUnlessEqual(lexer.error, False) - - - def testColon(self): - """TreePatternLexer(): ':'""" - - lexer = TreePatternLexer(':') - type = lexer.nextToken() - self.failUnlessEqual(type, COLON) - self.failUnlessEqual(lexer.sval, '') - self.failUnlessEqual(lexer.error, False) - - - def testEOF(self): - """TreePatternLexer(): EOF""" - - lexer = TreePatternLexer(' \n \r \t ') - type = lexer.nextToken() - self.failUnlessEqual(type, EOF) - self.failUnlessEqual(lexer.sval, '') - self.failUnlessEqual(lexer.error, False) - - - def testID(self): - """TreePatternLexer(): ID""" - - lexer = TreePatternLexer('_foo12_bar') - type = lexer.nextToken() - self.failUnlessEqual(type, ID) - self.failUnlessEqual(lexer.sval, '_foo12_bar') - self.failUnlessEqual(lexer.error, False) - - - def testARG(self): - """TreePatternLexer(): ARG""" - - lexer = TreePatternLexer('[ \\]bla\\n]') - type = lexer.nextToken() - self.failUnlessEqual(type, ARG) - self.failUnlessEqual(lexer.sval, ' ]bla\\n') - self.failUnlessEqual(lexer.error, False) - - - def testError(self): - """TreePatternLexer(): 
error""" - - lexer = TreePatternLexer('1') - type = lexer.nextToken() - self.failUnlessEqual(type, EOF) - self.failUnlessEqual(lexer.sval, '') - self.failUnlessEqual(lexer.error, True) - - -class TestTreePatternParser(unittest.TestCase): - """Test case for the TreePatternParser class.""" - - def setUp(self): - """Setup text fixure - - We need a tree adaptor, use CommonTreeAdaptor. - And a constant list of token names. - - """ - - self.adaptor = CommonTreeAdaptor() - self.tokens = [ - "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR" - ] - self.wizard = TreeWizard(self.adaptor, tokenNames=self.tokens) - - - def testSingleNode(self): - """TreePatternParser: 'ID'""" - lexer = TreePatternLexer('ID') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.failUnless(isinstance(tree, CommonTree)) - self.failUnlessEqual(tree.getType(), 10) - self.failUnlessEqual(tree.getText(), 'ID') - - - def testSingleNodeWithArg(self): - """TreePatternParser: 'ID[foo]'""" - lexer = TreePatternLexer('ID[foo]') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.failUnless(isinstance(tree, CommonTree)) - self.failUnlessEqual(tree.getType(), 10) - self.failUnlessEqual(tree.getText(), 'foo') - - - def testSingleLevelTree(self): - """TreePatternParser: '(A B)'""" - lexer = TreePatternLexer('(A B)') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.failUnless(isinstance(tree, CommonTree)) - self.failUnlessEqual(tree.getType(), 5) - self.failUnlessEqual(tree.getText(), 'A') - self.failUnlessEqual(tree.getChildCount(), 1) - self.failUnlessEqual(tree.getChild(0).getType(), 6) - self.failUnlessEqual(tree.getChild(0).getText(), 'B') - - - def testNil(self): - """TreePatternParser: 'nil'""" - lexer = TreePatternLexer('nil') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.failUnless(isinstance(tree, CommonTree)) - self.failUnlessEqual(tree.getType(), 0) - self.failUnlessEqual(tree.getText(), None) - - - def testWildcard(self): - """TreePatternParser: '(.)'""" - lexer = TreePatternLexer('(.)') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.failUnless(isinstance(tree, WildcardTreePattern)) - - - def testLabel(self): - """TreePatternParser: '(%a:A)'""" - lexer = TreePatternLexer('(%a:A)') - parser = TreePatternParser(lexer, self.wizard, TreePatternTreeAdaptor()) - tree = parser.pattern() - self.failUnless(isinstance(tree, TreePattern)) - self.failUnlessEqual(tree.label, 'a') - - - def testError1(self): - """TreePatternParser: ')'""" - lexer = TreePatternLexer(')') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.failUnless(tree is None) - - - def testError2(self): - """TreePatternParser: '()'""" - lexer = TreePatternLexer('()') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.failUnless(tree is None) - - - def testError3(self): - """TreePatternParser: '(A ])'""" - lexer = TreePatternLexer('(A ])') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.failUnless(tree is None) - - -class TestTreeWizard(unittest.TestCase): - """Test case for the TreeWizard class.""" - - def setUp(self): - """Setup text fixure - - We need a tree adaptor, use CommonTreeAdaptor. - And a constant list of token names. 
- - """ - - self.adaptor = CommonTreeAdaptor() - self.tokens = [ - "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR" - ] - - - def testInit(self): - """TreeWizard.__init__()""" - - wiz = TreeWizard( - self.adaptor, - tokenNames=['a', 'b'] - ) - - self.failUnless(wiz.adaptor is self.adaptor) - self.failUnlessEqual( - wiz.tokenNameToTypeMap, - { 'a': 0, 'b': 1 } - ) - - - def testGetTokenType(self): - """TreeWizard.getTokenType()""" - - wiz = TreeWizard( - self.adaptor, - tokenNames=self.tokens - ) - - self.failUnlessEqual( - wiz.getTokenType('A'), - 5 - ) - - self.failUnlessEqual( - wiz.getTokenType('VAR'), - 11 - ) - - self.failUnlessEqual( - wiz.getTokenType('invalid'), - INVALID_TOKEN_TYPE - ) - - def testSingleNode(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("ID") - found = t.toStringTree() - expecting = "ID" - self.failUnlessEqual(expecting, found) - - - def testSingleNodeWithArg(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("ID[foo]") - found = t.toStringTree() - expecting = "foo" - self.failUnlessEqual(expecting, found) - - - def testSingleNodeTree(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A)") - found = t.toStringTree() - expecting = "A" - self.failUnlessEqual(expecting, found) - - - def testSingleLevelTree(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C D)") - found = t.toStringTree() - expecting = "(A B C D)" - self.failUnlessEqual(expecting, found) - - - def testListTree(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(nil A B C)") - found = t.toStringTree() - expecting = "A B C" - self.failUnlessEqual(expecting, found) - - - def testInvalidListTree(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("A B C") - self.failUnless(t is None) - - - def testDoubleLevelTree(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A (B C) (B D) E)") - found = t.toStringTree() - expecting = "(A (B C) (B D) E)" - self.failUnlessEqual(expecting, found) - - - def __simplifyIndexMap(self, indexMap): - return dict( # stringify nodes for easy comparing - (ttype, [str(node) for node in nodes]) - for ttype, nodes in indexMap.items() - ) - - def testSingleNodeIndex(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("ID") - indexMap = wiz.index(tree) - found = self.__simplifyIndexMap(indexMap) - expecting = { 10: ["ID"] } - self.failUnlessEqual(expecting, found) - - - def testNoRepeatsIndex(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B C D)") - indexMap = wiz.index(tree) - found = self.__simplifyIndexMap(indexMap) - expecting = { 8:['D'], 6:['B'], 7:['C'], 5:['A'] } - self.failUnlessEqual(expecting, found) - - - def testRepeatsIndex(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - indexMap = wiz.index(tree) - found = self.__simplifyIndexMap(indexMap) - expecting = { 8: ['D', 'D'], 6: ['B', 'B', 'B'], 7: ['C'], 5: ['A', 'A'] } - self.failUnlessEqual(expecting, found) - - - def testNoRepeatsVisit(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B C D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append(str(node)) - - wiz.visit(tree, wiz.getTokenType("B"), visitor) - - expecting = ['B'] - self.failUnlessEqual(expecting, elements) - - - def testNoRepeatsVisit2(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = 
[] - def visitor(node, parent, childIndex, labels): - elements.append(str(node)) - - wiz.visit(tree, wiz.getTokenType("C"), visitor) - - expecting = ['C'] - self.failUnlessEqual(expecting, elements) - - - def testRepeatsVisit(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append(str(node)) - - wiz.visit(tree, wiz.getTokenType("B"), visitor) - - expecting = ['B', 'B', 'B'] - self.failUnlessEqual(expecting, elements) - - - def testRepeatsVisit2(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append(str(node)) - - wiz.visit(tree, wiz.getTokenType("A"), visitor) - - expecting = ['A', 'A'] - self.failUnlessEqual(expecting, elements) - - - def testRepeatsVisitWithContext(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append('%s@%s[%d]' % (node, parent, childIndex)) - - wiz.visit(tree, wiz.getTokenType("B"), visitor) - - expecting = ['B@A[0]', 'B@A[1]', 'B@A[2]'] - self.failUnlessEqual(expecting, elements) - - - def testRepeatsVisitWithNullParentAndContext(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append( - '%s@%s[%d]' - % (node, ['nil', parent][parent is not None], childIndex) - ) - - wiz.visit(tree, wiz.getTokenType("A"), visitor) - - expecting = ['A@nil[0]', 'A@A[1]'] - self.failUnlessEqual(expecting, elements) - - - def testVisitPattern(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B C (A B) D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append( - str(node) - ) - - wiz.visit(tree, '(A B)', visitor) - - expecting = ['A'] # shouldn't match overall root, just (A B) - self.failUnlessEqual(expecting, elements) - - - def testVisitPatternMultiple(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B C (A B) (D (A B)))") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append( - '%s@%s[%d]' - % (node, ['nil', parent][parent is not None], childIndex) - ) - - wiz.visit(tree, '(A B)', visitor) - - expecting = ['A@A[2]', 'A@D[0]'] - self.failUnlessEqual(expecting, elements) - - - def testVisitPatternMultipleWithLabels(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append( - '%s@%s[%d]%s&%s' - % (node, - ['nil', parent][parent is not None], - childIndex, - labels['a'], - labels['b'], - ) - ) - - wiz.visit(tree, '(%a:A %b:B)', visitor) - - expecting = ['foo@A[2]foo&bar', 'big@D[0]big&dog'] - self.failUnlessEqual(expecting, elements) - - - def testParse(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C)") - valid = wiz.parse(t, "(A B C)") - self.failUnless(valid) - - - def testParseSingleNode(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("A") - valid = wiz.parse(t, "A") - self.failUnless(valid) - - - def testParseSingleNodeFails(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("A") - valid = wiz.parse(t, "B") - self.failUnless(not valid) - - - def 
testParseFlatTree(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(nil A B C)") - valid = wiz.parse(t, "(nil A B C)") - self.failUnless(valid) - - - def testParseFlatTreeFails(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(nil A B C)") - valid = wiz.parse(t, "(nil A B)") - self.failUnless(not valid) - - - def testParseFlatTreeFails2(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(nil A B C)") - valid = wiz.parse(t, "(nil A B A)") - self.failUnless(not valid) - - - def testWildcard(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C)") - valid = wiz.parse(t, "(A . .)") - self.failUnless(valid) - - - def testParseWithText(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B[foo] C[bar])") - # C pattern has no text arg so despite [bar] in t, no need - # to match text--check structure only. - valid = wiz.parse(t, "(A B[foo] C)") - self.failUnless(valid) - - - def testParseWithText2(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B[T__32] (C (D E[a])))") - # C pattern has no text arg so despite [bar] in t, no need - # to match text--check structure only. - valid = wiz.parse(t, "(A B[foo] C)") - self.assertEquals("(A T__32 (C (D a)))", t.toStringTree()) - - - def testParseWithTextFails(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C)") - valid = wiz.parse(t, "(A[foo] B C)") - self.failUnless(not valid) # fails - - - def testParseLabels(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C)") - labels = {} - valid = wiz.parse(t, "(%a:A %b:B %c:C)", labels) - self.failUnless(valid) - self.failUnlessEqual("A", str(labels["a"])) - self.failUnlessEqual("B", str(labels["b"])) - self.failUnlessEqual("C", str(labels["c"])) - - - def testParseWithWildcardLabels(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C)") - labels = {} - valid = wiz.parse(t, "(A %b:. 
%c:.)", labels) - self.failUnless(valid) - self.failUnlessEqual("B", str(labels["b"])) - self.failUnlessEqual("C", str(labels["c"])) - - - def testParseLabelsAndTestText(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B[foo] C)") - labels = {} - valid = wiz.parse(t, "(%a:A %b:B[foo] %c:C)", labels) - self.failUnless(valid) - self.failUnlessEqual("A", str(labels["a"])) - self.failUnlessEqual("foo", str(labels["b"])) - self.failUnlessEqual("C", str(labels["c"])) - - - def testParseLabelsInNestedTree(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A (B C) (D E))") - labels = {} - valid = wiz.parse(t, "(%a:A (%b:B %c:C) (%d:D %e:E) )", labels) - self.failUnless(valid) - self.failUnlessEqual("A", str(labels["a"])) - self.failUnlessEqual("B", str(labels["b"])) - self.failUnlessEqual("C", str(labels["c"])) - self.failUnlessEqual("D", str(labels["d"])) - self.failUnlessEqual("E", str(labels["e"])) - - - def testEquals(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B C)") - t2 = wiz.create("(A B C)") - same = wiz.equals(t1, t2) - self.failUnless(same) - - - def testEqualsWithText(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B[foo] C)") - t2 = wiz.create("(A B[foo] C)") - same = wiz.equals(t1, t2) - self.failUnless(same) - - - def testEqualsWithMismatchedText(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B[foo] C)") - t2 = wiz.create("(A B C)") - same = wiz.equals(t1, t2) - self.failUnless(not same) - - - def testEqualsWithMismatchedList(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B C)") - t2 = wiz.create("(A B A)") - same = wiz.equals(t1, t2) - self.failUnless(not same) - - - def testEqualsWithMismatchedListLength(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B C)") - t2 = wiz.create("(A B)") - same = wiz.equals(t1, t2) - self.failUnless(not same) - - - def testFindPattern(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))") - subtrees = wiz.find(t, "(A B)") - found = [str(node) for node in subtrees] - expecting = ['foo', 'big'] - self.failUnlessEqual(expecting, found) - - - def testFindTokenType(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))") - subtrees = wiz.find(t, wiz.getTokenType('A')) - found = [str(node) for node in subtrees] - expecting = ['A', 'foo', 'big'] - self.failUnlessEqual(expecting, found) - - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python/xmlrunner.py b/thirdparty/antlr3-antlr-3.5/runtime/Python/xmlrunner.py deleted file mode 100644 index a4fed9fa..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python/xmlrunner.py +++ /dev/null @@ -1,378 +0,0 @@ -""" -XML Test Runner for PyUnit -""" - -# Written by Sebastian Rittau and placed in -# the Public Domain. With contributions by Paolo Borelli. - -__revision__ = "$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $" - -import os.path -import re -import sys -import time -import traceback -import unittest -from StringIO import StringIO -from xml.sax.saxutils import escape - -from StringIO import StringIO - - -class _TestInfo(object): - - """Information about a particular test. - - Used by _XMLTestResult. 
- - """ - - def __init__(self, test, time): - (self._class, self._method) = test.id().rsplit(".", 1) - self._time = time - self._error = None - self._failure = None - - @staticmethod - def create_success(test, time): - """Create a _TestInfo instance for a successful test.""" - return _TestInfo(test, time) - - @staticmethod - def create_failure(test, time, failure): - """Create a _TestInfo instance for a failed test.""" - info = _TestInfo(test, time) - info._failure = failure - return info - - @staticmethod - def create_error(test, time, error): - """Create a _TestInfo instance for an erroneous test.""" - info = _TestInfo(test, time) - info._error = error - return info - - def print_report(self, stream): - """Print information about this test case in XML format to the - supplied stream. - - """ - stream.write(' ' % \ - { - "class": self._class, - "method": self._method, - "time": self._time, - }) - if self._failure != None: - self._print_error(stream, 'failure', self._failure) - if self._error != None: - self._print_error(stream, 'error', self._error) - stream.write('\n') - - def _print_error(self, stream, tagname, error): - """Print information from a failure or error to the supplied stream.""" - text = escape(str(error[1])) - stream.write('\n') - stream.write(' <%s type="%s">%s\n' \ - % (tagname, str(error[0]), text)) - tb_stream = StringIO() - traceback.print_tb(error[2], None, tb_stream) - stream.write(escape(tb_stream.getvalue())) - stream.write(' \n' % tagname) - stream.write(' ') - - -class _XMLTestResult(unittest.TestResult): - - """A test result class that stores result as XML. - - Used by XMLTestRunner. - - """ - - def __init__(self, classname): - unittest.TestResult.__init__(self) - self._test_name = classname - self._start_time = None - self._tests = [] - self._error = None - self._failure = None - - def startTest(self, test): - unittest.TestResult.startTest(self, test) - self._error = None - self._failure = None - self._start_time = time.time() - - def stopTest(self, test): - time_taken = time.time() - self._start_time - unittest.TestResult.stopTest(self, test) - if self._error: - info = _TestInfo.create_error(test, time_taken, self._error) - elif self._failure: - info = _TestInfo.create_failure(test, time_taken, self._failure) - else: - info = _TestInfo.create_success(test, time_taken) - self._tests.append(info) - - def addError(self, test, err): - unittest.TestResult.addError(self, test, err) - self._error = err - - def addFailure(self, test, err): - unittest.TestResult.addFailure(self, test, err) - self._failure = err - - def print_report(self, stream, time_taken, out, err): - """Prints the XML report to the supplied stream. - - The time the tests took to perform as well as the captured standard - output and standard error streams must be passed in.a - - """ - stream.write('\n' % \ - { - "n": self._test_name, - "t": self.testsRun, - "time": time_taken, - }) - for info in self._tests: - info.print_report(stream) - stream.write(' \n' % out) - stream.write(' \n' % err) - stream.write('\n') - - -class XMLTestRunner(object): - - """A test runner that stores results in XML format compatible with JUnit. - - XMLTestRunner(stream=None) -> XML test runner - - The XML file is written to the supplied stream. If stream is None, the - results are stored in a file called TEST-..xml in the - current working directory (if not overridden with the path property), - where and are the module and class name of the test class. 
- - """ - - def __init__(self, stream=None): - self._stream = stream - self._path = "." - - def run(self, test): - """Run the given test case or test suite.""" - class_ = test.__class__ - classname = class_.__module__ + "." + class_.__name__ - if self._stream == None: - filename = "TEST-%s.xml" % classname - stream = file(os.path.join(self._path, filename), "w") - stream.write('\n') - else: - stream = self._stream - - result = _XMLTestResult(classname) - start_time = time.time() - - # TODO: Python 2.5: Use the with statement - old_stdout = sys.stdout - old_stderr = sys.stderr - sys.stdout = StringIO() - sys.stderr = StringIO() - - try: - test(result) - try: - out_s = sys.stdout.getvalue() - except AttributeError: - out_s = "" - try: - err_s = sys.stderr.getvalue() - except AttributeError: - err_s = "" - finally: - sys.stdout = old_stdout - sys.stderr = old_stderr - - time_taken = time.time() - start_time - result.print_report(stream, time_taken, out_s, err_s) - if self._stream == None: - stream.close() - - return result - - def _set_path(self, path): - self._path = path - - path = property(lambda self: self._path, _set_path, None, - """The path where the XML files are stored. - - This property is ignored when the XML file is written to a file - stream.""") - - -class XMLTestRunnerTest(unittest.TestCase): - def setUp(self): - self._stream = StringIO() - - def _try_test_run(self, test_class, expected): - - """Run the test suite against the supplied test class and compare the - XML result against the expected XML string. Fail if the expected - string doesn't match the actual string. All time attribute in the - expected string should have the value "0.000". All error and failure - messages are reduced to "Foobar". - - """ - - runner = XMLTestRunner(self._stream) - runner.run(unittest.makeSuite(test_class)) - - got = self._stream.getvalue() - # Replace all time="X.YYY" attributes by time="0.000" to enable a - # simple string comparison. - got = re.sub(r'time="\d+\.\d+"', 'time="0.000"', got) - # Likewise, replace all failure and error messages by a simple "Foobar" - # string. - got = re.sub(r'(?s).*?', r'Foobar', got) - got = re.sub(r'(?s).*?', r'Foobar', got) - - self.assertEqual(expected, got) - - def test_no_tests(self): - """Regression test: Check whether a test run without any tests - matches a previous run. - - """ - class TestTest(unittest.TestCase): - pass - self._try_test_run(TestTest, """ - - - -""") - - def test_success(self): - """Regression test: Check whether a test run with a successful test - matches a previous run. - - """ - class TestTest(unittest.TestCase): - def test_foo(self): - pass - self._try_test_run(TestTest, """ - - - - -""") - - def test_failure(self): - """Regression test: Check whether a test run with a failing test - matches a previous run. - - """ - class TestTest(unittest.TestCase): - def test_foo(self): - self.assert_(False) - self._try_test_run(TestTest, """ - - Foobar - - - - -""") - - def test_error(self): - """Regression test: Check whether a test run with a erroneous test - matches a previous run. - - """ - class TestTest(unittest.TestCase): - def test_foo(self): - raise IndexError() - self._try_test_run(TestTest, """ - - Foobar - - - - -""") - - def test_stdout_capture(self): - """Regression test: Check whether a test run with output to stdout - matches a previous run. 
- - """ - class TestTest(unittest.TestCase): - def test_foo(self): - print "Test" - self._try_test_run(TestTest, """ - - - - -""") - - def test_stderr_capture(self): - """Regression test: Check whether a test run with output to stderr - matches a previous run. - - """ - class TestTest(unittest.TestCase): - def test_foo(self): - print >>sys.stderr, "Test" - self._try_test_run(TestTest, """ - - - - -""") - - class NullStream(object): - """A file-like object that discards everything written to it.""" - def write(self, buffer): - pass - - def test_unittests_changing_stdout(self): - """Check whether the XMLTestRunner recovers gracefully from unit tests - that change stdout, but don't change it back properly. - - """ - class TestTest(unittest.TestCase): - def test_foo(self): - sys.stdout = XMLTestRunnerTest.NullStream() - - runner = XMLTestRunner(self._stream) - runner.run(unittest.makeSuite(TestTest)) - - def test_unittests_changing_stderr(self): - """Check whether the XMLTestRunner recovers gracefully from unit tests - that change stderr, but don't change it back properly. - - """ - class TestTest(unittest.TestCase): - def test_foo(self): - sys.stderr = XMLTestRunnerTest.NullStream() - - runner = XMLTestRunner(self._stream) - runner.run(unittest.makeSuite(TestTest)) - - -class XMLTestProgram(unittest.TestProgram): - def runTests(self): - if self.testRunner is None: - self.testRunner = XMLTestRunner() - unittest.TestProgram.runTests(self) - -main = XMLTestProgram - - -if __name__ == "__main__": - main(module=None) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/.gitignore b/thirdparty/antlr3-antlr-3.5/runtime/Python3/.gitignore deleted file mode 100644 index 1868f2af..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.*.swp -*~ -*.pyc -*.gz diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/AUTHORS b/thirdparty/antlr3-antlr-3.5/runtime/Python3/AUTHORS deleted file mode 100644 index 5040e43c..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/AUTHORS +++ /dev/null @@ -1,6 +0,0 @@ -Python target: -Benjamin Niemann : Main developer of Python target. -Clinton Roy : AST templates and runtime. - -Python3 target: -Benjamin S Wolf (http://github.com/Zannick): Converted Python target to Python3. diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/ChangeLog b/thirdparty/antlr3-antlr-3.5/runtime/Python3/ChangeLog deleted file mode 100644 index ff5113f8..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/ChangeLog +++ /dev/null @@ -1,58 +0,0 @@ -2012-06-26 Benjamin S Wolf - - Initial Python3 target, branched from the Python target by Benjamin - Niemann, with lots of code cleanup and minor refactoring. - - * CodeGenerator.java, Python3.stg: - Generated code now uses set notation for setTest, rather than long - conditionals like "a == FOO or a == BAR or 10 <= a <= 12". This is - a (slight) performance improvement. - - * tokens.py: - Token objects no longer have get/set methods for their attributes as I - switched them to use @property instead. The attributes should be accessed - directly. - - * tokens.py, Python3.stg: - Fix a circular dependency in generated parsers, and give Token objects the - ability to return their typeName when asked for it. (The generated - recognizer gives Token the mapping from token type to type name.) 
- -2007-11-03 Benjamin Niemann - - * PythonTarget.java, dfa.py, exceptions.py, recognizer.py, streams.py: - ANTLRStringStream.LA() now returns the character's ordinal and - generated lexers operate on integers. Also made various performance - tunings. - -2007-10-07 Benjamin Niemann - - * main.py, Python.stg (outputFile): Added simple __main__ section to - generated code, so (simple) grammars can be executed as standalone - script. - - * tree.py (RecognitionException.extractInformationFromTreeNodeStream), - exceptions.py (CommonTree): Small bugfixes. - -2007-09-30 Benjamin Niemann - - * recognizers.py (TokenSource): Added iterator interface to TokenSource - class - and thus to Lexer. - -2007-06-27 Benjamin Niemann - - * Python.stg (genericParser, parser, treeParser): Use correct @init - action block for tree parsers. - -2007-05-24 Benjamin Niemann - - * Python.stg (rule): Added support for @decorate {...} action for - parser rules to add decorators to the rule method. - -2007-05-18 Benjamin Niemann - - * Python.stg (isolatedLookaheadRangeTest, lookaheadRangeTest): - Minor improvement of generated code (use ' <= <= ' - instead of ' >= and <= '). - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/LICENSE b/thirdparty/antlr3-antlr-3.5/runtime/Python3/LICENSE deleted file mode 100644 index 66653dd6..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -[The "BSD licence"] -Copyright (c) 2003-2012 Terence Parr -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/README b/thirdparty/antlr3-antlr-3.5/runtime/Python3/README deleted file mode 100644 index 821d4d78..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/README +++ /dev/null @@ -1,81 +0,0 @@ -1) ABOUT -======== - -This is the Python3 package 'antlr3', which is required to use parsers created -by the ANTLR3 tool. See for more information about -ANTLR3. - - -2) STATUS -========= - -The Python3 target for ANTLR3 is still in beta. Documentation is lacking, some -bits of the code is not yet done, some functionality has not been tested yet. 
-Also the API might change a bit - it currently mimics the Java implementation, -but it may be made a bit more pythonic here and there. - -WARNING: The runtime library is not compatible with recognizers generated by -ANTLR versions preceding V3.4.x. If you are an application developer, -then the suggested way to solve this is to package the correct runtime with -your application. Installing the runtime in the global site-packages directory -may not be a good idea. -Sorry for the inconvenience. - - -3) DOWNLOAD -=========== - -This runtime is part of the ANTLR distribution. The latest version can be found -at . - -If you are interested in the latest, most bleeding edge version, have a look at -the git repository at . - - -4) INSTALLATION -=============== - -Just like any other Python package: -$ python3 setup.py install - -See for more information. - - -5) DOCUMENTATION -================ - -Documentation (as far as it exists) can be found in the wiki - - - -6) REPORTING BUGS -================= - -Please file bug reports on github: . - - -7) HACKING -========== - -Only the runtime package can be found here. There are also some StringTemplate -files in 'src/org/antlr/codegen/templates/Python3/' and some Java code in -'src/org/antlr/codegen/Python3Target.java' (of the main ANTLR3 source -distribution). - -If there are no directories 'tests' and 'unittests' in 'runtime/Python3', you -should fetch the latest ANTLR3 version from the perforce depot. See section -DOWNLOAD. -You'll need java and ant in order to compile and use the tool. -Be sure to properly setup your CLASSPATH. -(FIXME: is there some generic information, how to build it yourself? I should -point to it to avoid duplication.) - -You can then use the commands -$ python3 setup.py unittest -$ python3 setup.py functest -to ensure that changes do not break existing behaviour. - -Please send patches as pull requests on github. For larger code contributions -you'll have to sign the "Developer's Certificate of Origin", which can be -found on or use the feedback form at -. diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/__init__.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/__init__.py deleted file mode 100644 index 73b215b0..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/__init__.py +++ /dev/null @@ -1,152 +0,0 @@ -""" @package antlr3 -@brief ANTLR3 runtime package - -This module contains all support classes, which are needed to use recognizers -generated by ANTLR3. - -@mainpage - -\\note Please be warned that the line numbers in the API documentation do not -match the real locations in the source code of the package. This is an -unintended artifact of doxygen, which I could only convince to use the -correct module names by concatenating all files from the package into a single -module file... - -Here is a little overview over the most commonly used classes provided by -this runtime: - -@section recognizers Recognizers - -These recognizers are baseclasses for the code which is generated by ANTLR3. - -- BaseRecognizer: Base class with common recognizer functionality. -- Lexer: Base class for lexers. -- Parser: Base class for parsers. -- tree.TreeParser: Base class for %tree parser. - -@section streams Streams - -Each recognizer pulls its input from one of the stream classes below. Streams -handle stuff like buffering, look-ahead and seeking. - -A character stream is usually the first element in the pipeline of a typical -ANTLR3 application. It is used as the input for a Lexer. 
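A typical pipeline therefore looks like this (MyLexer and MyParser stand in
for classes generated by ANTLR3 from a grammar; the start rule name ``prog``
is equally hypothetical):

    char_stream = ANTLRStringStream("1 + 2")
    lexer = MyLexer(char_stream)
    tokens = CommonTokenStream(lexer)
    parser = MyParser(tokens)
    parser.prog()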
- -- ANTLRStringStream: Reads from a string object. The input should be a unicode - object, or ANTLR3 will have trouble decoding non-ascii data. -- ANTLRFileStream: Opens a file and reads the contents, with optional character - decoding. -- ANTLRInputStream: Reads the data from a file-like object, with optional - character decoding. - -A Parser needs a TokenStream as input (which in turn is usually fed by a -Lexer): - -- CommonTokenStream: A basic and most commonly used TokenStream - implementation. -- TokenRewriteStream: A modification of CommonTokenStream that allows the - stream to be altered (by the Parser). See the 'tweak' example for a use case. - -And tree.TreeParser finally fetches its input from a tree.TreeNodeStream: - -- tree.CommonTreeNodeStream: A basic and most commonly used tree.TreeNodeStream - implementation. - - -@section tokenstrees Tokens and Trees - -A Lexer emits Token objects which are usually buffered by a TokenStream. A -Parser can build a Tree, if the output=AST option has been set in the grammar. - -The runtime provides these Token implementations: - -- CommonToken: A basic and most commonly used Token implementation. -- ClassicToken: A Token object as used in ANTLR 2.x, used for %tree - construction. - -Tree objects are wrappers for Token objects. - -- tree.CommonTree: A basic and most commonly used Tree implementation. - -A tree.TreeAdaptor is used by the parser to create tree.Tree objects for the -input Token objects. - -- tree.CommonTreeAdaptor: A basic and most commonly used tree.TreeAdaptor -implementation. - - -@section Exceptions - -RecognitionExceptions are generated when a recognizer encounters incorrect -or unexpected input. - -- RecognitionException - - MismatchedRangeException - - MismatchedSetException - - MismatchedNotSetException - . - - MismatchedTokenException - - MismatchedTreeNodeException - - NoViableAltException - - EarlyExitException - - FailedPredicateException - . -. - -A tree.RewriteCardinalityException is raised when the parser hits a -cardinality mismatch during AST construction. Although this is basically a -bug in your grammar, it can only be detected at runtime. - -- tree.RewriteCardinalityException - - tree.RewriteEarlyExitException - - tree.RewriteEmptyStreamException - . -. - -""" - -# tree.RewriteRuleElementStream -# tree.RewriteRuleSubtreeStream -# tree.RewriteRuleTokenStream -# CharStream -# DFA -# TokenSource - -# [The "BSD licence"] -# Copyright (c) 2005-2012 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -__version__ = '3.4' - -# This runtime is compatible with generated parsers using the -# API versions listed in constants.compatible_api_versions. -# 'HEAD' is only used by unittests. - -from .constants import * -from .dfa import * -from .exceptions import * -from .recognizers import * -from .streams import * -from .tokens import * diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/constants.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/constants.py deleted file mode 100644 index f0203eee..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/constants.py +++ /dev/null @@ -1,59 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2012 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -compatible_api_versions = ['HEAD', 1] - -EOF = -1 - -## All tokens go to the parser (unless skip() is called in that rule) -# on a particular "channel". The parser tunes to a particular channel -# so that whitespace etc... can go to the parser on a "hidden" channel. -DEFAULT_CHANNEL = 0 - -## Anything on different channel than DEFAULT_CHANNEL is not parsed -# by parser. 
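# For example (grammar syntax recalled from typical ANTLR3 usage, not taken
# from this file), a whitespace rule usually routes its tokens off the
# default channel so the parser never sees them:
#
#   WS: (' ' | '\t' | '\n')+ { $channel = HIDDEN; };
#
# Such tokens still reach the token stream; the parser simply skips them.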
-HIDDEN_CHANNEL = 99 - -# Predefined token types -EOR_TOKEN_TYPE = 1 - -## -# imaginary tree navigation type; traverse "get child" link -DOWN = 2 -## -#imaginary tree navigation type; finish with a child list -UP = 3 - -MIN_TOKEN_TYPE = UP + 1 - -INVALID_TOKEN_TYPE = 0 - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/debug.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/debug.py deleted file mode 100644 index c309a360..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/debug.py +++ /dev/null @@ -1,1134 +0,0 @@ -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2012 Terence Parr -# All rights reserved. - -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. - -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -import socket -import sys -from .constants import INVALID_TOKEN_TYPE -from .exceptions import RecognitionException -from .recognizers import Parser -from .streams import TokenStream -from .tokens import Token -from .tree import CommonTreeAdaptor, TreeAdaptor, Tree - -class DebugParser(Parser): - def __init__(self, stream, state=None, dbg=None, *args, **kwargs): - # wrap token stream in DebugTokenStream (unless user already did so). - if not isinstance(stream, DebugTokenStream): - stream = DebugTokenStream(stream, dbg) - - super().__init__(stream, state, *args, **kwargs) - - # Who to notify when events in the parser occur. - self._dbg = None - - self.setDebugListener(dbg) - - - def setDebugListener(self, dbg): - """Provide a new debug event listener for this parser. Notify the - input stream too that it should send events to this listener. 
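        For illustration (assuming ``parser`` is a DebugParser built on a
        DebugTokenStream, as arranged by __init__ above):

            parser.setDebugListener(TraceDebugEventListener())
            # or, via the property defined below:
            parser.dbg = TraceDebugEventListener()

        TraceDebugEventListener is one of the listeners defined later in
        this module.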
- """ - - if hasattr(self.input, 'dbg'): - self.input.dbg = dbg - - self._dbg = dbg - - def getDebugListener(self): - return self._dbg - - dbg = property(getDebugListener, setDebugListener) - - - def beginResync(self): - self._dbg.beginResync() - - - def endResync(self): - self._dbg.endResync() - - - def beginBacktrack(self, level): - self._dbg.beginBacktrack(level) - - - def endBacktrack(self, level, successful): - self._dbg.endBacktrack(level, successful) - - - def reportError(self, exc): - Parser.reportError(self, exc) - - if isinstance(exc, RecognitionException): - self._dbg.recognitionException(exc) - - -class DebugTokenStream(TokenStream): - def __init__(self, input, dbg=None): - super().__init__() - self.input = input - self.initialStreamState = True - # Track the last mark() call result value for use in rewind(). - self.lastMarker = None - - self._dbg = None - self.setDebugListener(dbg) - - # force TokenStream to get at least first valid token - # so we know if there are any hidden tokens first in the stream - self.input.LT(1) - - - def getDebugListener(self): - return self._dbg - - def setDebugListener(self, dbg): - self._dbg = dbg - - dbg = property(getDebugListener, setDebugListener) - - - def consume(self): - if self.initialStreamState: - self.consumeInitialHiddenTokens() - - a = self.input.index() - t = self.input.LT(1) - self.input.consume() - b = self.input.index() - self._dbg.consumeToken(t) - - if b > a + 1: - # then we consumed more than one token; must be off channel tokens - for idx in range(a + 1, b): - self._dbg.consumeHiddenToken(self.input.get(idx)) - - - def consumeInitialHiddenTokens(self): - """consume all initial off-channel tokens""" - - firstOnChannelTokenIndex = self.input.index() - for idx in range(firstOnChannelTokenIndex): - self._dbg.consumeHiddenToken(self.input.get(idx)) - - self.initialStreamState = False - - - def LT(self, i): - if self.initialStreamState: - self.consumeInitialHiddenTokens() - - t = self.input.LT(i) - self._dbg.LT(i, t) - return t - - - def LA(self, i): - if self.initialStreamState: - self.consumeInitialHiddenTokens() - - t = self.input.LT(i) - self._dbg.LT(i, t) - return t.type - - - def get(self, i): - return self.input.get(i) - - - def index(self): - return self.input.index() - - - def mark(self): - self.lastMarker = self.input.mark() - self._dbg.mark(self.lastMarker) - return self.lastMarker - - - def rewind(self, marker=None): - self._dbg.rewind(marker) - self.input.rewind(marker) - - - def release(self, marker): - pass - - - def seek(self, index): - # TODO: implement seek in dbg interface - # self._dbg.seek(index); - self.input.seek(index) - - - def size(self): - return self.input.size() - - - def getTokenSource(self): - return self.input.getTokenSource() - - - def getSourceName(self): - return self.getTokenSource().getSourceName() - - - def toString(self, start=None, stop=None): - return self.input.toString(start, stop) - - -class DebugTreeAdaptor(TreeAdaptor): - """A TreeAdaptor proxy that fires debugging events to a DebugEventListener - delegate and uses the TreeAdaptor delegate to do the actual work. All - AST events are triggered by this adaptor; no code gen changes are needed - in generated rules. Debugging events are triggered *after* invoking - tree adaptor routines. - - Trees created with actions in rewrite actions like "-> ^(ADD {foo} {bar})" - cannot be tracked as they might not use the adaptor to create foo, bar. - The debug listener has to deal with tree node IDs for which it did - not see a createNode event. 
A single node is sufficient even - if it represents a whole tree. - """ - - def __init__(self, dbg, adaptor): - super().__init__() - self.dbg = dbg - self.adaptor = adaptor - - - def createWithPayload(self, payload): - if payload.index < 0: - # could be token conjured up during error recovery - return self.createFromType(payload.type, payload.text) - - node = self.adaptor.createWithPayload(payload) - self.dbg.createNode(node, payload) - return node - - def createFromToken(self, tokenType, fromToken, text=None): - node = self.adaptor.createFromToken(tokenType, fromToken, text) - self.dbg.createNode(node) - return node - - def createFromType(self, tokenType, text): - node = self.adaptor.createFromType(tokenType, text) - self.dbg.createNode(node) - return node - - - def errorNode(self, input, start, stop, exc): - node = self.adaptor.errorNode(input, start, stop, exc) - if node is not None: - self.dbg.errorNode(node) - - return node - - - def dupTree(self, tree): - t = self.adaptor.dupTree(tree) - # walk the tree and emit create and add child events - # to simulate what dupTree has done. dupTree does not call this debug - # adapter so I must simulate. - self.simulateTreeConstruction(t) - return t - - - def simulateTreeConstruction(self, t): - """^(A B C): emit create A, create B, add child, ...""" - self.dbg.createNode(t) - for i in range(self.adaptor.getChildCount(t)): - child = self.adaptor.getChild(t, i) - self.simulateTreeConstruction(child) - self.dbg.addChild(t, child) - - - def dupNode(self, treeNode): - d = self.adaptor.dupNode(treeNode) - self.dbg.createNode(d) - return d - - - def nil(self): - node = self.adaptor.nil() - self.dbg.nilNode(node) - return node - - - def isNil(self, tree): - return self.adaptor.isNil(tree) - - - def addChild(self, t, child): - if isinstance(child, Token): - n = self.createWithPayload(child) - self.addChild(t, n) - - else: - if t is None or child is None: - return - - self.adaptor.addChild(t, child) - self.dbg.addChild(t, child) - - def becomeRoot(self, newRoot, oldRoot): - if isinstance(newRoot, Token): - n = self.createWithPayload(newRoot) - self.adaptor.becomeRoot(n, oldRoot) - else: - n = self.adaptor.becomeRoot(newRoot, oldRoot) - - self.dbg.becomeRoot(newRoot, oldRoot) - return n - - - def rulePostProcessing(self, root): - return self.adaptor.rulePostProcessing(root) - - - def getType(self, t): - return self.adaptor.getType(t) - - - def setType(self, t, type): - self.adaptor.setType(t, type) - - - def getText(self, t): - return self.adaptor.getText(t) - - - def setText(self, t, text): - self.adaptor.setText(t, text) - - - def getToken(self, t): - return self.adaptor.getToken(t) - - - def setTokenBoundaries(self, t, startToken, stopToken): - self.adaptor.setTokenBoundaries(t, startToken, stopToken) - if t and startToken and stopToken: - self.dbg.setTokenBoundaries( - t, startToken.index, stopToken.index) - - - def getTokenStartIndex(self, t): - return self.adaptor.getTokenStartIndex(t) - - - def getTokenStopIndex(self, t): - return self.adaptor.getTokenStopIndex(t) - - - def getChild(self, t, i): - return self.adaptor.getChild(t, i) - - - def setChild(self, t, i, child): - self.adaptor.setChild(t, i, child) - - - def deleteChild(self, t, i): - return self.adaptor.deleteChild(t, i) - - - def getChildCount(self, t): - return self.adaptor.getChildCount(t) - - - def getUniqueID(self, node): - return self.adaptor.getUniqueID(node) - - - def getParent(self, t): - return self.adaptor.getParent(t) - - - def getChildIndex(self, t): - return 
self.adaptor.getChildIndex(t) - - - def setParent(self, t, parent): - self.adaptor.setParent(t, parent) - - - def setChildIndex(self, t, index): - self.adaptor.setChildIndex(t, index) - - - def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): - self.adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t) - - - ## support - - def getDebugListener(self): - return self.dbg - - def setDebugListener(self, dbg): - self.dbg = dbg - - - def getTreeAdaptor(self): - return self.adaptor - - - -class DebugEventListener(object): - """All debugging events that a recognizer can trigger. - - I did not create a separate AST debugging interface as it would create - lots of extra classes and DebugParser has a dbg var defined, which makes - it hard to change to ASTDebugEventListener. I looked hard at this issue - and it is easier to understand as one monolithic event interface for all - possible events. Hopefully, adding ST debugging stuff won't be bad. Leave - for future. 4/26/2006. - """ - - # Moved to version 2 for v3.1: added grammar name to enter/exit Rule - PROTOCOL_VERSION = "2" - - def enterRule(self, grammarFileName, ruleName): - """The parser has just entered a rule. No decision has been made about - which alt is predicted. This is fired AFTER init actions have been - executed. Attributes are defined and available etc... - The grammarFileName allows composite grammars to jump around among - multiple grammar files. - """ - - pass - - - def enterAlt(self, alt): - """Because rules can have lots of alternatives, it is very useful to - know which alt you are entering. This is 1..n for n alts. - """ - pass - - - def exitRule(self, grammarFileName, ruleName): - """This is the last thing executed before leaving a rule. It is - executed even if an exception is thrown. This is triggered after - error reporting and recovery have occurred (unless the exception is - not caught in this rule). This implies an "exitAlt" event. - The grammarFileName allows composite grammars to jump around among - multiple grammar files. - """ - pass - - - def enterSubRule(self, decisionNumber): - """Track entry into any (...) subrule other EBNF construct""" - pass - - - def exitSubRule(self, decisionNumber): - pass - - - def enterDecision(self, decisionNumber, couldBacktrack): - """Every decision, fixed k or arbitrary, has an enter/exit event - so that a GUI can easily track what LT/consume events are - associated with prediction. You will see a single enter/exit - subrule but multiple enter/exit decision events, one for each - loop iteration. - """ - pass - - - def exitDecision(self, decisionNumber): - pass - - - def consumeToken(self, t): - """An input token was consumed; matched by any kind of element. - Trigger after the token was matched by things like match(), matchAny(). - """ - pass - - - def consumeHiddenToken(self, t): - """An off-channel input token was consumed. - Trigger after the token was matched by things like match(), matchAny(). - (unless of course the hidden token is first stuff in the input stream). - """ - pass - - - def LT(self, i, t): - """Somebody (anybody) looked ahead. Note that this actually gets - triggered by both LA and LT calls. The debugger will want to know - which Token object was examined. Like consumeToken, this indicates - what token was seen at that depth. A remote debugger cannot look - ahead into a file it doesn't have so LT events must pass the token - even if the info is redundant. 
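        For instance, a listener that merely logs lookahead events might be
        written as (an illustrative subclass, not part of the runtime):

            class LookaheadLogger(DebugEventListener):
                def LT(self, i, t):
                    if t is not None:
                        print("LT({}) -> {!r}".format(i, t.text))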
- For tree parsers, if the type is UP or DOWN, - then the ID is not really meaningful as it's fixed--there is - just one UP node and one DOWN navigation node. - """ - pass - - - def mark(self, marker): - """The parser is going to look arbitrarily ahead; mark this location, - the token stream's marker is sent in case you need it. - """ - pass - - - def rewind(self, marker=None): - """After an arbitrarily long lookahead as with a cyclic DFA (or with - any backtrack), this informs the debugger that the stream should be - rewound to the position associated with marker. - - """ - pass - - - def beginBacktrack(self, level): - pass - - - def endBacktrack(self, level, successful): - pass - - - def location(self, line, pos): - """To watch a parser move through the grammar, the parser needs to - inform the debugger what line/charPos it is passing in the grammar. - For now, this does not know how to switch from one grammar to the - other and back for island grammars etc... - - This should also allow breakpoints because the debugger can stop - the parser whenever it hits this line/pos. - """ - pass - - - def recognitionException(self, e): - """A recognition exception occurred such as NoViableAltException. I made - this a generic event so that I can alter the exception hierarchy later - without having to alter all the debug objects. - - Upon error, the stack of enter rule/subrule must be properly unwound. - If no viable alt occurs it is within an enter/exit decision, which - also must be rewound. Even the rewind for each mark must be unwound. - In the Java target this is pretty easy using try/finally, if a bit - ugly in the generated code. The rewind is generated in DFA.predict() - actually so no code needs to be generated for that. For languages - w/o this "finally" feature (C++?), the target implementor will have - to build an event stack or something. - - Across a socket for remote debugging, only the RecognitionException - data fields are transmitted. The token object or whatever that - caused the problem was the last object referenced by LT. The - immediately preceding LT event should hold the unexpected Token or - char. - - Here is a sample event trace for grammar: - - b : C ({;}A|B) // {;} is there to prevent A|B becoming a set - | D - ; - - The sequence for this rule (with no viable alt in the subrule) for - input 'c c' (there are 3 tokens) is: - - commence - LT(1) - enterRule b - location 7 1 - enter decision 3 - LT(1) - exit decision 3 - enterAlt1 - location 7 5 - LT(1) - consumeToken [c/<4>,1:0] - location 7 7 - enterSubRule 2 - enter decision 2 - LT(1) - LT(1) - recognitionException NoViableAltException 2 1 2 - exit decision 2 - exitSubRule 2 - beginResync - LT(1) - consumeToken [c/<4>,1:1] - LT(1) - endResync - LT(-1) - exitRule b - terminate - """ - pass - - - def beginResync(self): - """Indicates the recognizer is about to consume tokens to resynchronize - the parser. Any consume events from here until the recovered event - are not part of the parse--they are dead tokens. - """ - pass - - - def endResync(self): - """Indicates that the recognizer has finished consuming tokens in order - to resynchronize. There may be multiple beginResync/endResync pairs - before the recognizer comes out of errorRecovery mode (in which - multiple errors are suppressed). This will be useful - in a GUI where you probably want to grey out tokens that are consumed - but not matched to anything in grammar. Anything between - a beginResync/endResync pair was tossed out by the parser. 
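        A listener exploiting this pairing might, purely for illustration,
        count the dead tokens (hypothetical subclass):

            class ResyncCounter(DebugEventListener):
                def __init__(self):
                    self._resyncing = False
                    self.tossed = 0
                def beginResync(self):
                    self._resyncing = True
                def endResync(self):
                    self._resyncing = False
                def consumeToken(self, t):
                    if self._resyncing:
                        self.tossed += 1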
- """ - pass - - - def semanticPredicate(self, result, predicate): - """A semantic predicate was evaluate with this result and action text""" - pass - - - def commence(self): - """Announce that parsing has begun. Not technically useful except for - sending events over a socket. A GUI for example will launch a thread - to connect and communicate with a remote parser. The thread will want - to notify the GUI when a connection is made. ANTLR parsers - trigger this upon entry to the first rule (the ruleLevel is used to - figure this out). - """ - pass - - - def terminate(self): - """Parsing is over; successfully or not. Mostly useful for telling - remote debugging listeners that it's time to quit. When the rule - invocation level goes to zero at the end of a rule, we are done - parsing. - """ - pass - - - ## T r e e P a r s i n g - - def consumeNode(self, t): - """Input for a tree parser is an AST, but we know nothing for sure - about a node except its type and text (obtained from the adaptor). - This is the analog of the consumeToken method. Again, the ID is - the hashCode usually of the node so it only works if hashCode is - not implemented. If the type is UP or DOWN, then - the ID is not really meaningful as it's fixed--there is - just one UP node and one DOWN navigation node. - """ - pass - - - ## A S T E v e n t s - - def nilNode(self, t): - """A nil was created (even nil nodes have a unique ID... - they are not "null" per se). As of 4/28/2006, this - seems to be uniquely triggered when starting a new subtree - such as when entering a subrule in automatic mode and when - building a tree in rewrite mode. - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only t.ID is set. - """ - pass - - - def errorNode(self, t): - """Upon syntax error, recognizers bracket the error with an error node - if they are building ASTs. - """ - pass - - - def createNode(self, node, token=None): - """Announce a new node built from token elements such as type etc... - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only t.ID, type, text are - set. - """ - pass - - - def becomeRoot(self, newRoot, oldRoot): - """Make a node the new root of an existing root. - - Note: the newRootID parameter is possibly different - than the TreeAdaptor.becomeRoot() newRoot parameter. - In our case, it will always be the result of calling - TreeAdaptor.becomeRoot() and not root_n or whatever. - - The listener should assume that this event occurs - only when the current subrule (or rule) subtree is - being reset to newRootID. - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only IDs are set. - - @see antlr3.tree.TreeAdaptor.becomeRoot() - """ - pass - - - def addChild(self, root, child): - """Make childID a child of rootID. - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only IDs are set. - - @see antlr3.tree.TreeAdaptor.addChild() - """ - pass - - - def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex): - """Set the token start/stop token index for a subtree root or node. - - If you are receiving this event over a socket via - RemoteDebugEventSocketListener then only t.ID is set. - """ - pass - - -class BlankDebugEventListener(DebugEventListener): - """A blank listener that does nothing; useful for real classes so - they don't have to have lots of blank methods and are less - sensitive to updates to debug interface. 
- - Note: this class is identical to DebugEventListener and exists purely - for compatibility with Java. - """ - pass - - -class TraceDebugEventListener(DebugEventListener): - """A listener that simply records text representations of the events. - - Useful for debugging the debugging facility ;) - - Subclasses can override the record() method (which defaults to printing to - stdout) to record the events in a different way. - """ - - def __init__(self, adaptor=None): - super().__init__() - - if adaptor is None: - adaptor = CommonTreeAdaptor() - self.adaptor = adaptor - - def record(self, event): - sys.stdout.write(event + '\n') - - def enterRule(self, grammarFileName, ruleName): - self.record("enterRule " + ruleName) - - def exitRule(self, grammarFileName, ruleName): - self.record("exitRule " + ruleName) - - def enterSubRule(self, decisionNumber): - self.record("enterSubRule") - - def exitSubRule(self, decisionNumber): - self.record("exitSubRule") - - def location(self, line, pos): - self.record("location {}:{}".format(line, pos)) - - ## Tree parsing stuff - - def consumeNode(self, t): - self.record("consumeNode {} {} {}".format( - self.adaptor.getUniqueID(t), - self.adaptor.getText(t), - self.adaptor.getType(t))) - - def LT(self, i, t): - self.record("LT {} {} {} {}".format( - i, - self.adaptor.getUniqueID(t), - self.adaptor.getText(t), - self.adaptor.getType(t))) - - - ## AST stuff - def nilNode(self, t): - self.record("nilNode {}".format(self.adaptor.getUniqueID(t))) - - def createNode(self, t, token=None): - if token is None: - self.record("create {}: {}, {}".format( - self.adaptor.getUniqueID(t), - self.adaptor.getText(t), - self.adaptor.getType(t))) - - else: - self.record("create {}: {}".format( - self.adaptor.getUniqueID(t), - token.index)) - - def becomeRoot(self, newRoot, oldRoot): - self.record("becomeRoot {}, {}".format( - self.adaptor.getUniqueID(newRoot), - self.adaptor.getUniqueID(oldRoot))) - - def addChild(self, root, child): - self.record("addChild {}, {}".format( - self.adaptor.getUniqueID(root), - self.adaptor.getUniqueID(child))) - - def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex): - self.record("setTokenBoundaries {}, {}, {}".format( - self.adaptor.getUniqueID(t), - tokenStartIndex, tokenStopIndex)) - - -class RecordDebugEventListener(TraceDebugEventListener): - """A listener that records events as strings in an array.""" - - def __init__(self, adaptor=None): - super().__init__(adaptor) - - self.events = [] - - def record(self, event): - self.events.append(event) - - -class DebugEventSocketProxy(DebugEventListener): - """A proxy debug event listener that forwards events over a socket to - a debugger (or any other listener) using a simple text-based protocol; - one event per line. ANTLRWorks listens on server socket with a - RemoteDebugEventSocketListener instance. These two objects must therefore - be kept in sync. New events must be handled on both sides of socket. - """ - - DEFAULT_DEBUGGER_PORT = 49100 - - def __init__(self, recognizer, adaptor=None, port=None, debug=None): - super().__init__() - - self.grammarFileName = recognizer.getGrammarFileName() - - # Almost certainly the recognizer will have adaptor set, but - # we don't know how to cast it (Parser or TreeParser) to get - # the adaptor field. Must be set with a constructor. 
:( - self.adaptor = adaptor - - self.port = port or self.DEFAULT_DEBUGGER_PORT - - self.debug = debug - - self.socket = None - self.connection = None - self.input = None - self.output = None - - - def log(self, msg): - if self.debug: - self.debug.write(msg + '\n') - - - def handshake(self): - if self.socket is None: - # create listening socket - self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.socket.bind(('', self.port)) - self.socket.listen(1) - self.log("Waiting for incoming connection on port {}".format(self.port)) - - # wait for an incoming connection - self.connection, addr = self.socket.accept() - self.log("Accepted connection from {}:{}".format(addr[0], addr[1])) - - self.connection.setblocking(1) - self.connection.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) - - self.output = self.connection.makefile('w', 1) - self.input = self.connection.makefile('r', 1) - - self.write("ANTLR {}".format(self.PROTOCOL_VERSION)) - self.write('grammar "{}"'.format(self.grammarFileName)) - self.ack() - - - def write(self, msg): - self.log("> {}".format(msg)) - self.output.write("{}\n".format(msg)) - self.output.flush() - - - def ack(self): - t = self.input.readline() - self.log("< {}".format(t.rstrip())) - - - def transmit(self, event): - self.write(event) - self.ack() - - - def commence(self): - # don't bother sending event; listener will trigger upon connection - pass - - - def terminate(self): - self.transmit("terminate") - self.output.close() - self.input.close() - self.connection.close() - self.socket.close() - - - def enterRule(self, grammarFileName, ruleName): - self.transmit("enterRule\t{}\t{}".format(grammarFileName, ruleName)) - - - def enterAlt(self, alt): - self.transmit("enterAlt\t{}".format(alt)) - - - def exitRule(self, grammarFileName, ruleName): - self.transmit("exitRule\t{}\t{}".format(grammarFileName, ruleName)) - - - def enterSubRule(self, decisionNumber): - self.transmit("enterSubRule\t{}".format(decisionNumber)) - - - def exitSubRule(self, decisionNumber): - self.transmit("exitSubRule\t{}".format(decisionNumber)) - - - def enterDecision(self, decisionNumber, couldBacktrack): - self.transmit( - "enterDecision\t{}\t{:d}".format(decisionNumber, couldBacktrack)) - - - def exitDecision(self, decisionNumber): - self.transmit("exitDecision\t{}".format(decisionNumber)) - - - def consumeToken(self, t): - self.transmit("consumeToken\t{}".format(self.serializeToken(t))) - - - def consumeHiddenToken(self, t): - self.transmit("consumeHiddenToken\t{}".format(self.serializeToken(t))) - - - def LT(self, i, o): - if isinstance(o, Tree): - return self.LT_tree(i, o) - return self.LT_token(i, o) - - - def LT_token(self, i, t): - if t is not None: - self.transmit("LT\t{}\t{}".format(i, self.serializeToken(t))) - - - def mark(self, i): - self.transmit("mark\t{}".format(i)) - - - def rewind(self, i=None): - if i is not None: - self.transmit("rewind\t{}".format(i)) - else: - self.transmit("rewind") - - - def beginBacktrack(self, level): - self.transmit("beginBacktrack\t{}".format(level)) - - - def endBacktrack(self, level, successful): - self.transmit("endBacktrack\t{}\t{}".format( - level, '1' if successful else '0')) - - - def location(self, line, pos): - self.transmit("location\t{}\t{}".format(line, pos)) - - - def recognitionException(self, exc): - self.transmit('\t'.join([ - "exception", - exc.__class__.__name__, - str(int(exc.index)), - str(int(exc.line)), - str(int(exc.charPositionInLine))])) - - - def 
beginResync(self): - self.transmit("beginResync") - - - def endResync(self): - self.transmit("endResync") - - - def semanticPredicate(self, result, predicate): - self.transmit('\t'.join([ - "semanticPredicate", - str(int(result)), - self.escapeNewlines(predicate)])) - - ## A S T P a r s i n g E v e n t s - - def consumeNode(self, t): - FIXME(31) -# StringBuffer buf = new StringBuffer(50); -# buf.append("consumeNode"); -# serializeNode(buf, t); -# transmit(buf.toString()); - - - def LT_tree(self, i, t): - FIXME(34) -# int ID = adaptor.getUniqueID(t); -# String text = adaptor.getText(t); -# int type = adaptor.getType(t); -# StringBuffer buf = new StringBuffer(50); -# buf.append("LN\t"); // lookahead node; distinguish from LT in protocol -# buf.append(i); -# serializeNode(buf, t); -# transmit(buf.toString()); - - - def serializeNode(self, buf, t): - FIXME(33) -# int ID = adaptor.getUniqueID(t); -# String text = adaptor.getText(t); -# int type = adaptor.getType(t); -# buf.append("\t"); -# buf.append(ID); -# buf.append("\t"); -# buf.append(type); -# Token token = adaptor.getToken(t); -# int line = -1; -# int pos = -1; -# if ( token!=null ) { -# line = token.getLine(); -# pos = token.getCharPositionInLine(); -# } -# buf.append("\t"); -# buf.append(line); -# buf.append("\t"); -# buf.append(pos); -# int tokenIndex = adaptor.getTokenStartIndex(t); -# buf.append("\t"); -# buf.append(tokenIndex); -# serializeText(buf, text); - - - ## A S T E v e n t s - - def nilNode(self, t): - self.transmit("nilNode\t{}".format(self.adaptor.getUniqueID(t))) - - - def errorNode(self, t): - self.transmit('errorNode\t{}\t{}\t"{}'.format( - self.adaptor.getUniqueID(t), - INVALID_TOKEN_TYPE, - self.escapeNewlines(t.toString()))) - - - def createNode(self, node, token=None): - if token is not None: - self.transmit("createNode\t{}\t{}".format( - self.adaptor.getUniqueID(node), - token.index)) - - else: - self.transmit('createNodeFromTokenElements\t{}\t{}\t"{}'.format( - self.adaptor.getUniqueID(node), - self.adaptor.getType(node), - self.adaptor.getText(node))) - - - def becomeRoot(self, newRoot, oldRoot): - self.transmit("becomeRoot\t{}\t{}".format( - self.adaptor.getUniqueID(newRoot), - self.adaptor.getUniqueID(oldRoot))) - - - def addChild(self, root, child): - self.transmit("addChild\t{}\t{}".format( - self.adaptor.getUniqueID(root), - self.adaptor.getUniqueID(child))) - - - def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex): - self.transmit("setTokenBoundaries\t{}\t{}\t{}".format( - self.adaptor.getUniqueID(t), - tokenStartIndex, tokenStopIndex)) - - - - ## support - - def setTreeAdaptor(self, adaptor): - self.adaptor = adaptor - - def getTreeAdaptor(self): - return self.adaptor - - - def serializeToken(self, t): - buf = [str(int(t.index)), - str(int(t.type)), - str(int(t.channel)), - str(int(t.line or 0)), - str(int(t.charPositionInLine or 0)), - '"' + self.escapeNewlines(t.text)] - return '\t'.join(buf) - - - def escapeNewlines(self, txt): - if txt is None: - return '' - - txt = txt.replace("%","%25") # escape all escape char ;) - txt = txt.replace("\n","%0A") # escape \n - txt = txt.replace("\r","%0D") # escape \r - return txt diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/dfa.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/dfa.py deleted file mode 100644 index 10538624..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/dfa.py +++ /dev/null @@ -1,195 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2012 
Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -from .constants import EOF -from .exceptions import NoViableAltException, BacktrackingFailed - - -class DFA(object): - """@brief A DFA implemented as a set of transition tables. - - Any state that has a semantic predicate edge is special; those states - are generated with if-then-else structures in a specialStateTransition() - which is generated by cyclicDFA template. - - """ - - def __init__( - self, - recognizer, decisionNumber, - eot, eof, min, max, accept, special, transition - ): - ## Which recognizer encloses this DFA? Needed to check backtracking - self.recognizer = recognizer - - self.decisionNumber = decisionNumber - self.eot = eot - self.eof = eof - self.min = min - self.max = max - self.accept = accept - self.special = special - self.transition = transition - - - def predict(self, input): - """ - From the input stream, predict what alternative will succeed - using this DFA (representing the covering regular approximation - to the underlying CFL). Return an alternative number 1..n. Throw - an exception upon error. - """ - mark = input.mark() - s = 0 # we always start at s0 - try: - for _ in range(50000): - specialState = self.special[s] - if specialState >= 0: - s = self.specialStateTransition(specialState, input) - if s == -1: - self.noViableAlt(s, input) - return 0 - input.consume() - continue - - if self.accept[s] >= 1: - return self.accept[s] - - # look for a normal char transition - c = input.LA(1) - - if c >= self.min[s] and c <= self.max[s]: - # move to next state - snext = self.transition[s][c-self.min[s]] - - if snext < 0: - # was in range but not a normal transition - # must check EOT, which is like the else clause. - # eot[s]>=0 indicates that an EOT edge goes to another - # state. - if self.eot[s] >= 0: # EOT Transition to accept state? - s = self.eot[s] - input.consume() - # TODO: I had this as return accept[eot[s]] - # which assumed here that the EOT edge always - # went to an accept...faster to do this, but - # what about predicated edges coming from EOT - # target? 
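-                            # (an illustrative walk, not part of the
-                            #  original comment: with eot = [2, -1, ...],
-                            #  a failed range lookup in state 0 falls
-                            #  back to EOT target state 2 before giving
-                            #  up; the table values are made up, not
-                            #  from a real generated DFA)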
-                            continue
-
-                        self.noViableAlt(s, input)
-                        return 0
-
-                    s = snext
-                    input.consume()
-                    continue
-
-                if self.eot[s] >= 0:
-                    s = self.eot[s]
-                    input.consume()
-                    continue
-
-                # EOF Transition to accept state?
-                if c == EOF and self.eof[s] >= 0:
-                    return self.accept[self.eof[s]]
-
-                # not in range and not EOF/EOT, must be invalid symbol
-                self.noViableAlt(s, input)
-                return 0
-
-            else:
-                raise RuntimeError("DFA bang!")
-
-        finally:
-            input.rewind(mark)
-
-
-    def noViableAlt(self, s, input):
-        if self.recognizer._state.backtracking > 0:
-            raise BacktrackingFailed
-
-        nvae = NoViableAltException(
-            self.getDescription(),
-            self.decisionNumber,
-            s,
-            input
-            )
-
-        self.error(nvae)
-        raise nvae
-
-
-    def error(self, nvae):
-        """A hook for debugging interface"""
-        pass
-
-
-    def specialStateTransition(self, s, input):
-        return -1
-
-
-    def getDescription(self):
-        return "n/a"
-
-
-##     def specialTransition(self, state, symbol):
-##         return 0
-
-
-    @classmethod
-    def unpack(cls, string):
-        """@brief Unpack the runlength encoded table data.
-
-        Terence implemented packed table initializers, because Java has a
-        size restriction on .class files and the lookup tables can grow
-        pretty large. The generated JavaLexer.java of the Java.g example
-        would be about 15MB with uncompressed array initializers.
-
-        Python does not have any size restrictions, but the compilation of
-        such large source files seems to be pretty memory hungry. The memory
-        consumption of the python process grew to >1.5GB when importing a
-        15MB lexer, eating all my swap space, and I was too impatient to see
-        whether it could finish at all. With packed initializers that are
-        unpacked at import time of the lexer module, everything works like
-        a charm.

-        """
-
-        ret = []
-        for i in range(0, len(string) - 1, 2):
-            (n, v) = ord(string[i]), ord(string[i + 1])
-
-            if v == 0xFFFF:
-                v = -1
-
-            ret += [v] * n
-
-        return ret
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/exceptions.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/exceptions.py
deleted file mode 100644
index 78ea4419..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/exceptions.py
+++ /dev/null
@@ -1,364 +0,0 @@
-"""ANTLR3 exception hierarchy"""
-
-# begin[licence]
-#
-# [The "BSD licence"]
-# Copyright (c) 2005-2012 Terence Parr
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. The name of the author may not be used to endorse or promote products
-#    derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# end[licence]
-
-from .constants import INVALID_TOKEN_TYPE
-
-
-class BacktrackingFailed(Exception):
-    """@brief Raised to signal failed backtrack attempt"""
-
-    pass
-
-
-class RecognitionException(Exception):
-    """@brief The root of the ANTLR exception hierarchy.
-
-    To avoid English-only error messages and to generally make things
-    as flexible as possible, these exceptions are not created with strings,
-    but rather the information necessary to generate an error. Then
-    the various reporting methods in Parser and Lexer can be overridden
-    to generate a localized error message. For example, MismatchedToken
-    exceptions are built with the expected token type.
-    So, don't expect getMessage() to return anything.
-
-    Note that as of Java 1.4, you can access the stack trace, which means
-    that you can compute the complete trace of rules from the start symbol.
-    This gives you considerable context information with which to generate
-    useful error messages.
-
-    ANTLR generates code that throws exceptions upon recognition error and
-    also generates code to catch these exceptions in each rule. If you
-    want to quit upon first error, you can turn off the automatic error
-    handling mechanism using the rulecatch action, but you still need to
-    override methods mismatch and recoverFromMismatchSet.
-
-    In general, the recognition exceptions can track where in a grammar a
-    problem occurred and/or what was the expected input. While the parser
-    knows its state (such as current input symbol and line info), that
-    state can change before the exception is reported, so the current token
-    index is computed and stored at exception time. From this info, you can
-    perhaps print an entire line of input, not just a single token, for
-    example. Better to just say the recognizer had a problem and then let
-    the parser figure out a fancy report.
-
-    """
-
-    def __init__(self, input=None):
-        super().__init__()
-
-        # What input stream did the error occur in?
-        self.input = None
-
-        # What is the index of the token/char we were looking at when the
-        # error occurred?
-        self.index = None
-
-        # The current Token when an error occurred. Since not all streams
-        # can retrieve the ith Token, we have to track the Token object.
-        # For parsers. Even when it's a tree parser, token might be set.
-        self.token = None
-
-        # If this is a tree parser exception, node is set to the node with
-        # the problem.
-        self.node = None
-
-        # The current char when an error occurred. For lexers.
-        self.c = None
-
-        # Track the line at which the error occurred in case this is
-        # generated from a lexer. We need to track this since the
-        # unexpected char doesn't carry the line info.
-        self.line = None
-
-        self.charPositionInLine = None
-
-        # If you are parsing a tree node stream, you will encounter some
-        # imaginary nodes w/o line/col info. We now search backwards looking
-        # for most recent token with line/col info, but notify getErrorHeader()
-        # that info is approximate.
- self.approximateLineInfo = False - - - if input: - self.input = input - self.index = input.index() - - # late import to avoid cyclic dependencies - from .streams import TokenStream, CharStream - from .tree import TreeNodeStream - - if isinstance(self.input, TokenStream): - self.token = self.input.LT(1) - self.line = self.token.line - self.charPositionInLine = self.token.charPositionInLine - - if isinstance(self.input, TreeNodeStream): - self.extractInformationFromTreeNodeStream(self.input) - - else: - if isinstance(self.input, CharStream): - self.c = self.input.LT(1) - self.line = self.input.line - self.charPositionInLine = self.input.charPositionInLine - - else: - self.c = self.input.LA(1) - - def extractInformationFromTreeNodeStream(self, nodes): - from .tree import Tree, CommonTree - from .tokens import CommonToken - - self.node = nodes.LT(1) - adaptor = nodes.adaptor - payload = adaptor.getToken(self.node) - if payload: - self.token = payload - if payload.line <= 0: - # imaginary node; no line/pos info; scan backwards - i = -1 - priorNode = nodes.LT(i) - while priorNode: - priorPayload = adaptor.getToken(priorNode) - if priorPayload and priorPayload.line > 0: - # we found the most recent real line / pos info - self.line = priorPayload.line - self.charPositionInLine = priorPayload.charPositionInLine - self.approximateLineInfo = True - break - - i -= 1 - priorNode = nodes.LT(i) - - else: # node created from real token - self.line = payload.line - self.charPositionInLine = payload.charPositionInLine - - elif isinstance(self.node, Tree): - self.line = self.node.line - self.charPositionInLine = self.node.charPositionInLine - if isinstance(self.node, CommonTree): - self.token = self.node.token - - else: - type = adaptor.getType(self.node) - text = adaptor.getText(self.node) - self.token = CommonToken(type=type, text=text) - - - def getUnexpectedType(self): - """Return the token type or char of the unexpected input element""" - - from .streams import TokenStream - from .tree import TreeNodeStream - - if isinstance(self.input, TokenStream): - return self.token.type - - elif isinstance(self.input, TreeNodeStream): - adaptor = self.input.treeAdaptor - return adaptor.getType(self.node) - - else: - return self.c - - unexpectedType = property(getUnexpectedType) - - -class MismatchedTokenException(RecognitionException): - """@brief A mismatched char or Token or tree node.""" - - def __init__(self, expecting, input): - super().__init__(input) - self.expecting = expecting - - - def __str__(self): - return "MismatchedTokenException({!r}!={!r})".format( - self.getUnexpectedType(), self.expecting - ) - __repr__ = __str__ - - -class UnwantedTokenException(MismatchedTokenException): - """An extra token while parsing a TokenStream""" - - def getUnexpectedToken(self): - return self.token - - - def __str__(self): - exp = ", expected {}".format(self.expecting) - if self.expecting == INVALID_TOKEN_TYPE: - exp = "" - - if not self.token: - return "UnwantedTokenException(found={}{})".format(None, exp) - - return "UnwantedTokenException(found={}{})".format(self.token.text, exp) - __repr__ = __str__ - - -class MissingTokenException(MismatchedTokenException): - """ - We were expecting a token but it's not found. The current token - is actually what we wanted next. 
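-
-    For example, when the parser expects ';' but sees ')', error recovery
-    may conjure up an imaginary ';' token via getMissingSymbol() and pass
-    it in as 'inserted' (an illustrative reading of
-    recoverFromMismatchedToken() in recognizers.py, not extra behaviour
-    of this class).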
- """ - - def __init__(self, expecting, input, inserted): - super().__init__(expecting, input) - - self.inserted = inserted - - - def getMissingType(self): - return self.expecting - - - def __str__(self): - if self.token: - if self.inserted: - return "MissingTokenException(inserted {!r} at {!r})".format( - self.inserted, self.token.text) - - return "MissingTokenException(at {!r})".format(self.token.text) - - return "MissingTokenException" - __repr__ = __str__ - - -class MismatchedRangeException(RecognitionException): - """@brief The next token does not match a range of expected types.""" - - def __init__(self, a, b, input): - super().__init__(input) - - self.a = a - self.b = b - - - def __str__(self): - return "MismatchedRangeException({!r} not in [{!r}..{!r}])".format( - self.getUnexpectedType(), self.a, self.b - ) - __repr__ = __str__ - - -class MismatchedSetException(RecognitionException): - """@brief The next token does not match a set of expected types.""" - - def __init__(self, expecting, input): - super().__init__(input) - - self.expecting = expecting - - - def __str__(self): - return "MismatchedSetException({!r} not in {!r})".format( - self.getUnexpectedType(), self.expecting - ) - __repr__ = __str__ - - -class MismatchedNotSetException(MismatchedSetException): - """@brief Used for remote debugger deserialization""" - - def __str__(self): - return "MismatchedNotSetException({!r}!={!r})".format( - self.getUnexpectedType(), self.expecting - ) - __repr__ = __str__ - - -class NoViableAltException(RecognitionException): - """@brief Unable to decide which alternative to choose.""" - - def __init__( - self, grammarDecisionDescription, decisionNumber, stateNumber, input - ): - super().__init__(input) - - self.grammarDecisionDescription = grammarDecisionDescription - self.decisionNumber = decisionNumber - self.stateNumber = stateNumber - - - def __str__(self): - return "NoViableAltException({!r}!=[{!r}])".format( - self.unexpectedType, self.grammarDecisionDescription - ) - __repr__ = __str__ - - -class EarlyExitException(RecognitionException): - """@brief The recognizer did not match anything for a (..)+ loop.""" - - def __init__(self, decisionNumber, input): - super().__init__(input) - - self.decisionNumber = decisionNumber - - -class FailedPredicateException(RecognitionException): - """@brief A semantic predicate failed during validation. - - Validation of predicates - occurs when normally parsing the alternative just like matching a token. - Disambiguating predicate evaluation occurs when we hoist a predicate into - a prediction decision. 
- """ - - def __init__(self, input, ruleName, predicateText): - super().__init__(input) - - self.ruleName = ruleName - self.predicateText = predicateText - - - def __str__(self): - return "FailedPredicateException({},{{{}}}?)".format( - self.ruleName, self.predicateText) - __repr__ = __str__ - - -class MismatchedTreeNodeException(RecognitionException): - """@brief The next tree mode does not match the expected type.""" - - def __init__(self, expecting, input): - super().__init__(input) - - self.expecting = expecting - - def __str__(self): - return "MismatchedTreeNodeException({!r}!={!r})".format( - self.getUnexpectedType(), self.expecting - ) - __repr__ = __str__ diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/main.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/main.py deleted file mode 100644 index 739726b1..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/main.py +++ /dev/null @@ -1,234 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2012 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# -# end[licence] - - -import sys -import argparse - -from .streams import ANTLRStringStream, ANTLRFileStream, \ - ANTLRInputStream, CommonTokenStream -from .tree import CommonTreeNodeStream - - -class _Main(object): - def __init__(self): - self.stdin = sys.stdin - self.stdout = sys.stdout - self.stderr = sys.stderr - - - def parseArgs(self, argv): - argParser = argparse.ArgumentParser() - argParser.add_argument("--input") - argParser.add_argument("--interactive", "-i", action="store_true") - argParser.add_argument("--no-output", action="store_true") - argParser.add_argument("--profile", action="store_true") - argParser.add_argument("--hotshot", action="store_true") - argParser.add_argument("--port", type=int) - argParser.add_argument("--debug-socket", action='store_true') - argParser.add_argument("file", nargs='?') - - self.setupArgs(argParser) - - return argParser.parse_args(argv[1:]) - - - def setupArgs(self, argParser): - pass - - - def execute(self, argv): - args = self.parseArgs(argv) - - self.setUp(args) - - if args.interactive: - while True: - try: - input_str = input(">>> ") - except (EOFError, KeyboardInterrupt): - self.stdout.write("\nBye.\n") - break - - inStream = ANTLRStringStream(input_str) - self.parseStream(args, inStream) - - else: - if args.input: - inStream = ANTLRStringStream(args.input) - - elif args.file and args.file != '-': - inStream = ANTLRFileStream(args.file) - - else: - inStream = ANTLRInputStream(self.stdin) - - if args.profile: - try: - import cProfile as profile - except ImportError: - import profile - - profile.runctx( - 'self.parseStream(args, inStream)', - globals(), - locals(), - 'profile.dat' - ) - - import pstats - stats = pstats.Stats('profile.dat') - stats.strip_dirs() - stats.sort_stats('time') - stats.print_stats(100) - - elif args.hotshot: - import hotshot - - profiler = hotshot.Profile('hotshot.dat') - profiler.runctx( - 'self.parseStream(args, inStream)', - globals(), - locals() - ) - - else: - self.parseStream(args, inStream) - - - def setUp(self, args): - pass - - - def parseStream(self, args, inStream): - raise NotImplementedError - - - def write(self, args, text): - if not args.no_output: - self.stdout.write(text) - - - def writeln(self, args, text): - self.write(args, text + '\n') - - -class LexerMain(_Main): - def __init__(self, lexerClass): - super().__init__() - - self.lexerClass = lexerClass - - - def parseStream(self, args, inStream): - lexer = self.lexerClass(inStream) - for token in lexer: - self.writeln(args, str(token)) - - -class ParserMain(_Main): - def __init__(self, lexerClassName, parserClass): - super().__init__() - - self.lexerClassName = lexerClassName - self.lexerClass = None - self.parserClass = parserClass - - - def setupArgs(self, argParser): - argParser.add_argument("--lexer", dest="lexerClass", - default=self.lexerClassName) - argParser.add_argument("--rule", dest="parserRule") - - - def setUp(self, args): - lexerMod = __import__(args.lexerClass) - self.lexerClass = getattr(lexerMod, args.lexerClass) - - - def parseStream(self, args, inStream): - kwargs = {} - if args.port is not None: - kwargs['port'] = args.port - if args.debug_socket: - kwargs['debug_socket'] = sys.stderr - - lexer = self.lexerClass(inStream) - tokenStream = CommonTokenStream(lexer) - parser = self.parserClass(tokenStream, **kwargs) - result = getattr(parser, args.parserRule)() - if result: - if hasattr(result, 'tree') and result.tree: - self.writeln(args, result.tree.toStringTree()) - else: - self.writeln(args, repr(result)) - - -class 
WalkerMain(_Main): - def __init__(self, walkerClass): - super().__init__() - - self.lexerClass = None - self.parserClass = None - self.walkerClass = walkerClass - - - def setupArgs(self, argParser): - argParser.add_argument("--lexer", dest="lexerClass") - argParser.add_argument("--parser", dest="parserClass") - argParser.add_argument("--parser-rule", dest="parserRule") - argParser.add_argument("--rule", dest="walkerRule") - - - def setUp(self, args): - lexerMod = __import__(args.lexerClass) - self.lexerClass = getattr(lexerMod, args.lexerClass) - parserMod = __import__(args.parserClass) - self.parserClass = getattr(parserMod, args.parserClass) - - - def parseStream(self, args, inStream): - lexer = self.lexerClass(inStream) - tokenStream = CommonTokenStream(lexer) - parser = self.parserClass(tokenStream) - result = getattr(parser, args.parserRule)() - if result: - assert hasattr(result, 'tree'), "Parser did not return an AST" - nodeStream = CommonTreeNodeStream(result.tree) - nodeStream.setTokenStream(tokenStream) - walker = self.walkerClass(nodeStream) - result = getattr(walker, args.walkerRule)() - if result: - if hasattr(result, 'tree'): - self.writeln(args, result.tree.toStringTree()) - else: - self.writeln(args, repr(result)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/recognizers.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/recognizers.py deleted file mode 100644 index 40deddac..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/recognizers.py +++ /dev/null @@ -1,1467 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2012 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#
-# end[licence]
-
-import sys
-import inspect
-
-from .constants import compatible_api_versions, DEFAULT_CHANNEL, \
-    HIDDEN_CHANNEL, EOF, EOR_TOKEN_TYPE, INVALID_TOKEN_TYPE
-from .exceptions import RecognitionException, MismatchedTokenException, \
-    MismatchedRangeException, MismatchedTreeNodeException, \
-    NoViableAltException, EarlyExitException, MismatchedSetException, \
-    MismatchedNotSetException, FailedPredicateException, \
-    BacktrackingFailed, UnwantedTokenException, MissingTokenException
-from .tokens import CommonToken, SKIP_TOKEN
-
-
-class RecognizerSharedState(object):
-    """
-    The set of fields needed by an abstract recognizer to recognize input
-    and recover from errors etc... As a separate state object, it can be
-    shared among multiple grammars; e.g., when one grammar imports another.
-
-    These fields are publicly visible but the actual state pointer per
-    parser is protected.
-    """
-
-    def __init__(self):
-        # Track the set of token types that can follow any rule invocation.
-        # Stack grows upwards.
-        self.following = []
-
-        # This is true when we see an error and before having successfully
-        # matched a token. Prevents generation of more than one error message
-        # per error.
-        self.errorRecovery = False
-
-        # The index into the input stream where the last error occurred.
-        # This is used to prevent infinite loops where an error is found
-        # but no token is consumed during recovery...another error is found,
-        # ad nauseam. This is a failsafe mechanism to guarantee that at least
-        # one token/tree node is consumed for two errors.
-        self.lastErrorIndex = -1
-
-        # If 0, no backtracking is going on. Safe to exec actions etc...
-        # If >0 then it's the level of backtracking.
-        self.backtracking = 0
-
-        # An array[size num rules] of (int -> int) dicts that tracks
-        # the stop token index for each rule. ruleMemo[ruleIndex] is
-        # the memoization table for ruleIndex. For key ruleStartIndex, you
-        # get back the stop token for the associated rule or MEMO_RULE_FAILED.
-        #
-        # This is only used if rule memoization is on (which it is by default).
-        self.ruleMemo = None
-
-        ## Did the recognizer encounter a syntax error? Track how many.
-        self.syntaxErrors = 0
-
-
-        # LEXER FIELDS (must be in same state object to avoid casting
-        # constantly in generated code and Lexer object) :(
-
-
-        ## The goal of all lexer rules/methods is to create a token object.
-        # This is an instance variable as multiple rules may collaborate to
-        # create a single token. nextToken will return this object after
-        # matching lexer rule(s). If you subclass to allow multiple token
-        # emissions, then set this to the last token to be matched or
-        # something non-null so that the auto token emit mechanism will not
-        # emit another token.
-        self.token = None
-
-        ## What character index in the stream did the current token start at?
-        # Needed, for example, to get the text for the current token. Set at
-        # the start of nextToken.
-        self.tokenStartCharIndex = -1
-
-        ## The line on which the first character of the token resides
-        self.tokenStartLine = None
-
-        ## The character position of first character within the line
-        self.tokenStartCharPositionInLine = None
-
-        ## The channel number for the current token
-        self.channel = None
-
-        ## The token type for the current token
-        self.type = None
-
-        ## You can set the text for the current token to override what is in
-        # the input char buffer. Use setText() or set this instance var.
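-        # (an illustrative use, not part of the original comment: a
-        #  string-literal rule action might call
-        #  self.setText(self.getText()[1:-1]) to strip the quotes)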
- self.text = None - - -class BaseRecognizer(object): - """ - @brief Common recognizer functionality. - - A generic recognizer that can handle recognizers generated from - lexer, parser, and tree grammars. This is all the parsing - support code essentially; most of it is error recovery stuff and - backtracking. - """ - - MEMO_RULE_FAILED = -2 - MEMO_RULE_UNKNOWN = -1 - - # copies from Token object for convenience in actions - DEFAULT_TOKEN_CHANNEL = DEFAULT_CHANNEL - - # for convenience in actions - HIDDEN = HIDDEN_CHANNEL - - # overridden by generated subclasses - grammarFileName = None - tokenNames = None - - # The api_version attribute has been introduced in 3.3. If it is not - # overwritten in the generated recognizer, we assume a default of v0. - api_version = 0 - - def __init__(self, state=None): - # Input stream of the recognizer. Must be initialized by a subclass. - self.input = None - - ## State of a lexer, parser, or tree parser are collected into a state - # object so the state can be shared. This sharing is needed to - # have one grammar import others and share same error variables - # and other state variables. It's a kind of explicit multiple - # inheritance via delegation of methods and shared state. - if state is None: - state = RecognizerSharedState() - self._state = state - - if self.api_version not in compatible_api_versions: - raise RuntimeError( - "ANTLR version mismatch: " - "The recognizer has been generated with API V{}, " - "but this runtime does not support this." - .format(self.api_version)) - - # this one only exists to shut up pylint :( - def setInput(self, input): - self.input = input - - - def reset(self): - """ - reset the parser's state; subclasses must rewind the input stream - """ - - # wack everything related to error recovery - if self._state is None: - # no shared state work to do - return - - self._state.following = [] - self._state.errorRecovery = False - self._state.lastErrorIndex = -1 - self._state.syntaxErrors = 0 - # wack everything related to backtracking and memoization - self._state.backtracking = 0 - if self._state.ruleMemo is not None: - self._state.ruleMemo = {} - - - def match(self, input, ttype, follow): - """ - Match current input symbol against ttype. Attempt - single token insertion or deletion error recovery. If - that fails, throw MismatchedTokenException. - - To turn off single token insertion or deletion error - recovery, override recoverFromMismatchedToken() and have it - throw an exception. See TreeParser.recoverFromMismatchedToken(). - This way any error in a rule will cause an exception and - immediate exit from rule. Rule would recover by resynchronizing - to the set of symbols that can follow rule ref. - """ - - matchedSymbol = self.getCurrentInputSymbol(input) - if self.input.LA(1) == ttype: - self.input.consume() - self._state.errorRecovery = False - return matchedSymbol - - if self._state.backtracking > 0: - # FIXME: need to return matchedSymbol here as well. damn!! 
-            raise BacktrackingFailed
-
-        matchedSymbol = self.recoverFromMismatchedToken(input, ttype, follow)
-        return matchedSymbol
-
-
-    def matchAny(self):
-        """Match the wildcard: in a symbol"""
-
-        self._state.errorRecovery = False
-        self.input.consume()
-
-
-    def mismatchIsUnwantedToken(self, input, ttype):
-        return input.LA(2) == ttype
-
-
-    def mismatchIsMissingToken(self, input, follow):
-        if follow is None:
-            # we have no information about the follow; we can only consume
-            # a single token and hope for the best
-            return False
-
-        # compute what can follow this grammar element reference
-        if EOR_TOKEN_TYPE in follow:
-            viableTokensFollowingThisRule = self.computeContextSensitiveRuleFOLLOW()
-            follow |= viableTokensFollowingThisRule
-
-            if len(self._state.following) > 0:
-                # remove EOR if we're not the start symbol
-                follow -= {EOR_TOKEN_TYPE}
-
-        # if current token is consistent with what could come after set
-        # then we know we're missing a token; error recovery is free to
-        # "insert" the missing token
-        if input.LA(1) in follow or EOR_TOKEN_TYPE in follow:
-            return True
-
-        return False
-
-
-    def reportError(self, e):
-        """Report a recognition problem.
-
-        This method sets errorRecovery to indicate the parser is recovering
-        not parsing. Once in recovery mode, no errors are generated.
-        To get out of recovery mode, the parser must successfully match
-        a token (after a resync). So it will go:
-
-        1. error occurs
-        2. enter recovery mode, report error
-        3. consume until token found in resync set
-        4. try to resume parsing
-        5. next match() will reset errorRecovery mode
-
-        If you override, make sure to update syntaxErrors if you care about
-        that.
-
-        """
-        # if we've already reported an error and have not matched a token
-        # yet successfully, don't report any errors.
-        if self._state.errorRecovery:
-            return
-
-        self._state.syntaxErrors += 1  # don't count spurious
-        self._state.errorRecovery = True
-
-        # ekcs: first param added for compat w antlr Python2 runtime interface
-        self.displayRecognitionError(self.tokenNames, e)
-
-    # ekcs: restored to implementation from antlr Python2 runtime for compat
-    def displayRecognitionError(self, token_names, e):
-        hdr = self.getErrorHeader(e)
-        msg = self.getErrorMessage(e, token_names)
-        self.error_list.append(str(hdr) + " " + str(msg))
-
-    # ekcs: restored to implementation from antlr Python2 runtime for compat
-    def getErrorMessage(self, e, tokenNames):
-        """
-        What error message should be generated for the various
-        exception types?
-
-        Not very object-oriented code, but I like having all error message
-        generation within one method rather than spread among all of the
-        exception classes. This also makes it much easier for the exception
-        handling because the exception classes do not have to have pointers
-        back to this object to access utility routines and so on. Also,
-        changing the message for an exception type would be difficult because
-        you would have to subclass the exception, but then somehow get ANTLR
-        to make those kinds of exception objects instead of the default.
-        This looks weird, but trust me--it makes the most sense in terms
-        of flexibility.
-
-        For grammar debugging, you will want to override this to add
-        more information such as the stack frame with
-        getRuleInvocationStack(e, this.getClass().getName()) and,
-        for no viable alts, the decision description and state etc...
-
-        Override this to change the message generated for one or more
-        exception types.
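-
-        For example, a subclass might append the rule invocation stack
-        (an illustrative sketch using only methods defined in this class):
-
-            def getErrorMessage(self, e, tokenNames):
-                msg = BaseRecognizer.getErrorMessage(self, e, tokenNames)
-                return msg + " in " + repr(self.getRuleInvocationStack())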
- """ - - if isinstance(e, UnwantedTokenException): - tokenName = "" - if e.expecting == EOF: - tokenName = "EOF" - - else: - tokenName = self.tokenNames[e.expecting] - - msg = "extraneous input %s expecting %s" % ( - self.getTokenErrorDisplay(e.getUnexpectedToken()), - tokenName - ) - - elif isinstance(e, MissingTokenException): - tokenName = "" - if e.expecting == EOF: - tokenName = "EOF" - - else: - tokenName = self.tokenNames[e.expecting] - - msg = "missing %s at %s" % ( - tokenName, self.getTokenErrorDisplay(e.token) - ) - - elif isinstance(e, MismatchedTokenException): - tokenName = "" - if e.expecting == EOF: - tokenName = "EOF" - else: - tokenName = self.tokenNames[e.expecting] - - msg = "mismatched input " \ - + self.getTokenErrorDisplay(e.token) \ - + " expecting " \ - + tokenName - - elif isinstance(e, MismatchedTreeNodeException): - tokenName = "" - if e.expecting == EOF: - tokenName = "EOF" - else: - tokenName = self.tokenNames[e.expecting] - - msg = "mismatched tree node: %s expecting %s" \ - % (e.node, tokenName) - - elif isinstance(e, NoViableAltException): - msg = "no viable alternative at input " \ - + self.getTokenErrorDisplay(e.token) - - elif isinstance(e, EarlyExitException): - msg = "required (...)+ loop did not match anything at input " \ - + self.getTokenErrorDisplay(e.token) - - elif isinstance(e, MismatchedSetException): - msg = "mismatched input " \ - + self.getTokenErrorDisplay(e.token) \ - + " expecting set " \ - + repr(e.expecting) - - elif isinstance(e, MismatchedNotSetException): - msg = "mismatched input " \ - + self.getTokenErrorDisplay(e.token) \ - + " expecting set " \ - + repr(e.expecting) - - elif isinstance(e, FailedPredicateException): - msg = "rule " \ - + e.ruleName \ - + " failed predicate: {" \ - + e.predicateText \ - + "}?" - - else: - msg = str(e) - - return msg - - def getNumberOfSyntaxErrors(self): - """ - Get number of recognition errors (lexer, parser, tree parser). Each - recognizer tracks its own number. So parser and lexer each have - separate count. Does not count the spurious errors found between - an error and next valid token match. - - See also reportError(). - """ - return self._state.syntaxErrors - - - def getErrorHeader(self, e): - """ - What is the error header, normally line/character position information? - """ - - source_name = self.getSourceName() - if source_name is not None: - return "{} line {}:{}".format(source_name, e.line, e.charPositionInLine) - return "line {}:{}".format(e.line, e.charPositionInLine) - - - def getTokenErrorDisplay(self, t): - """ - How should a token be displayed in an error message? The default - is to display just the text, but during development you might - want to have a lot of information spit out. Override in that case - to use t.toString() (which, for CommonToken, dumps everything about - the token). This is better than forcing you to override a method in - your token objects because you don't have to go modify your lexer - so that it creates a new Java type. - """ - - s = t.text - if s is None: - if t.type == EOF: - s = "" - else: - s = "<{}>".format(t.typeName) - - return repr(s) - - - def emitErrorMessage(self, msg): - """Override this method to change where error messages go""" - sys.stderr.write(msg + '\n') - - - def recover(self, input, re): - """ - Recover from an error found on the input stream. This is - for NoViableAlt and mismatched symbol exceptions. 
-        single token insertion and deletion, this will usually not
-        handle mismatched symbol exceptions but there could be a mismatched
-        token that the match() routine could not recover from.
-        """
-
-        # PROBLEM? what if input stream is not the same as last time
-        # perhaps make lastErrorIndex a member of input
-        if self._state.lastErrorIndex == input.index():
-            # uh oh, another error at same token index; must be a case
-            # where LT(1) is in the recovery token set so nothing is
-            # consumed; consume a single token so at least to prevent
-            # an infinite loop; this is a failsafe.
-            input.consume()
-
-        self._state.lastErrorIndex = input.index()
-        followSet = self.computeErrorRecoverySet()
-
-        self.beginResync()
-        self.consumeUntil(input, followSet)
-        self.endResync()
-
-
-    def beginResync(self):
-        """
-        A hook to listen in on the token consumption during error recovery.
-        The DebugParser subclasses this to fire events to the listener.
-        """
-
-        pass
-
-
-    def endResync(self):
-        """
-        A hook to listen in on the token consumption during error recovery.
-        The DebugParser subclasses this to fire events to the listener.
-        """
-
-        pass
-
-
-    def computeErrorRecoverySet(self):
-        """
-        Compute the error recovery set for the current rule. During
-        rule invocation, the parser pushes the set of tokens that can
-        follow that rule reference on the stack; this amounts to
-        computing FIRST of what follows the rule reference in the
-        enclosing rule. This local follow set only includes tokens
-        from within the rule; i.e., the FIRST computation done by
-        ANTLR stops at the end of a rule.
-
-        EXAMPLE
-
-        When you find a "no viable alt exception", the input is not
-        consistent with any of the alternatives for rule r. The best
-        thing to do is to consume tokens until you see something that
-        can legally follow a call to r *or* any rule that called r.
-        You don't want the exact set of viable next tokens because the
-        input might just be missing a token--you might consume the
-        rest of the input looking for one of the missing tokens.
-
-        Consider grammar:
-
-            a : '[' b ']'
-              | '(' b ')'
-              ;
-            b : c '^' INT ;
-            c : ID
-              | INT
-              ;
-
-        At each rule invocation, the set of tokens that could follow
-        that rule is pushed on a stack. Here are the various "local"
-        follow sets:
-
-            FOLLOW(b1_in_a) = FIRST(']') = ']'
-            FOLLOW(b2_in_a) = FIRST(')') = ')'
-            FOLLOW(c_in_b)  = FIRST('^') = '^'
-
-        Upon erroneous input "[]", the call chain is
-
-            a -> b -> c
-
-        and, hence, the follow context stack is:
-
-            depth  local follow set     after call to rule
-              0    <EOF>                a (from main())
-              1    ']'                  b
-              2    '^'                  c
-
-        Notice that ')' is not included, because b would have to have
-        been called from a different context in rule a for ')' to be
-        included.
-
-        For error recovery, we cannot consider FOLLOW(c)
-        (context-sensitive or otherwise). We need the combined set of
-        all context-sensitive FOLLOW sets--the set of all tokens that
-        could follow any reference in the call chain. We need to
-        resync to one of those tokens. Note that FOLLOW(c)='^' and if
-        we resync'd to that token, we'd consume until EOF. We need to
-        sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
-        In this case, for input "[]", LA(1) is in this set so we would
-        not consume anything and after printing an error, rule c would
-        return normally. It would not find the required '^' though.
-        At this point, it gets a mismatched token error and throws an
-        exception (since LA(1) is not in the viable following token
-        set).
The rule exception handler tries to recover, but finds - the same recovery set and doesn't consume anything. Rule b - exits normally returning to rule a. Now it finds the ']' (and - with the successful match exits errorRecovery mode). - - So, you can see that the parser walks up call chain looking - for the token that was a member of the recovery set. - - Errors are not generated in errorRecovery mode. - - ANTLR's error recovery mechanism is based upon original ideas: - - "Algorithms + Data Structures = Programs" by Niklaus Wirth - - and - - "A note on error recovery in recursive descent parsers": - http://portal.acm.org/citation.cfm?id=947902.947905 - - Later, Josef Grosch had some good ideas: - - "Efficient and Comfortable Error Recovery in Recursive Descent - Parsers": - ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip - - Like Grosch I implemented local FOLLOW sets that are combined - at run-time upon error to avoid overhead during parsing. - """ - - return self.combineFollows(False) - - - def computeContextSensitiveRuleFOLLOW(self): - """ - Compute the context-sensitive FOLLOW set for current rule. - This is set of token types that can follow a specific rule - reference given a specific call chain. You get the set of - viable tokens that can possibly come next (lookahead depth 1) - given the current call chain. Contrast this with the - definition of plain FOLLOW for rule r: - - FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)} - - where x in T* and alpha, beta in V*; T is set of terminals and - V is the set of terminals and nonterminals. In other words, - FOLLOW(r) is the set of all tokens that can possibly follow - references to r in *any* sentential form (context). At - runtime, however, we know precisely which context applies as - we have the call chain. We may compute the exact (rather - than covering superset) set of following tokens. - - For example, consider grammar: - - stat : ID '=' expr ';' // FOLLOW(stat)=={EOF} - | "return" expr '.' - ; - expr : atom ('+' atom)* ; // FOLLOW(expr)=={';','.',')'} - atom : INT // FOLLOW(atom)=={'+',')',';','.'} - | '(' expr ')' - ; - - The FOLLOW sets are all inclusive whereas context-sensitive - FOLLOW sets are precisely what could follow a rule reference. - For input "i=(3);", here is the derivation: - - stat => ID '=' expr ';' - => ID '=' atom ('+' atom)* ';' - => ID '=' '(' expr ')' ('+' atom)* ';' - => ID '=' '(' atom ')' ('+' atom)* ';' - => ID '=' '(' INT ')' ('+' atom)* ';' - => ID '=' '(' INT ')' ';' - - At the "3" token, you'd have a call chain of - - stat -> expr -> atom -> expr -> atom - - What can follow that specific nested ref to atom? Exactly ')' - as you can see by looking at the derivation of this specific - input. Contrast this with the FOLLOW(atom)={'+',')',';','.'}. - - You want the exact viable token set when recovering from a - token mismatch. Upon token mismatch, if LA(1) is member of - the viable next token set, then you know there is most likely - a missing token in the input stream. "Insert" one by just not - throwing an exception. - """ - - return self.combineFollows(True) - - - def combineFollows(self, exact): - followSet = set() - for idx, localFollowSet in reversed(list(enumerate(self._state.following))): - followSet |= localFollowSet - if exact: - # can we see end of rule? 
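-                # (an illustrative trace, not part of the original code:
-                #  with following == [{']'}, {'^', EOR_TOKEN_TYPE}], exact
-                #  mode strips the inner EOR marker and keeps combining,
-                #  yielding {'^'} and then {'^', ']'}; the frames here are
-                #  made up)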
-                if EOR_TOKEN_TYPE in localFollowSet:
-                    # Only leave EOR in set if at top (start rule); this lets
-                    # us know if we have to include follow(start rule),
-                    # i.e., EOF
-                    if idx > 0:
-                        followSet.remove(EOR_TOKEN_TYPE)
-
-                else:
-                    # can't see end of rule, quit
-                    break
-
-        return followSet
-
-
-    def recoverFromMismatchedToken(self, input, ttype, follow):
-        """Attempt to recover from a single missing or extra token.
-
-        EXTRA TOKEN
-
-        LA(1) is not what we are looking for. If LA(2) has the right token,
-        however, then assume LA(1) is some extra spurious token. Delete it
-        and LA(2) as if we were doing a normal match(), which advances the
-        input.
-
-        MISSING TOKEN
-
-        If the current token is consistent with what could come after
-        ttype, then it is ok to 'insert' the missing token, else throw
-        an exception. For example, input 'i=(3;' is clearly missing the
-        ')'. When the parser returns from the nested call to expr, it
-        will have call chain:
-
-            stat -> expr -> atom
-
-        and it will be trying to match the ')' at this point in the
-        derivation:
-
-            => ID '=' '(' INT ')' ('+' atom)* ';'
-                               ^
-
-        match() will see that ';' doesn't match ')' and report a
-        mismatched token error. To recover, it sees that LA(1)==';'
-        is in the set of tokens that can follow the ')' token
-        reference in rule atom. It can assume that you forgot the ')'.
-        """
-
-        e = None
-
-        # if next token is what we are looking for then "delete" this token
-        if self.mismatchIsUnwantedToken(input, ttype):
-            e = UnwantedTokenException(ttype, input)
-
-            self.beginResync()
-            input.consume()  # simply delete extra token
-            self.endResync()
-
-            # report after consuming so AW sees the token in the exception
-            self.reportError(e)
-
-            # we want to return the token we're actually matching
-            matchedSymbol = self.getCurrentInputSymbol(input)
-
-            # move past ttype token as if all were ok
-            input.consume()
-            return matchedSymbol
-
-        # can't recover with single token deletion, try insertion
-        if self.mismatchIsMissingToken(input, follow):
-            inserted = self.getMissingSymbol(input, e, ttype, follow)
-            e = MissingTokenException(ttype, input, inserted)
-
-            # report after inserting so AW sees the token in the exception
-            self.reportError(e)
-            return inserted
-
-        # even that didn't work; must throw the exception
-        e = MismatchedTokenException(ttype, input)
-        raise e
-
-
-    def recoverFromMismatchedSet(self, input, e, follow):
-        """Not currently used"""
-
-        if self.mismatchIsMissingToken(input, follow):
-            self.reportError(e)
-            # we don't know how to conjure up a token for sets yet
-            return self.getMissingSymbol(input, e, INVALID_TOKEN_TYPE, follow)
-
-        # TODO do single token deletion like above for Token mismatch
-        raise e
-
-
-    def getCurrentInputSymbol(self, input):
-        """
-        Match needs to return the current input symbol, which gets put
-        into the label for the associated token ref; e.g., x=ID. Token
-        and tree parsers need to return different objects. Rather than test
-        for input stream type or change the IntStream interface, I use
-        a simple method to ask the recognizer to tell me what the current
-        input symbol is.
-
-        This is ignored for lexers.
-        """
-
-        return None
-
-
-    def getMissingSymbol(self, input, e, expectedTokenType, follow):
-        """Conjure up a missing token during error recovery.
-
-        The recognizer attempts to recover from single missing
-        symbols. But, actions might refer to that missing symbol.
-        For example, x=ID {f($x);}. The action clearly assumes
-        that there has been an identifier matched previously and that
-        $x points at that token. If that token is missing, but
-        the next token in the stream is what we want, we assume that
-        this token is missing and we keep going. Because we
-        have to return some token to replace the missing token,
-        we have to conjure one up. This method gives the user control
-        over the tokens returned for missing tokens. Mostly,
-        you will want to create something special for identifier
-        tokens. For literals such as '{' and ',', the default
-        action in the parser or tree parser works. It simply creates
-        a CommonToken of the appropriate type. The text will be the token.
-        If you change what tokens must be created by the lexer,
-        override this method to create the appropriate tokens.
-        """
-
-        return None
-
-
-    def consumeUntil(self, input, tokenTypes):
-        """
-        Consume tokens until one matches the given token or token set.
-
-        tokenTypes can be a single token type or a set of token types.
-
-        """
-
-        if not isinstance(tokenTypes, (set, frozenset)):
-            tokenTypes = frozenset([tokenTypes])
-
-        ttype = input.LA(1)
-        while ttype != EOF and ttype not in tokenTypes:
-            input.consume()
-            ttype = input.LA(1)
-
-
-    def getRuleInvocationStack(self):
-        """
-        Return a List of the rules in your parser instance
-        leading up to a call to this method. You could override if
-        you want more details such as the file/line info of where
-        in the generated parser code a rule is invoked.
-
-        This is very useful for error messages and for context-sensitive
-        error recovery.
-
-        You must be careful if you subclass a generated recognizer.
-        The default implementation will only search the module of self
-        for rules, but the subclass will not contain any rules.
-        You probably want to override this method to look like
-
-            def getRuleInvocationStack(self):
-                return self._getRuleInvocationStack(<class>.__module__)
-
-        where <class> is the class of the generated recognizer, e.g.
-        the superclass of self.
-        """
-
-        return self._getRuleInvocationStack(self.__module__)
-
-
-    @classmethod
-    def _getRuleInvocationStack(cls, module):
-        """
-        A more general version of getRuleInvocationStack where you can
-        pass in, for example, a RecognitionException to get its rule
-        stack trace. This routine is shared with all recognizers, hence,
-        static.
-
-        TODO: move to a utility class or something; weird having lexer call
-        this
-        """
-
-        # mmmhhh,... perhaps look at the first argument
-        # (f_locals[co_varnames[0]]?) and test if it's a (sub)class of
-        # requested recognizer...
-
-        rules = []
-        for frame in reversed(inspect.stack()):
-            code = frame[0].f_code
-            codeMod = inspect.getmodule(code)
-            if codeMod is None:
-                continue
-
-            # skip frames not in requested module
-            if codeMod.__name__ != module:
-                continue
-
-            # skip some unwanted names
-            if code.co_name in ('nextToken', '<module>'):
-                continue
-
-            rules.append(code.co_name)
-
-        return rules
-
-
-    def getBacktrackingLevel(self):
-        return self._state.backtracking
-
-    def setBacktrackingLevel(self, n):
-        self._state.backtracking = n
-
-
-    def getGrammarFileName(self):
-        """For debugging and other purposes, might want the grammar name.
-
-        Have ANTLR generate an implementation for this method.
-        """
-
-        return self.grammarFileName
-
-
-    def getSourceName(self):
-        raise NotImplementedError
-
-
-    def toStrings(self, tokens):
-        """A convenience method for use most often with template rewrites.
-
-        Convert a Token list to a str list.
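-
-        For example, toStrings(tokens) returns ['x', '=', '1'] for three
-        tokens whose .text attributes are 'x', '=' and '1' (hypothetical
-        token objects).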
- """ - - if tokens is None: - return None - - return [token.text for token in tokens] - - - def getRuleMemoization(self, ruleIndex, ruleStartIndex): - """ - Given a rule number and a start token index number, return - MEMO_RULE_UNKNOWN if the rule has not parsed input starting from - start index. If this rule has parsed input starting from the - start index before, then return where the rule stopped parsing. - It returns the index of the last token matched by the rule. - """ - - if ruleIndex not in self._state.ruleMemo: - self._state.ruleMemo[ruleIndex] = {} - - return self._state.ruleMemo[ruleIndex].get( - ruleStartIndex, self.MEMO_RULE_UNKNOWN - ) - - - def alreadyParsedRule(self, input, ruleIndex): - """ - Has this rule already parsed input at the current index in the - input stream? Return the stop token index or MEMO_RULE_UNKNOWN. - If we attempted but failed to parse properly before, return - MEMO_RULE_FAILED. - - This method has a side-effect: if we have seen this input for - this rule and successfully parsed before, then seek ahead to - 1 past the stop token matched for this rule last time. - """ - - stopIndex = self.getRuleMemoization(ruleIndex, input.index()) - if stopIndex == self.MEMO_RULE_UNKNOWN: - return False - - if stopIndex == self.MEMO_RULE_FAILED: - raise BacktrackingFailed - - else: - input.seek(stopIndex + 1) - - return True - - - def memoize(self, input, ruleIndex, ruleStartIndex, success): - """ - Record whether or not this rule parsed the input at this position - successfully. - """ - - if success: - stopTokenIndex = input.index() - 1 - else: - stopTokenIndex = self.MEMO_RULE_FAILED - - if ruleIndex in self._state.ruleMemo: - self._state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex - - - def traceIn(self, ruleName, ruleIndex, inputSymbol): - sys.stdout.write("enter {} {}".format(ruleName, inputSymbol)) - - if self._state.backtracking > 0: - sys.stdout.write(" backtracking={}".format(self._state.backtracking)) - - sys.stdout.write('\n') - - - def traceOut(self, ruleName, ruleIndex, inputSymbol): - sys.stdout.write("exit {} {}".format(ruleName, inputSymbol)) - - if self._state.backtracking > 0: - sys.stdout.write(" backtracking={}".format(self._state.backtracking)) - - # mmmm... we use BacktrackingFailed exceptions now. So how could we - # get that information here? - #if self._state.failed: - # sys.stdout.write(" failed") - #else: - # sys.stdout.write(" succeeded") - - sys.stdout.write('\n') - - -class TokenSource(object): - """ - @brief Abstract baseclass for token producers. - - A source of tokens must provide a sequence of tokens via nextToken() - and also must reveal it's source of characters; CommonToken's text is - computed from a CharStream; it only store indices into the char stream. - - Errors from the lexer are never passed to the parser. Either you want - to keep going or you do not upon token recognition error. If you do not - want to continue lexing then you do not want to continue parsing. Just - throw an exception not under RecognitionException and Java will naturally - toss you all the way out of the recognizers. If you want to continue - lexing then you should not throw an exception to the parser--it has already - requested a token. Keep lexing until you get a valid one. Just report - errors and keep going, looking for a valid token. - """ - - def nextToken(self): - """Return a Token object from your input stream (usually a CharStream). 
-
-        Do not fail/return upon lexing error; keep chewing on the characters
-        until you get a good one; errors are not passed through to the parser.
-        """
-
-        raise NotImplementedError
-
-
-    def __iter__(self):
-        """The TokenSource is an iterator.
-
-        The iteration will not include the final EOF token, see also the note
-        for the __next__() method.
-
-        """
-
-        return self
-
-
-    def __next__(self):
-        """Return next token or raise StopIteration.
-
-        Note that this will raise StopIteration when hitting the EOF token,
-        so EOF will not be part of the iteration.
-
-        """
-
-        token = self.nextToken()
-        if token is None or token.type == EOF:
-            raise StopIteration
-        return token
-
-
-class Lexer(BaseRecognizer, TokenSource):
-    """
-    @brief Baseclass for generated lexer classes.
-
-    A lexer is a recognizer that draws input symbols from a character
-    stream. Lexer grammars result in a subclass of this object. A Lexer
-    object uses simplified match() and error recovery mechanisms in the
-    interest of speed.
-    """
-
-    def __init__(self, input, state=None):
-        BaseRecognizer.__init__(self, state)
-        TokenSource.__init__(self)
-
-        # Where is the lexer drawing characters from?
-        self.input = input
-
-
-    def reset(self):
-        super().reset()  # reset all recognizer state variables
-
-        if self.input is not None:
-            # rewind the input
-            self.input.seek(0)
-
-        if self._state is None:
-            # no shared state work to do
-            return
-
-        # wack Lexer state variables
-        self._state.token = None
-        self._state.type = INVALID_TOKEN_TYPE
-        self._state.channel = DEFAULT_CHANNEL
-        self._state.tokenStartCharIndex = -1
-        self._state.tokenStartLine = -1
-        self._state.tokenStartCharPositionInLine = -1
-        self._state.text = None
-
-
-    def makeEOFToken(self):
-        eof = CommonToken(
-            type=EOF, channel=DEFAULT_CHANNEL,
-            input=self.input,
-            start=self.input.index(), stop=self.input.index())
-        eof.line = self.input.line
-        eof.charPositionInLine = self.input.charPositionInLine
-        return eof
-
-    def nextToken(self):
-        """
-        Return a token from this source; i.e., match a token on the char
-        stream.
-        """
-
-        while 1:
-            self._state.token = None
-            self._state.channel = DEFAULT_CHANNEL
-            self._state.tokenStartCharIndex = self.input.index()
-            self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
-            self._state.tokenStartLine = self.input.line
-            self._state.text = None
-            if self.input.LA(1) == EOF:
-                return self.makeEOFToken()
-
-            try:
-                self.mTokens()
-
-                if self._state.token is None:
-                    self.emit()
-
-                elif self._state.token == SKIP_TOKEN:
-                    continue
-
-                return self._state.token
-
-            except NoViableAltException as re:
-                self.reportError(re)
-                self.recover(re)  # throw out current char and try again
-
-            except RecognitionException as re:
-                self.reportError(re)
-                # match() routine has already called recover()
-
-
-    def skip(self):
-        """
-        Instruct the lexer to skip creating a token for the current lexer
-        rule and look for another token. nextToken() knows to keep looking
-        when a lexer rule finishes with token set to SKIP_TOKEN. Recall that
-        if token==None at the end of any token rule, it creates one for you
-        and emits it.
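-
-        A typical use is a whitespace rule action (an illustrative
-        grammar fragment, not part of this runtime):
-
-            WS : (' ' | '\t' | '\r' | '\n')+ { self.skip() } ;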
- """ - - self._state.token = SKIP_TOKEN - - - def mTokens(self): - """This is the lexer entry point that sets instance var 'token'""" - - # abstract method - raise NotImplementedError - - - def setCharStream(self, input): - """Set the char stream and reset the lexer""" - self.input = None - self.reset() - self.input = input - - - def getSourceName(self): - return self.input.getSourceName() - - - def emit(self, token=None): - """ - The standard method called to automatically emit a token at the - outermost lexical rule. The token object should point into the - char buffer start..stop. If there is a text override in 'text', - use that to set the token's text. Override this method to emit - custom Token objects. - - If you are building trees, then you should also override - Parser or TreeParser.getMissingSymbol(). - """ - - if token is None: - token = CommonToken( - input=self.input, - type=self._state.type, - channel=self._state.channel, - start=self._state.tokenStartCharIndex, - stop=self.getCharIndex()-1 - ) - token.line = self._state.tokenStartLine - token.text = self._state.text - token.charPositionInLine = self._state.tokenStartCharPositionInLine - - self._state.token = token - - return token - - - def match(self, s): - if isinstance(s, str): - for c in s: - if self.input.LA(1) != ord(c): - if self._state.backtracking > 0: - raise BacktrackingFailed - - mte = MismatchedTokenException(c, self.input) - self.recover(mte) - raise mte - - self.input.consume() - - else: - if self.input.LA(1) != s: - if self._state.backtracking > 0: - raise BacktrackingFailed - - mte = MismatchedTokenException(chr(s), self.input) - self.recover(mte) # don't really recover; just consume in lexer - raise mte - - self.input.consume() - - - def matchAny(self): - self.input.consume() - - - def matchRange(self, a, b): - if self.input.LA(1) < a or self.input.LA(1) > b: - if self._state.backtracking > 0: - raise BacktrackingFailed - - mre = MismatchedRangeException(chr(a), chr(b), self.input) - self.recover(mre) - raise mre - - self.input.consume() - - - def getLine(self): - return self.input.line - - - def getCharPositionInLine(self): - return self.input.charPositionInLine - - - def getCharIndex(self): - """What is the index of the current character of lookahead?""" - - return self.input.index() - - - def getText(self): - """ - Return the text matched so far for the current token or any - text override. - """ - if self._state.text is not None: - return self._state.text - - return self.input.substring( - self._state.tokenStartCharIndex, - self.getCharIndex()-1 - ) - - - def setText(self, text): - """ - Set the complete text of this token; it wipes any previous - changes to the text. - """ - self._state.text = text - - - text = property(getText, setText) - - - def reportError(self, e): - ## TODO: not thought about recovery in lexer yet. - - ## # if we've already reported an error and have not matched a token - ## # yet successfully, don't report any errors. 
-        ## if self.errorRecovery:
-        ##     return
-        ##
-        ## self.errorRecovery = True
-
-        # ekcs: first param added for compat w antlr Python2 runtime interface
-        self.displayRecognitionError(self.tokenNames, e)
-
-    # ekcs: restored to implementation from antlr Python2 runtime for compat
-    def getErrorMessage(self, e, tokenNames):
-        msg = None
-
-        if isinstance(e, MismatchedTokenException):
-            msg = "mismatched character " \
-                  + self.getCharErrorDisplay(e.c) \
-                  + " expecting " \
-                  + self.getCharErrorDisplay(e.expecting)
-
-        elif isinstance(e, NoViableAltException):
-            msg = "no viable alternative at character " \
-                  + self.getCharErrorDisplay(e.c)
-
-        elif isinstance(e, EarlyExitException):
-            msg = "required (...)+ loop did not match anything at character " \
-                  + self.getCharErrorDisplay(e.c)
-
-        elif isinstance(e, MismatchedNotSetException):
-            msg = "mismatched character " \
-                  + self.getCharErrorDisplay(e.c) \
-                  + " expecting set " \
-                  + repr(e.expecting)
-
-        elif isinstance(e, MismatchedSetException):
-            msg = "mismatched character " \
-                  + self.getCharErrorDisplay(e.c) \
-                  + " expecting set " \
-                  + repr(e.expecting)
-
-        elif isinstance(e, MismatchedRangeException):
-            msg = "mismatched character " \
-                  + self.getCharErrorDisplay(e.c) \
-                  + " expecting set " \
-                  + self.getCharErrorDisplay(e.a) \
-                  + ".." \
-                  + self.getCharErrorDisplay(e.b)
-
-        else:
-            msg = BaseRecognizer.getErrorMessage(self, e, tokenNames)
-
-        return msg
-
-
-    def getCharErrorDisplay(self, c):
-        if c == EOF:
-            c = '<EOF>'
-        return repr(c)
-
-
-    def recover(self, re):
-        """
-        Lexers can normally match any char in its vocabulary after matching
-        a token, so do the easy thing and just kill a character and hope
-        it all works out. You can instead use the rule invocation stack
-        to do sophisticated error recovery if you are in a fragment rule.
-        """
-
-        self.input.consume()
-
-
-    def traceIn(self, ruleName, ruleIndex):
-        inputSymbol = "{} line={}:{}".format(self.input.LT(1),
-                                             self.getLine(),
-                                             self.getCharPositionInLine()
-                                             )
-
-        super().traceIn(ruleName, ruleIndex, inputSymbol)
-
-
-    def traceOut(self, ruleName, ruleIndex):
-        inputSymbol = "{} line={}:{}".format(self.input.LT(1),
-                                             self.getLine(),
-                                             self.getCharPositionInLine()
-                                             )
-
-        super().traceOut(ruleName, ruleIndex, inputSymbol)
-
-
-
-class Parser(BaseRecognizer):
-    """
-    @brief Baseclass for generated parser classes.
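-
-    Typical wiring, for illustration (MyLexer and MyParser stand for any
-    generated lexer/parser pair, and startRule for whatever rule the
-    grammar defines; the names are placeholders):
-
-        char_stream = ANTLRStringStream('some input text')
-        tokens = CommonTokenStream(MyLexer(char_stream))
-        parser = MyParser(tokens)
-        parser.startRule()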
- """ - - def __init__(self, lexer, state=None): - super().__init__(state) - - self.input = lexer - - - def reset(self): - super().reset() # reset all recognizer state variables - if self.input is not None: - self.input.seek(0) # rewind the input - - - def getCurrentInputSymbol(self, input): - return input.LT(1) - - - def getMissingSymbol(self, input, e, expectedTokenType, follow): - if expectedTokenType == EOF: - tokenText = "" - else: - tokenText = "".format(self.tokenNames[expectedTokenType]) - t = CommonToken(type=expectedTokenType, text=tokenText) - current = input.LT(1) - if current.type == EOF: - current = input.LT(-1) - - if current is not None: - t.line = current.line - t.charPositionInLine = current.charPositionInLine - t.channel = DEFAULT_CHANNEL - return t - - - def setTokenStream(self, input): - """Set the token stream and reset the parser""" - - self.input = None - self.reset() - self.input = input - - - def getTokenStream(self): - return self.input - - - def getSourceName(self): - return self.input.getSourceName() - - - def traceIn(self, ruleName, ruleIndex): - super().traceIn(ruleName, ruleIndex, self.input.LT(1)) - - - def traceOut(self, ruleName, ruleIndex): - super().traceOut(ruleName, ruleIndex, self.input.LT(1)) - - -class RuleReturnScope(object): - """ - Rules can return start/stop info as well as possible trees and templates. - """ - - def getStart(self): - """Return the start token or tree.""" - return None - - - def getStop(self): - """Return the stop token or tree.""" - return None - - - def getTree(self): - """Has a value potentially if output=AST.""" - return None - - - def getTemplate(self): - """Has a value potentially if output=template.""" - return None - - -class ParserRuleReturnScope(RuleReturnScope): - """ - Rules that return more than a single value must return an object - containing all the values. Besides the properties defined in - RuleLabelScope.predefinedRulePropertiesScope there may be user-defined - return values. This class simply defines the minimum properties that - are always defined and methods to access the others that might be - available depending on output option such as template and tree. - - Note text is not an actual property of the return value, it is computed - from start and stop using the input stream's toString() method. I - could add a ctor to this so that we can pass in and store the input - stream, but I'm not sure we want to do that. It would seem to be undefined - to get the .text property anyway if the rule matches tokens from multiple - input streams. - - I do not use getters for fields of objects that are used simply to - group values such as this aggregate. The getters/setters are there to - satisfy the superclass interface. - """ - - def __init__(self): - super().__init__() - self.start = None - self.stop = None - self.tree = None # only used when output=AST - - - def getStart(self): - return self.start - - - def getStop(self): - return self.stop - - - def getTree(self): - return self.tree diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/streams.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/streams.py deleted file mode 100644 index 069755bf..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/streams.py +++ /dev/null @@ -1,1460 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2012 Terence Parr -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -from io import StringIO - -from .constants import DEFAULT_CHANNEL, EOF -from .tokens import Token - - -############################################################################ -# -# basic interfaces -# IntStream -# +- CharStream -# \- TokenStream -# -# subclasses must implemented all methods -# -############################################################################ - -class IntStream(object): - """ - @brief Base interface for streams of integer values. - - A simple stream of integers used when all I care about is the char - or token type sequence (such as interpretation). - """ - - def consume(self): - raise NotImplementedError - - - def LA(self, i): - """Get int at current input pointer + i ahead where i=1 is next int. - - Negative indexes are allowed. LA(-1) is previous token (token - just matched). LA(-i) where i is before first token should - yield -1, invalid char / EOF. - """ - - raise NotImplementedError - - - def mark(self): - """ - Tell the stream to start buffering if it hasn't already. Return - current input position, index(), or some other marker so that - when passed to rewind() you get back to the same spot. - rewind(mark()) should not affect the input cursor. The Lexer - track line/col info as well as input index so its markers are - not pure input indexes. Same for tree node streams. - """ - - raise NotImplementedError - - - def index(self): - """ - Return the current input symbol index 0..n where n indicates the - last symbol has been read. The index is the symbol about to be - read not the most recently read symbol. - """ - - raise NotImplementedError - - - def rewind(self, marker=None): - """ - Reset the stream so that next call to index would return marker. - The marker will usually be index() but it doesn't have to be. It's - just a marker to indicate what state the stream was in. This is - essentially calling release() and seek(). If there are markers - created after this marker argument, this routine must unroll them - like a stack. Assume the state the stream was in when this marker - was created. - - If marker is None: - Rewind to the input position of the last marker. 
- Used currently only after a cyclic DFA and just - before starting a sem/syn predicate to get the - input position back to the start of the decision. - Do not "pop" the marker off the state. mark(i) - and rewind(i) should balance still. It is - like invoking rewind(last marker) but it should not "pop" - the marker off. It's like seek(last marker's input position). - """ - - raise NotImplementedError - - - def release(self, marker=None): - """ - You may want to commit to a backtrack but don't want to force the - stream to keep bookkeeping objects around for a marker that is - no longer necessary. This will have the same behavior as - rewind() except it releases resources without the backward seek. - This must throw away resources for all markers back to the marker - argument. So if you're nested 5 levels of mark(), and then release(2) - you have to release resources for depths 2..5. - """ - - raise NotImplementedError - - - def seek(self, index): - """ - Set the input cursor to the position indicated by index. This is - normally used to seek ahead in the input stream. No buffering is - required to do this unless you know your stream will use seek to - move backwards such as when backtracking. - - This is different from rewind in its multi-directional - requirement and in that its argument is strictly an input cursor - (index). - - For char streams, seeking forward must update the stream state such - as line number. For seeking backwards, you will be presumably - backtracking using the mark/rewind mechanism that restores state and - so this method does not need to update state when seeking backwards. - - Currently, this method is only used for efficient backtracking using - memoization, but in the future it may be used for incremental parsing. - - The index is 0..n-1. A seek to position i means that LA(1) will - return the ith symbol. So, seeking to 0 means LA(1) will return the - first element in the stream. - """ - - raise NotImplementedError - - - def size(self): - """ - Only makes sense for streams that buffer everything up probably, but - might be useful to display the entire stream or for testing. This - value includes a single EOF. - """ - - raise NotImplementedError - - - def getSourceName(self): - """ - Where are you getting symbols from? Normally, implementations will - pass the buck all the way to the lexer who can ask its input stream - for the file name or whatever. - """ - - raise NotImplementedError - - -class CharStream(IntStream): - """ - @brief A source of characters for an ANTLR lexer. - - This is an abstract class that must be implemented by a subclass. - - """ - - # pylint does not realize that this is an interface, too - #pylint: disable-msg=W0223 - - EOF = -1 - - def __init__(self): - # line number 1..n within the input - self._line = 1 - - # The index of the character relative to the beginning of the - # line 0..n-1 - self._charPositionInLine = 0 - - - def substring(self, start, stop): - """ - For infinite streams, you don't need this; primarily I'm providing - a useful interface for action code. Just make sure actions don't - use this on streams that don't support it. - """ - - raise NotImplementedError - - - def LT(self, i): - """ - Get the ith character of lookahead. This is the same usually as - LA(i). This will be used for labels in the generated - lexer code. I'd prefer to return a char here type-wise, but it's - probably better to be 32-bit clean and be consistent with LA. 
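-
-        For example (illustrative, using the concrete ANTLRStringStream
-        defined later in this module):
-
-            stream = ANTLRStringStream('ab')
-            stream.LA(1)    # -> 97, the integer code point of 'a'
-            stream.LT(1)    # -> 'a', the character itself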
- """ - - raise NotImplementedError - - - @property - def line(self): - """ANTLR tracks the line information automatically""" - return self._line - - @line.setter - def line(self, value): - """ - Because this stream can rewind, we need to be able to reset the line - """ - self._line = value - - - @property - def charPositionInLine(self): - """ - The index of the character relative to the beginning of the line 0..n-1 - """ - return self._charPositionInLine - - @charPositionInLine.setter - def charPositionInLine(self, pos): - self._charPositionInLine = pos - - -class TokenStream(IntStream): - """ - - @brief A stream of tokens accessing tokens from a TokenSource - - This is an abstract class that must be implemented by a subclass. - - """ - - # pylint does not realize that this is an interface, too - #pylint: disable-msg=W0223 - - def LT(self, k): - """ - Get Token at current input pointer + i ahead where i=1 is next Token. - i<0 indicates tokens in the past. So -1 is previous token and -2 is - two tokens ago. LT(0) is undefined. For i>=n, return Token.EOFToken. - Return null for LT(0) and any index that results in an absolute address - that is negative. - """ - - raise NotImplementedError - - - def range(self): - """ - How far ahead has the stream been asked to look? The return - value is a valid index from 0..n-1. - """ - - raise NotImplementedError - - - def get(self, i): - """ - Get a token at an absolute index i; 0..n-1. This is really only - needed for profiling and debugging and token stream rewriting. - If you don't want to buffer up tokens, then this method makes no - sense for you. Naturally you can't use the rewrite stream feature. - I believe DebugTokenStream can easily be altered to not use - this method, removing the dependency. - """ - - raise NotImplementedError - - - def getTokenSource(self): - """ - Where is this stream pulling tokens from? This is not the name, but - the object that provides Token objects. - """ - - raise NotImplementedError - - - def toString(self, start=None, stop=None): - """ - Return the text of all tokens from start to stop, inclusive. - If the stream does not buffer all the tokens then it can just - return "" or null; Users should not access $ruleLabel.text in - an action of course in that case. - - Because the user is not required to use a token with an index stored - in it, we must provide a means for two token objects themselves to - indicate the start/end location. Most often this will just delegate - to the other toString(int,int). This is also parallel with - the TreeNodeStream.toString(Object,Object). - """ - - raise NotImplementedError - - -############################################################################ -# -# character streams for use in lexers -# CharStream -# \- ANTLRStringStream -# -############################################################################ - - -class ANTLRStringStream(CharStream): - """ - @brief CharStream that pull data from a unicode string. - - A pretty quick CharStream that pulls all data from an array - directly. Every method call counts in the lexer. - - """ - - - def __init__(self, data): - """ - @param data This should be a unicode string holding the data you want - to parse. If you pass in a byte string, the Lexer will choke on - non-ascii data. 
- """ - - super().__init__() - - # The data being scanned - self.strdata = str(data) - self.data = [ord(c) for c in self.strdata] - - # How many characters are actually in the buffer - self.n = len(data) - - # 0..n-1 index into string of next char - self.p = 0 - - # A list of CharStreamState objects that tracks the stream state - # values line, charPositionInLine, and p that can change as you - # move through the input stream. Indexed from 0..markDepth-1. - self._markers = [ ] - self.lastMarker = None - self.markDepth = 0 - - # What is name or source of this char stream? - self.name = None - - - def reset(self): - """ - Reset the stream so that it's in the same state it was - when the object was created *except* the data array is not - touched. - """ - - self.p = 0 - self._line = 1 - self.charPositionInLine = 0 - self._markers = [ ] - self.lastMarker = None - self.markDepth = 0 - - - def consume(self): - if self.p < self.n: - if self.data[self.p] == 10: # ord('\n') - self._line += 1 - self.charPositionInLine = 0 - else: - self.charPositionInLine += 1 - - self.p += 1 - - # else we reached EOF - # just do nothing - - - def LA(self, i): - if i == 0: - return 0 # undefined - - if i < 0: - i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1] - - if self.p + i - 1 < self.n: - return self.data[self.p + i - 1] - else: - return EOF - - - - def LT(self, i): - if i == 0: - return 0 # undefined - - if i < 0: - i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1] - - if self.p + i - 1 < self.n: - return self.strdata[self.p + i - 1] - else: - return EOF - - - def index(self): - """ - Return the current input symbol index 0..n where n indicates the - last symbol has been read. The index is the index of char to - be returned from LA(1). - """ - - return self.p - - - def size(self): - return self.n - - - def mark(self): - state = (self.p, self.line, self.charPositionInLine) - if self.markDepth < len(self._markers): - self._markers[self.markDepth] = state - else: - self._markers.append(state) - self.markDepth += 1 - - self.lastMarker = self.markDepth - - return self.lastMarker - - - def rewind(self, marker=None): - if marker is None: - marker = self.lastMarker - - p, line, charPositionInLine = self._markers[marker - 1] - - self.seek(p) - self._line = line - self.charPositionInLine = charPositionInLine - self.release(marker) - - - def release(self, marker=None): - if marker is None: - marker = self.lastMarker - - self.markDepth = marker - 1 - - - def seek(self, index): - """ - consume() ahead until p==index; can't just set p=index as we must - update line and charPositionInLine. - """ - - if index <= self.p: - self.p = index # just jump; don't update stream state (line, ...) - return - - # seek forward, consume until p hits index - while self.p < index: - self.consume() - - - def substring(self, start, stop): - return self.strdata[start:stop + 1] - - - def getSourceName(self): - return self.name - - -class ANTLRFileStream(ANTLRStringStream): - """ - @brief CharStream that opens a file to read the data. - - This is a char buffer stream that is loaded from a file - all at once when you construct the object. - """ - - def __init__(self, fileName): - """ - @param fileName The path to the file to be opened. The file will be - opened with mode 'r'. 
- - """ - - self._fileName = fileName - - with open(fileName, 'r') as fp: - super().__init__(fp.read()) - - - @property - def fileName(self): - return self._fileName - - -class ANTLRInputStream(ANTLRStringStream): - """ - @brief CharStream that reads data from a file-like object. - - This is a char buffer stream that is loaded from a file like object - all at once when you construct the object. - - All input is consumed from the file, but it is not closed. - """ - - def __init__(self, file): - """ - @param file A file-like object holding your input. Only the read() - method must be implemented. - - """ - - data = file.read() - - super().__init__(data) - - -# I guess the ANTLR prefix exists only to avoid a name clash with some Java -# mumbojumbo. A plain "StringStream" looks better to me, which should be -# the preferred name in Python. -StringStream = ANTLRStringStream -FileStream = ANTLRFileStream -InputStream = ANTLRInputStream - - -############################################################################ -# -# Token streams -# TokenStream -# +- CommonTokenStream -# \- TokenRewriteStream -# -############################################################################ - - -class CommonTokenStream(TokenStream): - """ - @brief The most common stream of tokens - - The most common stream of tokens is one where every token is buffered up - and tokens are prefiltered for a certain channel (the parser will only - see these tokens and cannot change the filter channel number during the - parse). - """ - - def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL): - """ - @param tokenSource A TokenSource instance (usually a Lexer) to pull - the tokens from. - - @param channel Skip tokens on any channel but this one; this is how we - skip whitespace... - - """ - - super().__init__() - - self.tokenSource = tokenSource - - # Record every single token pulled from the source so we can reproduce - # chunks of it later. - self.tokens = [] - - # Map to override some Tokens' channel numbers - self.channelOverrideMap = {} - - # Set; discard any tokens with this type - self.discardSet = set() - - # Skip tokens on any channel but this one; this is how we skip - # whitespace... - self.channel = channel - - # By default, track all incoming tokens - self.discardOffChannelTokens = False - - # The index into the tokens list of the current token (next token - # to consume). p==-1 indicates that the tokens list is empty - self.p = -1 - - # Remember last marked position - self.lastMarker = None - - # how deep have we gone? - self._range = -1 - - - def makeEOFToken(self): - return self.tokenSource.makeEOFToken() - - - def setTokenSource(self, tokenSource): - """Reset this token stream by setting its token source.""" - - self.tokenSource = tokenSource - self.tokens = [] - self.p = -1 - self.channel = DEFAULT_CHANNEL - - - def reset(self): - self.p = 0 - self.lastMarker = None - - - def fillBuffer(self): - """ - Load all tokens from the token source and put in tokens. - This is done upon first LT request because you might want to - set some token type / channel overrides before filling buffer. - """ - - - index = 0 - t = self.tokenSource.nextToken() - while t and t.type != EOF: - discard = False - - if self.discardSet and t.type in self.discardSet: - discard = True - - elif self.discardOffChannelTokens and t.channel != self.channel: - discard = True - - # is there a channel override for token type? 
- if t.type in self.channelOverrideMap: - overrideChannel = self.channelOverrideMap[t.type] - - if overrideChannel == self.channel: - t.channel = overrideChannel - else: - discard = True - - if not discard: - t.index = index - self.tokens.append(t) - index += 1 - - t = self.tokenSource.nextToken() - - # leave p pointing at first token on channel - self.p = 0 - self.p = self.skipOffTokenChannels(self.p) - - - def consume(self): - """ - Move the input pointer to the next incoming token. The stream - must become active with LT(1) available. consume() simply - moves the input pointer so that LT(1) points at the next - input symbol. Consume at least one token. - - Walk past any token not on the channel the parser is listening to. - """ - - if self.p < len(self.tokens): - self.p += 1 - - self.p = self.skipOffTokenChannels(self.p) # leave p on valid token - - - def skipOffTokenChannels(self, i): - """ - Given a starting index, return the index of the first on-channel - token. - """ - - n = len(self.tokens) - while i < n and self.tokens[i].channel != self.channel: - i += 1 - - return i - - - def skipOffTokenChannelsReverse(self, i): - while i >= 0 and self.tokens[i].channel != self.channel: - i -= 1 - - return i - - - def setTokenTypeChannel(self, ttype, channel): - """ - A simple filter mechanism whereby you can tell this token stream - to force all tokens of type ttype to be on channel. For example, - when interpreting, we cannot exec actions so we need to tell - the stream to force all WS and NEWLINE to be a different, ignored - channel. - """ - - self.channelOverrideMap[ttype] = channel - - - def discardTokenType(self, ttype): - self.discardSet.add(ttype) - - - def getTokens(self, start=None, stop=None, types=None): - """ - Given a start and stop index, return a list of all tokens in - the token type set. Return None if no tokens were found. This - method looks at both on and off channel tokens. - """ - - if self.p == -1: - self.fillBuffer() - - if stop is None or stop > len(self.tokens): - stop = len(self.tokens) - - if start is None or start < 0: - start = 0 - - if start > stop: - return None - - if isinstance(types, int): - # called with a single type, wrap into set - types = set([types]) - - filteredTokens = [ - token for token in self.tokens[start:stop] - if types is None or token.type in types - ] - - if len(filteredTokens) == 0: - return None - - return filteredTokens - - - def LT(self, k): - """ - Get the ith token from the current position 1..n where k=1 is the - first symbol of lookahead. - """ - - if self.p == -1: - self.fillBuffer() - - if k == 0: - return None - - if k < 0: - return self.LB(-k) - - i = self.p - n = 1 - # find k good tokens - while n < k: - # skip off-channel tokens - i = self.skipOffTokenChannels(i + 1) # leave p on valid token - n += 1 - - if i > self._range: - self._range = i - - if i < len(self.tokens): - return self.tokens[i] - else: - return self.makeEOFToken() - - - def LB(self, k): - """Look backwards k tokens on-channel tokens""" - - if self.p == -1: - self.fillBuffer() - - if k == 0: - return None - - if self.p - k < 0: - return None - - i = self.p - n = 1 - # find k good tokens looking backwards - while n <= k: - # skip off-channel tokens - i = self.skipOffTokenChannelsReverse(i - 1) # leave p on valid token - n += 1 - - if i < 0: - return None - - return self.tokens[i] - - - def get(self, i): - """ - Return absolute token i; ignore which channel the tokens are on; - that is, count all tokens not just on-channel tokens. 
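-
-        Contrast with LT(), which is channel-aware (illustrative; lexer is
-        any TokenSource whose whitespace tokens go to a hidden channel):
-
-            tokens = CommonTokenStream(lexer)
-            tokens.LT(1)     # first *on-channel* token, whitespace skipped
-            tokens.get(0)    # absolute index 0, possibly a whitespace token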
- """ - - return self.tokens[i] - - - def slice(self, start, stop): - if self.p == -1: - self.fillBuffer() - - if start < 0 or stop < 0: - return None - - return self.tokens[start:stop + 1] - - - def LA(self, i): - return self.LT(i).type - - - def mark(self): - self.lastMarker = self.index() - return self.lastMarker - - - def release(self, marker=None): - # no resources to release - pass - - - def size(self): - return len(self.tokens) - - - def range(self): - return self._range - - - def index(self): - return self.p - - - def rewind(self, marker=None): - if marker is None: - marker = self.lastMarker - - self.seek(marker) - - - def seek(self, index): - self.p = index - - - def getTokenSource(self): - return self.tokenSource - - - def getSourceName(self): - return self.tokenSource.getSourceName() - - - def toString(self, start=None, stop=None): - """Returns a string of all tokens between start and stop (inclusive).""" - if self.p == -1: - self.fillBuffer() - - if start is None: - start = 0 - elif not isinstance(start, int): - start = start.index - - if stop is None: - stop = len(self.tokens) - 1 - elif not isinstance(stop, int): - stop = stop.index - - if stop >= len(self.tokens): - stop = len(self.tokens) - 1 - - return ''.join([t.text for t in self.tokens[start:stop + 1]]) - - -class RewriteOperation(object): - """@brief Internal helper class.""" - - def __init__(self, stream, index, text): - self.stream = stream - - # What index into rewrites List are we? - self.instructionIndex = None - - # Token buffer index. - self.index = index - self.text = text - - def execute(self, buf): - """Execute the rewrite operation by possibly adding to the buffer. - Return the index of the next token to operate on. - """ - - return self.index - - def toString(self): - opName = self.__class__.__name__ - return '<{opName}@{0.index}:"{0.text}">'.format(self, opName=opName) - - __str__ = toString - __repr__ = toString - - -class InsertBeforeOp(RewriteOperation): - """@brief Internal helper class.""" - - def execute(self, buf): - buf.write(self.text) - if self.stream.tokens[self.index].type != EOF: - buf.write(self.stream.tokens[self.index].text) - return self.index + 1 - - -class ReplaceOp(RewriteOperation): - """ - @brief Internal helper class. - - I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp - instructions. - """ - - def __init__(self, stream, first, last, text): - super().__init__(stream, first, text) - self.lastIndex = last - - - def execute(self, buf): - if self.text is not None: - buf.write(self.text) - - return self.lastIndex + 1 - - - def toString(self): - if self.text is None: - return ''.format(self) - - return ''.format(self) - - __str__ = toString - __repr__ = toString - - -class TokenRewriteStream(CommonTokenStream): - """@brief CommonTokenStream that can be modified. - - Useful for dumping out the input stream after doing some - augmentation or other manipulations. - - You can insert stuff, replace, and delete chunks. Note that the - operations are done lazily--only if you convert the buffer to a - String. This is very efficient because you are not moving data around - all the time. As the buffer of tokens is converted to strings, the - toString() method(s) check to see if there is an operation at the - current index. If so, the operation is done and then normal String - rendering continues on the buffer. This is like having multiple Turing - machine instruction streams (programs) operating on a single input tape. 
:) - - Since the operations are done lazily at toString-time, operations do not - screw up the token index values. That is, an insert operation at token - index i does not change the index values for tokens i+1..n-1. - - Because operations never actually alter the buffer, you may always get - the original token stream back without undoing anything. Since - the instructions are queued up, you can easily simulate transactions and - roll back any changes if there is an error just by removing instructions. - For example, - - CharStream input = new ANTLRFileStream("input"); - TLexer lex = new TLexer(input); - TokenRewriteStream tokens = new TokenRewriteStream(lex); - T parser = new T(tokens); - parser.startRule(); - - Then in the rules, you can execute - Token t,u; - ... - input.insertAfter(t, "text to put after t");} - input.insertAfter(u, "text after u");} - System.out.println(tokens.toString()); - - Actually, you have to cast the 'input' to a TokenRewriteStream. :( - - You can also have multiple "instruction streams" and get multiple - rewrites from a single pass over the input. Just name the instruction - streams and use that name again when printing the buffer. This could be - useful for generating a C file and also its header file--all from the - same buffer: - - tokens.insertAfter("pass1", t, "text to put after t");} - tokens.insertAfter("pass2", u, "text after u");} - System.out.println(tokens.toString("pass1")); - System.out.println(tokens.toString("pass2")); - - If you don't use named rewrite streams, a "default" stream is used as - the first example shows. - """ - - DEFAULT_PROGRAM_NAME = "default" - MIN_TOKEN_INDEX = 0 - - def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL): - super().__init__(tokenSource, channel) - - # You may have multiple, named streams of rewrite operations. - # I'm calling these things "programs." - # Maps String (name) -> rewrite (List) - self.programs = {} - self.programs[self.DEFAULT_PROGRAM_NAME] = [] - - # Map String (program name) -> Integer index - self.lastRewriteTokenIndexes = {} - - - def rollback(self, *args): - """ - Rollback the instruction stream for a program so that - the indicated instruction (via instructionIndex) is no - longer in the stream. UNTESTED! 
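-
-        For context, a minimal rewrite session looks like this (illustrative;
-        TLexer is a placeholder for any generated lexer):
-
-            tokens = TokenRewriteStream(TLexer(ANTLRStringStream('ab')))
-            tokens.fillBuffer()
-            tokens.insertBefore(0, '<<')    # queued, not applied yet
-            tokens.replace(1, 'B')          # replace token at index 1
-            print(tokens.toString())        # edits applied lazily here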
- """ - - if len(args) == 2: - programName = args[0] - instructionIndex = args[1] - elif len(args) == 1: - programName = self.DEFAULT_PROGRAM_NAME - instructionIndex = args[0] - else: - raise TypeError("Invalid arguments") - - p = self.programs.get(programName) - if p: - self.programs[programName] = ( - p[self.MIN_TOKEN_INDEX:instructionIndex]) - - - def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME): - """Reset the program so that no instructions exist""" - - self.rollback(programName, self.MIN_TOKEN_INDEX) - - - def insertAfter(self, *args): - if len(args) == 2: - programName = self.DEFAULT_PROGRAM_NAME - index = args[0] - text = args[1] - - elif len(args) == 3: - programName = args[0] - index = args[1] - text = args[2] - - else: - raise TypeError("Invalid arguments") - - if isinstance(index, Token): - # index is a Token, grap the stream index from it - index = index.index - - # to insert after, just insert before next index (even if past end) - self.insertBefore(programName, index + 1, text) - - - def insertBefore(self, *args): - if len(args) == 2: - programName = self.DEFAULT_PROGRAM_NAME - index = args[0] - text = args[1] - - elif len(args) == 3: - programName = args[0] - index = args[1] - text = args[2] - - else: - raise TypeError("Invalid arguments") - - if isinstance(index, Token): - # index is a Token, grab the stream index from it - index = index.index - - op = InsertBeforeOp(self, index, text) - rewrites = self.getProgram(programName) - op.instructionIndex = len(rewrites) - rewrites.append(op) - - - def replace(self, *args): - if len(args) == 2: - programName = self.DEFAULT_PROGRAM_NAME - first = args[0] - last = args[0] - text = args[1] - - elif len(args) == 3: - programName = self.DEFAULT_PROGRAM_NAME - first = args[0] - last = args[1] - text = args[2] - - elif len(args) == 4: - programName = args[0] - first = args[1] - last = args[2] - text = args[3] - - else: - raise TypeError("Invalid arguments") - - if isinstance(first, Token): - # first is a Token, grap the stream index from it - first = first.index - - if isinstance(last, Token): - # last is a Token, grap the stream index from it - last = last.index - - if first > last or first < 0 or last < 0 or last >= len(self.tokens): - raise ValueError( - "replace: range invalid: {}..{} (size={})" - .format(first, last, len(self.tokens))) - - op = ReplaceOp(self, first, last, text) - rewrites = self.getProgram(programName) - op.instructionIndex = len(rewrites) - rewrites.append(op) - - - def delete(self, *args): - self.replace(*(list(args) + [None])) - - - def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME): - return self.lastRewriteTokenIndexes.get(programName, -1) - - - def setLastRewriteTokenIndex(self, programName, i): - self.lastRewriteTokenIndexes[programName] = i - - - def getProgram(self, name): - p = self.programs.get(name) - if not p: - p = self.initializeProgram(name) - - return p - - - def initializeProgram(self, name): - p = [] - self.programs[name] = p - return p - - - def toOriginalString(self, start=None, end=None): - if self.p == -1: - self.fillBuffer() - - if start is None: - start = self.MIN_TOKEN_INDEX - if end is None: - end = self.size() - 1 - - buf = StringIO() - i = start - while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens): - if self.get(i).type != EOF: - buf.write(self.get(i).text) - i += 1 - - return buf.getvalue() - - - def toString(self, *args): - if self.p == -1: - self.fillBuffer() - - if len(args) == 0: - programName = self.DEFAULT_PROGRAM_NAME - start = 
self.MIN_TOKEN_INDEX - end = self.size() - 1 - - elif len(args) == 1: - programName = args[0] - start = self.MIN_TOKEN_INDEX - end = self.size() - 1 - - elif len(args) == 2: - programName = self.DEFAULT_PROGRAM_NAME - start = args[0] - end = args[1] - - if start is None: - start = self.MIN_TOKEN_INDEX - elif not isinstance(start, int): - start = start.index - - if end is None: - end = len(self.tokens) - 1 - elif not isinstance(end, int): - end = end.index - - # ensure start/end are in range - if end >= len(self.tokens): - end = len(self.tokens) - 1 - - if start < 0: - start = 0 - - rewrites = self.programs.get(programName) - if not rewrites: - # no instructions to execute - return self.toOriginalString(start, end) - - buf = StringIO() - - # First, optimize instruction stream - indexToOp = self.reduceToSingleOperationPerIndex(rewrites) - - # Walk buffer, executing instructions and emitting tokens - i = start - while i <= end and i < len(self.tokens): - # remove so any left have index size-1 - op = indexToOp.pop(i, None) - - t = self.tokens[i] - if op is None: - # no operation at that index, just dump token - if t.type != EOF: - buf.write(t.text) - i += 1 # move to next token - - else: - i = op.execute(buf) # execute operation and skip - - # include stuff after end if it's last index in buffer - # So, if they did an insertAfter(lastValidIndex, "foo"), include - # foo if end == lastValidIndex. - if end == len(self.tokens) - 1: - # Scan any remaining operations after last token - # should be included (they will be inserts). - for i, op in sorted(indexToOp.items()): - if op.index >= len(self.tokens) - 1: - buf.write(op.text) - - return buf.getvalue() - - __str__ = toString - - - def reduceToSingleOperationPerIndex(self, rewrites): - """ - We need to combine operations and report invalid operations (like - overlapping replaces that are not completed nested). Inserts to - same index need to be combined etc... Here are the cases: - - I.i.u I.j.v leave alone, nonoverlapping - I.i.u I.i.v combine: Iivu - - R.i-j.u R.x-y.v | i-j in x-y delete first R - R.i-j.u R.i-j.v delete first R - R.i-j.u R.x-y.v | x-y in i-j ERROR - R.i-j.u R.x-y.v | boundaries overlap ERROR - - Delete special case of replace (text==null): - D.i-j.u D.x-y.v | boundaries overlapcombine to - max(min)..max(right) - - I.i.u R.x-y.v | i in (x+1)-ydelete I (since - insert before we're not deleting - i) - I.i.u R.x-y.v | i not in (x+1)-yleave alone, - nonoverlapping - - R.x-y.v I.i.u | i in x-y ERROR - R.x-y.v I.x.u R.x-y.uv (combine, delete I) - R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping - - I.i.u = insert u before op @ index i - R.x-y.u = replace x-y indexed tokens with u - - First we need to examine replaces. For any replace op: - - 1. wipe out any insertions before op within that range. - 2. Drop any replace op before that is contained completely within - that range. - 3. Throw exception upon boundary overlap with any previous replace. - - Then we can deal with inserts: - - 1. for any inserts to same index, combine even if not adjacent. - 2. for any prior replace with same left boundary, combine this - insert with replace and delete this replace. - 3. throw exception if index in same range as previous replace - - Don't actually delete; make op null in list. Easier to walk list. - Later we can throw as we add to index -> op map. - - Note that I.2 R.2-2 will wipe out I.2 even though, technically, the - inserted stuff would be before the replace range. 
But, if you - add tokens in front of a method body '{' and then delete the method - body, I think the stuff before the '{' you added should disappear too. - - Return a map from token index to operation. - """ - - # WALK REPLACES - for i, rop in enumerate(rewrites): - if not rop: - continue - - if not isinstance(rop, ReplaceOp): - continue - - # Wipe prior inserts within range - for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i): - if iop.index == rop.index: - # E.g., insert before 2, delete 2..2; update replace - # text to include insert before, kill insert - rewrites[iop.instructionIndex] = None - rop.text = self.catOpText(iop.text, rop.text) - - elif iop.index > rop.index and iop.index <= rop.lastIndex: - # delete insert as it's a no-op. - rewrites[j] = None - - # Drop any prior replaces contained within - for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i): - if (prevRop.index >= rop.index - and prevRop.lastIndex <= rop.lastIndex): - # delete replace as it's a no-op. - rewrites[j] = None - continue - - # throw exception unless disjoint or identical - disjoint = (prevRop.lastIndex < rop.index - or prevRop.index > rop.lastIndex) - same = (prevRop.index == rop.index - and prevRop.lastIndex == rop.lastIndex) - - # Delete special case of replace (text==null): - # D.i-j.u D.x-y.v| boundaries overlapcombine to - # max(min)..max(right) - if prevRop.text is None and rop.text is None and not disjoint: - # kill first delete - rewrites[prevRop.instructionIndex] = None - - rop.index = min(prevRop.index, rop.index) - rop.lastIndex = max(prevRop.lastIndex, rop.lastIndex) - - elif not disjoint and not same: - raise ValueError( - "replace op boundaries of {} overlap with previous {}" - .format(rop, prevRop)) - - # WALK INSERTS - for i, iop in enumerate(rewrites): - if iop is None: - continue - - if not isinstance(iop, InsertBeforeOp): - continue - - # combine current insert with prior if any at same index - for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i): - if prevIop.index == iop.index: # combine objects - # convert to strings...we're in process of toString'ing - # whole token buffer so no lazy eval issue with any - # templates - iop.text = self.catOpText(iop.text, prevIop.text) - # delete redundant prior insert - rewrites[j] = None - - # look for replaces where iop.index is in range; error - for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i): - if iop.index == rop.index: - rop.text = self.catOpText(iop.text, rop.text) - # delete current insert - rewrites[i] = None - continue - - if iop.index >= rop.index and iop.index <= rop.lastIndex: - raise ValueError( - "insert op {} within boundaries of previous {}" - .format(iop, rop)) - - m = {} - for i, op in enumerate(rewrites): - if op is None: - # ignore deleted ops - continue - - assert op.index not in m, "should only be one op per index" - m[op.index] = op - - return m - - - def catOpText(self, a, b): - x = "" - y = "" - if a: - x = a - if b: - y = b - return x + y - - - def getKindOfOps(self, rewrites, kind, before=None): - """Get all operations before an index of a particular kind.""" - - if before is None: - before = len(rewrites) - elif before > len(rewrites): - before = len(rewrites) - - for i, op in enumerate(rewrites[:before]): - # ignore deleted - if op and op.__class__ == kind: - yield i, op - - - def toDebugString(self, start=None, end=None): - if start is None: - start = self.MIN_TOKEN_INDEX - if end is None: - end = self.size() - 1 - - buf = StringIO() - i = start - while i >= self.MIN_TOKEN_INDEX and i 
<= end and i < len(self.tokens): - buf.write(self.get(i)) - i += 1 - - return buf.getvalue() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tokens.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tokens.py deleted file mode 100644 index 31a2e93f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tokens.py +++ /dev/null @@ -1,310 +0,0 @@ -"""ANTLR3 runtime package""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2012 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -from .constants import DEFAULT_CHANNEL, EOF, INVALID_TOKEN_TYPE - -############################################################################ -# -# basic token interface -# -############################################################################ - -class Token(object): - """@brief Abstract token baseclass.""" - - TOKEN_NAMES_MAP = None - - @classmethod - def registerTokenNamesMap(cls, tokenNamesMap): - """@brief Store a mapping from token type to token name. - - This enables token.typeName to give something more meaningful - than, e.g., '6'. - """ - cls.TOKEN_NAMES_MAP = tokenNamesMap - cls.TOKEN_NAMES_MAP[EOF] = "EOF" - - def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None, - index=-1, line=0, charPositionInLine=-1, input=None): - # We use -1 for index and charPositionInLine as an invalid index - self._type = type - self._channel = channel - self._text = text - self._index = index - self._line = 0 - self._charPositionInLine = charPositionInLine - self.input = input - - # To override a property, you'll need to override both the getter and setter. 
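-    #
-    # For example (illustrative; not part of the runtime), a subclass that
-    # normalizes text would redefine the pair together:
-    #
-    #     class UpperToken(Token):
-    #         @property
-    #         def text(self):
-    #             return self._text.upper() if self._text else self._text
-    #
-    #         @text.setter
-    #         def text(self, value):
-    #             self._text = value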
- @property - def text(self): - return self._text - - @text.setter - def text(self, value): - self._text = value - - - @property - def type(self): - return self._type - - @type.setter - def type(self, value): - self._type = value - - # For compatibility - def getType(self): - return self._type - - @property - def typeName(self): - if self.TOKEN_NAMES_MAP: - return self.TOKEN_NAMES_MAP.get(self._type, "INVALID_TOKEN_TYPE") - else: - return str(self._type) - - @property - def line(self): - """Lines are numbered 1..n.""" - return self._line - - @line.setter - def line(self, value): - self._line = value - - - @property - def charPositionInLine(self): - """Columns are numbered 0..n-1.""" - return self._charPositionInLine - - @charPositionInLine.setter - def charPositionInLine(self, pos): - self._charPositionInLine = pos - - - @property - def channel(self): - return self._channel - - @channel.setter - def channel(self, value): - self._channel = value - - - @property - def index(self): - """ - An index from 0..n-1 of the token object in the input stream. - This must be valid in order to use the ANTLRWorks debugger. - """ - return self._index - - @index.setter - def index(self, value): - self._index = value - - - def getInputStream(self): - """@brief From what character stream was this token created. - - You don't have to implement but it's nice to know where a Token - comes from if you have include files etc... on the input.""" - - raise NotImplementedError - - def setInputStream(self, input): - """@brief From what character stream was this token created. - - You don't have to implement but it's nice to know where a Token - comes from if you have include files etc... on the input.""" - - raise NotImplementedError - - -############################################################################ -# -# token implementations -# -# Token -# +- CommonToken -# \- ClassicToken -# -############################################################################ - -class CommonToken(Token): - """@brief Basic token implementation. - - This implementation does not copy the text from the input stream upon - creation, but keeps start/stop pointers into the stream to avoid - unnecessary copy operations. - - """ - - def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None, - input=None, start=None, stop=None, oldToken=None): - - if oldToken: - super().__init__(oldToken.type, oldToken.channel, oldToken.text, - oldToken.index, oldToken.line, - oldToken.charPositionInLine, oldToken.input) - if isinstance(oldToken, CommonToken): - self.start = oldToken.start - self.stop = oldToken.stop - else: - self.start = start - self.stop = stop - - else: - super().__init__(type=type, channel=channel, input=input) - - # We need to be able to change the text once in a while. If - # this is non-null, then getText should return this. Note that - # start/stop are not affected by changing this. - self._text = text - - # The char position into the input buffer where this token starts - self.start = start - - # The char position into the input buffer where this token stops - # This is the index of the last char, *not* the index after it! - self.stop = stop - - - @property - def text(self): - # Could be the empty string, and we want to return that. 
-        if self._text is not None:
-            return self._text
-
-        if not self.input:
-            return None
-
-        if self.start < self.input.size() and self.stop < self.input.size():
-            return self.input.substring(self.start, self.stop)
-
-        return ''
-
-    @text.setter
-    def text(self, value):
-        """
-        Override the text for this token. getText() will return this text
-        rather than pulling from the buffer. Note that this does not mean
-        that start/stop indexes are not valid. It means that the input
-        was converted to a new string in the token object.
-        """
-        self._text = value
-
-
-    def getInputStream(self):
-        return self.input
-
-    def setInputStream(self, input):
-        self.input = input
-
-
-    def __str__(self):
-        if self.type == EOF:
-            return "<EOF>"
-
-        channelStr = ""
-        if self.channel > 0:
-            channelStr = ",channel=" + str(self.channel)
-
-        txt = self.text
-        if txt:
-            # Put 2 backslashes in front of each character
-            txt = txt.replace("\n", r"\\n")
-            txt = txt.replace("\r", r"\\r")
-            txt = txt.replace("\t", r"\\t")
-        else:
-            txt = "<no text>"
-
-        return ("[@{0.index},{0.start}:{0.stop}={txt!r},"
-                "<{0.typeName}>{channelStr},"
-                "{0.line}:{0.charPositionInLine}]"
-                .format(self, txt=txt, channelStr=channelStr))
-
-
-class ClassicToken(Token):
-    """@brief Alternative token implementation.
-
-    A Token object like we'd use in ANTLR 2.x; has an actual string created
-    and associated with this object. These objects are needed for imaginary
-    tree nodes that have payload objects. We need to create a Token object
-    that has a string; the tree node will point at this token. CommonToken
-    has indexes into a char stream and hence cannot be used to introduce
-    new strings.
-    """
-
-    def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
-                 oldToken=None):
-        if oldToken:
-            super().__init__(type=oldToken.type, channel=oldToken.channel,
-                             text=oldToken.text, line=oldToken.line,
-                             charPositionInLine=oldToken.charPositionInLine)
-
-        else:
-            super().__init__(type=type, channel=channel, text=text,
-                             index=None, line=None, charPositionInLine=None)
-
-
-    def getInputStream(self):
-        return None
-
-    def setInputStream(self, input):
-        pass
-
-
-    def toString(self):
-        channelStr = ""
-        if self.channel > 0:
-            channelStr = ",channel=" + str(self.channel)
-
-        txt = self.text
-        if not txt:
-            txt = "<no text>"
-
-        return ("[@{0.index!r},{txt!r},<{0.type!r}>{channelStr},"
-                "{0.line!r}:{0.charPositionInLine!r}]"
-                .format(self, txt=txt, channelStr=channelStr))
-
-    __str__ = toString
-    __repr__ = toString
-
-
-INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
-
-# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
-# will avoid creating a token for this symbol and try to fetch another.
-SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tree.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tree.py
deleted file mode 100644
index 50a6eed4..00000000
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tree.py
+++ /dev/null
@@ -1,2829 +0,0 @@
-""" @package antlr3.tree
-@brief ANTLR3 runtime package, tree module
-
-This module contains all support classes for AST construction and tree parsers.
-
-"""
-
-# begin[licence]
-#
-# [The "BSD licence"]
-# Copyright (c) 2005-2012 Terence Parr
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1.
Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# end[licence] - -# lot's of docstrings are missing, don't complain for now... -# pylint: disable-msg=C0111 - -import re - -from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE -from antlr3.recognizers import BaseRecognizer, RuleReturnScope -from antlr3.streams import IntStream -from antlr3.tokens import CommonToken, Token, INVALID_TOKEN -from antlr3.exceptions import MismatchedTreeNodeException, \ - MissingTokenException, UnwantedTokenException, MismatchedTokenException, \ - NoViableAltException - - -############################################################################ -# -# tree related exceptions -# -############################################################################ - - -class RewriteCardinalityException(RuntimeError): - """ - @brief Base class for all exceptions thrown during AST rewrite construction. - - This signifies a case where the cardinality of two or more elements - in a subrule are different: (ID INT)+ where |ID|!=|INT| - """ - - def __init__(self, elementDescription): - RuntimeError.__init__(self, elementDescription) - - self.elementDescription = elementDescription - - - def getMessage(self): - return self.elementDescription - - -class RewriteEarlyExitException(RewriteCardinalityException): - """@brief No elements within a (...)+ in a rewrite rule""" - - def __init__(self, elementDescription=None): - RewriteCardinalityException.__init__(self, elementDescription) - - -class RewriteEmptyStreamException(RewriteCardinalityException): - """ - @brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream - """ - - pass - - -############################################################################ -# -# basic Tree and TreeAdaptor interfaces -# -############################################################################ - -class Tree(object): - """ - @brief Abstract baseclass for tree nodes. - - What does a tree look like? ANTLR has a number of support classes - such as CommonTreeNodeStream that work on these kinds of trees. You - don't have to make your trees implement this interface, but if you do, - you'll be able to use more support code. - - NOTE: When constructing trees, ANTLR can build any kind of tree; it can - even use Token objects as trees if you add a child list to your tokens. 
- - This is a tree node without any payload; just navigation and factory stuff. - """ - - - def getChild(self, i): - raise NotImplementedError - - - def getChildCount(self): - raise NotImplementedError - - - def getParent(self): - """Tree tracks parent and child index now > 3.0""" - - raise NotImplementedError - - def setParent(self, t): - """Tree tracks parent and child index now > 3.0""" - - raise NotImplementedError - - - def hasAncestor(self, ttype): - """Walk upwards looking for ancestor with this token type.""" - - raise NotImplementedError - - def getAncestor(self, ttype): - """Walk upwards and get first ancestor with this token type.""" - - raise NotImplementedError - - def getAncestors(self): - """Return a list of all ancestors of this node. - - The first node of list is the root and the last is the parent of - this node. - """ - - raise NotImplementedError - - - def getChildIndex(self): - """This node is what child index? 0..n-1""" - - raise NotImplementedError - - def setChildIndex(self, index): - """This node is what child index? 0..n-1""" - - raise NotImplementedError - - - def freshenParentAndChildIndexes(self): - """Set the parent and child index values for all children""" - - raise NotImplementedError - - - def addChild(self, t): - """ - Add t as a child to this node. If t is null, do nothing. If t - is nil, add all children of t to this' children. - """ - - raise NotImplementedError - - - def setChild(self, i, t): - """Set ith child (0..n-1) to t; t must be non-null and non-nil node""" - - raise NotImplementedError - - - def deleteChild(self, i): - raise NotImplementedError - - - def replaceChildren(self, startChildIndex, stopChildIndex, t): - """ - Delete children from start to stop and replace with t even if t is - a list (nil-root tree). num of children can increase or decrease. - For huge child lists, inserting children can force walking rest of - children to set their childindex; could be slow. - """ - - raise NotImplementedError - - - def isNil(self): - """ - Indicates the node is a nil node but may still have children, meaning - the tree is a flat list. - """ - - raise NotImplementedError - - - def getTokenStartIndex(self): - """ - What is the smallest token index (indexing from 0) for this node - and its children? - """ - - raise NotImplementedError - - - def setTokenStartIndex(self, index): - raise NotImplementedError - - - def getTokenStopIndex(self): - """ - What is the largest token index (indexing from 0) for this node - and its children? - """ - - raise NotImplementedError - - - def setTokenStopIndex(self, index): - raise NotImplementedError - - - def dupNode(self): - raise NotImplementedError - - - def getType(self): - """Return a token type; needed for tree parsing.""" - - raise NotImplementedError - - - def getText(self): - raise NotImplementedError - - - def getLine(self): - """ - In case we don't have a token payload, what is the line for errors? - """ - - raise NotImplementedError - - - def getCharPositionInLine(self): - raise NotImplementedError - - - def toStringTree(self): - raise NotImplementedError - - - def toString(self): - raise NotImplementedError - - - -class TreeAdaptor(object): - """ - @brief Abstract baseclass for tree adaptors. - - How to create and navigate trees. Rather than have a separate factory - and adaptor, I've merged them. Makes sense to encapsulate. - - This takes the place of the tree construction code generated in the - generated code in 2.x and the ASTFactory. 
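As a hedged sketch of the merged factory-plus-adaptor idea described above (again assuming the deleted `antlr3` package, with invented token types): the same tree can be built without ever naming a node class, going only through an adaptor:

```python
from antlr3.tree import CommonTreeAdaptor

PLUS, INT = 5, 6  # arbitrary token types for illustration

adaptor = CommonTreeAdaptor()
root = adaptor.createFromType(PLUS, '+')  # imaginary node from type + text
adaptor.addChild(root, adaptor.createFromType(INT, '1'))
adaptor.addChild(root, adaptor.createFromType(INT, '2'))
print(root.toStringTree())                # (+ 1 2)

# a "nil" node is an empty, non-null root used for flat lists
lst = adaptor.nil()
adaptor.addChild(lst, adaptor.createFromType(INT, '3'))
```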
- - I do not need to know the type of a tree at all so they are all - generic Objects. This may increase the amount of typecasting needed. :( - """ - - # C o n s t r u c t i o n - - def createWithPayload(self, payload): - """ - Create a tree node from a Token object; for CommonTree type trees, - the token just becomes the payload. This is the most - common create call. - - Override if you want another kind of node to be built. - """ - - raise NotImplementedError - - - def dupNode(self, treeNode): - """Duplicate a single tree node. - - Override if you want another kind of node to be built.""" - - raise NotImplementedError - - - def dupTree(self, tree): - """Duplicate tree recursively, using dupNode() for each node""" - - raise NotImplementedError - - - def nil(self): - """ - Return a nil node (an empty but non-null node) that can hold - a list of elements as the children. If you want a flat tree (a list) - use "t=adaptor.nil(); t.addChild(x); t.addChild(y);" - """ - - raise NotImplementedError - - - def errorNode(self, input, start, stop, exc): - """ - Return a tree node representing an error. This node records the - tokens consumed during error recovery. The start token indicates the - input symbol at which the error was detected. The stop token indicates - the last symbol consumed during recovery. - - You must specify the input stream so that the erroneous text can - be packaged up in the error node. The exception could be useful - to some applications; the default implementation stores a pointer to it in - the CommonErrorNode. - - This only makes sense during token parsing, not tree parsing. - Tree parsing should happen only when parsing and tree construction - succeed. - """ - - raise NotImplementedError - - - def isNil(self, tree): - """Is tree considered a nil node used to make lists of child nodes?""" - - raise NotImplementedError - - - def addChild(self, t, child): - """ - Add a child to the tree t. If child is a flat tree (a list), make all - in list children of t. Warning: if t has no children, but child does - and child isNil then you can decide it is ok to move children to t via - t.children = child.children; i.e., without copying the array. Just - make sure that this is consistent with how the user will build - ASTs. Do nothing if t or child is null. - """ - - raise NotImplementedError - - - def becomeRoot(self, newRoot, oldRoot): - """ - If oldRoot is a nil root, just copy or move the children to newRoot. - If not a nil root, make oldRoot a child of newRoot. - - old=^(nil a b c), new=r yields ^(r a b c) - old=^(a b c), new=r yields ^(r ^(a b c)) - - If newRoot is a nil-rooted single child tree, use the single - child as the new root node. - - old=^(nil a b c), new=^(nil r) yields ^(r a b c) - old=^(a b c), new=^(nil r) yields ^(r ^(a b c)) - - If oldRoot was null, it's ok, just return newRoot (even if isNil). - - old=null, new=r yields r - old=null, new=^(nil r) yields ^(nil r) - - Return newRoot. Throw an exception if newRoot is not a - simple node or nil root with a single child node--it must be a root - node. If newRoot is ^(nil x) return x as newRoot. - - Be advised that it's ok for newRoot to point at oldRoot's - children; i.e., you don't have to copy the list. We are - constructing these nodes so we should have this control for - efficiency. - """ - - raise NotImplementedError - - - def rulePostProcessing(self, root): - """ - Given the root of the subtree created for this rule, post process - it to do any simplifications or whatever you want.
A required - behavior is to convert ^(nil singleSubtree) to singleSubtree - as the setting of start/stop indexes relies on a single non-nil root - for non-flat trees. - - Flat trees such as for lists like "idlist : ID+ ;" are left alone - unless there is only one ID. For a list, the start/stop indexes - are set in the nil node. - - This method is executed after all rule tree construction and right - before setTokenBoundaries(). - """ - - raise NotImplementedError - - - def getUniqueID(self, node): - """For identifying trees. - - How to identify nodes so we can say "add node to a prior node"? - Even becomeRoot is an issue. Use System.identityHashCode(node) - usually. - """ - - raise NotImplementedError - - - # R e w r i t e R u l e s - - def createFromToken(self, tokenType, fromToken, text=None): - """ - Create a new node derived from a token, with a new token type and - (optionally) new text. - - This is invoked from an imaginary node ref on right side of a - rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"]. - - This should invoke createToken(Token). - """ - - raise NotImplementedError - - - def createFromType(self, tokenType, text): - """Create a new node derived from a token, with a new token type. - - This is invoked from an imaginary node ref on right side of a - rewrite rule as IMAG["IMAG"]. - - This should invoke createToken(int,String). - """ - - raise NotImplementedError - - - # C o n t e n t - - def getType(self, t): - """For tree parsing, I need to know the token type of a node""" - - raise NotImplementedError - - - def setType(self, t, type): - """Node constructors can set the type of a node""" - - raise NotImplementedError - - - def getText(self, t): - raise NotImplementedError - - def setText(self, t, text): - """Node constructors can set the text of a node""" - - raise NotImplementedError - - - def getToken(self, t): - """Return the token object from which this node was created. - - Currently used only for printing an error message. - The error display routine in BaseRecognizer needs to - display where in the input the error occurred. If your - tree implementation does not store information that can - lead you to the token, you can create a token filled with - the appropriate information and pass that back. See - BaseRecognizer.getErrorMessage(). - """ - - raise NotImplementedError - - - def setTokenBoundaries(self, t, startToken, stopToken): - """ - Where are the bounds in the input token stream for this node and - all children? Each rule that creates AST nodes will call this - method right before returning. Flat trees (i.e., lists) will - still usually have a nil root node just to hold the children list. - That node would contain the start/stop indexes then. - """ - - raise NotImplementedError - - - def getTokenStartIndex(self, t): - """ - Get the token start index for this subtree; return -1 if no such index - """ - - raise NotImplementedError - - - def getTokenStopIndex(self, t): - """ - Get the token stop index for this subtree; return -1 if no such index - """ - - raise NotImplementedError - - - # N a v i g a t i o n / T r e e P a r s i n g - - def getChild(self, t, i): - """Get a child 0..n-1 node""" - - raise NotImplementedError - - - def setChild(self, t, i, child): - """Set ith child (0..n-1) of t to child; child must be a non-null and non-nil node""" - - raise NotImplementedError - - - def deleteChild(self, t, i): - """Remove ith child and shift children down from right.""" - - raise NotImplementedError - - - def getChildCount(self, t): - """How many children?
If 0, then this is a leaf node""" - - raise NotImplementedError - - - def getParent(self, t): - """ - Who is the parent node of this node; if null, implies node is root. - If your node type doesn't handle this, it's ok but the tree rewrites - in tree parsers need this functionality. - """ - - raise NotImplementedError - - - def setParent(self, t, parent): - """ - Who is the parent node of this node; if null, implies node is root. - If your node type doesn't handle this, it's ok but the tree rewrites - in tree parsers need this functionality. - """ - - raise NotImplementedError - - - def getChildIndex(self, t): - """ - What index is this node in the child list? Range: 0..n-1 - If your node type doesn't handle this, it's ok but the tree rewrites - in tree parsers need this functionality. - """ - - raise NotImplementedError - - - def setChildIndex(self, t, index): - """ - What index is this node in the child list? Range: 0..n-1 - If your node type doesn't handle this, it's ok but the tree rewrites - in tree parsers need this functionality. - """ - - raise NotImplementedError - - - def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): - """ - Replace from start to stop child index of parent with t, which might - be a list. Number of children may be different - after this call. - - If parent is null, don't do anything; must be at root of overall tree. - Can't replace whatever points to the parent externally. Do nothing. - """ - - raise NotImplementedError - - - # Misc - - def create(self, *args): - """ - Deprecated, use createWithPayload, createFromToken or createFromType. - - This method only exists to mimic the Java interface of TreeAdaptor. - - """ - - if len(args) == 1 and isinstance(args[0], Token): - # Object create(Token payload); -## warnings.warn( -## "Using create() is deprecated, use createWithPayload()", -## DeprecationWarning, -## stacklevel=2 -## ) - return self.createWithPayload(args[0]) - - if (len(args) == 2 - and isinstance(args[0], int) - and isinstance(args[1], Token)): - # Object create(int tokenType, Token fromToken); -## warnings.warn( -## "Using create() is deprecated, use createFromToken()", -## DeprecationWarning, -## stacklevel=2 -## ) - return self.createFromToken(args[0], args[1]) - - if (len(args) == 3 - and isinstance(args[0], int) - and isinstance(args[1], Token) - and isinstance(args[2], str)): - # Object create(int tokenType, Token fromToken, String text); -## warnings.warn( -## "Using create() is deprecated, use createFromToken()", -## DeprecationWarning, -## stacklevel=2 -## ) - return self.createFromToken(args[0], args[1], args[2]) - - if (len(args) == 2 - and isinstance(args[0], int) - and isinstance(args[1], str)): - # Object create(int tokenType, String text); -## warnings.warn( -## "Using create() is deprecated, use createFromType()", -## DeprecationWarning, -## stacklevel=2 -## ) - return self.createFromType(args[0], args[1]) - - raise TypeError( - "No create method with this signature found: {}" - .format(', '.join(type(v).__name__ for v in args))) - - -############################################################################ -# -# base implementation of Tree and TreeAdaptor -# -# Tree -# \- BaseTree -# -# TreeAdaptor -# \- BaseTreeAdaptor -# -############################################################################ - - -class BaseTree(Tree): - """ - @brief A generic tree implementation with no payload. - - You must subclass to - actually have any user data. 
ANTLR v3 uses a list of children approach - instead of the child-sibling approach in v2. A flat tree (a list) is - an empty node whose children represent the list. An empty, but - non-null node is called "nil". - """ - - # BaseTree is abstract, no need to complain about not implemented abstract - # methods - # pylint: disable-msg=W0223 - - def __init__(self, node=None): - """ - Creating a new node from an existing node does nothing for BaseTree - as there are no fields other than the children list, which cannot - be copied as the children are not considered part of this node. - """ - - super().__init__() - self.children = [] - self.parent = None - self.childIndex = 0 - - - def getChild(self, i): - try: - return self.children[i] - except IndexError: - return None - - - def getChildren(self): - """@brief Get the children internal List - - Note that if you directly mess with - the list, do so at your own risk. - """ - - # FIXME: mark as deprecated - return self.children - - - def getFirstChildWithType(self, treeType): - for child in self.children: - if child.getType() == treeType: - return child - - return None - - - def getChildCount(self): - return len(self.children) - - - def addChild(self, childTree): - """Add childTree as a child of this node. - - Warning: if childTree is nil, its children are spliced into this - node's children list directly, without copying them first. - """ - - # this implementation is much simpler and probably less efficient - # than the mumbo-jumbo that Ter did for the Java runtime. - - if childTree is None: - return - - if childTree.isNil(): - # t is an empty node possibly with children - - if self.children is childTree.children: - raise ValueError("attempt to add child list to itself") - - # fix parent pointer and childIndex for new children - for idx, child in enumerate(childTree.children): - child.parent = self - child.childIndex = len(self.children) + idx - - self.children += childTree.children - - else: - # child is not nil (don't care about children) - self.children.append(childTree) - childTree.parent = self - childTree.childIndex = len(self.children) - 1 - - - def addChildren(self, children): - """Add all elements of the children list as children of this node""" - - self.children += children - - - def setChild(self, i, t): - if t is None: - return - - if t.isNil(): - raise ValueError("Can't set single child to a list") - - self.children[i] = t - t.parent = self - t.childIndex = i - - - def deleteChild(self, i): - killed = self.children[i] - - del self.children[i] - - # walk rest and decrement their child indexes - for idx, child in enumerate(self.children[i:]): - child.childIndex = i + idx - - return killed - - - def replaceChildren(self, startChildIndex, stopChildIndex, newTree): - """ - Delete children from start to stop and replace with t even if t is - a list (nil-root tree). num of children can increase or decrease. - For huge child lists, inserting children can force walking rest of - children to set their childindex; could be slow.
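The cardinality rules just described are easiest to see in a toy example; a sketch against the runtime being removed here, with arbitrary token types:

```python
from antlr3.tree import CommonTreeAdaptor

adaptor = CommonTreeAdaptor()
root = adaptor.createFromType(4, 'root')
for text in ('a', 'b', 'c'):
    root.addChild(adaptor.createFromType(5, text))

# replace the single child 'b' with the two-element nil-rooted list 'x y'
repl = adaptor.nil()
repl.addChild(adaptor.createFromType(5, 'x'))
repl.addChild(adaptor.createFromType(5, 'y'))
root.replaceChildren(1, 1, repl)

print(root.toStringTree())   # (root a x y c)
```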
- """ - - if (startChildIndex >= len(self.children) - or stopChildIndex >= len(self.children)): - raise IndexError("indexes invalid") - - replacingHowMany = stopChildIndex - startChildIndex + 1 - - # normalize to a list of children to add: newChildren - if newTree.isNil(): - newChildren = newTree.children - - else: - newChildren = [newTree] - - replacingWithHowMany = len(newChildren) - delta = replacingHowMany - replacingWithHowMany - - - if delta == 0: - # if same number of nodes, do direct replace - for idx, child in enumerate(newChildren): - self.children[idx + startChildIndex] = child - child.parent = self - child.childIndex = idx + startChildIndex - - else: - # length of children changes... - - # ...delete replaced segment... - del self.children[startChildIndex:stopChildIndex+1] - - # ...insert new segment... - self.children[startChildIndex:startChildIndex] = newChildren - - # ...and fix indeces - self.freshenParentAndChildIndexes(startChildIndex) - - - def isNil(self): - return False - - - def freshenParentAndChildIndexes(self, offset=0): - for idx, child in enumerate(self.children[offset:]): - child.childIndex = idx + offset - child.parent = self - - - def sanityCheckParentAndChildIndexes(self, parent=None, i=-1): - if parent != self.parent: - raise ValueError( - "parents don't match; expected {!r} found {!r}" - .format(parent, self.parent)) - - if i != self.childIndex: - raise ValueError( - "child indexes don't match; expected {} found {}" - .format(i, self.childIndex)) - - for idx, child in enumerate(self.children): - child.sanityCheckParentAndChildIndexes(self, idx) - - - def getChildIndex(self): - """BaseTree doesn't track child indexes.""" - - return 0 - - - def setChildIndex(self, index): - """BaseTree doesn't track child indexes.""" - - pass - - - def getParent(self): - """BaseTree doesn't track parent pointers.""" - - return None - - def setParent(self, t): - """BaseTree doesn't track parent pointers.""" - - pass - - - def hasAncestor(self, ttype): - """Walk upwards looking for ancestor with this token type.""" - return self.getAncestor(ttype) is not None - - def getAncestor(self, ttype): - """Walk upwards and get first ancestor with this token type.""" - t = self.getParent() - while t is not None: - if t.getType() == ttype: - return t - t = t.getParent() - - return None - - def getAncestors(self): - """Return a list of all ancestors of this node. - - The first node of list is the root and the last is the parent of - this node. - """ - if self.getParent() is None: - return None - - ancestors = [] - t = self.getParent() - while t is not None: - ancestors.insert(0, t) # insert at start - t = t.getParent() - - return ancestors - - - def toStringTree(self): - """Print out a whole tree not just a node""" - - if len(self.children) == 0: - return self.toString() - - buf = [] - if not self.isNil(): - buf.append('(') - buf.append(self.toString()) - buf.append(' ') - - for i, child in enumerate(self.children): - if i > 0: - buf.append(' ') - buf.append(child.toStringTree()) - - if not self.isNil(): - buf.append(')') - - return ''.join(buf) - - - def getLine(self): - return 0 - - - def getCharPositionInLine(self): - return 0 - - - def toString(self): - """Override to say how a node (not a tree) should look as text""" - - raise NotImplementedError - - - -class BaseTreeAdaptor(TreeAdaptor): - """ - @brief A TreeAdaptor that works with any Tree implementation. 
- """ - - # BaseTreeAdaptor is abstract, no need to complain about not implemented - # abstract methods - # pylint: disable-msg=W0223 - - def nil(self): - return self.createWithPayload(None) - - - def errorNode(self, input, start, stop, exc): - """ - create tree node that holds the start and stop tokens associated - with an error. - - If you specify your own kind of tree nodes, you will likely have to - override this method. CommonTree returns Token.INVALID_TOKEN_TYPE - if no token payload but you might have to set token type for diff - node type. - - You don't have to subclass CommonErrorNode; you will likely need to - subclass your own tree node class to avoid class cast exception. - """ - - return CommonErrorNode(input, start, stop, exc) - - - def isNil(self, tree): - return tree.isNil() - - - def dupTree(self, t, parent=None): - """ - This is generic in the sense that it will work with any kind of - tree (not just Tree interface). It invokes the adaptor routines - not the tree node routines to do the construction. - """ - - if t is None: - return None - - newTree = self.dupNode(t) - - # ensure new subtree root has parent/child index set - - # same index in new tree - self.setChildIndex(newTree, self.getChildIndex(t)) - - self.setParent(newTree, parent) - - for i in range(self.getChildCount(t)): - child = self.getChild(t, i) - newSubTree = self.dupTree(child, t) - self.addChild(newTree, newSubTree) - - return newTree - - - def addChild(self, tree, child): - """ - Add a child to the tree t. If child is a flat tree (a list), make all - in list children of t. Warning: if t has no children, but child does - and child isNil then you can decide it is ok to move children to t via - t.children = child.children; i.e., without copying the array. Just - make sure that this is consistent with have the user will build - ASTs. - """ - - #if isinstance(child, Token): - # child = self.createWithPayload(child) - - if tree is not None and child is not None: - tree.addChild(child) - - - def becomeRoot(self, newRoot, oldRoot): - """ - If oldRoot is a nil root, just copy or move the children to newRoot. - If not a nil root, make oldRoot a child of newRoot. - - old=^(nil a b c), new=r yields ^(r a b c) - old=^(a b c), new=r yields ^(r ^(a b c)) - - If newRoot is a nil-rooted single child tree, use the single - child as the new root node. - - old=^(nil a b c), new=^(nil r) yields ^(r a b c) - old=^(a b c), new=^(nil r) yields ^(r ^(a b c)) - - If oldRoot was null, it's ok, just return newRoot (even if isNil). - - old=null, new=r yields r - old=null, new=^(nil r) yields ^(nil r) - - Return newRoot. Throw an exception if newRoot is not a - simple node or nil root with a single child node--it must be a root - node. If newRoot is ^(nil x) return x as newRoot. - - Be advised that it's ok for newRoot to point at oldRoot's - children; i.e., you don't have to copy the list. We are - constructing these nodes so we should have this control for - efficiency. - """ - - if isinstance(newRoot, Token): - newRoot = self.create(newRoot) - - if oldRoot is None: - return newRoot - - if not isinstance(newRoot, CommonTree): - newRoot = self.createWithPayload(newRoot) - - # handle ^(nil real-node) - if newRoot.isNil(): - nc = newRoot.getChildCount() - if nc == 1: - newRoot = newRoot.getChild(0) - - elif nc > 1: - # TODO: make tree run time exceptions hierarchy - raise RuntimeError("more than one node as root") - - # add oldRoot to newRoot; addChild takes care of case where oldRoot - # is a flat list (i.e., nil-rooted tree). 
All children of oldRoot - # are added to newRoot. - newRoot.addChild(oldRoot) - return newRoot - - - def rulePostProcessing(self, root): - """Transform ^(nil x) to x and nil to null""" - - if root is not None and root.isNil(): - if root.getChildCount() == 0: - root = None - - elif root.getChildCount() == 1: - root = root.getChild(0) - # whoever invokes rule will set parent and child index - root.setParent(None) - root.setChildIndex(-1) - - return root - - - def createFromToken(self, tokenType, fromToken, text=None): - if fromToken is None: - return self.createFromType(tokenType, text) - - assert isinstance(tokenType, int), type(tokenType).__name__ - assert isinstance(fromToken, Token), type(fromToken).__name__ - assert text is None or isinstance(text, str), type(text).__name__ - - fromToken = self.createToken(fromToken) - fromToken.type = tokenType - if text is not None: - fromToken.text = text - t = self.createWithPayload(fromToken) - return t - - - def createFromType(self, tokenType, text): - assert isinstance(tokenType, int), type(tokenType).__name__ - assert isinstance(text, str) or text is None, type(text).__name__ - - fromToken = self.createToken(tokenType=tokenType, text=text) - t = self.createWithPayload(fromToken) - return t - - - def getType(self, t): - return t.getType() - - - def setType(self, t, type): - raise RuntimeError("don't know enough about Tree node") - - - def getText(self, t): - return t.getText() - - - def setText(self, t, text): - raise RuntimeError("don't know enough about Tree node") - - - def getChild(self, t, i): - return t.getChild(i) - - - def setChild(self, t, i, child): - t.setChild(i, child) - - - def deleteChild(self, t, i): - return t.deleteChild(i) - - - def getChildCount(self, t): - return t.getChildCount() - - - def getUniqueID(self, node): - return hash(node) - - - def createToken(self, fromToken=None, tokenType=None, text=None): - """ - Tell me how to create a token for use with imaginary token nodes. - For example, there is probably no input symbol associated with imaginary - token DECL, but you need to create it as a payload or whatever for - the DECL node as in ^(DECL type ID). - - If you care what the token payload objects' type is, you should - override this method and any other createToken variant. - """ - - raise NotImplementedError - - -############################################################################ -# -# common tree implementation -# -# Tree -# \- BaseTree -# \- CommonTree -# \- CommonErrorNode -# -# TreeAdaptor -# \- BaseTreeAdaptor -# \- CommonTreeAdaptor -# -############################################################################ - - -class CommonTree(BaseTree): - """@brief A tree node that is wrapper for a Token object. - - After 3.0 release - while building tree rewrite stuff, it became clear that computing - parent and child index is very difficult and cumbersome. Better to - spend the space in every tree node. If you don't want these extra - fields, it's easy to cut them out in your own BaseTree subclass. - - """ - - def __init__(self, payload): - BaseTree.__init__(self) - - # What token indexes bracket all tokens associated with this node - # and below? - self.startIndex = -1 - self.stopIndex = -1 - - # Who is the parent node of this node; if null, implies node is root - self.parent = None - - # What index is this node in the child list? 
Range: 0..n-1 - self.childIndex = -1 - - # A single token is the payload - if payload is None: - self.token = None - - elif isinstance(payload, CommonTree): - self.token = payload.token - self.startIndex = payload.startIndex - self.stopIndex = payload.stopIndex - - elif isinstance(payload, Token): - self.token = payload - - else: - raise TypeError(type(payload).__name__) - - - - def getToken(self): - return self.token - - - def dupNode(self): - return CommonTree(self) - - - def isNil(self): - return self.token is None - - - def getType(self): - if self.token is None: - return INVALID_TOKEN_TYPE - - return self.token.type - - type = property(getType) - - - def getText(self): - if self.token is None: - return None - - return self.token.text - - text = property(getText) - - - def getLine(self): - if self.token is None or self.token.line == 0: - if self.getChildCount(): - return self.getChild(0).getLine() - else: - return 0 - - return self.token.line - - line = property(getLine) - - - def getCharPositionInLine(self): - if self.token is None or self.token.charPositionInLine == -1: - if self.getChildCount(): - return self.getChild(0).getCharPositionInLine() - else: - return 0 - - else: - return self.token.charPositionInLine - - charPositionInLine = property(getCharPositionInLine) - - - def getTokenStartIndex(self): - if self.startIndex == -1 and self.token: - return self.token.index - - return self.startIndex - - def setTokenStartIndex(self, index): - self.startIndex = index - - tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex) - - - def getTokenStopIndex(self): - if self.stopIndex == -1 and self.token: - return self.token.index - - return self.stopIndex - - def setTokenStopIndex(self, index): - self.stopIndex = index - - tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex) - - - def setUnknownTokenBoundaries(self): - """For every node in this subtree, make sure its start/stop tokens - are set. Walk depth first, visit bottom up. Only updates nodes - with at least one token index < 0.
- """ - - if self.children is None: - if self.startIndex < 0 or self.stopIndex < 0: - self.startIndex = self.stopIndex = self.token.index - - return - - for child in self.children: - child.setUnknownTokenBoundaries() - - if self.startIndex >= 0 and self.stopIndex >= 0: - # already set - return - - if self.children: - firstChild = self.children[0] - lastChild = self.children[-1] - self.startIndex = firstChild.getTokenStartIndex() - self.stopIndex = lastChild.getTokenStopIndex() - - - def getChildIndex(self): - #FIXME: mark as deprecated - return self.childIndex - - - def setChildIndex(self, idx): - #FIXME: mark as deprecated - self.childIndex = idx - - - def getParent(self): - #FIXME: mark as deprecated - return self.parent - - - def setParent(self, t): - #FIXME: mark as deprecated - self.parent = t - - - def toString(self): - if self.isNil(): - return "nil" - - if self.getType() == INVALID_TOKEN_TYPE: - return "" - - return self.token.text - - __str__ = toString - - - - def toStringTree(self): - if not self.children: - return self.toString() - - ret = '' - if not self.isNil(): - ret += '({!s} '.format(self) - - ret += ' '.join([child.toStringTree() for child in self.children]) - - if not self.isNil(): - ret += ')' - - return ret - - -INVALID_NODE = CommonTree(INVALID_TOKEN) - - -class CommonErrorNode(CommonTree): - """A node representing erroneous token range in token stream""" - - def __init__(self, input, start, stop, exc): - CommonTree.__init__(self, None) - - if (stop is None or (stop.index < start.index and stop.type != EOF)): - # sometimes resync does not consume a token (when LT(1) is - # in follow set. So, stop will be 1 to left to start. adjust. - # Also handle case where start is the first token and no token - # is consumed during recovery; LT(-1) will return null. - stop = start - - self.input = input - self.start = start - self.stop = stop - self.trappedException = exc - - - def isNil(self): - return False - - - def getType(self): - return INVALID_TOKEN_TYPE - - - def getText(self): - if isinstance(self.start, Token): - i = self.start.index - j = self.stop.index - if self.stop.type == EOF: - j = self.input.size() - - badText = self.input.toString(i, j) - - elif isinstance(self.start, Tree): - badText = self.input.toString(self.start, self.stop) - - else: - # people should subclass if they alter the tree type so this - # next one is for sure correct. - badText = "" - - return badText - - - def toString(self): - if isinstance(self.trappedException, MissingTokenException): - return ("") - - elif isinstance(self.trappedException, UnwantedTokenException): - return ("") - - elif isinstance(self.trappedException, MismatchedTokenException): - return ("") - - elif isinstance(self.trappedException, NoViableAltException): - return ("") - - return "" - - __str__ = toString - - -class CommonTreeAdaptor(BaseTreeAdaptor): - """ - @brief A TreeAdaptor that works with any Tree implementation. - - It provides - really just factory methods; all the work is done by BaseTreeAdaptor. - If you would like to have different tokens created than ClassicToken - objects, you need to override this and then set the parser tree adaptor to - use your subclass. - - To get your parser to build nodes of a different type, override - create(Token), errorNode(), and to be safe, YourTreeClass.dupNode(). - dupNode is called to duplicate nodes during rewrite operations. - """ - - def dupNode(self, treeNode): - """ - Duplicate a node. This is part of the factory; - override if you want another kind of node to be built. 
- - I could use reflection to prevent having to override this - but reflection is slow. - """ - - if treeNode is None: - return None - - return treeNode.dupNode() - - - def createWithPayload(self, payload): - return CommonTree(payload) - - - def createToken(self, fromToken=None, tokenType=None, text=None): - """ - Tell me how to create a token for use with imaginary token nodes. - For example, there is probably no input symbol associated with imaginary - token DECL, but you need to create it as a payload or whatever for - the DECL node as in ^(DECL type ID). - - If you care what the token payload objects' type is, you should - override this method and any other createToken variant. - """ - - if fromToken is not None: - return CommonToken(oldToken=fromToken) - - return CommonToken(type=tokenType, text=text) - - - def setTokenBoundaries(self, t, startToken, stopToken): - """ - Track start/stop token for subtree root created for a rule. - Only works with Tree nodes. For rules that match nothing, - seems like this will yield start=i and stop=i-1 in a nil node. - Might be useful info so I'll not force to be i..i. - """ - - if t is None: - return - - start = 0 - stop = 0 - - if startToken is not None: - start = startToken.index - - if stopToken is not None: - stop = stopToken.index - - t.setTokenStartIndex(start) - t.setTokenStopIndex(stop) - - - def getTokenStartIndex(self, t): - if t is None: - return -1 - return t.getTokenStartIndex() - - - def getTokenStopIndex(self, t): - if t is None: - return -1 - return t.getTokenStopIndex() - - - def getText(self, t): - if t is None: - return None - return t.text - - - def getType(self, t): - if t is None: - return INVALID_TOKEN_TYPE - - return t.type - - - def getToken(self, t): - """ - What is the Token associated with this node? If - you are not using CommonTree, then you must - override this in your own adaptor. - """ - - if isinstance(t, CommonTree): - return t.getToken() - - return None # no idea what to do - - - def getChild(self, t, i): - if t is None: - return None - return t.getChild(i) - - - def getChildCount(self, t): - if t is None: - return 0 - return t.getChildCount() - - - def getParent(self, t): - return t.getParent() - - - def setParent(self, t, parent): - t.setParent(parent) - - - def getChildIndex(self, t): - if t is None: - return 0 - return t.getChildIndex() - - - def setChildIndex(self, t, index): - t.setChildIndex(index) - - - def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): - if parent is not None: - parent.replaceChildren(startChildIndex, stopChildIndex, t) - - -############################################################################ - # - # streams - # - # TreeNodeStream - # \- CommonTreeNodeStream - # -############################################################################ - - - -class TreeNodeStream(IntStream): - """@brief A stream of tree nodes - - It accesses nodes from a tree of some kind. - """ - - # TreeNodeStream is abstract, no need to complain about not implemented - # abstract methods - # pylint: disable-msg=W0223 - - def get(self, i): - """Get a tree node at an absolute index i; 0..n-1. - If you don't want to buffer up nodes, then this method makes no - sense for you. - """ - - raise NotImplementedError - - - def LT(self, k): - """ - Get tree node at current input pointer + k ahead where k=1 is next node. - k<0 indicates nodes in the past.
So LT(-1) is the previous node, but - implementations are not required to provide results for k < -1. - LT(0) is undefined. For k>=n, return null. - Return null for LT(0) and any index that results in an absolute address - that is negative. - - This is analogous to the LT() method of the TokenStream, but this - returns a tree node instead of a token. Makes code gen identical - for both parser and tree grammars. :) - """ - - raise NotImplementedError - - - def getTreeSource(self): - """ - Where is this stream pulling nodes from? This is not the name, but - the object that provides node objects. - """ - - raise NotImplementedError - - - def getTokenStream(self): - """ - If the tree associated with this stream was created from a TokenStream, - you can specify it here. Used to do rule $text attribute in tree - parser. Optional unless you use tree parser rule text attribute - or output=template and rewrite=true options. - """ - - raise NotImplementedError - - - def getTreeAdaptor(self): - """ - What adaptor can tell me how to interpret/navigate nodes and - trees. E.g., get text of a node. - """ - - raise NotImplementedError - - - def setUniqueNavigationNodes(self, uniqueNavigationNodes): - """ - As we flatten the tree, we use UP, DOWN nodes to represent - the tree structure. When debugging we need unique nodes - so we have to instantiate new ones. When doing normal tree - parsing, it's slow and a waste of memory to create unique - navigation nodes. Default should be false. - """ - - raise NotImplementedError - - - def reset(self): - """ - Reset the tree node stream in such a way that it acts like - a freshly constructed stream. - """ - - raise NotImplementedError - - - def toString(self, start, stop): - """ - Return the text of all nodes from start to stop, inclusive. - If the stream does not buffer all the nodes then it can still - walk recursively from start until stop. You can always return - null or "" too, but users should not access $ruleLabel.text in - an action of course in that case. - """ - - raise NotImplementedError - - - # REWRITING TREES (used by tree parser) - def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): - """ - Replace from start to stop child index of parent with t, which might - be a list. Number of children may be different - after this call. The stream is notified because it is walking the - tree and might need to know you are monkeying with the underlying - tree. Also, it might be able to modify the node stream to avoid - restreaming for future phases. - - If parent is null, don't do anything; must be at root of overall tree. - Can't replace whatever points to the parent externally. Do nothing. - """ - - raise NotImplementedError - - -class CommonTreeNodeStream(TreeNodeStream): - """@brief A buffered stream of tree nodes. - - Nodes can be from a tree of ANY kind. - - This node stream sucks all nodes out of the tree specified in - the constructor during construction and makes pointers into - the tree using an array of Object pointers. The stream necessarily - includes pointers to DOWN and UP and EOF nodes. - - This stream knows how to mark/release for backtracking. - - This stream is most suitable for tree interpreters that need to - jump around a lot or for tree parsers requiring speed (at cost of memory). - There is some duplicated functionality here with UnBufferedTreeNodeStream - but just in bookkeeping, not tree walking etc...
- - @see UnBufferedTreeNodeStream - """ - - def __init__(self, *args): - TreeNodeStream.__init__(self) - - if len(args) == 1: - adaptor = CommonTreeAdaptor() - tree = args[0] - - nodes = None - down = None - up = None - eof = None - - elif len(args) == 2: - adaptor = args[0] - tree = args[1] - - nodes = None - down = None - up = None - eof = None - - elif len(args) == 3: - parent = args[0] - start = args[1] - stop = args[2] - - adaptor = parent.adaptor - tree = parent.root - - nodes = parent.nodes[start:stop] - down = parent.down - up = parent.up - eof = parent.eof - - else: - raise TypeError("Invalid arguments") - - # all these navigation nodes are shared and hence they - # cannot contain any line/column info - if down is not None: - self.down = down - else: - self.down = adaptor.createFromType(DOWN, "DOWN") - - if up is not None: - self.up = up - else: - self.up = adaptor.createFromType(UP, "UP") - - if eof is not None: - self.eof = eof - else: - self.eof = adaptor.createFromType(EOF, "EOF") - - # The complete mapping from stream index to tree node. - # This buffer includes pointers to DOWN, UP, and EOF nodes. - # It is built upon ctor invocation. The elements are type - # Object as we don't know what the trees look like. - - # Load upon first need of the buffer so we can set token types - # of interest for reverseIndexing. Slows us down a wee bit to - # do all of the if p==-1 testing everywhere though. - if nodes is not None: - self.nodes = nodes - else: - self.nodes = [] - - # Pull nodes from which tree? - self.root = tree - - # If this tree (root) was created from a token stream, track it. - self.tokens = None - - # What tree adaptor was used to build these trees - self.adaptor = adaptor - - # Reuse same DOWN, UP navigation nodes unless this is true - self.uniqueNavigationNodes = False - - # The index into the nodes list of the current node (next node - # to consume). If -1, nodes array not filled yet. - self.p = -1 - - # Track the last mark() call result value for use in rewind(). - self.lastMarker = None - - # Stack of indexes used for push/pop calls - self.calls = [] - - - def fillBuffer(self): - """Walk tree with depth-first-search and fill nodes buffer. - Don't do DOWN, UP nodes if it's a list (t.isNil()). - """ - - self._fillBuffer(self.root) - self.p = 0 # buffer of nodes initialized now - - - def _fillBuffer(self, t): - nil = self.adaptor.isNil(t) - - if not nil: - self.nodes.append(t) # add this node - - # add DOWN node if t has children - n = self.adaptor.getChildCount(t) - if not nil and n > 0: - self.addNavigationNode(DOWN) - - # and now add all its children - for c in range(n): - self._fillBuffer(self.adaptor.getChild(t, c)) - - # add UP node if t has children - if not nil and n > 0: - self.addNavigationNode(UP) - - - def getNodeIndex(self, node): - """What is the stream index for node? 0..n-1 - Return -1 if node not found. - """ - - if self.p == -1: - self.fillBuffer() - - for i, t in enumerate(self.nodes): - if t == node: - return i - - return -1 - - - def addNavigationNode(self, ttype): - """ - As we flatten the tree, we use UP, DOWN nodes to represent - the tree structure. When debugging we need unique nodes - so instantiate new ones when uniqueNavigationNodes is true.
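A hedged illustration of the flattened form (assuming the deleted runtime and invented token types; DOWN and UP are the constants 2 and 3 in antlr3.constants):

```python
from antlr3.tokens import CommonToken
from antlr3.tree import CommonTree, CommonTreeNodeStream

PLUS, INT = 5, 6   # arbitrary token types for illustration
root = CommonTree(CommonToken(type=PLUS, text='+'))
root.addChild(CommonTree(CommonToken(type=INT, text='1')))
root.addChild(CommonTree(CommonToken(type=INT, text='2')))

nodes = CommonTreeNodeStream(root)
# __str__ fills the buffer and prints token types in stream order:
# root, DOWN, child, child, UP  ->  "5 2 6 6 3"
print(str(nodes))
```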
- """ - - navNode = None - - if ttype == DOWN: - if self.hasUniqueNavigationNodes(): - navNode = self.adaptor.createFromType(DOWN, "DOWN") - - else: - navNode = self.down - - else: - if self.hasUniqueNavigationNodes(): - navNode = self.adaptor.createFromType(UP, "UP") - - else: - navNode = self.up - - self.nodes.append(navNode) - - - def get(self, i): - if self.p == -1: - self.fillBuffer() - - return self.nodes[i] - - - def LT(self, k): - if self.p == -1: - self.fillBuffer() - - if k == 0: - return None - - if k < 0: - return self.LB(-k) - - if self.p + k - 1 >= len(self.nodes): - return self.eof - - return self.nodes[self.p + k - 1] - - - def getCurrentSymbol(self): - return self.LT(1) - - - def LB(self, k): - """Look backwards k nodes""" - - if k == 0: - return None - - if self.p - k < 0: - return None - - return self.nodes[self.p - k] - - - def isEOF(self, obj): - return self.adaptor.getType(obj) == EOF - - - def getTreeSource(self): - return self.root - - - def getSourceName(self): - return self.getTokenStream().getSourceName() - - - def getTokenStream(self): - return self.tokens - - - def setTokenStream(self, tokens): - self.tokens = tokens - - - def getTreeAdaptor(self): - return self.adaptor - - - def hasUniqueNavigationNodes(self): - return self.uniqueNavigationNodes - - - def setUniqueNavigationNodes(self, uniqueNavigationNodes): - self.uniqueNavigationNodes = uniqueNavigationNodes - - - def consume(self): - if self.p == -1: - self.fillBuffer() - - self.p += 1 - - - def LA(self, i): - return self.adaptor.getType(self.LT(i)) - - - def mark(self): - if self.p == -1: - self.fillBuffer() - - - self.lastMarker = self.index() - return self.lastMarker - - - def release(self, marker=None): - # no resources to release - - pass - - - def index(self): - return self.p - - - def rewind(self, marker=None): - if marker is None: - marker = self.lastMarker - - self.seek(marker) - - - def seek(self, index): - if self.p == -1: - self.fillBuffer() - - self.p = index - - - def push(self, index): - """ - Make stream jump to a new location, saving old location. - Switch back with pop(). - """ - - self.calls.append(self.p) # save current index - self.seek(index) - - - def pop(self): - """ - Seek back to previous index saved during last push() call. - Return top of stack (return index). 
- """ - - ret = self.calls.pop(-1) - self.seek(ret) - return ret - - - def reset(self): - self.p = 0 - self.lastMarker = 0 - self.calls = [] - - - def size(self): - if self.p == -1: - self.fillBuffer() - - return len(self.nodes) - - - # TREE REWRITE INTERFACE - - def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): - if parent is not None: - self.adaptor.replaceChildren( - parent, startChildIndex, stopChildIndex, t - ) - - - def __str__(self): - """Used for testing, just return the token type stream""" - - if self.p == -1: - self.fillBuffer() - - return ' '.join([str(self.adaptor.getType(node)) - for node in self.nodes - ]) - - - def toString(self, start, stop): - if start is None or stop is None: - return None - - if self.p == -1: - self.fillBuffer() - - #System.out.println("stop: "+stop); - #if ( start instanceof CommonTree ) - # System.out.print("toString: "+((CommonTree)start).getToken()+", "); - #else - # System.out.println(start); - #if ( stop instanceof CommonTree ) - # System.out.println(((CommonTree)stop).getToken()); - #else - # System.out.println(stop); - - # if we have the token stream, use that to dump text in order - if self.tokens is not None: - beginTokenIndex = self.adaptor.getTokenStartIndex(start) - endTokenIndex = self.adaptor.getTokenStopIndex(stop) - - # if it's a tree, use start/stop index from start node - # else use token range from start/stop nodes - if self.adaptor.getType(stop) == UP: - endTokenIndex = self.adaptor.getTokenStopIndex(start) - - elif self.adaptor.getType(stop) == EOF: - endTokenIndex = self.size() -2 # don't use EOF - - return self.tokens.toString(beginTokenIndex, endTokenIndex) - - # walk nodes looking for start - i, t = 0, None - for i, t in enumerate(self.nodes): - if t == start: - break - - # now walk until we see stop, filling string buffer with text - buf = [] - t = self.nodes[i] - while t != stop: - text = self.adaptor.getText(t) - if text is None: - text = " " + self.adaptor.getType(t) - - buf.append(text) - i += 1 - t = self.nodes[i] - - # include stop node too - text = self.adaptor.getText(stop) - if text is None: - text = " " +self.adaptor.getType(stop) - - buf.append(text) - - return ''.join(buf) - - - ## iterator interface - def __iter__(self): - if self.p == -1: - self.fillBuffer() - - for node in self.nodes: - yield node - - -############################################################################# -# -# tree parser -# -############################################################################# - -class TreeParser(BaseRecognizer): - """@brief Baseclass for generated tree parsers. - - A parser for a stream of tree nodes. "tree grammars" result in a subclass - of this. All the error reporting and recovery is shared with Parser via - the BaseRecognizer superclass. 
- """ - - def __init__(self, input, state=None): - BaseRecognizer.__init__(self, state) - - self.input = None - self.setTreeNodeStream(input) - - - def reset(self): - BaseRecognizer.reset(self) # reset all recognizer state variables - if self.input is not None: - self.input.seek(0) # rewind the input - - - def setTreeNodeStream(self, input): - """Set the input stream""" - - self.input = input - - - def getTreeNodeStream(self): - return self.input - - - def getSourceName(self): - return self.input.getSourceName() - - - def getCurrentInputSymbol(self, input): - return input.LT(1) - - - def getMissingSymbol(self, input, e, expectedTokenType, follow): - tokenText = "" - adaptor = input.adaptor - return adaptor.createToken( - CommonToken(type=expectedTokenType, text=tokenText)) - - - # precompiled regex used by inContext - dotdot = ".*[^.]\\.\\.[^.].*" - doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*" - dotdotPattern = re.compile(dotdot) - doubleEtcPattern = re.compile(doubleEtc) - - def inContext(self, context, adaptor=None, tokenName=None, t=None): - """Check if current node in input has a context. - - Context means sequence of nodes towards root of tree. For example, - you might say context is "MULT" which means my parent must be MULT. - "CLASS VARDEF" says current node must be child of a VARDEF and whose - parent is a CLASS node. You can use "..." to mean zero-or-more nodes. - "METHOD ... VARDEF" means my parent is VARDEF and somewhere above - that is a METHOD node. The first node in the context is not - necessarily the root. The context matcher stops matching and returns - true when it runs out of context. There is no way to force the first - node to be the root. - """ - - return self._inContext( - self.input.getTreeAdaptor(), self.tokenNames, - self.input.LT(1), context) - - @classmethod - def _inContext(cls, adaptor, tokenNames, t, context): - """The worker for inContext. - - It's static and full of parameters for testing purposes. - """ - - if cls.dotdotPattern.match(context): - # don't allow "..", must be "..." - raise ValueError("invalid syntax: ..") - - if cls.doubleEtcPattern.match(context): - # don't allow double "..." - raise ValueError("invalid syntax: ... ...") - - # ensure spaces around ... - context = context.replace("...", " ... ") - context = context.strip() - nodes = context.split() - - ni = len(nodes) - 1 - t = adaptor.getParent(t) - while ni >= 0 and t is not None: - if nodes[ni] == "...": - # walk upwards until we see nodes[ni-1] then continue walking - if ni == 0: - # ... at start is no-op - return True - goal = nodes[ni-1] - ancestor = cls._getAncestor(adaptor, tokenNames, t, goal) - if ancestor is None: - return False - t = ancestor - ni -= 1 - - name = tokenNames[adaptor.getType(t)] - if name != nodes[ni]: - return False - - # advance to parent and to previous element in context node list - ni -= 1 - t = adaptor.getParent(t) - - # at root but more nodes to match - if t is None and ni >= 0: - return False - - return True - - @staticmethod - def _getAncestor(adaptor, tokenNames, t, goal): - """Helper for static inContext.""" - while t is not None: - name = tokenNames[adaptor.getType(t)] - if name == goal: - return t - t = adaptor.getParent(t) - - return None - - - def matchAny(self): - """ - Match '.' in tree parser has special meaning. Skip node or - entire tree if node has children. If children, scan until - corresponding UP node. 
- """ - - self._state.errorRecovery = False - - look = self.input.LT(1) - if self.input.getTreeAdaptor().getChildCount(look) == 0: - self.input.consume() # not subtree, consume 1 node and return - return - - # current node is a subtree, skip to corresponding UP. - # must count nesting level to get right UP - level = 0 - tokenType = self.input.getTreeAdaptor().getType(look) - while tokenType != EOF and not (tokenType == UP and level==0): - self.input.consume() - look = self.input.LT(1) - tokenType = self.input.getTreeAdaptor().getType(look) - if tokenType == DOWN: - level += 1 - - elif tokenType == UP: - level -= 1 - - self.input.consume() # consume UP - - - def mismatch(self, input, ttype, follow): - """ - We have DOWN/UP nodes in the stream that have no line info; override. - plus we want to alter the exception type. Don't try to recover - from tree parser errors inline... - """ - - raise MismatchedTreeNodeException(ttype, input) - - - def getErrorHeader(self, e): - """ - Prefix error message with the grammar name because message is - always intended for the programmer because the parser built - the input tree not the user. - """ - - return (self.getGrammarFileName() + - ": node from {}line {}:{}".format( - "after " if e.approximateLineInfo else '', - e.line, - e.charPositionInLine)) - - def getErrorMessage(self, e): - """ - Tree parsers parse nodes they usually have a token object as - payload. Set the exception token and do the default behavior. - """ - - if isinstance(self, TreeParser): - adaptor = e.input.getTreeAdaptor() - e.token = adaptor.getToken(e.node) - if e.token is not None: # could be an UP/DOWN node - e.token = CommonToken( - type=adaptor.getType(e.node), - text=adaptor.getText(e.node) - ) - - return BaseRecognizer.getErrorMessage(self, e) - - - def traceIn(self, ruleName, ruleIndex): - BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1)) - - - def traceOut(self, ruleName, ruleIndex): - BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1)) - - -############################################################################# -# -# tree visitor -# -############################################################################# - -class TreeVisitor(object): - """Do a depth first walk of a tree, applying pre() and post() actions - we go. - """ - - def __init__(self, adaptor=None): - if adaptor is not None: - self.adaptor = adaptor - else: - self.adaptor = CommonTreeAdaptor() - - def visit(self, t, pre_action=None, post_action=None): - """Visit every node in tree t and trigger an action for each node - before/after having visited all of its children. Bottom up walk. - Execute both actions even if t has no children. Ignore return - results from transforming children since they will have altered - the child list of this node (their parent). Return result of - applying post action to this node. - - The Python version differs from the Java version by taking two - callables 'pre_action' and 'post_action' instead of a class instance - that wraps those methods. Those callables must accept a TreeNode as - their single argument and return the (potentially transformed or - replaced) TreeNode. 
- """ - - isNil = self.adaptor.isNil(t) - if pre_action is not None and not isNil: - # if rewritten, walk children of new t - t = pre_action(t) - - idx = 0 - while idx < self.adaptor.getChildCount(t): - child = self.adaptor.getChild(t, idx) - self.visit(child, pre_action, post_action) - idx += 1 - - if post_action is not None and not isNil: - t = post_action(t) - - return t - -############################################################################# -# -# tree iterator -# -############################################################################# - -class TreeIterator(object): - """ - Return a node stream from a doubly-linked tree whose nodes - know what child index they are. - - Emit navigation nodes (DOWN, UP, and EOF) to let show tree structure. - """ - - def __init__(self, tree, adaptor=None): - if adaptor is None: - adaptor = CommonTreeAdaptor() - - self.root = tree - self.adaptor = adaptor - - self.first_time = True - self.tree = tree - - # If we emit UP/DOWN nodes, we need to spit out multiple nodes per - # next() call. - self.nodes = [] - - # navigation nodes to return during walk and at end - self.down = adaptor.createFromType(DOWN, "DOWN") - self.up = adaptor.createFromType(UP, "UP") - self.eof = adaptor.createFromType(EOF, "EOF") - - - def reset(self): - self.first_time = True - self.tree = self.root - self.nodes = [] - - - def __iter__(self): - return self - - - def has_next(self): - if self.first_time: - return self.root is not None - - if len(self.nodes) > 0: - return True - - if self.tree is None: - return False - - if self.adaptor.getChildCount(self.tree) > 0: - return True - - # back at root? - return self.adaptor.getParent(self.tree) is not None - - - def __next__(self): - if not self.has_next(): - raise StopIteration - - if self.first_time: - # initial condition - self.first_time = False - if self.adaptor.getChildCount(self.tree) == 0: - # single node tree (special) - self.nodes.append(self.eof) - return self.tree - - return self.tree - - # if any queued up, use those first - if len(self.nodes) > 0: - return self.nodes.pop(0) - - # no nodes left? - if self.tree is None: - return self.eof - - # next node will be child 0 if any children - if self.adaptor.getChildCount(self.tree) > 0: - self.tree = self.adaptor.getChild(self.tree, 0) - # real node is next after DOWN - self.nodes.append(self.tree) - return self.down - - # if no children, look for next sibling of tree or ancestor - parent = self.adaptor.getParent(self.tree) - # while we're out of siblings, keep popping back up towards root - while (parent is not None - and self.adaptor.getChildIndex(self.tree)+1 >= self.adaptor.getChildCount(parent)): - # we're moving back up - self.nodes.append(self.up) - self.tree = parent - parent = self.adaptor.getParent(self.tree) - - # no nodes left? - if parent is None: - self.tree = None # back at root? 
nothing left then - self.nodes.append(self.eof) # add to queue, might have UP nodes in there - return self.nodes.pop(0) - - # must have found a node with an unvisited sibling - # move to it and return it - nextSiblingIndex = self.adaptor.getChildIndex(self.tree) + 1 - self.tree = self.adaptor.getChild(parent, nextSiblingIndex) - self.nodes.append(self.tree) # add to queue, might have UP nodes in there - return self.nodes.pop(0) - - - -############################################################################# -# -# streams for rule rewriting -# -############################################################################# - -class RewriteRuleElementStream(object): - """@brief Internal helper class. - - A generic list of elements tracked in an alternative to be used in - a -> rewrite rule. We need to subclass to fill in the next() method, - which returns either an AST node wrapped around a token payload or - an existing subtree. - - Once you start next()ing, do not try to add more elements. It will - break the cursor tracking I believe. - - @see org.antlr.runtime.tree.RewriteRuleSubtreeStream - @see org.antlr.runtime.tree.RewriteRuleTokenStream - - TODO: add mechanism to detect/puke on modification after reading from - stream - """ - - def __init__(self, adaptor, elementDescription, elements=None): - # Cursor 0..n-1. If singleElement!=null, cursor is 0 until you next(), - # which bumps it to 1 meaning no more elements. - self.cursor = 0 - - # Track single elements w/o creating a list. Upon 2nd add, alloc list - self.singleElement = None - - # The list of tokens or subtrees we are tracking - self.elements = None - - # Once a node / subtree has been used in a stream, it must be dup'd - # from then on. Streams are reset after subrules so that the streams - # can be reused in future subrules. So, reset must set a dirty bit. - # If dirty, then next() always returns a dup. - self.dirty = False - - # The element or stream description; usually has name of the token or - # rule reference that this list tracks. Can include rulename too, but - # the exception would track that info. - self.elementDescription = elementDescription - - self.adaptor = adaptor - - if isinstance(elements, (list, tuple)): - # Create a stream, but feed off an existing list - self.singleElement = None - self.elements = elements - - else: - # Create a stream with one element - self.add(elements) - - - def reset(self): - """ - Reset the condition of this stream so that it appears we have - not consumed any of its elements. Elements themselves are untouched. - Once we reset the stream, any future use will need duplicates. Set - the dirty bit. - """ - - self.cursor = 0 - self.dirty = True - - - def add(self, el): - if el is None: - return - - if self.elements is not None: # if in list, just add - self.elements.append(el) - return - - if self.singleElement is None: # no elements yet, track w/o list - self.singleElement = el - return - - # adding 2nd element, move to list - self.elements = [] - self.elements.append(self.singleElement) - self.singleElement = None - self.elements.append(el) - - - def nextTree(self): - """ - Return the next element in the stream. If out of elements, throw - an exception unless size()==1. If size is 1, then return elements[0]. - - Return a duplicate node/subtree if stream is out of elements and - size==1. If we've already used the element, dup (dirty bit set). 
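-
-        Sketch of the single-element contract (the stream name and the
-        tree are hypothetical):
-
-            stream = RewriteRuleSubtreeStream(adaptor, "rule expr", tree)
-            first = stream.nextTree()   # the tracked subtree itself
-            second = stream.nextTree()  # exhausted, size == 1: a duplicate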
- """ - - if (self.dirty - or (self.cursor >= len(self) and len(self) == 1) - ): - # if out of elements and size is 1, dup - el = self._next() - return self.dup(el) - - # test size above then fetch - el = self._next() - return el - - - def _next(self): - """ - do the work of getting the next element, making sure that it's - a tree node or subtree. Deal with the optimization of single- - element list versus list of size > 1. Throw an exception - if the stream is empty or we're out of elements and size>1. - protected so you can override in a subclass if necessary. - """ - - if len(self) == 0: - raise RewriteEmptyStreamException(self.elementDescription) - - if self.cursor >= len(self): # out of elements? - if len(self) == 1: # if size is 1, it's ok; return and we'll dup - return self.toTree(self.singleElement) - - # out of elements and size was not 1, so we can't dup - raise RewriteCardinalityException(self.elementDescription) - - # we have elements - if self.singleElement is not None: - self.cursor += 1 # move cursor even for single element list - return self.toTree(self.singleElement) - - # must have more than one in list, pull from elements - o = self.toTree(self.elements[self.cursor]) - self.cursor += 1 - return o - - - def dup(self, el): - """ - When constructing trees, sometimes we need to dup a token or AST - subtree. Dup'ing a token means just creating another AST node - around it. For trees, you must call the adaptor.dupTree() unless - the element is for a tree root; then it must be a node dup. - """ - - raise NotImplementedError - - - def toTree(self, el): - """ - Ensure stream emits trees; tokens must be converted to AST nodes. - AST nodes can be passed through unmolested. - """ - - return el - - - def hasNext(self): - return ( (self.singleElement is not None and self.cursor < 1) - or (self.elements is not None - and self.cursor < len(self.elements) - ) - ) - - - def size(self): - if self.singleElement is not None: - return 1 - - if self.elements is not None: - return len(self.elements) - - return 0 - - __len__ = size - - - def getDescription(self): - """Deprecated. Directly access elementDescription attribute""" - - return self.elementDescription - - -class RewriteRuleTokenStream(RewriteRuleElementStream): - """@brief Internal helper class.""" - - def toTree(self, el): - # Don't convert to a tree unless they explicitly call nextTree. - # This way we can do hetero tree nodes in rewrite. - return el - - - def nextNode(self): - t = self._next() - return self.adaptor.createWithPayload(t) - - - def nextToken(self): - return self._next() - - - def dup(self, el): - raise TypeError("dup can't be called for a token stream.") - - -class RewriteRuleSubtreeStream(RewriteRuleElementStream): - """@brief Internal helper class.""" - - def nextNode(self): - """ - Treat next element as a single node even if it's a subtree. - This is used instead of next() when the result has to be a - tree root node. Also prevents us from duplicating recently-added - children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration - must dup the type node, but ID has been added. - - Referencing a rule result twice is ok; dup entire tree as - we can't be adding trees as root; e.g., expr expr. - - Hideous code duplication here with super.next(). Can't think of - a proper way to refactor. This needs to always call dup node - and super.next() doesn't know which to call: dup node or dup tree. 
- """ - - if (self.dirty - or (self.cursor >= len(self) and len(self) == 1) - ): - # if out of elements and size is 1, dup (at most a single node - # since this is for making root nodes). - el = self._next() - return self.adaptor.dupNode(el) - - # test size above then fetch - el = self._next() - while self.adaptor.isNil(el) and self.adaptor.getChildCount(el) == 1: - el = self.adaptor.getChild(el, 0) - - # dup just the root (want node here) - return self.adaptor.dupNode(el) - - - def dup(self, el): - return self.adaptor.dupTree(el) - - - -class RewriteRuleNodeStream(RewriteRuleElementStream): - """ - Queues up nodes matched on left side of -> in a tree parser. This is - the analog of RewriteRuleTokenStream for normal parsers. - """ - - def nextNode(self): - return self._next() - - - def toTree(self, el): - return self.adaptor.dupNode(el) - - - def dup(self, el): - # we dup every node, so don't have to worry about calling dup; short- - #circuited next() so it doesn't call. - raise TypeError("dup can't be called for a node stream.") - - -class TreeRuleReturnScope(RuleReturnScope): - """ - This is identical to the ParserRuleReturnScope except that - the start property is a tree nodes not Token object - when you are parsing trees. To be generic the tree node types - have to be Object. - """ - - def __init__(self): - super().__init__() - self.start = None - self.tree = None - - - def getStart(self): - return self.start - - - def getTree(self): - return self.tree diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/treewizard.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/treewizard.py deleted file mode 100644 index 0fefe0fe..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/treewizard.py +++ /dev/null @@ -1,619 +0,0 @@ -""" @package antlr3.tree -@brief ANTLR3 runtime package, treewizard module - -A utility module to create ASTs at runtime. -See for an overview. Note that the API of the Python implementation is slightly different. - -""" - -# begin[licence] -# -# [The "BSD licence"] -# Copyright (c) 2005-2012 Terence Parr -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# -# end[licence] - -from .constants import INVALID_TOKEN_TYPE -from .tokens import CommonToken -from .tree import CommonTree, CommonTreeAdaptor - - -def computeTokenTypes(tokenNames): - """ - Compute a dict that is an inverted index of - tokenNames (which maps int token types to names). - """ - - if tokenNames: - return dict((name, type) for type, name in enumerate(tokenNames)) - - return {} - - -## token types for pattern parser -EOF = -1 -BEGIN = 1 -END = 2 -ID = 3 -ARG = 4 -PERCENT = 5 -COLON = 6 -DOT = 7 - -class TreePatternLexer(object): - def __init__(self, pattern): - ## The tree pattern to lex like "(A B C)" - self.pattern = pattern - - ## Index into input string - self.p = -1 - - ## Current char - self.c = None - - ## How long is the pattern in char? - self.n = len(pattern) - - ## Set when token type is ID or ARG - self.sval = None - - self.error = False - - self.consume() - - - __idStartChar = frozenset( - 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_' - ) - __idChar = __idStartChar | frozenset('0123456789') - - def nextToken(self): - self.sval = "" - while self.c != EOF: - if self.c in (' ', '\n', '\r', '\t'): - self.consume() - continue - - if self.c in self.__idStartChar: - self.sval += self.c - self.consume() - while self.c in self.__idChar: - self.sval += self.c - self.consume() - - return ID - - if self.c == '(': - self.consume() - return BEGIN - - if self.c == ')': - self.consume() - return END - - if self.c == '%': - self.consume() - return PERCENT - - if self.c == ':': - self.consume() - return COLON - - if self.c == '.': - self.consume() - return DOT - - if self.c == '[': # grab [x] as a string, returning x - self.consume() - while self.c != ']': - if self.c == '\\': - self.consume() - if self.c != ']': - self.sval += '\\' - - self.sval += self.c - - else: - self.sval += self.c - - self.consume() - - self.consume() - return ARG - - self.consume() - self.error = True - return EOF - - return EOF - - - def consume(self): - self.p += 1 - if self.p >= self.n: - self.c = EOF - - else: - self.c = self.pattern[self.p] - - -class TreePatternParser(object): - def __init__(self, tokenizer, wizard, adaptor): - self.tokenizer = tokenizer - self.wizard = wizard - self.adaptor = adaptor - self.ttype = tokenizer.nextToken() # kickstart - - - def pattern(self): - if self.ttype == BEGIN: - return self.parseTree() - - elif self.ttype == ID: - node = self.parseNode() - if self.ttype == EOF: - return node - - return None # extra junk on end - - return None - - - def parseTree(self): - if self.ttype != BEGIN: - return None - - self.ttype = self.tokenizer.nextToken() - root = self.parseNode() - if root is None: - return None - - while self.ttype in (BEGIN, ID, PERCENT, DOT): - if self.ttype == BEGIN: - subtree = self.parseTree() - self.adaptor.addChild(root, subtree) - - else: - child = self.parseNode() - if child is None: - return None - - self.adaptor.addChild(root, child) - - if self.ttype != END: - return None - - self.ttype = self.tokenizer.nextToken() - return root - - - def parseNode(self): - # "%label:" prefix - label = None - - if self.ttype == PERCENT: - self.ttype = self.tokenizer.nextToken() - if self.ttype != ID: - return None - - label = self.tokenizer.sval - self.ttype = self.tokenizer.nextToken() - if self.ttype != COLON: - return None - - self.ttype = self.tokenizer.nextToken() # move to ID following colon - - # Wildcard? 
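-        # A '.' matches any single node; e.g. in the (illustrative)
-        # pattern "(ASSIGN %lhs:ID %rhs:.)" the wildcard accepts any
-        # subtree as the second child.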
- if self.ttype == DOT: - self.ttype = self.tokenizer.nextToken() - wildcardPayload = CommonToken(0, ".") - node = WildcardTreePattern(wildcardPayload) - if label is not None: - node.label = label - return node - - # "ID" or "ID[arg]" - if self.ttype != ID: - return None - - tokenName = self.tokenizer.sval - self.ttype = self.tokenizer.nextToken() - - if tokenName == "nil": - return self.adaptor.nil() - - text = tokenName - # check for arg - arg = None - if self.ttype == ARG: - arg = self.tokenizer.sval - text = arg - self.ttype = self.tokenizer.nextToken() - - # create node - treeNodeType = self.wizard.getTokenType(tokenName) - if treeNodeType == INVALID_TOKEN_TYPE: - return None - - node = self.adaptor.createFromType(treeNodeType, text) - if label is not None and isinstance(node, TreePattern): - node.label = label - - if arg is not None and isinstance(node, TreePattern): - node.hasTextArg = True - - return node - - -class TreePattern(CommonTree): - """ - When using %label:TOKENNAME in a tree for parse(), we must - track the label. - """ - - def __init__(self, payload): - super().__init__(payload) - - self.label = None - self.hasTextArg = None - - - def toString(self): - if self.label: - return '%' + self.label + ':' + super().toString() - - else: - return super().toString() - - -class WildcardTreePattern(TreePattern): - pass - - -class TreePatternTreeAdaptor(CommonTreeAdaptor): - """This adaptor creates TreePattern objects for use during scan()""" - - def createWithPayload(self, payload): - return TreePattern(payload) - - -class TreeWizard(object): - """ - Build and navigate trees with this object. Must know about the names - of tokens so you have to pass in a map or array of token names (from which - this class can build the map). I.e., Token DECL means nothing unless the - class can translate it to a token type. - - In order to create nodes and navigate, this class needs a TreeAdaptor. - - This class can build a token type -> node index for repeated use or for - iterating over the various nodes with a particular type. - - This class works in conjunction with the TreeAdaptor rather than moving - all this functionality into the adaptor. An adaptor helps build and - navigate trees using methods. This class helps you do it with string - patterns like "(A B C)". You can create a tree from that pattern or - match subtrees against it. - """ - - def __init__(self, adaptor=None, tokenNames=None, typeMap=None): - if adaptor is None: - self.adaptor = CommonTreeAdaptor() - - else: - self.adaptor = adaptor - - if typeMap is None: - self.tokenNameToTypeMap = computeTokenTypes(tokenNames) - - else: - if tokenNames: - raise ValueError("Can't have both tokenNames and typeMap") - - self.tokenNameToTypeMap = typeMap - - - def getTokenType(self, tokenName): - """Using the map of token names to token types, return the type.""" - - if tokenName in self.tokenNameToTypeMap: - return self.tokenNameToTypeMap[tokenName] - else: - return INVALID_TOKEN_TYPE - - - def create(self, pattern): - """ - Create a tree or node from the indicated tree pattern that closely - follows ANTLR tree grammar tree element syntax: - - (root child1 ... child2). - - You can also just pass in a node: ID - - Any node can have a text argument: ID[foo] - (notice there are no quotes around foo--it's clear it's a string). - - nil is a special name meaning "give me a nil node". Useful for - making lists: (nil A B C) is a list of A B C. 
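-
-        A usage sketch (the adaptor and the token names PLUS/INT are
-        hypothetical; the wizard must know the names, e.g. via tokenNames):
-
-            wizard = TreeWizard(adaptor, tokenNames=tokenNames)
-            t = wizard.create("(PLUS INT[1] INT[2])")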
- """ - - tokenizer = TreePatternLexer(pattern) - parser = TreePatternParser(tokenizer, self, self.adaptor) - return parser.pattern() - - - def index(self, tree): - """Walk the entire tree and make a node name to nodes mapping. - - For now, use recursion but later nonrecursive version may be - more efficient. Returns a dict int -> list where the list is - of your AST node type. The int is the token type of the node. - """ - - m = {} - self._index(tree, m) - return m - - - def _index(self, t, m): - """Do the work for index""" - - if t is None: - return - - ttype = self.adaptor.getType(t) - elements = m.get(ttype) - if elements is None: - m[ttype] = elements = [] - - elements.append(t) - for i in range(self.adaptor.getChildCount(t)): - child = self.adaptor.getChild(t, i) - self._index(child, m) - - - def find(self, tree, what): - """Return a list of matching token. - - what may either be an integer specifzing the token type to find or - a string with a pattern that must be matched. - - """ - - if isinstance(what, int): - return self._findTokenType(tree, what) - - elif isinstance(what, str): - return self._findPattern(tree, what) - - else: - raise TypeError("'what' must be string or integer") - - - def _findTokenType(self, t, ttype): - """Return a List of tree nodes with token type ttype""" - - nodes = [] - - def visitor(tree, parent, childIndex, labels): - nodes.append(tree) - - self.visit(t, ttype, visitor) - - return nodes - - - def _findPattern(self, t, pattern): - """Return a List of subtrees matching pattern.""" - - subtrees = [] - - # Create a TreePattern from the pattern - tokenizer = TreePatternLexer(pattern) - parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor()) - tpattern = parser.pattern() - - # don't allow invalid patterns - if (tpattern is None or tpattern.isNil() - or isinstance(tpattern, WildcardTreePattern)): - return None - - rootTokenType = tpattern.getType() - - def visitor(tree, parent, childIndex, label): - if self._parse(tree, tpattern, None): - subtrees.append(tree) - - self.visit(t, rootTokenType, visitor) - - return subtrees - - - def visit(self, tree, what, visitor): - """Visit every node in tree matching what, invoking the visitor. - - If what is a string, it is parsed as a pattern and only matching - subtrees will be visited. - The implementation uses the root node of the pattern in combination - with visit(t, ttype, visitor) so nil-rooted patterns are not allowed. - Patterns with wildcard roots are also not allowed. - - If what is an integer, it is used as a token type and visit will match - all nodes of that type (this is faster than the pattern match). - The labels arg of the visitor action method is never set (it's None) - since using a token type rather than a pattern doesn't let us set a - label. - """ - - if isinstance(what, int): - self._visitType(tree, None, 0, what, visitor) - - elif isinstance(what, str): - self._visitPattern(tree, what, visitor) - - else: - raise TypeError("'what' must be string or integer") - - - def _visitType(self, t, parent, childIndex, ttype, visitor): - """Do the recursive work for visit""" - - if t is None: - return - - if self.adaptor.getType(t) == ttype: - visitor(t, parent, childIndex, None) - - for i in range(self.adaptor.getChildCount(t)): - child = self.adaptor.getChild(t, i) - self._visitType(child, t, i, ttype, visitor) - - - def _visitPattern(self, tree, pattern, visitor): - """ - For all subtrees that match the pattern, execute the visit action. 
- """ - - # Create a TreePattern from the pattern - tokenizer = TreePatternLexer(pattern) - parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor()) - tpattern = parser.pattern() - - # don't allow invalid patterns - if (tpattern is None or tpattern.isNil() - or isinstance(tpattern, WildcardTreePattern)): - return - - rootTokenType = tpattern.getType() - - def rootvisitor(tree, parent, childIndex, labels): - labels = {} - if self._parse(tree, tpattern, labels): - visitor(tree, parent, childIndex, labels) - - self.visit(tree, rootTokenType, rootvisitor) - - - def parse(self, t, pattern, labels=None): - """ - Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels - on the various nodes and '.' (dot) as the node/subtree wildcard, - return true if the pattern matches and fill the labels Map with - the labels pointing at the appropriate nodes. Return false if - the pattern is malformed or the tree does not match. - - If a node specifies a text arg in pattern, then that must match - for that node in t. - """ - - tokenizer = TreePatternLexer(pattern) - parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor()) - tpattern = parser.pattern() - - return self._parse(t, tpattern, labels) - - - def _parse(self, t1, tpattern, labels): - """ - Do the work for parse. Check to see if the tpattern fits the - structure and token types in t1. Check text if the pattern has - text arguments on nodes. Fill labels map with pointers to nodes - in tree matched against nodes in pattern with labels. - """ - - # make sure both are non-null - if t1 is None or tpattern is None: - return False - - # check roots (wildcard matches anything) - if not isinstance(tpattern, WildcardTreePattern): - if self.adaptor.getType(t1) != tpattern.getType(): - return False - - # if pattern has text, check node text - if (tpattern.hasTextArg - and self.adaptor.getText(t1) != tpattern.getText()): - return False - - if tpattern.label is not None and labels is not None: - # map label in pattern to node in t1 - labels[tpattern.label] = t1 - - # check children - n1 = self.adaptor.getChildCount(t1) - n2 = tpattern.getChildCount() - if n1 != n2: - return False - - for i in range(n1): - child1 = self.adaptor.getChild(t1, i) - child2 = tpattern.getChild(i) - if not self._parse(child1, child2, labels): - return False - - return True - - - def equals(self, t1, t2, adaptor=None): - """ - Compare t1 and t2; return true if token types/text, structure match - exactly. - The trees are examined in their entirety so that (A B) does not match - (A B C) nor (A (B C)). 
- """ - - if adaptor is None: - adaptor = self.adaptor - - return self._equals(t1, t2, adaptor) - - - def _equals(self, t1, t2, adaptor): - # make sure both are non-null - if t1 is None or t2 is None: - return False - - # check roots - if adaptor.getType(t1) != adaptor.getType(t2): - return False - - if adaptor.getText(t1) != adaptor.getText(t2): - return False - - # check children - n1 = adaptor.getChildCount(t1) - n2 = adaptor.getChildCount(t2) - if n1 != n2: - return False - - for i in range(n1): - child1 = adaptor.getChild(t1, i) - child2 = adaptor.getChild(t2, i) - if not self._equals(child1, child2, adaptor): - return False - - return True diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/doxyfile b/thirdparty/antlr3-antlr-3.5/runtime/Python3/doxyfile deleted file mode 100644 index 9f159197..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/doxyfile +++ /dev/null @@ -1,270 +0,0 @@ -# -*- mode: doxymacs -*- - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- -DOXYFILE_ENCODING = UTF-8 -PROJECT_NAME = "ANTLR Python3 API" -PROJECT_NUMBER = 3.3 -OUTPUT_DIRECTORY = api -CREATE_SUBDIRS = NO -OUTPUT_LANGUAGE = English -BRIEF_MEMBER_DESC = YES -REPEAT_BRIEF = YES -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the -ALWAYS_DETAILED_SEC = YES -INLINE_INHERITED_MEMB = NO -FULL_PATH_NAMES = YES -STRIP_FROM_PATH = build/doc/ -STRIP_FROM_INC_PATH = -SHORT_NAMES = NO -JAVADOC_AUTOBRIEF = NO -MULTILINE_CPP_IS_BRIEF = NO -DETAILS_AT_TOP = NO -INHERIT_DOCS = YES -SEPARATE_MEMBER_PAGES = NO -TAB_SIZE = 8 -ALIASES = -OPTIMIZE_OUTPUT_FOR_C = NO -OPTIMIZE_OUTPUT_JAVA = YES -BUILTIN_STL_SUPPORT = NO -CPP_CLI_SUPPORT = NO -DISTRIBUTE_GROUP_DOC = NO -SUBGROUPING = YES -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- -EXTRACT_ALL = YES -EXTRACT_PRIVATE = YES -EXTRACT_STATIC = YES -EXTRACT_LOCAL_CLASSES = YES -EXTRACT_LOCAL_METHODS = NO -HIDE_UNDOC_MEMBERS = NO -HIDE_UNDOC_CLASSES = NO -HIDE_FRIEND_COMPOUNDS = NO -HIDE_IN_BODY_DOCS = NO -INTERNAL_DOCS = NO -CASE_SENSE_NAMES = NO -HIDE_SCOPE_NAMES = NO -SHOW_INCLUDE_FILES = YES -INLINE_INFO = YES -SORT_MEMBER_DOCS = YES -SORT_BRIEF_DOCS = NO -SORT_BY_SCOPE_NAME = NO -GENERATE_TODOLIST = YES -GENERATE_TESTLIST = NO -GENERATE_BUGLIST = NO -GENERATE_DEPRECATEDLIST= NO -ENABLED_SECTIONS = -MAX_INITIALIZER_LINES = 30 -SHOW_USED_FILES = YES -SHOW_DIRECTORIES = NO -FILE_VERSION_FILTER = -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- -QUIET = NO -WARNINGS = YES -WARN_IF_UNDOCUMENTED = YES -WARN_IF_DOC_ERROR = YES -WARN_NO_PARAMDOC = NO -WARN_FORMAT = "$file:$line: $text" -WARN_LOGFILE = -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- -INPUT = build/doc -INPUT_ENCODING = UTF-8 -FILE_PATTERNS = *.c \ - *.cc \ - *.cxx \ - *.cpp \ - *.c++ \ - *.d \ - *.java \ - *.ii \ - *.ixx \ - *.ipp \ - *.i++ \ - *.inl \ - *.h \ - *.hh \ - 
*.hxx \ - *.hpp \ - *.h++ \ - *.idl \ - *.odl \ - *.cs \ - *.php \ - *.php3 \ - *.inc \ - *.m \ - *.mm \ - *.dox \ - *.py -RECURSIVE = YES -EXCLUDE = build/doc/antlr3/__init__.py -EXCLUDE_SYMLINKS = NO -EXCLUDE_PATTERNS = -EXCLUDE_SYMBOLS = dfa exceptions recognizers streams tokens constants -EXAMPLE_PATH = -EXAMPLE_PATTERNS = * -EXAMPLE_RECURSIVE = NO -IMAGE_PATH = -INPUT_FILTER = -FILTER_PATTERNS = -FILTER_SOURCE_FILES = NO -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- -SOURCE_BROWSER = YES -INLINE_SOURCES = NO -STRIP_CODE_COMMENTS = YES -REFERENCED_BY_RELATION = NO -REFERENCES_RELATION = NO -REFERENCES_LINK_SOURCE = YES -USE_HTAGS = NO -VERBATIM_HEADERS = YES -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- -ALPHABETICAL_INDEX = NO -COLS_IN_ALPHA_INDEX = 5 -IGNORE_PREFIX = -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- -GENERATE_HTML = YES -HTML_OUTPUT = . -HTML_FILE_EXTENSION = .html -HTML_HEADER = -HTML_FOOTER = -HTML_STYLESHEET = -HTML_ALIGN_MEMBERS = YES -GENERATE_HTMLHELP = NO -CHM_FILE = -HHC_LOCATION = -GENERATE_CHI = NO -BINARY_TOC = NO -TOC_EXPAND = NO -DISABLE_INDEX = NO -ENUM_VALUES_PER_LINE = 4 -GENERATE_TREEVIEW = NO -TREEVIEW_WIDTH = 250 -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- -GENERATE_LATEX = NO -LATEX_OUTPUT = latex -LATEX_CMD_NAME = latex -MAKEINDEX_CMD_NAME = makeindex -COMPACT_LATEX = NO -PAPER_TYPE = a4wide -EXTRA_PACKAGES = -LATEX_HEADER = -PDF_HYPERLINKS = NO -USE_PDFLATEX = YES -LATEX_BATCHMODE = NO -LATEX_HIDE_INDICES = NO -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- -GENERATE_RTF = NO -RTF_OUTPUT = rtf -COMPACT_RTF = NO -RTF_HYPERLINKS = NO -RTF_STYLESHEET_FILE = -RTF_EXTENSIONS_FILE = -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- -GENERATE_MAN = NO -MAN_OUTPUT = man -MAN_EXTENSION = .3 -MAN_LINKS = NO -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- -GENERATE_XML = NO -XML_OUTPUT = xml -XML_SCHEMA = -XML_DTD = -XML_PROGRAMLISTING = YES -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- -GENERATE_AUTOGEN_DEF = NO -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- -GENERATE_PERLMOD = NO -PERLMOD_LATEX = NO 
-PERLMOD_PRETTY = YES -PERLMOD_MAKEVAR_PREFIX = -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- -ENABLE_PREPROCESSING = YES -MACRO_EXPANSION = YES -EXPAND_ONLY_PREDEF = NO -SEARCH_INCLUDES = YES -INCLUDE_PATH = -INCLUDE_FILE_PATTERNS = -PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS -EXPAND_AS_DEFINED = -SKIP_FUNCTION_MACROS = YES -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- -TAGFILES = -GENERATE_TAGFILE = -ALLEXTERNALS = NO -EXTERNAL_GROUPS = YES -PERL_PATH = /usr/bin/perl -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- -CLASS_DIAGRAMS = NO -MSCGEN_PATH = -HIDE_UNDOC_RELATIONS = YES -HAVE_DOT = YES -CLASS_GRAPH = YES -COLLABORATION_GRAPH = YES -GROUP_GRAPHS = YES -UML_LOOK = NO -TEMPLATE_RELATIONS = NO -INCLUDE_GRAPH = YES -INCLUDED_BY_GRAPH = YES -CALL_GRAPH = NO -CALLER_GRAPH = NO -GRAPHICAL_HIERARCHY = YES -DIRECTORY_GRAPH = YES -DOT_IMAGE_FORMAT = png -DOT_PATH = -DOTFILE_DIRS = -DOT_GRAPH_MAX_NODES = 50 -DOT_TRANSPARENT = NO -DOT_MULTI_TARGETS = NO -GENERATE_LEGEND = YES -DOT_CLEANUP = YES -#--------------------------------------------------------------------------- -# Configuration::additions related to the search engine -#--------------------------------------------------------------------------- -SEARCHENGINE = NO - - -#--------------------------------------------------------------------------- -# doxypy integration -#--------------------------------------------------------------------------- -FILTER_SOURCE_FILES = YES -INPUT_FILTER = "python doxypy.py" diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/ez_setup.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/ez_setup.py deleted file mode 100644 index 3ea2e667..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/ez_setup.py +++ /dev/null @@ -1,485 +0,0 @@ -#!python -"""Bootstrap distribute installation - -If you want to use setuptools in your package's setup.py, just include this -file in the same directory with it, and add this to the top of your setup.py:: - - from distribute_setup import use_setuptools - use_setuptools() - -If you want to require a specific version of setuptools, set a download -mirror, or use an alternate download directory, you can do so by supplying -the appropriate options to ``use_setuptools()``. - -This file can also be run as a script to install or upgrade setuptools. 
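-
-For example, to require at least the default version bundled here
-(the version number below is only illustrative)::
-
-    from ez_setup import use_setuptools
-    use_setuptools(version="0.6.14")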
-""" -import os -import sys -import time -import fnmatch -import tempfile -import tarfile -from distutils import log - -try: - from site import USER_SITE -except ImportError: - USER_SITE = None - -try: - import subprocess - - def _python_cmd(*args): - args = (sys.executable,) + args - return subprocess.call(args) == 0 - -except ImportError: - # will be used for python 2.3 - def _python_cmd(*args): - args = (sys.executable,) + args - # quoting arguments if windows - if sys.platform == 'win32': - def quote(arg): - if ' ' in arg: - return '"%s"' % arg - return arg - args = [quote(arg) for arg in args] - return os.spawnl(os.P_WAIT, sys.executable, *args) == 0 - -DEFAULT_VERSION = "0.6.14" -DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/" -SETUPTOOLS_FAKED_VERSION = "0.6c11" - -SETUPTOOLS_PKG_INFO = """\ -Metadata-Version: 1.0 -Name: setuptools -Version: %s -Summary: xxxx -Home-page: xxx -Author: xxx -Author-email: xxx -License: xxx -Description: xxx -""" % SETUPTOOLS_FAKED_VERSION - - -def _install(tarball): - # extracting the tarball - tmpdir = tempfile.mkdtemp() - log.warn('Extracting in %s', tmpdir) - old_wd = os.getcwd() - try: - os.chdir(tmpdir) - tar = tarfile.open(tarball) - _extractall(tar) - tar.close() - - # going in the directory - subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) - os.chdir(subdir) - log.warn('Now working in %s', subdir) - - # installing - log.warn('Installing Distribute') - if not _python_cmd('setup.py', 'install'): - log.warn('Something went wrong during the installation.') - log.warn('See the error message above.') - finally: - os.chdir(old_wd) - - -def _build_egg(egg, tarball, to_dir): - # extracting the tarball - tmpdir = tempfile.mkdtemp() - log.warn('Extracting in %s', tmpdir) - old_wd = os.getcwd() - try: - os.chdir(tmpdir) - tar = tarfile.open(tarball) - _extractall(tar) - tar.close() - - # going in the directory - subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) - os.chdir(subdir) - log.warn('Now working in %s', subdir) - - # building an egg - log.warn('Building a Distribute egg in %s', to_dir) - _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) - - finally: - os.chdir(old_wd) - # returning the result - log.warn(egg) - if not os.path.exists(egg): - raise IOError('Could not build the egg.') - - -def _do_download(version, download_base, to_dir, download_delay): - egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg' - % (version, sys.version_info[0], sys.version_info[1])) - if not os.path.exists(egg): - tarball = download_setuptools(version, download_base, - to_dir, download_delay) - _build_egg(egg, tarball, to_dir) - sys.path.insert(0, egg) - import setuptools - setuptools.bootstrap_install_from = egg - - -def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, - to_dir=os.curdir, download_delay=15, no_fake=True): - # making sure we use the absolute path - to_dir = os.path.abspath(to_dir) - was_imported = 'pkg_resources' in sys.modules or \ - 'setuptools' in sys.modules - try: - try: - import pkg_resources - if not hasattr(pkg_resources, '_distribute'): - if not no_fake: - _fake_setuptools() - raise ImportError - except ImportError: - return _do_download(version, download_base, to_dir, download_delay) - try: - pkg_resources.require("distribute>="+version) - return - except pkg_resources.VersionConflict: - e = sys.exc_info()[1] - if was_imported: - sys.stderr.write( - "The required version of distribute (>=%s) is not available,\n" - "and can't be installed while this script is running. 
Please\n" - "install a more recent version first, using\n" - "'easy_install -U distribute'." - "\n\n(Currently using %r)\n" % (version, e.args[0])) - sys.exit(2) - else: - del pkg_resources, sys.modules['pkg_resources'] # reload ok - return _do_download(version, download_base, to_dir, - download_delay) - except pkg_resources.DistributionNotFound: - return _do_download(version, download_base, to_dir, - download_delay) - finally: - if not no_fake: - _create_fake_setuptools_pkg_info(to_dir) - -def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, - to_dir=os.curdir, delay=15): - """Download distribute from a specified location and return its filename - - `version` should be a valid distribute version number that is available - as an egg for download under the `download_base` URL (which should end - with a '/'). `to_dir` is the directory where the egg will be downloaded. - `delay` is the number of seconds to pause before an actual download - attempt. - """ - # making sure we use the absolute path - to_dir = os.path.abspath(to_dir) - try: - from urllib.request import urlopen - except ImportError: - from urllib2 import urlopen - tgz_name = "distribute-%s.tar.gz" % version - url = download_base + tgz_name - saveto = os.path.join(to_dir, tgz_name) - src = dst = None - if not os.path.exists(saveto): # Avoid repeated downloads - try: - log.warn("Downloading %s", url) - src = urlopen(url) - # Read/write all in one block, so we don't create a corrupt file - # if the download is interrupted. - data = src.read() - dst = open(saveto, "wb") - dst.write(data) - finally: - if src: - src.close() - if dst: - dst.close() - return os.path.realpath(saveto) - -def _no_sandbox(function): - def __no_sandbox(*args, **kw): - try: - from setuptools.sandbox import DirectorySandbox - if not hasattr(DirectorySandbox, '_old'): - def violation(*args): - pass - DirectorySandbox._old = DirectorySandbox._violation - DirectorySandbox._violation = violation - patched = True - else: - patched = False - except ImportError: - patched = False - - try: - return function(*args, **kw) - finally: - if patched: - DirectorySandbox._violation = DirectorySandbox._old - del DirectorySandbox._old - - return __no_sandbox - -def _patch_file(path, content): - """Will backup the file then patch it""" - existing_content = open(path).read() - if existing_content == content: - # already patched - log.warn('Already patched.') - return False - log.warn('Patching...') - _rename_path(path) - f = open(path, 'w') - try: - f.write(content) - finally: - f.close() - return True - -_patch_file = _no_sandbox(_patch_file) - -def _same_content(path, content): - return open(path).read() == content - -def _rename_path(path): - new_name = path + '.OLD.%s' % time.time() - log.warn('Renaming %s into %s', path, new_name) - os.rename(path, new_name) - return new_name - -def _remove_flat_installation(placeholder): - if not os.path.isdir(placeholder): - log.warn('Unkown installation at %s', placeholder) - return False - found = False - for file in os.listdir(placeholder): - if fnmatch.fnmatch(file, 'setuptools*.egg-info'): - found = True - break - if not found: - log.warn('Could not locate setuptools*.egg-info') - return - - log.warn('Removing elements out of the way...') - pkg_info = os.path.join(placeholder, file) - if os.path.isdir(pkg_info): - patched = _patch_egg_dir(pkg_info) - else: - patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO) - - if not patched: - log.warn('%s already patched.', pkg_info) - return False - # now let's move the 
files out of the way - for element in ('setuptools', 'pkg_resources.py', 'site.py'): - element = os.path.join(placeholder, element) - if os.path.exists(element): - _rename_path(element) - else: - log.warn('Could not find the %s element of the ' - 'Setuptools distribution', element) - return True - -_remove_flat_installation = _no_sandbox(_remove_flat_installation) - -def _after_install(dist): - log.warn('After install bootstrap.') - placeholder = dist.get_command_obj('install').install_purelib - _create_fake_setuptools_pkg_info(placeholder) - -def _create_fake_setuptools_pkg_info(placeholder): - if not placeholder or not os.path.exists(placeholder): - log.warn('Could not find the install location') - return - pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1]) - setuptools_file = 'setuptools-%s-py%s.egg-info' % \ - (SETUPTOOLS_FAKED_VERSION, pyver) - pkg_info = os.path.join(placeholder, setuptools_file) - if os.path.exists(pkg_info): - log.warn('%s already exists', pkg_info) - return - - log.warn('Creating %s', pkg_info) - f = open(pkg_info, 'w') - try: - f.write(SETUPTOOLS_PKG_INFO) - finally: - f.close() - - pth_file = os.path.join(placeholder, 'setuptools.pth') - log.warn('Creating %s', pth_file) - f = open(pth_file, 'w') - try: - f.write(os.path.join(os.curdir, setuptools_file)) - finally: - f.close() - -_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info) - -def _patch_egg_dir(path): - # let's check if it's already patched - pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') - if os.path.exists(pkg_info): - if _same_content(pkg_info, SETUPTOOLS_PKG_INFO): - log.warn('%s already patched.', pkg_info) - return False - _rename_path(path) - os.mkdir(path) - os.mkdir(os.path.join(path, 'EGG-INFO')) - pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') - f = open(pkg_info, 'w') - try: - f.write(SETUPTOOLS_PKG_INFO) - finally: - f.close() - return True - -_patch_egg_dir = _no_sandbox(_patch_egg_dir) - -def _before_install(): - log.warn('Before install bootstrap.') - _fake_setuptools() - - -def _under_prefix(location): - if 'install' not in sys.argv: - return True - args = sys.argv[sys.argv.index('install')+1:] - for index, arg in enumerate(args): - for option in ('--root', '--prefix'): - if arg.startswith('%s=' % option): - top_dir = arg.split('root=')[-1] - return location.startswith(top_dir) - elif arg == option: - if len(args) > index: - top_dir = args[index+1] - return location.startswith(top_dir) - if arg == '--user' and USER_SITE is not None: - return location.startswith(USER_SITE) - return True - - -def _fake_setuptools(): - log.warn('Scanning installed packages') - try: - import pkg_resources - except ImportError: - # we're cool - log.warn('Setuptools or Distribute does not seem to be installed.') - return - ws = pkg_resources.working_set - try: - setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools', - replacement=False)) - except TypeError: - # old distribute API - setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools')) - - if setuptools_dist is None: - log.warn('No setuptools distribution found') - return - # detecting if it was already faked - setuptools_location = setuptools_dist.location - log.warn('Setuptools installation detected at %s', setuptools_location) - - # if --root or --preix was provided, and if - # setuptools is not located in them, we don't patch it - if not _under_prefix(setuptools_location): - log.warn('Not patching, --root or --prefix is installing Distribute' - ' in another location') - 
return - - # let's see if its an egg - if not setuptools_location.endswith('.egg'): - log.warn('Non-egg installation') - res = _remove_flat_installation(setuptools_location) - if not res: - return - else: - log.warn('Egg installation') - pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO') - if (os.path.exists(pkg_info) and - _same_content(pkg_info, SETUPTOOLS_PKG_INFO)): - log.warn('Already patched.') - return - log.warn('Patching...') - # let's create a fake egg replacing setuptools one - res = _patch_egg_dir(setuptools_location) - if not res: - return - log.warn('Patched done.') - _relaunch() - - -def _relaunch(): - log.warn('Relaunching...') - # we have to relaunch the process - # pip marker to avoid a relaunch bug - if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']: - sys.argv[0] = 'setup.py' - args = [sys.executable] + sys.argv - sys.exit(subprocess.call(args)) - - -def _extractall(self, path=".", members=None): - """Extract all members from the archive to the current working - directory and set owner, modification time and permissions on - directories afterwards. `path' specifies a different directory - to extract to. `members' is optional and must be a subset of the - list returned by getmembers(). - """ - import copy - import operator - from tarfile import ExtractError - directories = [] - - if members is None: - members = self - - for tarinfo in members: - if tarinfo.isdir(): - # Extract directories with a safe mode. - directories.append(tarinfo) - tarinfo = copy.copy(tarinfo) - tarinfo.mode = 448 # decimal for oct 0700 - self.extract(tarinfo, path) - - # Reverse sort directories. - if sys.version_info < (2, 4): - def sorter(dir1, dir2): - return cmp(dir1.name, dir2.name) - directories.sort(sorter) - directories.reverse() - else: - directories.sort(key=operator.attrgetter('name'), reverse=True) - - # Set correct owner, mtime and filemode on directories. - for tarinfo in directories: - dirpath = os.path.join(path, tarinfo.name) - try: - self.chown(tarinfo, dirpath) - self.utime(tarinfo, dirpath) - self.chmod(tarinfo, dirpath) - except ExtractError: - e = sys.exc_info()[1] - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - -def main(argv, version=DEFAULT_VERSION): - """Install or upgrade setuptools and EasyInstall""" - tarball = download_setuptools() - _install(tarball) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/mkdoxy.sh b/thirdparty/antlr3-antlr-3.5/runtime/Python3/mkdoxy.sh deleted file mode 100755 index 36fffff3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/mkdoxy.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -if [ -e doxygen.sh ]; then - . doxygen.sh -fi - -rm -fr build/doc -mkdir -p build/doc/antlr3 - -for f in __init__ exceptions constants dfa tokens streams recognizers; do - sed -e '/begin\[licence\]/,/end\[licence\]/d' antlr3/$f.py \ - >>build/doc/antlr3.py -done - -touch build/doc/antlr3/__init__.py - -cp -f antlr3/tree.py build/doc/antlr3 -cp -f antlr3/treewizard.py build/doc/antlr3 - -doxygen doxyfile diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/pylintrc b/thirdparty/antlr3-antlr-3.5/runtime/Python3/pylintrc deleted file mode 100644 index 1ded6268..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/pylintrc +++ /dev/null @@ -1,253 +0,0 @@ -# lint Python modules using external checkers - -[MASTER] - -# Specify a configuration file. 
-#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Profiled execution. -profile=no - -# Add to the black list. It should be a base name, not a -# path. You may set this option multiple times. -ignore=CVS - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - - -[MESSAGES CONTROL] - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. -#enable= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). -# W0622: Redefining built-in '...' -# C0103: Invalid name -# R0904: Too many public methods -# R0201: Method could be a function -# C0302: Too many lines in a module -# R0902: Too many instance attributes -# R0913: Too many arguments -# R0912: Too many branches -# R0903: Too few public methods -# C0111: Missing docstring -# W0403: Relative import -# W0401: Wildcard import -# W0142: */** magic -disable=W0622,C0103,R0904,R0201,C0302,R0902,R0913,R0912,R0903,C0111,W0403,W0401,W0142 - - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html -output-format=text - -# Include message's id in output -include-ids=yes - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Add a comment according to your evaluation note. This is used by the global -# evaluation report (RP0004). 
-comment=no - - -[BASIC] - -# Required attributes for module, separated by a comma -required-attributes= - -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,apply,input - -# Regular expression which should only match correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression which should only match correct module level names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression which should only match correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression which should only match correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct instance attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct list comprehension / -# generator expression variable names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Regular expression which should only match functions or classes name which do -# not require a docstring -no-docstring-rgx=__.*__ - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=80 - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the beginning of the name of dummy variables -# (i.e. not used). -dummy-variables-rgx=_|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). -ignored-classes=SQLObject - -# When zope mode is activated, add a predefined set of Zope acquired attributes -# to generated-members. -zope=no - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E0201 when accessed. -generated-members=REQUEST,acl_users,aq_parent - - -[CLASSES] - -# List of interface methods to ignore, separated by a comma. This is used for -# instance to not check methods defines in Zope's Interface base class. 
-ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branchs=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,string,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/setup.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/setup.py deleted file mode 100644 index 92d9ac4e..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/setup.py +++ /dev/null @@ -1,289 +0,0 @@ - -import sys -if sys.version_info < (3, 2): - print('This antlr3 module requires Python 3.2 or later. You can ' - 'download Python 3 from\nhttps://python.org/, ' - 'or visit http://www.antlr.org/ for the Python target.') - sys.exit(1) - -# bootstrapping setuptools -import ez_setup -ez_setup.use_setuptools() - -import os -import textwrap -from distutils.errors import * -from distutils.command.clean import clean as _clean -from distutils.cmd import Command -from setuptools import setup -from distutils import log - -from distutils.core import setup - -class clean(_clean): - """Also cleanup local temp files.""" - - def run(self): - _clean.run(self) - - import fnmatch - - # kill temporary files - patterns = [ - # generic tempfiles - '*~', '*.bak', '*.pyc', - - # tempfiles generated by ANTLR runs - 't[0-9]*Lexer.py', 't[0-9]*Parser.py', - '*.tokens', '*__.g', - ] - - for path in ('antlr3', 'unittests', 'tests'): - path = os.path.join(os.path.dirname(__file__), path) - if os.path.isdir(path): - for root, dirs, files in os.walk(path, topdown=True): - graveyard = [] - for pat in patterns: - graveyard.extend(fnmatch.filter(files, pat)) - - for name in graveyard: - filePath = os.path.join(root, name) - - try: - log.info("removing '%s'", filePath) - os.unlink(filePath) - except OSError as exc: - log.warn( - "Failed to delete '%s': %s", - filePath, exc - ) - - -class TestError(DistutilsError): - pass - - -# grml.. the class name appears in the --help output: -# ... -# Options for 'CmdUnitTest' command -# ... 
-# so I have to use a rather ugly name... -class unittest(Command): - """Run unit tests for package""" - - description = "run unit tests for package" - - user_options = [] - boolean_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - testDir = os.path.join(os.path.dirname(__file__), 'unittests') - if not os.path.isdir(testDir): - raise DistutilsFileError( - "There is no 'unittests' directory. Did you fetch the " - "development version?", - ) - - import glob - import imp - import unittest - import traceback - import io - - suite = unittest.TestSuite() - loadFailures = [] - - # collect tests from all unittests/test*.py files - testFiles = [] - for testPath in glob.glob(os.path.join(testDir, 'test*.py')): - testFiles.append(testPath) - - testFiles.sort() - for testPath in testFiles: - testID = os.path.basename(testPath)[:-3] - - try: - modFile, modPathname, modDescription \ - = imp.find_module(testID, [testDir]) - - testMod = imp.load_module( - testID, modFile, modPathname, modDescription - ) - - suite.addTests( - unittest.defaultTestLoader.loadTestsFromModule(testMod) - ) - - except Exception: - buf = io.StringIO() - traceback.print_exc(file=buf) - - loadFailures.append( - (os.path.basename(testPath), buf.getvalue()) - ) - - runner = unittest.TextTestRunner(verbosity=2) - result = runner.run(suite) - - for testName, error in loadFailures: - sys.stderr.write('\n' + '='*70 + '\n') - sys.stderr.write( - "Failed to load test module {}\n".format(testName) - ) - sys.stderr.write(error) - sys.stderr.write('\n') - - if not result.wasSuccessful() or loadFailures: - raise TestError( - "Unit test suite failed!", - ) - - -class functest(Command): - """Run functional tests for package""" - - description = "run functional tests for package" - - user_options = [ - ('testcase=', None, - "testcase to run [default: run all]"), - ('antlr-version=', None, - "ANTLR version to use [default: HEAD (in ../../build)]"), - ('antlr-jar=', None, - "Explicit path to an antlr jar (overrides --antlr-version)"), - ] - - boolean_options = [] - - def initialize_options(self): - self.testcase = None - self.antlr_version = 'HEAD' - self.antlr_jar = None - - def finalize_options(self): - pass - - def run(self): - import glob - import imp - import unittest - import traceback - import io - - testDir = os.path.join(os.path.dirname(__file__), 'tests') - if not os.path.isdir(testDir): - raise DistutilsFileError( - "There is not 'tests' directory. 
Did you fetch the " - "development version?", - ) - - # make sure, relative imports from testcases work - sys.path.insert(0, testDir) - - rootDir = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..')) - - if self.antlr_jar is not None: - classpath = [self.antlr_jar] - elif self.antlr_version == 'HEAD': - classpath = [ - os.path.join(rootDir, 'tool', 'target', 'classes'), - os.path.join(rootDir, 'runtime', 'Java', 'target', 'classes') - ] - else: - classpath = [ - os.path.join(rootDir, 'archive', - 'antlr-{}.jar'.format(self.antlr_version)) - ] - - classpath.extend([ - os.path.join(rootDir, 'lib', 'antlr-3.4.1-SNAPSHOT.jar'), - os.path.join(rootDir, 'lib', 'antlr-runtime-3.4.jar'), - os.path.join(rootDir, 'lib', 'ST-4.0.5.jar'), - ]) - os.environ['CLASSPATH'] = ':'.join(classpath) - - os.environ['ANTLRVERSION'] = self.antlr_version - - suite = unittest.TestSuite() - loadFailures = [] - - # collect tests from all tests/t*.py files - testFiles = [] - test_glob = 't[0-9][0-9][0-9]*.py' - for testPath in glob.glob(os.path.join(testDir, test_glob)): - if testPath.endswith('Lexer.py') or testPath.endswith('Parser.py'): - continue - - # if a single testcase has been selected, filter out all other - # tests - if (self.testcase is not None - and not os.path.basename(testPath)[:-3].startswith(self.testcase)): - continue - - testFiles.append(testPath) - - testFiles.sort() - for testPath in testFiles: - testID = os.path.basename(testPath)[:-3] - - try: - modFile, modPathname, modDescription \ - = imp.find_module(testID, [testDir]) - - testMod = imp.load_module( - testID, modFile, modPathname, modDescription) - - suite.addTests( - unittest.defaultTestLoader.loadTestsFromModule(testMod)) - - except Exception: - buf = io.StringIO() - traceback.print_exc(file=buf) - - loadFailures.append( - (os.path.basename(testPath), buf.getvalue())) - - runner = unittest.TextTestRunner(verbosity=2) - - result = runner.run(suite) - - for testName, error in loadFailures: - sys.stderr.write('\n' + '='*70 + '\n') - sys.stderr.write( - "Failed to load test module {}\n".format(testName) - ) - sys.stderr.write(error) - sys.stderr.write('\n') - - if not result.wasSuccessful() or loadFailures: - raise TestError( - "Functional test suite failed!", - ) - - -setup(name='antlr_python3_runtime', - version='3.4', - packages=['antlr3'], - - author="Benjamin S Wolf", - author_email="jokeserver+antlr3@gmail.com", - url="http://www.antlr.org/", - download_url="http://www.antlr.org/download.html", - license="BSD", - description="Runtime package for ANTLR3", - long_description=textwrap.dedent('''\ - This is the runtime package for ANTLR3, which is required to use parsers - generated by ANTLR3. 
- '''), - cmdclass={'unittest': unittest, - 'functest': functest, - 'clean': clean - }, - ) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t001lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t001lexer.g deleted file mode 100644 index c3633166..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t001lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t001lexer; -options { - language = Python3; -} - -ZERO: '0'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t001lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t001lexer.py deleted file mode 100644 index 9450e8eb..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t001lexer.py +++ /dev/null @@ -1,57 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t001lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! - raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('0') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.ZERO) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testIteratorInterface(self): - stream = antlr3.StringStream('0') - lexer = self.getLexer(stream) - - types = [token.type for token in lexer] - - self.assertEqual(types, [self.lexerModule.ZERO]) - - - def testMalformedInput(self): - stream = antlr3.StringStream('1') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.MismatchedTokenException as exc: - self.assertEqual(exc.expecting, '0') - self.assertEqual(exc.unexpectedType, '1') - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t002lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t002lexer.g deleted file mode 100644 index f794d9bb..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t002lexer.g +++ /dev/null @@ -1,7 +0,0 @@ -lexer grammar t002lexer; -options { - language = Python3; -} - -ZERO: '0'; -ONE: '1'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t002lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t002lexer.py deleted file mode 100644 index 37824ba6..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t002lexer.py +++ /dev/null @@ -1,50 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t002lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('01') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.ZERO) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.ONE) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('2') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.NoViableAltException as exc: - self.assertEqual(exc.unexpectedType, '2') - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t003lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t003lexer.g deleted file mode 100644 index 22253d21..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t003lexer.g +++ /dev/null @@ -1,8 +0,0 @@ -lexer grammar t003lexer; -options { - language = Python3; -} - -ZERO: '0'; -ONE: '1'; -FOOZE: 'fooze'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t003lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t003lexer.py deleted file mode 100644 index da9421fe..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t003lexer.py +++ /dev/null @@ -1,53 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t003lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! - raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('0fooze1') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.ZERO) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOOZE) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.ONE) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('2') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.NoViableAltException as exc: - self.assertEqual(exc.unexpectedType, '2') - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t004lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t004lexer.g deleted file mode 100644 index 4a08d432..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t004lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t004lexer; -options { - language = Python3; -} - -FOO: 'f' 'o'*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t004lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t004lexer.py deleted file mode 100644 index 633427e3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t004lexer.py +++ /dev/null @@ -1,70 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t004lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('ffofoofooo') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 0) - self.assertEqual(token.stop, 0) - self.assertEqual(token.text, 'f') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 1) - self.assertEqual(token.stop, 2) - self.assertEqual(token.text, 'fo') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 3) - self.assertEqual(token.stop, 5) - self.assertEqual(token.text, 'foo') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 6) - self.assertEqual(token.stop, 9) - self.assertEqual(token.text, 'fooo') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('2') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.MismatchedTokenException as exc: - self.assertEqual(exc.expecting, 'f') - self.assertEqual(exc.unexpectedType, '2') - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t005lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t005lexer.g deleted file mode 100644 index 247a3440..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t005lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t005lexer; -options { - language = Python3; -} - -FOO: 'f' 'o'+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t005lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t005lexer.py deleted file mode 100644 index e5ee1658..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t005lexer.py +++ /dev/null @@ -1,75 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t005lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
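
Note: the invariant behind the start/stop assertions in these tests is that both offsets are inclusive character positions in the input, so token.text always equals the slice input[token.start : token.stop + 1]. A two-line check against the 'ffofoofooo' case above, where the second FOO token has start=1, stop=2, text='fo':

    input_text = 'ffofoofooo'
    # stop is inclusive, hence the +1 in the slice
    assert input_text[1:2 + 1] == 'fo'
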
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('fofoofooo') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 0) - self.assertEqual(token.stop, 1) - self.assertEqual(token.text, 'fo') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 2) - self.assertEqual(token.stop, 4) - self.assertEqual(token.text, 'foo') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 5) - self.assertEqual(token.stop, 8) - self.assertEqual(token.text, 'fooo') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput1(self): - stream = antlr3.StringStream('2') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.MismatchedTokenException as exc: - self.assertEqual(exc.expecting, 'f') - self.assertEqual(exc.unexpectedType, '2') - - - def testMalformedInput2(self): - stream = antlr3.StringStream('f') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail() - - except antlr3.EarlyExitException as exc: - self.assertEqual(exc.unexpectedType, antlr3.EOF) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t006lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t006lexer.g deleted file mode 100644 index b7f4f4a3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t006lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t006lexer; -options { - language = Python3; -} - -FOO: 'f' ('o' | 'a')*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t006lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t006lexer.py deleted file mode 100644 index daa5d29d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t006lexer.py +++ /dev/null @@ -1,61 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t006lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('fofaaooa') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 0) - self.assertEqual(token.stop, 1) - self.assertEqual(token.text, 'fo') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 2) - self.assertEqual(token.stop, 7) - self.assertEqual(token.text, 'faaooa') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('fofoaooaoa2') - lexer = self.getLexer(stream) - - lexer.nextToken() - lexer.nextToken() - try: - token = lexer.nextToken() - self.fail(token) - - except antlr3.MismatchedTokenException as exc: - self.assertEqual(exc.expecting, 'f') - self.assertEqual(exc.unexpectedType, '2') - self.assertEqual(exc.charPositionInLine, 10) - self.assertEqual(exc.line, 1) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t007lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t007lexer.g deleted file mode 100644 index e55b4b70..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t007lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t007lexer; -options { - language = Python3; -} - -FOO: 'f' ('o' | 'a' 'b'+)*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t007lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t007lexer.py deleted file mode 100644 index 02abb771..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t007lexer.py +++ /dev/null @@ -1,59 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t007lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('fofababbooabb') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 0) - self.assertEqual(token.stop, 1) - self.assertEqual(token.text, 'fo') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 2) - self.assertEqual(token.stop, 12) - self.assertEqual(token.text, 'fababbooabb') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('foaboao') - lexer = self.getLexer(stream) - - try: - token = lexer.nextToken() - self.fail(token) - - except antlr3.EarlyExitException as exc: - self.assertEqual(exc.unexpectedType, 'o') - self.assertEqual(exc.charPositionInLine, 6) - self.assertEqual(exc.line, 1) - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t008lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t008lexer.g deleted file mode 100644 index 2a7904e8..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t008lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t008lexer; -options { - language = Python3; -} - -FOO: 'f' 'a'?; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t008lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t008lexer.py deleted file mode 100644 index f3b1ed9f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t008lexer.py +++ /dev/null @@ -1,66 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t008lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! - raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('ffaf') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 0) - self.assertEqual(token.stop, 0) - self.assertEqual(token.text, 'f') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 1) - self.assertEqual(token.stop, 2) - self.assertEqual(token.text, 'fa') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.FOO) - self.assertEqual(token.start, 3) - self.assertEqual(token.stop, 3) - self.assertEqual(token.text, 'f') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('fafb') - lexer = self.getLexer(stream) - - lexer.nextToken() - lexer.nextToken() - try: - token = lexer.nextToken() - self.fail(token) - - except antlr3.MismatchedTokenException as exc: - self.assertEqual(exc.unexpectedType, 'b') - self.assertEqual(exc.charPositionInLine, 3) - self.assertEqual(exc.line, 1) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t009lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t009lexer.g deleted file mode 100644 index a04b5b43..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t009lexer.g +++ /dev/null @@ -1,6 +0,0 @@ -lexer grammar t009lexer; -options { - language = Python3; -} - -DIGIT: '0' .. 
'9'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t009lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t009lexer.py deleted file mode 100644 index bf60bce8..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t009lexer.py +++ /dev/null @@ -1,67 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t009lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! - raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('085') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.DIGIT) - self.assertEqual(token.start, 0) - self.assertEqual(token.stop, 0) - self.assertEqual(token.text, '0') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.DIGIT) - self.assertEqual(token.start, 1) - self.assertEqual(token.stop, 1) - self.assertEqual(token.text, '8') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.DIGIT) - self.assertEqual(token.start, 2) - self.assertEqual(token.stop, 2) - self.assertEqual(token.text, '5') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('2a') - lexer = self.getLexer(stream) - - lexer.nextToken() - try: - token = lexer.nextToken() - self.fail(token) - - except antlr3.MismatchedSetException as exc: - # TODO: This should provide more useful information - self.assertIsNone(exc.expecting) - self.assertEqual(exc.unexpectedType, 'a') - self.assertEqual(exc.charPositionInLine, 1) - self.assertEqual(exc.line, 1) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t010lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t010lexer.g deleted file mode 100644 index 3a7524d4..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t010lexer.g +++ /dev/null @@ -1,7 +0,0 @@ -lexer grammar t010lexer; -options { - language = Python3; -} - -IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; -WS: (' ' | '\n')+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t010lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t010lexer.py deleted file mode 100644 index 9cedea30..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t010lexer.py +++ /dev/null @@ -1,78 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t010lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('foobar _Ab98 \n A12sdf') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.IDENTIFIER) - self.assertEqual(token.start, 0) - self.assertEqual(token.stop, 5) - self.assertEqual(token.text, 'foobar') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.WS) - self.assertEqual(token.start, 6) - self.assertEqual(token.stop, 6) - self.assertEqual(token.text, ' ') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.IDENTIFIER) - self.assertEqual(token.start, 7) - self.assertEqual(token.stop, 11) - self.assertEqual(token.text, '_Ab98') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.WS) - self.assertEqual(token.start, 12) - self.assertEqual(token.stop, 14) - self.assertEqual(token.text, ' \n ') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.IDENTIFIER) - self.assertEqual(token.start, 15) - self.assertEqual(token.stop, 20) - self.assertEqual(token.text, 'A12sdf') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('a-b') - lexer = self.getLexer(stream) - - lexer.nextToken() - try: - token = lexer.nextToken() - self.fail(token) - - except antlr3.NoViableAltException as exc: - self.assertEqual(exc.unexpectedType, '-') - self.assertEqual(exc.charPositionInLine, 1) - self.assertEqual(exc.line, 1) - - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t011lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t011lexer.g deleted file mode 100644 index 17d01eae..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t011lexer.g +++ /dev/null @@ -1,19 +0,0 @@ -lexer grammar t011lexer; -options { - language = Python3; -} - -IDENTIFIER: - ('a'..'z'|'A'..'Z'|'_') - ('a'..'z' - |'A'..'Z' - |'0'..'9' - |'_' - { - print("Underscore") - print("foo") - } - )* - ; - -WS: (' ' | '\n')+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t011lexer.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t011lexer.py deleted file mode 100644 index b417826b..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t011lexer.py +++ /dev/null @@ -1,78 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t011lexer(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - stream = antlr3.StringStream('foobar _Ab98 \n A12sdf') - lexer = self.getLexer(stream) - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.IDENTIFIER) - self.assertEqual(token.start, 0) - self.assertEqual(token.stop, 5) - self.assertEqual(token.text, 'foobar') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.WS) - self.assertEqual(token.start, 6) - self.assertEqual(token.stop, 6) - self.assertEqual(token.text, ' ') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.IDENTIFIER) - self.assertEqual(token.start, 7) - self.assertEqual(token.stop, 11) - self.assertEqual(token.text, '_Ab98') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.WS) - self.assertEqual(token.start, 12) - self.assertEqual(token.stop, 14) - self.assertEqual(token.text, ' \n ') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.IDENTIFIER) - self.assertEqual(token.start, 15) - self.assertEqual(token.stop, 20) - self.assertEqual(token.text, 'A12sdf') - - token = lexer.nextToken() - self.assertEqual(token.type, self.lexerModule.EOF) - - - def testMalformedInput(self): - stream = antlr3.StringStream('a-b') - lexer = self.getLexer(stream) - - lexer.nextToken() - try: - token = lexer.nextToken() - self.fail(token) - - except antlr3.NoViableAltException as exc: - self.assertEqual(exc.unexpectedType, '-') - self.assertEqual(exc.charPositionInLine, 1) - self.assertEqual(exc.line, 1) - - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXML.input b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXML.input deleted file mode 100644 index 1815a9f2..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXML.input +++ /dev/null @@ -1,21 +0,0 @@ - - - - - -]> - - -Text - -öäüß -& -< - - - - \ No newline at end of file diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXML.output b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXML.output deleted file mode 100644 index 825c37fc..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXML.output +++ /dev/null @@ -1,39 +0,0 @@ -XML declaration -Attr: version='1.0' -ROOTELEMENT: component -INTERNAL DTD: [ - - - - -] -Start Tag: component -Attr: attr="val'ue" -Attr: attr2='val"ue' -PCDATA: " -" -Comment: "" -PCDATA: " -Text -" -CDATA: "" -PCDATA: " -öäüß -& -< -" -PI: xtal -Attr: cursor='11' -PCDATA: " -" -Empty Element: sub -PCDATA: " -" -Start Tag: sub -End Tag: sub -PCDATA: " -" -End Tag: component diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXML.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXML.py deleted file mode 100644 index 40d67bbe..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXML.py +++ /dev/null @@ -1,120 +0,0 @@ -import antlr3 -import testbase -import unittest -import os -import sys -from io import StringIO -import textwrap - -class t012lexerXML(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar('t012lexerXMLLexer.g') - - - def lexerClass(self, base): - class TLexer(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
- raise re - - return TLexer - - - def testValid(self): - inputPath = os.path.splitext(__file__)[0] + '.input' - with open(inputPath) as f: - data = f.read() - stream = antlr3.StringStream(data) - lexer = self.getLexer(stream) - - while True: - token = lexer.nextToken() - if token.type == self.lexerModule.EOF: - break - - - output = lexer.outbuf.getvalue() - - outputPath = os.path.splitext(__file__)[0] + '.output' - - with open(outputPath) as f: - testOutput = f.read() - - self.assertEqual(output, testOutput) - - - def testMalformedInput1(self): - input = textwrap.dedent("""\ - - - - """) - - stream = antlr3.StringStream(input) - lexer = self.getLexer(stream) - - try: - while True: - token = lexer.nextToken() - # Should raise NoViableAltException before hitting EOF - if token.type == antlr3.EOF: - self.fail() - - except antlr3.NoViableAltException as exc: - self.assertEqual(exc.unexpectedType, '>') - self.assertEqual(exc.charPositionInLine, 11) - self.assertEqual(exc.line, 2) - - - def testMalformedInput2(self): - input = textwrap.dedent("""\ - - - - """) - - stream = antlr3.StringStream(input) - lexer = self.getLexer(stream) - - try: - while True: - token = lexer.nextToken() - # Should raise NoViableAltException before hitting EOF - if token.type == antlr3.EOF: - self.fail() - - except antlr3.MismatchedSetException as exc: - self.assertEqual(exc.unexpectedType, 't') - self.assertEqual(exc.charPositionInLine, 2) - self.assertEqual(exc.line, 1) - - - def testMalformedInput3(self): - input = textwrap.dedent("""\ - - - - """) - - stream = antlr3.StringStream(input) - lexer = self.getLexer(stream) - - try: - while True: - token = lexer.nextToken() - # Should raise NoViableAltException before hitting EOF - if token.type == antlr3.EOF: - self.fail() - - except antlr3.NoViableAltException as exc: - self.assertEqual(exc.unexpectedType, 'a') - self.assertEqual(exc.charPositionInLine, 11) - self.assertEqual(exc.line, 2) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXMLLexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXMLLexer.g deleted file mode 100644 index 23e566a9..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t012lexerXMLLexer.g +++ /dev/null @@ -1,132 +0,0 @@ -lexer grammar t012lexerXMLLexer; -options { - language = Python3; -} - -@header { -from io import StringIO -} - -@lexer::init { -self.outbuf = StringIO() -} - -@lexer::members { -def output(self, line): - self.outbuf.write(line + "\n") -} - -DOCUMENT - : XMLDECL? WS? DOCTYPE? WS? ELEMENT WS? - ; - -fragment DOCTYPE - : - '' - ; - -fragment INTERNAL_DTD : '[' (options {greedy=false;} : .)* ']' ; - -fragment PI : - '' - ; - -fragment XMLDECL : - '' - ; - - -fragment ELEMENT - : ( START_TAG - (ELEMENT - | t=PCDATA - {self.output('PCDATA: "{}"'.format($t.text))} - | t=CDATA - {self.output('CDATA: "{}"'.format($t.text))} - | t=COMMENT - {self.output('Comment: "{}"'.format($t.text))} - | pi=PI - )* - END_TAG - | EMPTY_ELEMENT - ) - ; - -fragment START_TAG - : '<' WS? name=GENERIC_ID WS? - {self.output("Start Tag: "+name.text)} - ( ATTRIBUTE WS? )* '>' - ; - -fragment EMPTY_ELEMENT - : '<' WS? name=GENERIC_ID WS? - {self.output("Empty Element: "+name.text)} - ( ATTRIBUTE WS? )* '/>' - ; - -fragment ATTRIBUTE - : name=GENERIC_ID WS? '=' WS? 
value=VALUE - {self.output("Attr: {}={}".format(name.text, value.text))} - ; - -fragment END_TAG - : '' - {self.output("End Tag: "+name.text)} - ; - -fragment COMMENT - : '' - ; - -fragment CDATA - : '' - ; - -fragment PCDATA : (~'<')+ ; - -fragment VALUE : - ( '\"' (~'\"')* '\"' - | '\'' (~'\'')* '\'' - ) - ; - -fragment GENERIC_ID - : ( LETTER | '_' | ':') - ( options {greedy=true;} : LETTER | '0'..'9' | '.' | '-' | '_' | ':' )* - ; - -fragment LETTER - : 'a'..'z' - | 'A'..'Z' - ; - -fragment WS : - ( ' ' - | '\t' - | ( '\n' - | '\r\n' - | '\r' - ) - )+ - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t013parser.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t013parser.g deleted file mode 100644 index bf97d774..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t013parser.g +++ /dev/null @@ -1,23 +0,0 @@ -grammar t013parser; -options { - language = Python3; -} - -@parser::init { -self.identifiers = [] -self.reportedErrors = [] -} - -@parser::members { -def foundIdentifier(self, name): - self.identifiers.append(name) - -def emitErrorMessage(self, msg): - self.reportedErrors.append(msg) -} - -document: - t=IDENTIFIER {self.foundIdentifier($t.text)} - ; - -IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t013parser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t013parser.py deleted file mode 100644 index 4562e36f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t013parser.py +++ /dev/null @@ -1,35 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t013parser(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.document() - - self.assertEqual(parser.reportedErrors, []) - self.assertEqual(parser.identifiers, ['foobar']) - - - def testMalformedInput1(self): - cStream = antlr3.StringStream('') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.document() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t014parser.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t014parser.g deleted file mode 100644 index 3d58d186..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t014parser.g +++ /dev/null @@ -1,35 +0,0 @@ -grammar t014parser; -options { - language = Python3; -} - -@parser::init { -self.events = [] -self.reportedErrors = [] -} - -@parser::members { -def emitErrorMessage(self, msg): - self.reportedErrors.append(msg) -} - - -document: - ( declaration - | call - )* - EOF - ; - -declaration: - 'var' t=IDENTIFIER ';' - {self.events.append(('decl', $t.text))} - ; - -call: - t=IDENTIFIER '(' ')' ';' - {self.events.append(('call', $t.text))} - ; - -IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; -WS: (' '|'\r'|'\t'|'\n') {$channel=HIDDEN;}; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t014parser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t014parser.py deleted file mode 100644 index ae071d7c..00000000 --- 
a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t014parser.py +++ /dev/null @@ -1,66 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t014parser(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - cStream = antlr3.StringStream('var foobar; gnarz(); var blupp; flupp ( ) ;') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.document() - - self.assertEqual(parser.reportedErrors, []) - self.assertEqual(parser.events, - [('decl', 'foobar'), ('call', 'gnarz'), - ('decl', 'blupp'), ('call', 'flupp')]) - - - def testMalformedInput1(self): - cStream = antlr3.StringStream('var; foo();') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.document() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors) - self.assertEqual(parser.events, []) - - - def testMalformedInput2(self): - cStream = antlr3.StringStream('var foobar(); gnarz();') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.document() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors) - self.assertEqual(parser.events, [('call', 'gnarz')]) - - - def testMalformedInput3(self): - cStream = antlr3.StringStream('gnarz(; flupp();') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.document() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors) - self.assertEqual(parser.events, [('call', 'gnarz'), ('call', 'flupp')]) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t015calc.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t015calc.g deleted file mode 100644 index 54f17ece..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t015calc.g +++ /dev/null @@ -1,54 +0,0 @@ -grammar t015calc; -options { - language = Python3; -} - -@header { -import math -} - -@parser::init { -self.reportedErrors = [] -} - -@parser::members { -def emitErrorMessage(self, msg): - self.reportedErrors.append(msg) -} - -evaluate returns [result]: r=expression {result = r}; - -expression returns [result]: r=mult ( - '+' r2=mult {r += r2} - | '-' r2=mult {r -= r2} - )* {result = r}; - -mult returns [result]: r=log ( - '*' r2=log {r *= r2} - | '/' r2=log {r /= r2} -// | '%' r2=log {r %= r2} - )* {result = r}; - -log returns [result]: 'ln' r=exp {result = math.log(r)} - | r=exp {result = r} - ; - -exp returns [result]: r=atom ('^' r2=atom {r = math.pow(r,r2)} )? {result = r} - ; - -atom returns [result]: - n=INTEGER {result = int($n.text)} - | n=DECIMAL {result = float($n.text)} - | '(' r=expression {result = r} ')' - | 'PI' {result = math.pi} - | 'E' {result = math.e} - ; - -INTEGER: DIGIT+; - -DECIMAL: DIGIT+ '.' 
DIGIT+; - -fragment -DIGIT: '0'..'9'; - -WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN}; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t015calc.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t015calc.py deleted file mode 100644 index a7a56390..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t015calc.py +++ /dev/null @@ -1,47 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t015calc(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def _evaluate(self, expr, expected, errors=[]): - cStream = antlr3.StringStream(expr) - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - result = parser.evaluate() - self.assertEqual(result, expected) - self.assertEqual(len(parser.reportedErrors), len(errors), - parser.reportedErrors) - - - def testValid01(self): - self._evaluate("1 + 2", 3) - - - def testValid02(self): - self._evaluate("1 + 2 * 3", 7) - - - def testValid03(self): - self._evaluate("10 / 2", 5) - - - def testValid04(self): - self._evaluate("6 + 2*(3+1) - 4", 10) - - - def testMalformedInput(self): - self._evaluate("6 - (2*1", 4, ["mismatched token at pos 8"]) - - # FIXME: most parse errors result in TypeErrors in action code, because - # rules return None, which is then added/multiplied... to integers. - # evaluate("6 - foo 2", 4, ["some error"]) - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t016actions.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t016actions.g deleted file mode 100644 index f6def132..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t016actions.g +++ /dev/null @@ -1,31 +0,0 @@ -grammar t016actions; -options { - language = Python3; -} - -declaration returns [name] - : functionHeader ';' - {$name = $functionHeader.name} - ; - -functionHeader returns [name] - : type ID - {$name = $ID.text} - ; - -type - : 'int' - | 'char' - | 'void' - ; - -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - -WS : ( ' ' - | '\t' - | '\r' - | '\n' - )+ - {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t016actions.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t016actions.py deleted file mode 100644 index 60ea53ac..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t016actions.py +++ /dev/null @@ -1,20 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t016actions(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - cStream = antlr3.StringStream("int foo;") - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - name = parser.declaration() - self.assertEqual(name, 'foo') - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t017parser.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t017parser.g deleted file mode 100644 index 20b4724a..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t017parser.g +++ /dev/null @@ -1,91 +0,0 @@ -grammar t017parser; - -options { - language = Python3; -} - -program - : declaration+ - ; - -declaration - : variable - | functionHeader ';' - | functionHeader block - ; - -variable - : type declarator ';' - ; - -declarator - : ID - ; - -functionHeader - : type ID '(' ( formalParameter ( ',' formalParameter )* )? 
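
Note: the embedded actions in t015calc encode the usual precedence — mult binds tighter than '+'/'-', and parentheses group — which is why testValid04 expects 6 + 2*(3+1) - 4 == 10. As a plain-Python cross-check of the same integer subset (a sketch, not the generated parser):

    import re

    def evaluate(expr):
        # Recursive descent mirroring expression -> mult -> atom.
        tokens = re.findall(r'\d+|[()+\-*/]', expr)
        pos = 0

        def peek():
            return tokens[pos] if pos < len(tokens) else None

        def next_token():
            nonlocal pos
            tok = tokens[pos]
            pos += 1
            return tok

        def atom():
            if peek() == '(':
                next_token()            # '('
                value = expression()
                next_token()            # ')'
                return value
            return int(next_token())

        def mult():
            value = atom()
            while peek() in ('*', '/'):
                if next_token() == '*':
                    value *= atom()
                else:
                    value /= atom()
            return value

        def expression():
            value = mult()
            while peek() in ('+', '-'):
                if next_token() == '+':
                    value += mult()
                else:
                    value -= mult()
            return value

        return expression()

    assert evaluate('6 + 2*(3+1) - 4') == 10   # matches testValid04 above
    assert evaluate('10 / 2') == 5             # matches testValid03 above
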
')' - ; - -formalParameter - : type declarator - ; - -type - : 'int' - | 'char' - | 'void' - | ID - ; - -block - : '{' - variable* - stat* - '}' - ; - -stat: forStat - | expr ';' - | block - | assignStat ';' - | ';' - ; - -forStat - : 'for' '(' assignStat ';' expr ';' assignStat ')' block - ; - -assignStat - : ID '=' expr - ; - -expr: condExpr - ; - -condExpr - : aexpr ( ('==' | '<') aexpr )? - ; - -aexpr - : atom ( '+' atom )* - ; - -atom - : ID - | INT - | '(' expr ')' - ; - -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - -INT : ('0'..'9')+ - ; - -WS : ( ' ' - | '\t' - | '\r' - | '\n' - )+ - {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t017parser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t017parser.py deleted file mode 100644 index 3add2ad3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t017parser.py +++ /dev/null @@ -1,58 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t017parser(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - def parserClass(self, base): - class TestParser(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.reportedErrors = [] - - - def emitErrorMessage(self, msg): - self.reportedErrors.append(msg) - - return TestParser - - - def testValid(self): - cStream = antlr3.StringStream("int foo;") - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.program() - - self.assertEqual(parser.reportedErrors, []) - - - def testMalformedInput1(self): - cStream = antlr3.StringStream('int foo() { 1+2 }') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.program() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors) - - - def testMalformedInput2(self): - cStream = antlr3.StringStream('int foo() { 1+; 1+2 }') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.program() - - # FIXME: currently strings with formatted errors are collected - # can't check error locations yet - self.assertEqual(len(parser.reportedErrors), 2, parser.reportedErrors) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.g deleted file mode 100644 index 40d8857b..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.g +++ /dev/null @@ -1,111 +0,0 @@ -grammar t018llstar; - -options { - language = Python3; -} - -@header { -from io import StringIO -} - -@init { -self.output = StringIO() -} - -program - : declaration+ - ; - -/** In this rule, the functionHeader left prefix on the last two - * alternatives is not LL(k) for a fixed k. However, it is - * LL(*). The LL(*) algorithm simply scans ahead until it sees - * either the ';' or the '{' of the block and then it picks - * the appropriate alternative. Lookhead can be arbitrarily - * long in theory, but is <=10 in most cases. Works great. - * Use ANTLRWorks to see the lookahead use (step by Location) - * and look for blue tokens in the input window pane. 
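
Note: the LL(*) comment below is the whole point of t018llstar — 'int bar(int x);' and 'int foo(int y, char d) { ... }' share an arbitrarily long prefix, and the choice between declaration and definition only falls out at the ';' or '{' after the parameter list. A sketch of that scan-ahead over a token list (token spellings are illustrative):

    def classify(tokens):
        # Scan past the balanced parameter list, then let the next token
        # decide: ';' -> declaration, '{' -> definition. This mirrors what
        # the LL(*) decision does with unbounded lookahead.
        i = tokens.index('(')
        depth = 0
        while True:
            if tokens[i] == '(':
                depth += 1
            elif tokens[i] == ')':
                depth -= 1
                if depth == 0:
                    break
            i += 1
        return 'declaration' if tokens[i + 1] == ';' else 'definition'

    assert classify(['int', 'bar', '(', 'int', 'x', ')', ';']) == 'declaration'
    assert classify(['int', 'foo', '(', ')', '{', '}']) == 'definition'
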
:) - */ -declaration - : variable - | functionHeader ';' - {self.output.write($functionHeader.name+" is a declaration\n")} - | functionHeader block - {self.output.write($functionHeader.name+" is a definition\n")} - ; - -variable - : type declarator ';' - ; - -declarator - : ID - ; - -functionHeader returns [name] - : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' - {$name = $ID.text} - ; - -formalParameter - : type declarator - ; - -type - : 'int' - | 'char' - | 'void' - | ID - ; - -block - : '{' - variable* - stat* - '}' - ; - -stat: forStat - | expr ';' - | block - | assignStat ';' - | ';' - ; - -forStat - : 'for' '(' assignStat ';' expr ';' assignStat ')' block - ; - -assignStat - : ID '=' expr - ; - -expr: condExpr - ; - -condExpr - : aexpr ( ('==' | '<') aexpr )? - ; - -aexpr - : atom ( '+' atom )* - ; - -atom - : ID - | INT - | '(' expr ')' - ; - -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - -INT : ('0'..'9')+ - ; - -WS : ( ' ' - | '\t' - | '\r' - | '\n' - )+ - {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.input b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.input deleted file mode 100644 index 1aa5a0d0..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.input +++ /dev/null @@ -1,12 +0,0 @@ -char c; -int x; - -void bar(int x); - -int foo(int y, char d) { - int i; - for (i=0; i<3; i=i+1) { - x=3; - y=5; - } -} diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.output b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.output deleted file mode 100644 index 757c53aa..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.output +++ /dev/null @@ -1,2 +0,0 @@ -bar is a declaration -foo is a definition diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.py deleted file mode 100644 index 9cc3e227..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t018llstar.py +++ /dev/null @@ -1,31 +0,0 @@ -import antlr3 -import testbase -import unittest -import os -import sys -from io import StringIO - -class t018llstar(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - inputPath = os.path.splitext(__file__)[0] + '.input' - with open(inputPath) as f: - cStream = antlr3.StringStream(f.read()) - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.program() - - output = parser.output.getvalue() - - outputPath = os.path.splitext(__file__)[0] + '.output' - with open(outputPath) as f: - testOutput = f.read() - - self.assertEqual(output, testOutput) - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t019lexer.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t019lexer.g deleted file mode 100644 index 0b986a00..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t019lexer.g +++ /dev/null @@ -1,64 +0,0 @@ -lexer grammar t019lexer; -options { - language=Python3; - filter=true; -} - -IMPORT - : 'import' WS name=QIDStar WS? ';' - ; - -/** Avoids having "return foo;" match as a field */ -RETURN - : 'return' (options {greedy=false;}:.)* ';' - ; - -CLASS - : 'class' WS name=ID WS? ('extends' WS QID WS?)? - ('implements' WS QID WS? (',' WS? QID WS?)*)? '{' - ; - -COMMENT - : '/*' (options {greedy=false;} : . 
)* '*/' - ; - -STRING - : '"' (options {greedy=false;}: ESC | .)* '"' - ; - -CHAR - : '\'' (options {greedy=false;}: ESC | .)* '\'' - ; - -WS : (' '|'\t'|'\n')+ - ; - -fragment -QID : ID ('.' ID)* - ; - -/** QID cannot see beyond end of token so using QID '.*'? somewhere won't - * ever match since k=1 lookahead in the QID loop of '.' will make it loop. - * I made this rule to compensate. - */ -fragment -QIDStar - : ID ('.' ID)* '.*'? - ; - -fragment -TYPE: QID '[]'? - ; - -fragment -ARG : TYPE WS ID - ; - -fragment -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')* - ; - -fragment -ESC : '\\' ('"'|'\''|'\\') - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t019lexer.input b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t019lexer.input deleted file mode 100644 index d01e1c1a..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t019lexer.input +++ /dev/null @@ -1,13 +0,0 @@ -import org.antlr.runtime.*; - -public class Main { - public static void main(String[] args) throws Exception { - for (int i=0; i ID // only visible, if b was called with True - | NUM - ; - - -/* rule scopes, from the book, final beta, p.148 */ - -c returns [res] -scope { - symbols -} -@init { - $c::symbols = set(); -} - : '{' c1* c2+ '}' - { $res = $c::symbols; } - ; - -c1 - : 'int' ID {$c::symbols.add($ID.text)} ';' - ; - -c2 - : ID '=' NUM ';' - { - if $ID.text not in $c::symbols: - raise RuntimeError($ID.text) - } - ; - -/* recursive rule scopes, from the book, final beta, p.150 */ - -d returns [res] -scope { - symbols -} -@init { - $d::symbols = set(); -} - : '{' d1* d2* '}' - { $res = $d::symbols; } - ; - -d1 - : 'int' ID {$d::symbols.add($ID.text)} ';' - ; - -d2 - : ID '=' NUM ';' - { - for s in reversed(range(len($d))): - if $ID.text in $d[s]::symbols: - break - else: - raise RuntimeError($ID.text) - } - | d - ; - -/* recursive rule scopes, access bottom-most scope */ - -e returns [res] -scope { - a -} -@after { - $res = $e::a; -} - : NUM { $e[0]::a = int($NUM.text); } - | '{' e '}' - ; - - -/* recursive rule scopes, access with negative index */ - -f returns [res] -scope { - a -} -@after { - $res = $f::a; -} - : NUM { $f[-2]::a = int($NUM.text); } - | '{' f '}' - ; - - -/* tokens */ - -ID : ('a'..'z')+ - ; - -NUM : ('0'..'9')+ - ; - -WS : (' '|'\n'|'\r')+ {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t022scopes.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t022scopes.py deleted file mode 100644 index 5dc1f2c0..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t022scopes.py +++ /dev/null @@ -1,159 +0,0 @@ -import antlr3 -import testbase -import unittest -import textwrap - - -class t022scopes(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def parserClass(self, base): - class TParser(base): - def emitErrorMessage(self, msg): - # report errors to /dev/null - pass - - def reportError(self, re): - # no error recovery yet, just crash! 
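
Note: the $c::symbols and $d[s]::symbols references above are ANTLR dynamic rule scopes — each invocation of the rule pushes a frame, and actions address either the innermost frame or walk outward by index. A plain-Python sketch of the frame handling behind rules c and d (the parsing itself is elided):

    class ScopeStack:
        # One frame per live rule invocation; the innermost frame is
        # what $c::symbols denotes.
        def __init__(self):
            self.frames = []

        def push(self):
            self.frames.append({'symbols': set()})

        def pop(self):
            return self.frames.pop()

        @property
        def top(self):
            return self.frames[-1]

    def assign(scopes, name):
        # c2: assignment is legal only if the innermost scope declared
        # the name (testc2 above expects RuntimeError('x')).
        if name not in scopes.top['symbols']:
            raise RuntimeError(name)

    def declared_somewhere(scopes, name):
        # d2 walks the frames outward, like `for s in reversed(range(len($d)))`.
        return any(name in f['symbols'] for f in reversed(scopes.frames))

    scopes = ScopeStack()
    scopes.push()                              # entering rule c
    scopes.top['symbols'].update({'i', 'j'})   # c1: int i; int j;
    assign(scopes, 'i')                        # c2: i = 0;  -> fine
    # assign(scopes, 'x') would raise RuntimeError('x'), as testc2 expects
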
- raise re - - return TParser - - - def testa1(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.a() - - - def testb1(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - self.assertRaises(antlr3.RecognitionException, parser.b, False) - - - def testb2(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.b(True) - - - def testc1(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { - int i; - int j; - i = 0; - } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - symbols = parser.c() - - self.assertEqual( - symbols, - set(['i', 'j']) - ) - - - def testc2(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { - int i; - int j; - i = 0; - x = 4; - } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - self.assertRaisesRegex(RuntimeError, r'x', parser.c) - - - def testd1(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { - int i; - int j; - i = 0; - { - int i; - int x; - x = 5; - } - } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - symbols = parser.d() - - self.assertEqual( - symbols, - set(['i', 'j']) - ) - - - def teste1(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { { { { 12 } } } } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - res = parser.e() - - self.assertEqual(res, 12) - - - def testf1(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { { { { 12 } } } } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - res = parser.f() - - self.assertIsNone(res) - - - def testf2(self): - cStream = antlr3.StringStream( - textwrap.dedent('''\ - { { 12 } } - ''')) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - res = parser.f() - - self.assertIsNone(res) - - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t023scopes.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t023scopes.g deleted file mode 100644 index bc94b8dd..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t023scopes.g +++ /dev/null @@ -1,18 +0,0 @@ -grammar t023scopes; - -options { - language=Python3; -} - -prog -scope { -name -} - : ID {$prog::name=$ID.text;} - ; - -ID : ('a'..'z')+ - ; - -WS : (' '|'\n'|'\r')+ {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t023scopes.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t023scopes.py deleted file mode 100644 index 4c33b8af..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t023scopes.py +++ /dev/null @@ -1,20 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t023scopes(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = 
self.getParser(tStream) - parser.prog() - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t024finally.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t024finally.g deleted file mode 100644 index a744de38..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t024finally.g +++ /dev/null @@ -1,19 +0,0 @@ -grammar t024finally; - -options { - language=Python3; -} - -prog returns [events] -@init {events = []} -@after {events.append('after')} - : ID {raise RuntimeError} - ; - catch [RuntimeError] {events.append('catch')} - finally {events.append('finally')} - -ID : ('a'..'z')+ - ; - -WS : (' '|'\n'|'\r')+ {$channel=HIDDEN} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t024finally.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t024finally.py deleted file mode 100644 index 24d0b71e..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t024finally.py +++ /dev/null @@ -1,23 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t024finally(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.prog() - - self.assertEqual(events, ['catch', 'finally']) - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t025lexerRulePropertyRef.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t025lexerRulePropertyRef.g deleted file mode 100644 index 05093759..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t025lexerRulePropertyRef.g +++ /dev/null @@ -1,18 +0,0 @@ -lexer grammar t025lexerRulePropertyRef; -options { - language = Python3; -} - -@lexer::init { -self.properties = [] -} - -IDENTIFIER: - ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - { -self.properties.append( - ($text, $type, $line, $pos, $index, $channel, $start, $stop) -) - } - ; -WS: (' ' | '\n')+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t025lexerRulePropertyRef.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t025lexerRulePropertyRef.py deleted file mode 100644 index 5b23c25b..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t025lexerRulePropertyRef.py +++ /dev/null @@ -1,54 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t025lexerRulePropertyRef(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - stream = antlr3.StringStream('foobar _Ab98 \n A12sdf') - lexer = self.getLexer(stream) - - while True: - token = lexer.nextToken() - if token.type == antlr3.EOF: - break - - self.assertEqual(len(lexer.properties), 3, lexer.properties) - - text, type, line, pos, index, channel, start, stop = lexer.properties[0] - self.assertEqual(text, 'foobar', lexer.properties[0]) - self.assertEqual(type, self.lexerModule.IDENTIFIER, lexer.properties[0]) - self.assertEqual(line, 1, lexer.properties[0]) - self.assertEqual(pos, 0, lexer.properties[0]) - self.assertEqual(index, -1, lexer.properties[0]) - self.assertEqual(channel, antlr3.DEFAULT_CHANNEL, lexer.properties[0]) - self.assertEqual(start, 0, lexer.properties[0]) - self.assertEqual(stop, 5, lexer.properties[0]) - - text, type, line, pos, index, channel, start, stop = lexer.properties[1] - self.assertEqual(text, '_Ab98', lexer.properties[1]) - self.assertEqual(type, 
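
Note: t024finally below pins down the ordering of rule-level catch/finally actions — the RuntimeError raised in the rule body bypasses the @after action, so only 'catch' and 'finally' are recorded. The generated rule reduces to this shape (a sketch with the grammar actions inlined as comments):

    def prog():
        events = []                      # @init {events = []}
        try:
            raise RuntimeError           # rule action: {raise RuntimeError}
            events.append('after')       # @after — never reached on this path
        except RuntimeError:
            events.append('catch')       # catch [RuntimeError]
        finally:
            events.append('finally')     # finally clause
        return events

    assert prog() == ['catch', 'finally']   # matches testValid1
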
self.lexerModule.IDENTIFIER, lexer.properties[1]) - self.assertEqual(line, 1, lexer.properties[1]) - self.assertEqual(pos, 7, lexer.properties[1]) - self.assertEqual(index, -1, lexer.properties[1]) - self.assertEqual(channel, antlr3.DEFAULT_CHANNEL, lexer.properties[1]) - self.assertEqual(start, 7, lexer.properties[1]) - self.assertEqual(stop, 11, lexer.properties[1]) - - text, type, line, pos, index, channel, start, stop = lexer.properties[2] - self.assertEqual(text, 'A12sdf', lexer.properties[2]) - self.assertEqual(type, self.lexerModule.IDENTIFIER, lexer.properties[2]) - self.assertEqual(line, 2, lexer.properties[2]) - self.assertEqual(pos, 1, lexer.properties[2]) - self.assertEqual(index, -1, lexer.properties[2]) - self.assertEqual(channel, antlr3.DEFAULT_CHANNEL, lexer.properties[2]) - self.assertEqual(start, 15, lexer.properties[2]) - self.assertEqual(stop, 20, lexer.properties[2]) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t026actions.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t026actions.g deleted file mode 100644 index 124be343..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t026actions.g +++ /dev/null @@ -1,39 +0,0 @@ -grammar t026actions; -options { - language = Python3; -} - -@lexer::init { - self.foobar = 'attribute;' -} - -prog -@init { - self.capture('init;') -} -@after { - self.capture('after;') -} - : IDENTIFIER EOF - ; - catch [ RecognitionException as exc ] { - self.capture('catch;') - raise - } - finally { - self.capture('finally;') - } - - -IDENTIFIER - : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - { - # a comment - self.capture('action;') - self.capture('{!r} {!r} {!r} {!r} {!r} {!r} {!r} {!r};'.format($text, $type, $line, $pos, $index, $channel, $start, $stop)) - if True: - self.capture(self.foobar) - } - ; - -WS: (' ' | '\n')+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t026actions.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t026actions.py deleted file mode 100644 index 20dc88b8..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t026actions.py +++ /dev/null @@ -1,68 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t026actions(testbase.ANTLRTest): - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._errors = [] - self._output = "" - - - def capture(self, t): - self._output += t - - - def emitErrorMessage(self, msg): - self._errors.append(msg) - - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._errors = [] - self._output = "" - - - def capture(self, t): - self._output += t - - - def emitErrorMessage(self, msg): - self._errors.append(msg) - - - return TLexer - - - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foobar _Ab98 \n A12sdf') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.prog() - - self.assertEqual( - parser._output, - 'init;after;finally;') - self.assertEqual( - lexer._output, - "action;'foobar' 4 1 0 -1 0 0 5;attribute;action;" - "'_Ab98' 4 1 7 -1 0 7 11;attribute;action;" - "'A12sdf' 4 2 1 -1 0 15 20;attribute;") - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t027eof.g 
b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t027eof.g deleted file mode 100644 index 5c633a2c..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t027eof.g +++ /dev/null @@ -1,8 +0,0 @@ -lexer grammar t027eof; - -options { - language=Python3; -} - -END: EOF; -SPACE: ' '; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t027eof.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t027eof.py deleted file mode 100644 index cf543b58..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t027eof.py +++ /dev/null @@ -1,25 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t027eof(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - @testbase.broken("That's not how EOF is supposed to be used", Exception) - def testValid1(self): - cStream = antlr3.StringStream(' ') - lexer = self.getLexer(cStream) - - tok = lexer.nextToken() - self.assertEqual(tok.type, self.lexerModule.SPACE, tok) - - tok = lexer.nextToken() - self.assertEqual(tok.type, self.lexerModule.END, tok) - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t028labelExpr.g.disabled b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t028labelExpr.g.disabled deleted file mode 100644 index d3ba76cb..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t028labelExpr.g.disabled +++ /dev/null @@ -1,5 +0,0 @@ -lexer grammar t028labelExpr; -ETAGO: (' ' ' '<'; -CDATA: '<'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t029synpredgate.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t029synpredgate.g deleted file mode 100644 index 169892a0..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t029synpredgate.g +++ /dev/null @@ -1,16 +0,0 @@ -lexer grammar t029synpredgate; -options { - language = Python3; -} - -FOO - : ('ab')=> A - | ('ac')=> B - ; - -fragment -A: 'a'; - -fragment -B: 'a'; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t029synpredgate.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t029synpredgate.py deleted file mode 100644 index b6586889..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t029synpredgate.py +++ /dev/null @@ -1,21 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t029synpredgate(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - stream = antlr3.StringStream('ac') - lexer = self.getLexer(stream) - token = lexer.nextToken() - - -if __name__ == '__main__': - unittest.main() - - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t030specialStates.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t030specialStates.g deleted file mode 100644 index 51451c41..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t030specialStates.g +++ /dev/null @@ -1,26 +0,0 @@ -grammar t030specialStates; -options { - language = Python3; -} - -@init { -self.cond = True -} - -@members { -def recover(self, input, re): - # no error recovery yet, just crash! - raise re -} - -r - : ( {self.cond}? NAME - | {not self.cond}? NAME WS+ NAME - ) - ( WS+ NAME )? 
- EOF - ; - -NAME: ('a'..'z') ('a'..'z' | '0'..'9')+; -NUMBER: ('0'..'9')+; -WS: ' '+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t030specialStates.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t030specialStates.py deleted file mode 100644 index 86c4f7cd..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t030specialStates.py +++ /dev/null @@ -1,47 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t030specialStates(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foo') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.r() - - - def testValid2(self): - cStream = antlr3.StringStream('foo name1') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.r() - - - def testValid3(self): - cStream = antlr3.StringStream('bar name1') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.cond = False - events = parser.r() - - - def testValid4(self): - cStream = antlr3.StringStream('bar name1 name2') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.cond = False - events = parser.r() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t031emptyAlt.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t031emptyAlt.g deleted file mode 100644 index de7d46ed..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t031emptyAlt.g +++ /dev/null @@ -1,16 +0,0 @@ -grammar t031emptyAlt; -options { - language = Python3; -} - -r - : NAME - ( {self.cond}?=> WS+ NAME - | - ) - EOF - ; - -NAME: ('a'..'z') ('a'..'z' | '0'..'9')+; -NUMBER: ('0'..'9')+; -WS: ' '+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t031emptyAlt.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t031emptyAlt.py deleted file mode 100644 index fcae8e1d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t031emptyAlt.py +++ /dev/null @@ -1,21 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t031emptyAlt(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid1(self): - cStream = antlr3.StringStream('foo') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.r() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t032subrulePredict.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t032subrulePredict.g deleted file mode 100644 index 557f51fc..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t032subrulePredict.g +++ /dev/null @@ -1,8 +0,0 @@ -grammar t032subrulePredict; -options { - language = Python3; -} - -a: 'BEGIN' b WS+ 'END'; -b: ( WS+ 'A' )+; -WS: ' '; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t032subrulePredict.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t032subrulePredict.py deleted file mode 100644 index 7b62add8..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t032subrulePredict.py +++ /dev/null @@ -1,44 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t032subrulePredict(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - 
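A note on the gated-predicate tests in this area: t030specialStates and t031emptyAlt choose between alternatives with {self.cond}? gates, so a driver must set that attribute on the generated parser before invoking the start rule, exactly as the testValid3/testValid4 cases do. A minimal sketch of the pattern, assuming the grammar has been compiled with the ANTLR 3.5 tool and that the generated modules are named t030specialStatesLexer / t030specialStatesParser (illustrative names, not part of this diff):

    import antlr3
    from t030specialStatesLexer import t030specialStatesLexer
    from t030specialStatesParser import t030specialStatesParser

    def parse_r(text, cond):
        # Standard ANTLR3 pipeline: char stream -> lexer -> token stream -> parser.
        cStream = antlr3.StringStream(text)
        lexer = t030specialStatesLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = t030specialStatesParser(tStream)
        # The {self.cond}? / {not self.cond}? gates in rule r consult this
        # attribute during prediction, so it must be set before r() is called.
        parser.cond = cond
        return parser.r()

    parse_r('foo', True)               # one-NAME alternative
    parse_r('bar name1 name2', False)  # two-NAME alternative plus optional tail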
def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream( - 'BEGIN A END' - ) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.a() - - - @testbase.broken("DFA tries to look beyond end of rule b", Exception) - def testValid2(self): - cStream = antlr3.StringStream( - ' A' - ) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.b() - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t033backtracking.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t033backtracking.g deleted file mode 100644 index 447fac38..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t033backtracking.g +++ /dev/null @@ -1,515 +0,0 @@ -grammar t033backtracking; -options { - language=Python3; - backtrack=true; - memoize=true; - k=2; -} - -scope Symbols { - types; -} - -@members { - def isTypeName(self, name): - for scope in reversed(self.Symbols_stack): - if name in scope.types: - return True - - return False - -} - -translation_unit -scope Symbols; // entire file is a scope -@init { - $Symbols::types = set() -} - : external_declaration+ - ; - -/** Either a function definition or any other kind of C decl/def. - * The LL(*) analysis algorithm fails to deal with this due to - * recursion in the declarator rules. I'm putting in a - * manual predicate here so that we don't backtrack over - * the entire function. Further, you get a better error - * as errors within the function itself don't make it fail - * to predict that it's a function. Weird errors previously. - * Remember: the goal is to avoid backtrack like the plague - * because it makes debugging, actions, and errors harder. - * - * Note that k=1 results in a much smaller predictor for the - * fixed lookahead; k=2 made a few extra thousand lines. ;) - * I'll have to optimize that in the future. - */ -external_declaration -options {k=1;} - : ( declaration_specifiers? declarator declaration* '{' )=> function_definition - | declaration - ; - -function_definition -scope Symbols; // put parameters and locals into same scope for now -@init { - $Symbols::types = set() -} - : declaration_specifiers? declarator -// ( declaration+ compound_statement // K&R style -// | compound_statement // ANSI style -// ) - ; - -declaration -scope { - isTypedef; -} -@init { - $declaration::isTypedef = False -} - : 'typedef' declaration_specifiers? {$declaration::isTypedef = True} - init_declarator_list ';' // special case, looking for typedef - | declaration_specifiers init_declarator_list? ';' - ; - -declaration_specifiers - : ( storage_class_specifier - | type_specifier - | type_qualifier - )+ - ; - -init_declarator_list - : init_declarator (',' init_declarator)* - ; - -init_declarator - : declarator //('=' initializer)? - ; - -storage_class_specifier - : 'extern' - | 'static' - | 'auto' - | 'register' - ; - -type_specifier - : 'void' - | 'char' - | 'short' - | 'int' - | 'long' - | 'float' - | 'double' - | 'signed' - | 'unsigned' -// | struct_or_union_specifier -// | enum_specifier - | type_id - ; - -type_id - : {self.isTypeName(self.input.LT(1).getText())}? 
IDENTIFIER -// {System.out.println($IDENTIFIER.text+" is a type");} - ; - -// struct_or_union_specifier -// options {k=3;} -// scope Symbols; // structs are scopes -// @init { -// $Symbols::types = set() -// } -// : struct_or_union IDENTIFIER? '{' struct_declaration_list '}' -// | struct_or_union IDENTIFIER -// ; - -// struct_or_union -// : 'struct' -// | 'union' -// ; - -// struct_declaration_list -// : struct_declaration+ -// ; - -// struct_declaration -// : specifier_qualifier_list struct_declarator_list ';' -// ; - -// specifier_qualifier_list -// : ( type_qualifier | type_specifier )+ -// ; - -// struct_declarator_list -// : struct_declarator (',' struct_declarator)* -// ; - -// struct_declarator -// : declarator (':' constant_expression)? -// | ':' constant_expression -// ; - -// enum_specifier -// options {k=3;} -// : 'enum' '{' enumerator_list '}' -// | 'enum' IDENTIFIER '{' enumerator_list '}' -// | 'enum' IDENTIFIER -// ; - -// enumerator_list -// : enumerator (',' enumerator)* -// ; - -// enumerator -// : IDENTIFIER ('=' constant_expression)? -// ; - -type_qualifier - : 'const' - | 'volatile' - ; - -declarator - : pointer? direct_declarator - | pointer - ; - -direct_declarator - : ( IDENTIFIER - { - if $declaration and $declaration::isTypedef: - $Symbols::types.add($IDENTIFIER.text) - print("define type "+$IDENTIFIER.text) - } - | '(' declarator ')' - ) - declarator_suffix* - ; - -declarator_suffix - : /*'[' constant_expression ']' - |*/ '[' ']' -// | '(' parameter_type_list ')' -// | '(' identifier_list ')' - | '(' ')' - ; - -pointer - : '*' type_qualifier+ pointer? - | '*' pointer - | '*' - ; - -// parameter_type_list -// : parameter_list (',' '...')? -// ; - -// parameter_list -// : parameter_declaration (',' parameter_declaration)* -// ; - -// parameter_declaration -// : declaration_specifiers (declarator|abstract_declarator)* -// ; - -// identifier_list -// : IDENTIFIER (',' IDENTIFIER)* -// ; - -// type_name -// : specifier_qualifier_list abstract_declarator? -// ; - -// abstract_declarator -// : pointer direct_abstract_declarator? -// | direct_abstract_declarator -// ; - -// direct_abstract_declarator -// : ( '(' abstract_declarator ')' | abstract_declarator_suffix ) abstract_declarator_suffix* -// ; - -// abstract_declarator_suffix -// : '[' ']' -// | '[' constant_expression ']' -// | '(' ')' -// | '(' parameter_type_list ')' -// ; - -// initializer -// : assignment_expression -// | '{' initializer_list ','? '}' -// ; - -// initializer_list -// : initializer (',' initializer)* -// ; - -// // E x p r e s s i o n s - -// argument_expression_list -// : assignment_expression (',' assignment_expression)* -// ; - -// additive_expression -// : (multiplicative_expression) ('+' multiplicative_expression | '-' multiplicative_expression)* -// ; - -// multiplicative_expression -// : (cast_expression) ('*' cast_expression | '/' cast_expression | '%' cast_expression)* -// ; - -// cast_expression -// : '(' type_name ')' cast_expression -// | unary_expression -// ; - -// unary_expression -// : postfix_expression -// | '++' unary_expression -// | '--' unary_expression -// | unary_operator cast_expression -// | 'sizeof' unary_expression -// | 'sizeof' '(' type_name ')' -// ; - -// postfix_expression -// : primary_expression -// ( '[' expression ']' -// | '(' ')' -// | '(' argument_expression_list ')' -// | '.' IDENTIFIER -// | '*' IDENTIFIER -// | '->' IDENTIFIER -// | '++' -// | '--' -// )* -// ; - -// unary_operator -// : '&' -// | '*' -// | '+' -// | '-' -// | '~' -// | '!' 
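The isTypeName/@members machinery above is the interesting part of this grammar: typedef names are tracked in a stack of Symbols scopes, and lookup walks the stack innermost-first. A plain-Python model of that dynamic-scope lookup (a sketch of the idea only; the generated parser manages the real Symbols_stack itself):

    class Scope:
        def __init__(self):
            self.types = set()

    class ScopeStack:
        def __init__(self):
            self.stack = []

        def push(self):
            # Entered by every rule declared with "scope Symbols".
            self.stack.append(Scope())

        def pop(self):
            self.stack.pop()

        def define(self, name):
            # A typedef declaration adds the name to the innermost scope.
            self.stack[-1].types.add(name)

        def is_type_name(self, name):
            # Innermost scope wins, mirroring the reversed() walk in @members.
            return any(name in s.types for s in reversed(self.stack))

    scopes = ScopeStack()
    scopes.push()              # translation_unit scope
    scopes.define('size_t')    # as if "typedef ... size_t;" had been parsed
    assert scopes.is_type_name('size_t')
    assert not scopes.is_type_name('foo')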
-// ; - -// primary_expression -// : IDENTIFIER -// | constant -// | '(' expression ')' -// ; - -// constant -// : HEX_LITERAL -// | OCTAL_LITERAL -// | DECIMAL_LITERAL -// | CHARACTER_LITERAL -// | STRING_LITERAL -// | FLOATING_POINT_LITERAL -// ; - -// ///// - -// expression -// : assignment_expression (',' assignment_expression)* -// ; - -// constant_expression -// : conditional_expression -// ; - -// assignment_expression -// : lvalue assignment_operator assignment_expression -// | conditional_expression -// ; - -// lvalue -// : unary_expression -// ; - -// assignment_operator -// : '=' -// | '*=' -// | '/=' -// | '%=' -// | '+=' -// | '-=' -// | '<<=' -// | '>>=' -// | '&=' -// | '^=' -// | '|=' -// ; - -// conditional_expression -// : logical_or_expression ('?' expression ':' conditional_expression)? -// ; - -// logical_or_expression -// : logical_and_expression ('||' logical_and_expression)* -// ; - -// logical_and_expression -// : inclusive_or_expression ('&&' inclusive_or_expression)* -// ; - -// inclusive_or_expression -// : exclusive_or_expression ('|' exclusive_or_expression)* -// ; - -// exclusive_or_expression -// : and_expression ('^' and_expression)* -// ; - -// and_expression -// : equality_expression ('&' equality_expression)* -// ; -// equality_expression -// : relational_expression (('=='|'!=') relational_expression)* -// ; - -// relational_expression -// : shift_expression (('<'|'>'|'<='|'>=') shift_expression)* -// ; - -// shift_expression -// : additive_expression (('<<'|'>>') additive_expression)* -// ; - -// // S t a t e m e n t s - -// statement -// : labeled_statement -// | compound_statement -// | expression_statement -// | selection_statement -// | iteration_statement -// | jump_statement -// ; - -// labeled_statement -// : IDENTIFIER ':' statement -// | 'case' constant_expression ':' statement -// | 'default' ':' statement -// ; - -// compound_statement -// scope Symbols; // blocks have a scope of symbols -// @init { -// $Symbols::types = {} -// } -// : '{' declaration* statement_list? '}' -// ; - -// statement_list -// : statement+ -// ; - -// expression_statement -// : ';' -// | expression ';' -// ; - -// selection_statement -// : 'if' '(' expression ')' statement (options {k=1; backtrack=false;}:'else' statement)? -// | 'switch' '(' expression ')' statement -// ; - -// iteration_statement -// : 'while' '(' expression ')' statement -// | 'do' statement 'while' '(' expression ')' ';' -// | 'for' '(' expression_statement expression_statement expression? ')' statement -// ; - -// jump_statement -// : 'goto' IDENTIFIER ';' -// | 'continue' ';' -// | 'break' ';' -// | 'return' ';' -// | 'return' expression ';' -// ; - -IDENTIFIER - : LETTER (LETTER|'0'..'9')* - ; - -fragment -LETTER - : '$' - | 'A'..'Z' - | 'a'..'z' - | '_' - ; - -CHARACTER_LITERAL - : '\'' ( EscapeSequence | ~('\''|'\\') ) '\'' - ; - -STRING_LITERAL - : '"' ( EscapeSequence | ~('\\'|'"') )* '"' - ; - -HEX_LITERAL : '0' ('x'|'X') HexDigit+ IntegerTypeSuffix? ; - -DECIMAL_LITERAL : ('0' | '1'..'9' '0'..'9'*) IntegerTypeSuffix? ; - -OCTAL_LITERAL : '0' ('0'..'7')+ IntegerTypeSuffix? ; - -fragment -HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ; - -fragment -IntegerTypeSuffix - : ('u'|'U')? ('l'|'L') - | ('u'|'U') ('l'|'L')? - ; - -FLOATING_POINT_LITERAL - : ('0'..'9')+ '.' ('0'..'9')* Exponent? FloatTypeSuffix? - | '.' ('0'..'9')+ Exponent? FloatTypeSuffix? - | ('0'..'9')+ Exponent FloatTypeSuffix? - | ('0'..'9')+ Exponent? FloatTypeSuffix - ; - -fragment -Exponent : ('e'|'E') ('+'|'-')? 
('0'..'9')+ ; - -fragment -FloatTypeSuffix : ('f'|'F'|'d'|'D') ; - -fragment -EscapeSequence - : '\\' ('b'|'t'|'n'|'f'|'r'|'\"'|'\''|'\\') - | OctalEscape - ; - -fragment -OctalEscape - : '\\' ('0'..'3') ('0'..'7') ('0'..'7') - | '\\' ('0'..'7') ('0'..'7') - | '\\' ('0'..'7') - ; - -fragment -UnicodeEscape - : '\\' 'u' HexDigit HexDigit HexDigit HexDigit - ; - -WS : (' '|'\r'|'\t'|'\u000C'|'\n') {$channel=HIDDEN;} - ; - -COMMENT - : '/*' ( options {greedy=false;} : . )* '*/' {$channel=HIDDEN;} - ; - -LINE_COMMENT - : '//' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;} - ; - -// ignore #line info for now -LINE_COMMAND - : '#' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;} - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t033backtracking.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t033backtracking.py deleted file mode 100644 index 8b5c66a3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t033backtracking.py +++ /dev/null @@ -1,31 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t033backtracking(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - @testbase.broken("Some bug in the tool", SyntaxError) - def testValid1(self): - cStream = antlr3.StringStream('int a;') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.translation_unit() - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t034tokenLabelPropertyRef.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t034tokenLabelPropertyRef.g deleted file mode 100644 index 5a0a35ed..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t034tokenLabelPropertyRef.g +++ /dev/null @@ -1,30 +0,0 @@ -grammar t034tokenLabelPropertyRef; -options { - language = Python3; -} - -a: t=A - { - print($t.text) - print($t.type) - print($t.line) - print($t.pos) - print($t.channel) - print($t.index) - #print($t.tree) - } - ; - -A: 'a'..'z'; - -WS : - ( ' ' - | '\t' - | ( '\n' - | '\r\n' - | '\r' - ) - )+ - { $channel = HIDDEN } - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t034tokenLabelPropertyRef.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t034tokenLabelPropertyRef.py deleted file mode 100644 index b94de131..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t034tokenLabelPropertyRef.py +++ /dev/null @@ -1,40 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t034tokenLabelPropertyRef(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
- raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' a') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.a() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t035ruleLabelPropertyRef.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t035ruleLabelPropertyRef.g deleted file mode 100644 index 3725d346..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t035ruleLabelPropertyRef.g +++ /dev/null @@ -1,16 +0,0 @@ -grammar t035ruleLabelPropertyRef; -options { - language = Python3; -} - -a returns [bla]: t=b - { - $bla = $t.start, $t.stop, $t.text - } - ; - -b: A+; - -A: 'a'..'z'; - -WS: ' '+ { $channel = HIDDEN }; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t035ruleLabelPropertyRef.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t035ruleLabelPropertyRef.py deleted file mode 100644 index 3347801c..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t035ruleLabelPropertyRef.py +++ /dev/null @@ -1,47 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t035ruleLabelPropertyRef(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' a a a a ') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - start, stop, text = parser.a() - - # first token of rule b is the 2nd token (counting hidden tokens) - self.assertEqual(start.index, 1, start) - - # first token of rule b is the 7th token (counting hidden tokens) - self.assertEqual(stop.index, 7, stop) - - self.assertEqual(text, "a a a a") - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t036multipleReturnValues.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t036multipleReturnValues.g deleted file mode 100644 index a3fc8a3d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t036multipleReturnValues.g +++ /dev/null @@ -1,25 +0,0 @@ -grammar t036multipleReturnValues; -options { - language = Python3; -} - -a returns [foo, bar]: A - { - $foo = "foo"; - $bar = "bar"; - } - ; - -A: 'a'..'z'; - -WS : - ( ' ' - | '\t' - | ( '\n' - | '\r\n' - | '\r' - ) - )+ - { $channel = HIDDEN } - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t036multipleReturnValues.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t036multipleReturnValues.py deleted file mode 100644 index 8dd65bea..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t036multipleReturnValues.py +++ /dev/null @@ -1,43 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t036multipleReturnValues(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
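For rules that declare several return values, as t036multipleReturnValues does with "a returns [foo, bar]", the generated rule method hands back an object carrying one attribute per declared value, which is what the test below checks. A sketch of the calling side, assuming compiled modules named t036multipleReturnValuesLexer / t036multipleReturnValuesParser (illustrative names):

    import antlr3
    from t036multipleReturnValuesLexer import t036multipleReturnValuesLexer
    from t036multipleReturnValuesParser import t036multipleReturnValuesParser

    cStream = antlr3.StringStream(' a')
    lexer = t036multipleReturnValuesLexer(cStream)
    tStream = antlr3.CommonTokenStream(lexer)
    parser = t036multipleReturnValuesParser(tStream)
    ret = parser.a()
    print(ret.foo, ret.bar)   # -> foo bar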
- raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' a') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - ret = parser.a() - self.assertEqual(ret.foo, 'foo') - self.assertEqual(ret.bar, 'bar') - - -if __name__ == '__main__': - unittest.main() - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t037rulePropertyRef.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t037rulePropertyRef.g deleted file mode 100644 index 2069db13..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t037rulePropertyRef.g +++ /dev/null @@ -1,15 +0,0 @@ -grammar t037rulePropertyRef; -options { - language = Python3; -} - -a returns [bla] -@after { - $bla = $start, $stop, $text -} - : A+ - ; - -A: 'a'..'z'; - -WS: ' '+ { $channel = HIDDEN }; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t037rulePropertyRef.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t037rulePropertyRef.py deleted file mode 100644 index bba4f3c9..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t037rulePropertyRef.py +++ /dev/null @@ -1,47 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t037rulePropertyRef(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' a a a a ') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - start, stop, text = parser.a().bla - - # first token of rule b is the 2nd token (counting hidden tokens) - self.assertEqual(start.index, 1, start) - - # first token of rule b is the 7th token (counting hidden tokens) - self.assertEqual(stop.index, 7, stop) - - self.assertEqual(text, "a a a a") - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t038lexerRuleLabel.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t038lexerRuleLabel.g deleted file mode 100644 index 8a6967df..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t038lexerRuleLabel.g +++ /dev/null @@ -1,28 +0,0 @@ -lexer grammar t038lexerRuleLabel; -options { - language = Python3; -} - -A: 'a'..'z' WS '0'..'9' - { - print($WS) - print($WS.type) - print($WS.line) - print($WS.pos) - print($WS.channel) - print($WS.index) - print($WS.text) - } - ; - -fragment WS : - ( ' ' - | '\t' - | ( '\n' - | '\r\n' - | '\r' - ) - )+ - { $channel = HIDDEN } - ; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t038lexerRuleLabel.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t038lexerRuleLabel.py deleted file mode 100644 index 7b2e55a3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t038lexerRuleLabel.py +++ /dev/null @@ -1,33 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t038lexerRuleLabel(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
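t037rulePropertyRef exposes the matched region of a rule through $start, $stop and $text, assembled in the @after action; its test unpacks them and checks token indices, which count hidden-channel tokens too. A sketch of the same check as a standalone driver, assuming compiled t037rulePropertyRefLexer / t037rulePropertyRefParser modules (illustrative names):

    import antlr3
    from t037rulePropertyRefLexer import t037rulePropertyRefLexer
    from t037rulePropertyRefParser import t037rulePropertyRefParser

    cStream = antlr3.StringStream(' a a a a ')
    lexer = t037rulePropertyRefLexer(cStream)
    tStream = antlr3.CommonTokenStream(lexer)
    parser = t037rulePropertyRefParser(tStream)
    start, stop, text = parser.a().bla
    # index counts hidden WS tokens as well, hence 1 and 7 rather than 0 and 3.
    print(start.index, stop.index, text)   # -> 1 7 a a a a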
- raise - - return TLexer - - - def testValid1(self): - cStream = antlr3.StringStream('a 2') - - lexer = self.getLexer(cStream) - - while True: - t = lexer.nextToken() - if t.type == antlr3.EOF: - break - print(t) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t039labels.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t039labels.g deleted file mode 100644 index 12aa649c..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t039labels.g +++ /dev/null @@ -1,18 +0,0 @@ -grammar t039labels; -options { - language = Python3; -} - -a returns [l] - : ids+=A ( ',' ids+=(A|B) )* C D w=. ids+=. F EOF - { l = ($ids, $w) } - ; - -A: 'a'..'z'; -B: '0'..'9'; -C: a='A' { print($a) }; -D: a='FOOBAR' { print($a) }; -E: 'GNU' a=. { print($a) }; -F: 'BLARZ' a=EOF { print($a) }; - -WS: ' '+ { $channel = HIDDEN }; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t039labels.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t039labels.py deleted file mode 100644 index 9744017d..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t039labels.py +++ /dev/null @@ -1,53 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t039labels(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream( - 'a, b, c, 1, 2 A FOOBAR GNU1 A BLARZ' - ) - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - ids, w = parser.a() - - self.assertEqual(len(ids), 6, ids) - self.assertEqual(ids[0].text, 'a', ids[0]) - self.assertEqual(ids[1].text, 'b', ids[1]) - self.assertEqual(ids[2].text, 'c', ids[2]) - self.assertEqual(ids[3].text, '1', ids[3]) - self.assertEqual(ids[4].text, '2', ids[4]) - self.assertEqual(ids[5].text, 'A', ids[5]) - - self.assertEqual(w.text, 'GNU1', w) - - -if __name__ == '__main__': - unittest.main() - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t040bug80.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t040bug80.g deleted file mode 100644 index dbd87c04..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t040bug80.g +++ /dev/null @@ -1,13 +0,0 @@ -lexer grammar t040bug80; -options { - language = Python3; -} - -ID_LIKE - : 'defined' - | {False}? Identifier - | Identifier - ; - -fragment -Identifier: 'a'..'z'+ ; // with just 'a', output compiles diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t040bug80.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t040bug80.py deleted file mode 100644 index 34c48b99..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t040bug80.py +++ /dev/null @@ -1,33 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t040bug80(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
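The t039labels grammar above collects tokens through list labels (ids+=A, ids+=(A|B), ids+=.) and grabs a wildcard token with w=., returning both as a tuple. A sketch of consuming those labels directly, assuming compiled t039labelsLexer / t039labelsParser modules (illustrative names):

    import antlr3
    from t039labelsLexer import t039labelsLexer
    from t039labelsParser import t039labelsParser

    cStream = antlr3.StringStream('a, b, c, 1, 2 A FOOBAR GNU1 A BLARZ')
    lexer = t039labelsLexer(cStream)
    tStream = antlr3.CommonTokenStream(lexer)
    parser = t039labelsParser(tStream)
    ids, w = parser.a()               # rule a returns l = ($ids, $w)
    print([t.text for t in ids])      # -> ['a', 'b', 'c', '1', '2', 'A']
    print(w.text)                     # -> GNU1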
- raise - - return TLexer - - - def testValid1(self): - cStream = antlr3.StringStream('defined') - lexer = self.getLexer(cStream) - while True: - t = lexer.nextToken() - if t.type == antlr3.EOF: - break - print(t) - - -if __name__ == '__main__': - unittest.main() - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t041parameters.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t041parameters.g deleted file mode 100644 index 44db5bfb..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t041parameters.g +++ /dev/null @@ -1,16 +0,0 @@ -grammar t041parameters; -options { - language = Python3; -} - -a[arg1, arg2] returns [l] - : A+ EOF - { - l = ($arg1, $arg2) - $arg1 = "gnarz" - } - ; - -A: 'a'..'z'; - -WS: ' '+ { $channel = HIDDEN }; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t041parameters.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t041parameters.py deleted file mode 100644 index e4bc8c07..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t041parameters.py +++ /dev/null @@ -1,45 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t041parameters(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream('a a a') - - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - r = parser.a('foo', 'bar') - - self.assertEqual(r, ('foo', 'bar')) - - -if __name__ == '__main__': - unittest.main() - - - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t042ast.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t042ast.g deleted file mode 100644 index 5d2b9b96..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t042ast.g +++ /dev/null @@ -1,353 +0,0 @@ -grammar t042ast; -options { - language = Python3; - output = AST; -} - -tokens { - VARDEF; - FLOAT; - EXPR; - BLOCK; - VARIABLE; - FIELD; - CALL; - INDEX; - FIELDACCESS; -} - -@init { -self.flag = False -} - -r1 - : INT ('+'^ INT)* - ; - -r2 - : 'assert'^ x=expression (':'! y=expression)? ';'! - ; - -r3 - : 'if'^ expression s1=statement ('else'! s2=statement)? - ; - -r4 - : 'while'^ expression statement - ; - -r5 - : 'return'^ expression? ';'! - ; - -r6 - : (INT|ID)+ - ; - -r7 - : INT -> - ; - -r8 - : 'var' ID ':' type -> ^('var' type ID) - ; - -r9 - : type ID ';' -> ^(VARDEF type ID) - ; - -r10 - : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text + ".0"))} - ; - -r11 - : expression -> ^(EXPR expression) - | -> EXPR - ; - -r12 - : ID (',' ID)* -> ID+ - ; - -r13 - : type ID (',' ID)* ';' -> ^(type ID+) - ; - -r14 - : expression? statement* type+ - -> ^(EXPR expression? 
statement* type+) - ; - -r15 - : INT -> INT INT - ; - -r16 - : 'int' ID (',' ID)* -> ^('int' ID)+ - ; - -r17 - : 'for' '(' start=statement ';' expression ';' next=statement ')' statement - -> ^('for' $start expression $next statement) - ; - -r18 - : t='for' -> ^(BLOCK) - ; - -r19 - : t='for' -> ^(BLOCK[$t]) - ; - -r20 - : t='for' -> ^(BLOCK[$t,"FOR"]) - ; - -r21 - : t='for' -> BLOCK - ; - -r22 - : t='for' -> BLOCK[$t] - ; - -r23 - : t='for' -> BLOCK[$t,"FOR"] - ; - -r24 - : r=statement expression -> ^($r expression) - ; - -r25 - : r+=statement (',' r+=statement)+ expression -> ^($r expression) - ; - -r26 - : r+=statement (',' r+=statement)+ -> ^(BLOCK $r+) - ; - -r27 - : r=statement expression -> ^($r ^($r expression)) - ; - -r28 - : ('foo28a'|'foo28b') -> - ; - -r29 - : (r+=statement)* -> ^(BLOCK $r+) - ; - -r30 - : statement* -> ^(BLOCK statement?) - ; - -r31 - : modifier type ID ('=' expression)? ';' - -> {self.flag == 0}? ^(VARDEF ID modifier* type expression?) - -> {self.flag == 1}? ^(VARIABLE ID modifier* type expression?) - -> ^(FIELD ID modifier* type expression?) - ; - -r32[which] - : ID INT -> {which==1}? ID - -> {which==2}? INT - -> // yield nothing as else-clause - ; - -r33 - : modifiers! statement - ; - -r34 - : modifiers! r34a[$modifiers.tree] - //| modifiers! r33b[$modifiers.tree] - ; - -r34a[mod] - : 'class' ID ('extends' sup=type)? - ( 'implements' i+=type (',' i+=type)*)? - '{' statement* '}' - -> ^('class' ID {$mod} ^('extends' $sup)? ^('implements' $i+)? statement* ) - ; - -r35 - : '{' 'extends' (sup=type)? '}' - -> ^('extends' $sup)? - ; - -r36 - : 'if' '(' expression ')' s1=statement - ( 'else' s2=statement -> ^('if' ^(EXPR expression) $s1 $s2) - | -> ^('if' ^(EXPR expression) $s1) - ) - ; - -r37 - : (INT -> INT) ('+' i=INT -> ^('+' $r37 $i) )* - ; - -r38 - : INT ('+'^ INT)* - ; - -r39 - : (primary->primary) // set return tree to just primary - ( '(' arg=expression ')' - -> ^(CALL $r39 $arg) - | '[' ie=expression ']' - -> ^(INDEX $r39 $ie) - | '.' p=primary - -> ^(FIELDACCESS $r39 $p) - )* - ; - -r40 - : (INT -> INT) ( ('+' i+=INT)* -> ^('+' $r40 $i*) ) ';' - ; - -r41 - : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';' - ; - -r42 - : ids+=ID (','! ids+=ID)* - ; - -r43 returns [res] - : ids+=ID! (','! ids+=ID!)* {$res = [id.text for id in $ids]} - ; - -r44 - : ids+=ID^ (','! ids+=ID^)* - ; - -r45 - : primary^ - ; - -r46 returns [res] - : ids+=primary! (','! ids+=primary!)* {$res = [id.text for id in $ids]} - ; - -r47 - : ids+=primary (','! ids+=primary)* - ; - -r48 - : ids+=. (','! ids+=.)* - ; - -r49 - : .^ ID - ; - -r50 - : ID - -> ^({CommonTree(CommonToken(type=FLOAT, text="1.0"))} ID) - ; - -/** templates tested: - tokenLabelPropertyRef_tree -*/ -r51 returns [res] - : ID t=ID ID - { $res = $t.tree } - ; - -/** templates tested: - rulePropertyRef_tree -*/ -r52 returns [res] -@after { - $res = $tree -} - : ID - ; - -/** templates tested: - ruleLabelPropertyRef_tree -*/ -r53 returns [res] - : t=primary - { $res = $t.tree } - ; - -/** templates tested: - ruleSetPropertyRef_tree -*/ -r54 returns [res] -@after { - $tree = $t.tree; -} - : ID t=expression ID - ; - -/** backtracking */ -r55 -options { backtrack=true; k=1; } - : (modifier+ INT)=> modifier+ expression - | modifier+ statement - ; - - -/** templates tested: - rewriteTokenRef with len(args)>0 -*/ -r56 - : t=ID* -> ID[$t,'foo'] - ; - -/** templates tested: - rewriteTokenRefRoot with len(args)>0 -*/ -r57 - : t=ID* -> ^(ID[$t,'foo']) - ; - -/** templates tested: - ??? 
-*/ -r58 - : ({CommonTree(CommonToken(type=FLOAT, text="2.0"))})^ - ; - -/** templates tested: - rewriteTokenListLabelRefRoot -*/ -r59 - : (t+=ID)+ statement -> ^($t statement)+ - ; - -primary - : ID - ; - -expression - : r1 - ; - -statement - : 'fooze' - | 'fooze2' - ; - -modifiers - : modifier+ - ; - -modifier - : 'public' - | 'private' - ; - -type - : 'int' - | 'bool' - ; - -ID : 'a'..'z' + ; -INT : '0'..'9' +; -WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;}; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t042ast.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t042ast.py deleted file mode 100644 index 559d5f12..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t042ast.py +++ /dev/null @@ -1,669 +0,0 @@ -import unittest -import textwrap -import antlr3 -import testbase - -class t042ast(testbase.ANTLRTest): -## def lexerClass(self, base): -## class TLexer(base): -## def reportError(self, re): -## # no error recovery yet, just crash! -## raise re - -## return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def parse(self, text, method, rArgs=(), **kwargs): - self.compileGrammar() #options='-trace') - - cStream = antlr3.StringStream(text) - self.lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(self.lexer) - self.parser = self.getParser(tStream) - - for attr, val in kwargs.items(): - setattr(self.parser, attr, val) - - return getattr(self.parser, method)(*rArgs) - - - def testR1(self): - r = self.parse("1 + 2", 'r1') - self.assertEqual( - r.tree.toStringTree(), - '(+ 1 2)' - ) - - - def testR2a(self): - r = self.parse("assert 2+3;", 'r2') - self.assertEqual( - r.tree.toStringTree(), - '(assert (+ 2 3))' - ) - - - def testR2b(self): - r = self.parse("assert 2+3 : 5;", 'r2') - self.assertEqual( - r.tree.toStringTree(), - '(assert (+ 2 3) 5)' - ) - - - def testR3a(self): - r = self.parse("if 1 fooze", 'r3') - self.assertEqual( - r.tree.toStringTree(), - '(if 1 fooze)' - ) - - - def testR3b(self): - r = self.parse("if 1 fooze else fooze", 'r3') - self.assertEqual( - r.tree.toStringTree(), - '(if 1 fooze fooze)' - ) - - - def testR4a(self): - r = self.parse("while 2 fooze", 'r4') - self.assertEqual( - r.tree.toStringTree(), - '(while 2 fooze)' - ) - - - def testR5a(self): - r = self.parse("return;", 'r5') - self.assertEqual( - r.tree.toStringTree(), - 'return' - ) - - - def testR5b(self): - r = self.parse("return 2+3;", 'r5') - self.assertEqual( - r.tree.toStringTree(), - '(return (+ 2 3))' - ) - - - def testR6a(self): - r = self.parse("3", 'r6') - self.assertEqual( - r.tree.toStringTree(), - '3' - ) - - - def testR6b(self): - r = self.parse("3 a", 'r6') - self.assertEqual( - r.tree.toStringTree(), - '3 a' - ) - - - def testR7(self): - r = self.parse("3", 'r7') - self.assertIsNone(r.tree) - - - def testR8(self): - r = self.parse("var foo:bool", 'r8') - self.assertEqual( - r.tree.toStringTree(), - '(var bool foo)' - ) - - - def testR9(self): - r = self.parse("int foo;", 'r9') - self.assertEqual( - r.tree.toStringTree(), - '(VARDEF int foo)' - ) - - - def testR10(self): - r = self.parse("10", 'r10') - self.assertEqual( - r.tree.toStringTree(), - '10.0' - ) - - - def testR11a(self): - r = self.parse("1+2", 'r11') - self.assertEqual( - r.tree.toStringTree(), - '(EXPR (+ 1 2))' - ) - - - def testR11b(self): - r = self.parse("", 'r11') - self.assertEqual( - r.tree.toStringTree(), - 'EXPR' - ) - - - def testR12a(self): - r = 
self.parse("foo", 'r12') - self.assertEqual( - r.tree.toStringTree(), - 'foo' - ) - - - def testR12b(self): - r = self.parse("foo, bar, gnurz", 'r12') - self.assertEqual( - r.tree.toStringTree(), - 'foo bar gnurz' - ) - - - def testR13a(self): - r = self.parse("int foo;", 'r13') - self.assertEqual( - r.tree.toStringTree(), - '(int foo)' - ) - - - def testR13b(self): - r = self.parse("bool foo, bar, gnurz;", 'r13') - self.assertEqual( - r.tree.toStringTree(), - '(bool foo bar gnurz)' - ) - - - def testR14a(self): - r = self.parse("1+2 int", 'r14') - self.assertEqual( - r.tree.toStringTree(), - '(EXPR (+ 1 2) int)' - ) - - - def testR14b(self): - r = self.parse("1+2 int bool", 'r14') - self.assertEqual( - r.tree.toStringTree(), - '(EXPR (+ 1 2) int bool)' - ) - - - def testR14c(self): - r = self.parse("int bool", 'r14') - self.assertEqual( - r.tree.toStringTree(), - '(EXPR int bool)' - ) - - - def testR14d(self): - r = self.parse("fooze fooze int bool", 'r14') - self.assertEqual( - r.tree.toStringTree(), - '(EXPR fooze fooze int bool)' - ) - - - def testR14e(self): - r = self.parse("7+9 fooze fooze int bool", 'r14') - self.assertEqual( - r.tree.toStringTree(), - '(EXPR (+ 7 9) fooze fooze int bool)' - ) - - - def testR15(self): - r = self.parse("7", 'r15') - self.assertEqual( - r.tree.toStringTree(), - '7 7' - ) - - - def testR16a(self): - r = self.parse("int foo", 'r16') - self.assertEqual( - r.tree.toStringTree(), - '(int foo)' - ) - - - def testR16b(self): - r = self.parse("int foo, bar, gnurz", 'r16') - - self.assertEqual( - r.tree.toStringTree(), - '(int foo) (int bar) (int gnurz)' - ) - - - def testR17a(self): - r = self.parse("for ( fooze ; 1 + 2 ; fooze ) fooze", 'r17') - self.assertEqual( - r.tree.toStringTree(), - '(for fooze (+ 1 2) fooze fooze)' - ) - - - def testR18a(self): - r = self.parse("for", 'r18') - self.assertEqual( - r.tree.toStringTree(), - 'BLOCK' - ) - - - def testR19a(self): - r = self.parse("for", 'r19') - self.assertEqual( - r.tree.toStringTree(), - 'for' - ) - - - def testR20a(self): - r = self.parse("for", 'r20') - self.assertEqual( - r.tree.toStringTree(), - 'FOR' - ) - - - def testR21a(self): - r = self.parse("for", 'r21') - self.assertEqual( - r.tree.toStringTree(), - 'BLOCK' - ) - - - def testR22a(self): - r = self.parse("for", 'r22') - self.assertEqual( - r.tree.toStringTree(), - 'for' - ) - - - def testR23a(self): - r = self.parse("for", 'r23') - self.assertEqual( - r.tree.toStringTree(), - 'FOR' - ) - - - def testR24a(self): - r = self.parse("fooze 1 + 2", 'r24') - self.assertEqual( - r.tree.toStringTree(), - '(fooze (+ 1 2))' - ) - - - def testR25a(self): - r = self.parse("fooze, fooze2 1 + 2", 'r25') - self.assertEqual( - r.tree.toStringTree(), - '(fooze (+ 1 2))' - ) - - - def testR26a(self): - r = self.parse("fooze, fooze2", 'r26') - self.assertEqual( - r.tree.toStringTree(), - '(BLOCK fooze fooze2)' - ) - - - def testR27a(self): - r = self.parse("fooze 1 + 2", 'r27') - self.assertEqual( - r.tree.toStringTree(), - '(fooze (fooze (+ 1 2)))' - ) - - - def testR28(self): - r = self.parse("foo28a", 'r28') - self.assertIsNone(r.tree) - - - def testR29(self): - self.assertRaises(RuntimeError, self.parse, "", 'r29') - - -# FIXME: broken upstream? 
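All of these t042ast cases follow one pattern: parse a snippet with a given start rule and compare the resulting AST against a LISP-style string. The same check can be packaged as a standalone helper; a sketch, assuming compiled t042astLexer / t042astParser modules (illustrative names), with parser attributes such as flag passed through as keyword arguments the way the parse() helper above does:

    import antlr3
    from t042astLexer import t042astLexer
    from t042astParser import t042astParser

    def ast_of(text, rule, *args, **attrs):
        cStream = antlr3.StringStream(text)
        lexer = t042astLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = t042astParser(tStream)
        for name, value in attrs.items():
            setattr(parser, name, value)   # e.g. flag=1 for rule r31
        r = getattr(parser, rule)(*args)
        return r.tree.toStringTree() if r.tree is not None else None

    assert ast_of('1 + 2', 'r1') == '(+ 1 2)'
    assert ast_of('3', 'r7') is None     # "INT ->" rewrites to nothing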
-## def testR30(self): -## try: -## r = self.parse("fooze fooze", 'r30') -## self.fail(r.tree.toStringTree()) -## except RuntimeError: -## pass - - - def testR31a(self): - r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=0) - self.assertEqual( - r.tree.toStringTree(), - '(VARDEF gnurz public int (+ 1 2))' - ) - - - def testR31b(self): - r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=1) - self.assertEqual( - r.tree.toStringTree(), - '(VARIABLE gnurz public int (+ 1 2))' - ) - - - def testR31c(self): - r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=2) - self.assertEqual( - r.tree.toStringTree(), - '(FIELD gnurz public int (+ 1 2))' - ) - - - def testR32a(self): - r = self.parse("gnurz 32", 'r32', [1], flag=2) - self.assertEqual( - r.tree.toStringTree(), - 'gnurz' - ) - - - def testR32b(self): - r = self.parse("gnurz 32", 'r32', [2], flag=2) - self.assertEqual( - r.tree.toStringTree(), - '32' - ) - - - def testR32c(self): - r = self.parse("gnurz 32", 'r32', [3], flag=2) - self.assertIsNone(r.tree) - - - def testR33a(self): - r = self.parse("public private fooze", 'r33') - self.assertEqual( - r.tree.toStringTree(), - 'fooze' - ) - - - def testR34a(self): - r = self.parse("public class gnurz { fooze fooze2 }", 'r34') - self.assertEqual( - r.tree.toStringTree(), - '(class gnurz public fooze fooze2)' - ) - - - def testR34b(self): - r = self.parse("public class gnurz extends bool implements int, bool { fooze fooze2 }", 'r34') - self.assertEqual( - r.tree.toStringTree(), - '(class gnurz public (extends bool) (implements int bool) fooze fooze2)' - ) - - - def testR35(self): - self.assertRaises(RuntimeError, self.parse, "{ extends }", 'r35') - - - def testR36a(self): - r = self.parse("if ( 1 + 2 ) fooze", 'r36') - self.assertEqual( - r.tree.toStringTree(), - '(if (EXPR (+ 1 2)) fooze)' - ) - - - def testR36b(self): - r = self.parse("if ( 1 + 2 ) fooze else fooze2", 'r36') - self.assertEqual( - r.tree.toStringTree(), - '(if (EXPR (+ 1 2)) fooze fooze2)' - ) - - - def testR37(self): - r = self.parse("1 + 2 + 3", 'r37') - self.assertEqual( - r.tree.toStringTree(), - '(+ (+ 1 2) 3)' - ) - - - def testR38(self): - r = self.parse("1 + 2 + 3", 'r38') - self.assertEqual( - r.tree.toStringTree(), - '(+ (+ 1 2) 3)' - ) - - - def testR39a(self): - r = self.parse("gnurz[1]", 'r39') - self.assertEqual( - r.tree.toStringTree(), - '(INDEX gnurz 1)' - ) - - - def testR39b(self): - r = self.parse("gnurz(2)", 'r39') - self.assertEqual( - r.tree.toStringTree(), - '(CALL gnurz 2)' - ) - - - def testR39c(self): - r = self.parse("gnurz.gnarz", 'r39') - self.assertEqual( - r.tree.toStringTree(), - '(FIELDACCESS gnurz gnarz)' - ) - - - def testR39d(self): - r = self.parse("gnurz.gnarz.gnorz", 'r39') - self.assertEqual( - r.tree.toStringTree(), - '(FIELDACCESS (FIELDACCESS gnurz gnarz) gnorz)' - ) - - - def testR40(self): - r = self.parse("1 + 2 + 3;", 'r40') - self.assertEqual( - r.tree.toStringTree(), - '(+ 1 2 3)' - ) - - - def testR41(self): - r = self.parse("1 + 2 + 3;", 'r41') - self.assertEqual( - r.tree.toStringTree(), - '(3 (2 1))' - ) - - - def testR42(self): - r = self.parse("gnurz, gnarz, gnorz", 'r42') - self.assertEqual( - r.tree.toStringTree(), - 'gnurz gnarz gnorz' - ) - - - def testR43(self): - r = self.parse("gnurz, gnarz, gnorz", 'r43') - self.assertIsNone(r.tree) - self.assertEqual( - r.res, - ['gnurz', 'gnarz', 'gnorz'] - ) - - - def testR44(self): - r = self.parse("gnurz, gnarz, gnorz", 'r44') - self.assertEqual( - r.tree.toStringTree(), - '(gnorz (gnarz gnurz))' - ) - - - def 
testR45(self): - r = self.parse("gnurz", 'r45') - self.assertEqual( - r.tree.toStringTree(), - 'gnurz' - ) - - - def testR46(self): - r = self.parse("gnurz, gnarz, gnorz", 'r46') - self.assertIsNone(r.tree) - self.assertEqual( - r.res, - ['gnurz', 'gnarz', 'gnorz'] - ) - - - def testR47(self): - r = self.parse("gnurz, gnarz, gnorz", 'r47') - self.assertEqual( - r.tree.toStringTree(), - 'gnurz gnarz gnorz' - ) - - - def testR48(self): - r = self.parse("gnurz, gnarz, gnorz", 'r48') - self.assertEqual( - r.tree.toStringTree(), - 'gnurz gnarz gnorz' - ) - - - def testR49(self): - r = self.parse("gnurz gnorz", 'r49') - self.assertEqual( - r.tree.toStringTree(), - '(gnurz gnorz)' - ) - - - def testR50(self): - r = self.parse("gnurz", 'r50') - self.assertEqual( - r.tree.toStringTree(), - '(1.0 gnurz)' - ) - - - def testR51(self): - r = self.parse("gnurza gnurzb gnurzc", 'r51') - self.assertEqual( - r.res.toStringTree(), - 'gnurzb' - ) - - - def testR52(self): - r = self.parse("gnurz", 'r52') - self.assertEqual( - r.res.toStringTree(), - 'gnurz' - ) - - - def testR53(self): - r = self.parse("gnurz", 'r53') - self.assertEqual( - r.res.toStringTree(), - 'gnurz' - ) - - - def testR54(self): - r = self.parse("gnurza 1 + 2 gnurzb", 'r54') - self.assertEqual( - r.tree.toStringTree(), - '(+ 1 2)' - ) - - - def testR55a(self): - r = self.parse("public private 1 + 2", 'r55') - self.assertEqual( - r.tree.toStringTree(), - 'public private (+ 1 2)' - ) - - - def testR55b(self): - r = self.parse("public fooze", 'r55') - self.assertEqual( - r.tree.toStringTree(), - 'public fooze' - ) - - - def testR56(self): - r = self.parse("a b c d", 'r56') - self.assertEqual( - r.tree.toStringTree(), - 'foo' - ) - - - def testR57(self): - r = self.parse("a b c d", 'r57') - self.assertEqual( - r.tree.toStringTree(), - 'foo' - ) - - - def testR59(self): - r = self.parse("a b c fooze", 'r59') - self.assertEqual( - r.tree.toStringTree(), - '(a fooze) (b fooze) (c fooze)' - ) - - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t043synpred.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t043synpred.g deleted file mode 100644 index 478b8be8..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t043synpred.g +++ /dev/null @@ -1,14 +0,0 @@ -grammar t043synpred; -options { - language = Python3; -} - -a: ((s+ P)=> s+ b)? E; -b: P 'foo'; - -s: S; - - -S: ' '; -P: '+'; -E: '>'; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t043synpred.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t043synpred.py deleted file mode 100644 index 9246de27..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t043synpred.py +++ /dev/null @@ -1,39 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class t043synpred(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def lexerClass(self, base): - class TLexer(base): - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def recover(self, input, re): - # no error recovery yet, just crash! 
- raise - - return TParser - - - def testValid1(self): - cStream = antlr3.StringStream(' +foo>') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - events = parser.a() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t044trace.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t044trace.g deleted file mode 100644 index e170bba2..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t044trace.g +++ /dev/null @@ -1,20 +0,0 @@ -grammar t044trace; -options { - language = Python3; -} - -@init { - self._stack = None -} - -a: '<' ((INT '+')=>b|c) '>'; -b: c ('+' c)*; -c: INT - { - if self._stack is None: - self._stack = self.getRuleInvocationStack() - } - ; - -INT: ('0'..'9')+; -WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;}; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t044trace.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t044trace.py deleted file mode 100644 index 2d60b61f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t044trace.py +++ /dev/null @@ -1,92 +0,0 @@ -import antlr3 -import testbase -import unittest - - -class T(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar(options='-trace') - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.traces = [] - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.traces = [] - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
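t044trace compiles its grammar with -trace, which makes every generated rule call traceIn/traceOut; the test subclasses record those calls so the assertions can verify rule nesting. The recording pattern is identical for the lexer and the parser, so it can be factored into one wrapper over any generated recognizer base; a small generalization of the subclasses used here (a sketch, not part of the original tests):

    def make_tracing(base):
        # Wrap a generated lexer or parser class so every rule entry and exit
        # is recorded as '>rule' / '<rule' in a traces list.
        class Tracing(base):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.traces = []

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>' + ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<' + ruleName)

        return Tracing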
- raise - - def getRuleInvocationStack(self): - return self._getRuleInvocationStack(base.__module__) - - return TParser - - - def testTrace(self): - cStream = antlr3.StringStream('< 1 + 2 + 3 >') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.a() - - self.assertEqual( - lexer.traces, - [ '>T__7', 'WS', 'INT', 'WS', 'T__6', 'WS', 'INT', 'WS', 'T__6', 'WS', 'INT', 'WS', 'T__8', 'a', '>synpred1_t044trace_fragment', 'b', '>c', - 'c', 'c', '') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.a() - - self.assertEqual(parser._stack, ['a', 'b', 'c']) - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t045dfabug.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t045dfabug.g deleted file mode 100644 index 436aefa9..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t045dfabug.g +++ /dev/null @@ -1,32 +0,0 @@ -grammar t045dfabug; -options { - language = Python3; - output = AST; -} - - -// this rule used to generate an infinite loop in DFA.predict -r -options { backtrack=true; } - : (modifier+ INT)=> modifier+ expression - | modifier+ statement - ; - -expression - : INT '+' INT - ; - -statement - : 'fooze' - | 'fooze2' - ; - -modifier - : 'public' - | 'private' - ; - -ID : 'a'..'z' + ; -INT : '0'..'9' +; -WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;}; - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t045dfabug.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t045dfabug.py deleted file mode 100644 index 76be15e2..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t045dfabug.py +++ /dev/null @@ -1,21 +0,0 @@ -import unittest -import textwrap -import antlr3 -import testbase - -class T(testbase.ANTLRTest): - - def testbug(self): - self.compileGrammar() - - cStream = antlr3.StringStream("public fooze") - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - - parser.r() - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t046rewrite.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t046rewrite.g deleted file mode 100644 index 58e40711..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t046rewrite.g +++ /dev/null @@ -1,54 +0,0 @@ -grammar t046rewrite; -options { - language=Python3; -} - -program -@init { - start = self.input.LT(1) -} - : method+ - { - self.input.insertBefore(start,"public class Wrapper {\n") - self.input.insertAfter($method.stop, "\n}\n") - } - ; - -method - : m='method' ID '(' ')' body - {self.input.replace($m, "public void");} - ; - -body -scope { - decls -} -@init { - $body::decls = set() -} - : lcurly='{' stat* '}' - { - for it in $body::decls: - self.input.insertAfter($lcurly, "\nint "+it+";") - } - ; - -stat: ID '=' expr ';' {$body::decls.add($ID.text);} - ; - -expr: mul ('+' mul)* - ; - -mul : atom ('*' atom)* - ; - -atom: ID - | INT - ; - -ID : ('a'..'z'|'A'..'Z')+ ; - -INT : ('0'..'9')+ ; - -WS : (' '|'\t'|'\n')+ {$channel=HIDDEN;} - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t046rewrite.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t046rewrite.py deleted file mode 100644 index be1f4aa1..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t046rewrite.py +++ /dev/null @@ -1,52 +0,0 @@ -import unittest -import textwrap -import antlr3 -import 
testbase - -class T(testbase.ANTLRTest): - def testRewrite(self): - self.compileGrammar() - - input = textwrap.dedent( - '''\ - method foo() { - i = 3; - k = i; - i = k*4; - } - - method bar() { - j = i*2; - } - ''') - - cStream = antlr3.StringStream(input) - lexer = self.getLexer(cStream) - tStream = antlr3.TokenRewriteStream(lexer) - parser = self.getParser(tStream) - parser.program() - - expectedOutput = textwrap.dedent('''\ - public class Wrapper { - public void foo() { - int k; - int i; - i = 3; - k = i; - i = k*4; - } - - public void bar() { - int j; - j = i*2; - } - } - - ''') - - self.assertEqual(str(tStream), expectedOutput) - - -if __name__ == '__main__': - unittest.main() - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t047treeparser.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t047treeparser.g deleted file mode 100644 index 30cd25e3..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t047treeparser.g +++ /dev/null @@ -1,113 +0,0 @@ -grammar t047treeparser; -options { - language=Python3; - output=AST; -} - -tokens { - VAR_DEF; - ARG_DEF; - FUNC_HDR; - FUNC_DECL; - FUNC_DEF; - BLOCK; -} - -program - : declaration+ - ; - -declaration - : variable - | functionHeader ';' -> ^(FUNC_DECL functionHeader) - | functionHeader block -> ^(FUNC_DEF functionHeader block) - ; - -variable - : type declarator ';' -> ^(VAR_DEF type declarator) - ; - -declarator - : ID - ; - -functionHeader - : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' - -> ^(FUNC_HDR type ID formalParameter+) - ; - -formalParameter - : type declarator -> ^(ARG_DEF type declarator) - ; - -type - : 'int' - | 'char' - | 'void' - | ID - ; - -block - : lc='{' - variable* - stat* - '}' - -> ^(BLOCK[$lc,"BLOCK"] variable* stat*) - ; - -stat: forStat - | expr ';'! - | block - | assignStat ';'! - | ';'! - ; - -forStat - : 'for' '(' start=assignStat ';' expr ';' next=assignStat ')' block - -> ^('for' $start expr $next block) - ; - -assignStat - : ID EQ expr -> ^(EQ ID expr) - ; - -expr: condExpr - ; - -condExpr - : aexpr ( ('=='^ | '<'^) aexpr )? - ; - -aexpr - : atom ( '+'^ atom )* - ; - -atom - : ID - | INT - | '(' expr ')' -> expr - ; - -FOR : 'for' ; -INT_TYPE : 'int' ; -CHAR: 'char'; -VOID: 'void'; - -ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* - ; - -INT : ('0'..'9')+ - ; - -EQ : '=' ; -EQEQ : '==' ; -LT : '<' ; -PLUS : '+' ; - -WS : ( ' ' - | '\t' - | '\r' - | '\n' - )+ - { $channel=HIDDEN } - ; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t047treeparser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t047treeparser.py deleted file mode 100644 index 5b866b29..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t047treeparser.py +++ /dev/null @@ -1,122 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase - -class T(testbase.ANTLRTest): - def walkerClass(self, base): - class TWalker(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.traces = [] - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
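The t046rewrite grammar above performs all of its edits through antlr3.TokenRewriteStream: insertBefore/insertAfter/replace are queued against token indexes and only materialize when the stream is rendered as text, so the original token buffer stays intact. A sketch of that API driven by hand, where the lexer module name is a hypothetical stand-in for the generated one:

    import antlr3
    from t046rewriteLexer import t046rewriteLexer   # hypothetical generated module

    tokens = antlr3.TokenRewriteStream(
        t046rewriteLexer(antlr3.StringStream("method foo() {}")))
    tokens.fillBuffer()                     # pull in every token up front
    tokens.replace(0, "public void")        # queue: rewrite token 0
    tokens.insertBefore(0, "// edited\n")   # queue: text ahead of token 0
    print(tokens.toOriginalString())        # the untouched input
    print(str(tokens))                      # the input with queued edits applied

Per the conflict rules exercised by the t048rewrite tests further down, an insert issued after a replace at the same index combines on the replace's left edge, while an insert that lands strictly inside a replaced range raises ValueError at render time.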
- raise - - return TWalker - - - def setUp(self): - self.compileGrammar() - self.compileGrammar('t047treeparserWalker.g', options='-trace') - - - def testWalker(self): - input = textwrap.dedent( - '''\ - char c; - int x; - - void bar(int x); - - int foo(int y, char d) { - int i; - for (i=0; i<3; i=i+1) { - x=3; - y=5; - } - } - ''') - - cStream = antlr3.StringStream(input) - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - r = parser.program() - - self.assertEqual( - r.tree.toStringTree(), - "(VAR_DEF char c) (VAR_DEF int x) (FUNC_DECL (FUNC_HDR void bar (ARG_DEF int x))) (FUNC_DEF (FUNC_HDR int foo (ARG_DEF int y) (ARG_DEF char d)) (BLOCK (VAR_DEF int i) (for (= i 0) (< i 3) (= i (+ i 1)) (BLOCK (= x 3) (= y 5)))))" - ) - - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = self.getWalker(nodes) - walker.program() - - # FIXME: need to crosscheck with Java target (compile walker with - # -trace option), if this is the real list. For now I'm happy that - # it does not crash ;) - self.assertEqual( - walker.traces, - [ '>program', '>declaration', '>variable', '>type', 'declarator', 'declaration', '>variable', '>type', 'declarator', - 'declaration', - '>functionHeader', '>type', 'formalParameter', - '>type', 'declarator', 'declaration', '>functionHeader', '>type', 'formalParameter', '>type', 'declarator', - 'formalParameter', '>type', - 'declarator', 'block', '>variable', '>type', 'declarator', 'stat', '>forStat', - '>expr', '>expr', '>atom', 'expr', - '>expr', '>atom', 'expr', '>atom', 'expr', '>expr', '>expr', '>atom', 'expr', '>atom', 'block', '>stat', '>expr', '>expr', '>atom', 'stat', '>expr', '>expr', '>atom', ' within boundaries of ' - r'previous '), - tokens.toString) - - def testInsertThenReplaceSameIndex(self): - tokens = self._parse("abc") - tokens.insertBefore(0, "0") - tokens.replace(0, "x") # supercedes insert at 0 - - result = tokens.toString() - expecting = "0xbc" - self.assertEqual(result, expecting) - - - def test2InsertMiddleIndex(self): - tokens = self._parse("abc") - tokens.insertBefore(1, "x") - tokens.insertBefore(1, "y") - - result = tokens.toString() - expecting = "ayxbc" - self.assertEqual(result, expecting) - - - def test2InsertThenReplaceIndex0(self): - tokens = self._parse("abc") - tokens.insertBefore(0, "x") - tokens.insertBefore(0, "y") - tokens.replace(0, "z") - - result = tokens.toString() - expecting = "yxzbc" - self.assertEqual(result, expecting) - - - def testReplaceThenInsertBeforeLastIndex(self): - tokens = self._parse("abc") - tokens.replace(2, "x") - tokens.insertBefore(2, "y") - - result = tokens.toString() - expecting = "abyx" - self.assertEqual(result, expecting) - - - def testInsertThenReplaceLastIndex(self): - tokens = self._parse("abc") - tokens.insertBefore(2, "y") - tokens.replace(2, "x") - - result = tokens.toString() - expecting = "abyx" - self.assertEqual(result, expecting) - - - def testReplaceThenInsertAfterLastIndex(self): - tokens = self._parse("abc") - tokens.replace(2, "x") - tokens.insertAfter(2, "y") - - result = tokens.toString() - expecting = "abxy" - self.assertEqual(result, expecting) - - - def testReplaceRangeThenInsertAtLeftEdge(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "x") - tokens.insertBefore(2, "y") - - result = tokens.toString() - expecting = "abyxba" - self.assertEqual(result, expecting) - - - def testReplaceRangeThenInsertAtRightEdge(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, 
"x") - tokens.insertBefore(4, "y") # no effect; within range of a replace - - self.assertRaisesRegex( - ValueError, - (r'insert op within boundaries of ' - r'previous '), - tokens.toString) - - - def testReplaceRangeThenInsertAfterRightEdge(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "x") - tokens.insertAfter(4, "y") - - result = tokens.toString() - expecting = "abxyba" - self.assertEqual(result, expecting) - - - def testReplaceAll(self): - tokens = self._parse("abcccba") - tokens.replace(0, 6, "x") - - result = tokens.toString() - expecting = "x" - self.assertEqual(result, expecting) - - - def testReplaceSubsetThenFetch(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "xyz") - - result = tokens.toString(0, 6) - expecting = "abxyzba" - self.assertEqual(result, expecting) - - - def testReplaceThenReplaceSuperset(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "xyz") - tokens.replace(3, 5, "foo") # overlaps, error - - self.assertRaisesRegex( - ValueError, - (r'replace op boundaries of overlap ' - r'with previous '), - tokens.toString) - - - def testReplaceThenReplaceLowerIndexedSuperset(self): - tokens = self._parse("abcccba") - tokens.replace(2, 4, "xyz") - tokens.replace(1, 3, "foo") # overlap, error - - self.assertRaisesRegex( - ValueError, - (r'replace op boundaries of overlap ' - r'with previous '), - tokens.toString) - - - def testReplaceSingleMiddleThenOverlappingSuperset(self): - tokens = self._parse("abcba") - tokens.replace(2, 2, "xyz") - tokens.replace(0, 3, "foo") - - result = tokens.toString() - expecting = "fooa" - self.assertEqual(result, expecting) - - - def testCombineInserts(self): - tokens = self._parse("abc") - tokens.insertBefore(0, "x") - tokens.insertBefore(0, "y") - result = tokens.toString() - expecting = "yxabc" - self.assertEqual(expecting, result) - - - def testCombine3Inserts(self): - tokens = self._parse("abc") - tokens.insertBefore(1, "x") - tokens.insertBefore(0, "y") - tokens.insertBefore(1, "z") - result = tokens.toString() - expecting = "yazxbc" - self.assertEqual(expecting, result) - - - def testCombineInsertOnLeftWithReplace(self): - tokens = self._parse("abc") - tokens.replace(0, 2, "foo") - tokens.insertBefore(0, "z") # combine with left edge of rewrite - result = tokens.toString() - expecting = "zfoo" - self.assertEqual(expecting, result) - - - def testCombineInsertOnLeftWithDelete(self): - tokens = self._parse("abc") - tokens.delete(0, 2) - tokens.insertBefore(0, "z") # combine with left edge of rewrite - result = tokens.toString() - expecting = "z" # make sure combo is not znull - self.assertEqual(expecting, result) - - - def testDisjointInserts(self): - tokens = self._parse("abc") - tokens.insertBefore(1, "x") - tokens.insertBefore(2, "y") - tokens.insertBefore(0, "z") - result = tokens.toString() - expecting = "zaxbyc" - self.assertEqual(expecting, result) - - - def testOverlappingReplace(self): - tokens = self._parse("abcc") - tokens.replace(1, 2, "foo") - tokens.replace(0, 3, "bar") # wipes prior nested replace - result = tokens.toString() - expecting = "bar" - self.assertEqual(expecting, result) - - - def testOverlappingReplace2(self): - tokens = self._parse("abcc") - tokens.replace(0, 3, "bar") - tokens.replace(1, 2, "foo") # cannot split earlier replace - - self.assertRaisesRegex( - ValueError, - (r'replace op boundaries of overlap ' - r'with previous '), - tokens.toString) - - - def testOverlappingReplace3(self): - tokens = self._parse("abcc") - tokens.replace(1, 2, "foo") - tokens.replace(0, 
2, "bar") # wipes prior nested replace - result = tokens.toString() - expecting = "barc" - self.assertEqual(expecting, result) - - - def testOverlappingReplace4(self): - tokens = self._parse("abcc") - tokens.replace(1, 2, "foo") - tokens.replace(1, 3, "bar") # wipes prior nested replace - result = tokens.toString() - expecting = "abar" - self.assertEqual(expecting, result) - - - def testDropIdenticalReplace(self): - tokens = self._parse("abcc") - tokens.replace(1, 2, "foo") - tokens.replace(1, 2, "foo") # drop previous, identical - result = tokens.toString() - expecting = "afooc" - self.assertEqual(expecting, result) - - - def testDropPrevCoveredInsert(self): - tokens = self._parse("abc") - tokens.insertBefore(1, "foo") - tokens.replace(1, 2, "foo") # kill prev insert - result = tokens.toString() - expecting = "afoofoo" - self.assertEqual(expecting, result) - - - def testLeaveAloneDisjointInsert(self): - tokens = self._parse("abcc") - tokens.insertBefore(1, "x") - tokens.replace(2, 3, "foo") - result = tokens.toString() - expecting = "axbfoo" - self.assertEqual(expecting, result) - - - def testLeaveAloneDisjointInsert2(self): - tokens = self._parse("abcc") - tokens.replace(2, 3, "foo") - tokens.insertBefore(1, "x") - result = tokens.toString() - expecting = "axbfoo" - self.assertEqual(expecting, result) - - - def testInsertBeforeTokenThenDeleteThatToken(self): - tokens = self._parse("abc") - tokens.insertBefore(2, "y") - tokens.delete(2) - result = tokens.toString() - expecting = "aby" - self.assertEqual(expecting, result) - - -class T2(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar('t048rewrite2.g') - - - def _parse(self, input): - cStream = antlr3.StringStream(input) - lexer = self.getLexer(cStream) - tStream = antlr3.TokenRewriteStream(lexer) - tStream.fillBuffer() - - return tStream - - - def testToStringStartStop(self): - # Tokens: 0123456789 - # Input: x = 3 * 0 - tokens = self._parse("x = 3 * 0;") - tokens.replace(4, 8, "0") # replace 3 * 0 with 0 - - result = tokens.toOriginalString() - expecting = "x = 3 * 0;" - self.assertEqual(expecting, result) - - result = tokens.toString() - expecting = "x = 0;" - self.assertEqual(expecting, result) - - result = tokens.toString(0, 9) - expecting = "x = 0;" - self.assertEqual(expecting, result) - - result = tokens.toString(4, 8) - expecting = "0" - self.assertEqual(expecting, result) - - - def testToStringStartStop2(self): - # Tokens: 012345678901234567 - # Input: x = 3 * 0 + 2 * 0 - tokens = self._parse("x = 3 * 0 + 2 * 0;") - - result = tokens.toOriginalString() - expecting = "x = 3 * 0 + 2 * 0;" - self.assertEqual(expecting, result) - - tokens.replace(4, 8, "0") # replace 3 * 0 with 0 - result = tokens.toString() - expecting = "x = 0 + 2 * 0;" - self.assertEqual(expecting, result) - - result = tokens.toString(0, 17) - expecting = "x = 0 + 2 * 0;" - self.assertEqual(expecting, result) - - result = tokens.toString(4, 8) - expecting = "0" - self.assertEqual(expecting, result) - - result = tokens.toString(0, 8) - expecting = "x = 0" - self.assertEqual(expecting, result) - - result = tokens.toString(12, 16) - expecting = "2 * 0" - self.assertEqual(expecting, result) - - tokens.insertAfter(17, "// comment") - result = tokens.toString(12, 18) - expecting = "2 * 0;// comment" - self.assertEqual(expecting, result) - - result = tokens.toString(0, 8) # try again after insert at end - expecting = "x = 0" - self.assertEqual(expecting, result) - - -if __name__ == '__main__': - unittest.main() diff --git 
a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t048rewrite2.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t048rewrite2.g deleted file mode 100644 index 60178d76..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t048rewrite2.g +++ /dev/null @@ -1,12 +0,0 @@ -lexer grammar t048rewrite2; -options { - language=Python3; -} - -ID : 'a'..'z'+; -INT : '0'..'9'+; -SEMI : ';'; -PLUS : '+'; -MUL : '*'; -ASSIGN : '='; -WS : ' '+; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t049treeparser.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t049treeparser.py deleted file mode 100644 index ec77618c..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t049treeparser.py +++ /dev/null @@ -1,477 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase - -class T(testbase.ANTLRTest): - def walkerClass(self, base): - class TWalker(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TWalker - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - getattr(walker, treeEntry)() - - return walker._output - - - def testFlatList(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python3; - output=AST; - } - a : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python3; - ASTLabelType=CommonTree; - } - a : ID INT - {self.capture("{}, {}".format($ID, $INT))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.assertEqual("abc, 34", found) - - - - def testSimpleTree(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python3; - ASTLabelType=CommonTree; - } - a : ^(ID INT) - {self.capture(str($ID)+", "+str($INT))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.assertEqual("abc, 34", found) - - - def testFlatVsTreeDecision(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python3; - output=AST; - } - a : b c ; - b : ID INT -> ^(ID INT); - c : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python3; - ASTLabelType=CommonTree; - } - a : b b ; - b : ID INT {self.capture(str($ID)+" "+str($INT)+'\n')} - | ^(ID INT) {self.capture("^("+str($ID)+" "+str($INT)+')');} - ; - ''') - - found = self.execTreeParser( - 
grammar, 'a', - treeGrammar, 'a', - "a 1 b 2" - ) - self.assertEqual("^(a 1)b 2\n", found) - - - def testFlatVsTreeDecision2(self): - grammar = textwrap.dedent( - r"""grammar T; - options { - language=Python3; - output=AST; - } - a : b c ; - b : ID INT+ -> ^(ID INT+); - c : ID INT+; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python3; - ASTLabelType=CommonTree; - } - a : b b ; - b : ID INT+ {self.capture(str($ID)+" "+str($INT)+"\n")} - | ^(x=ID (y=INT)+) {self.capture("^("+str($x)+' '+str($y)+')')} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 1 2 3 b 4 5" - ) - self.assertEqual("^(a 3)b 5\n", found) - - - def testCyclicDFALookahead(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python3; - output=AST; - } - a : ID INT+ PERIOD; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python3; - ASTLabelType=CommonTree; - } - a : ID INT+ PERIOD {self.capture("alt 1")} - | ID INT+ SEMI {self.capture("alt 2")} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 1 2 3." - ) - self.assertEqual("alt 1", found) - - - def testNullableChildList(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python3; - output=AST; - } - a : ID INT? -> ^(ID INT?); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python3; - ASTLabelType=CommonTree; - } - a : ^(ID INT?) - {self.capture(str($ID))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc" - ) - self.assertEqual("abc", found) - - - def testNullableChildList2(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python3; - output=AST; - } - a : ID INT? SEMI -> ^(ID INT?) SEMI ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python3; - ASTLabelType=CommonTree; - } - a : ^(ID INT?) SEMI - {self.capture(str($ID))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc;" - ) - self.assertEqual("abc", found) - - - def testNullableChildList3(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python3; - output=AST; - } - a : x=ID INT? (y=ID)? SEMI -> ^($x INT? $y?) SEMI ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python3; - ASTLabelType=CommonTree; - } - a : ^(ID INT? b) SEMI - {self.capture(str($ID)+", "+str($b.text))} - ; - b : ID? ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc def;" - ) - self.assertEqual("abc, def", found) - - - def testActionsAfterRoot(self): - grammar = textwrap.dedent( - r'''grammar T; - options { - language=Python3; - output=AST; - } - a : x=ID INT? SEMI -> ^($x INT?) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar TP; - options { - language=Python3; - ASTLabelType=CommonTree; - } - a @init {x=0} : ^(ID {x=1} {x=2} INT?) 
- {self.capture(str($ID)+", "+str(x))} - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc;" - ) - self.assertEqual("abc, 2", found) - - - def testWildcardLookahead(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID '+'^ INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; tokenVocab=T; ASTLabelType=CommonTree;} - a : ^('+' . INT) { self.capture("alt 1") } - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a + 2") - self.assertEqual("alt 1", found) - - - def testWildcardLookahead2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID '+'^ INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; tokenVocab=T; ASTLabelType=CommonTree;} - a : ^('+' . INT) { self.capture("alt 1") } - | ^('+' . .) { self.capture("alt 2") } - ; - ''') - - # AMBIG upon '+' DOWN INT UP etc.. but so what. - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a + 2") - self.assertEqual("alt 1", found) - - - def testWildcardLookahead3(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID '+'^ INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; tokenVocab=T; ASTLabelType=CommonTree;} - a : ^('+' ID INT) { self.capture("alt 1") } - | ^('+' . .) { self.capture("alt 2") } - ; - ''') - - # AMBIG upon '+' DOWN INT UP etc.. but so what. - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a + 2") - self.assertEqual("alt 1", found) - - - def testWildcardPlusLookahead(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID '+'^ INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - SEMI : ';' ; - PERIOD : '.' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; tokenVocab=T; ASTLabelType=CommonTree;} - a : ^('+' INT INT ) { self.capture("alt 1") } - | ^('+' .+) { self.capture("alt 2") } - ; - ''') - - # AMBIG upon '+' DOWN INT UP etc.. but so what. 
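Each of these cases runs the same two-stage pipeline that execTreeParser() wraps: the token parser builds an AST (output=AST), the AST is serialized into a node stream, and the tree grammar walks that stream. Spelled out, with TLexer, TParser and TP as assumed names for the classes ANTLR would generate from the inline grammars T and TP:

    import antlr3
    import antlr3.tree
    from TLexer import TLexer     # hypothetical generated modules
    from TParser import TParser
    from TP import TP

    tStream = antlr3.CommonTokenStream(TLexer(antlr3.StringStream("a + 2")))
    r = TParser(tStream).a()                          # r.tree is the AST root
    nodes = antlr3.tree.CommonTreeNodeStream(r.tree)  # AST -> flat node stream
    nodes.setTokenStream(tStream)                     # walker can reach token text
    TP(nodes).a()                                     # run tree-grammar rule 'a'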
- - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a + 2") - self.assertEqual("alt 2", found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t050decorate.g b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t050decorate.g deleted file mode 100644 index 50e54e74..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t050decorate.g +++ /dev/null @@ -1,29 +0,0 @@ -grammar t050decorate; -options { - language = Python3; -} - -@header { - def logme(func): - def decorated(self, *args, **kwargs): - self.events.append('before') - try: - return func(self, *args, **kwargs) - finally: - self.events.append('after') - - return decorated -} - -@parser::init { -self.events = [] -} - -document -@decorate { - @logme -} - : IDENTIFIER - ; - -IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t050decorate.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t050decorate.py deleted file mode 100644 index b5337a64..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t050decorate.py +++ /dev/null @@ -1,21 +0,0 @@ -import antlr3 -import testbase -import unittest - -class t013parser(testbase.ANTLRTest): - def setUp(self): - self.compileGrammar() - - - def testValid(self): - cStream = antlr3.StringStream('foobar') - lexer = self.getLexer(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = self.getParser(tStream) - parser.document() - - self.assertEqual(parser.events, ['before', 'after']) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t051treeRewriteAST.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t051treeRewriteAST.py deleted file mode 100644 index 3c9ced62..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t051treeRewriteAST.py +++ /dev/null @@ -1,1565 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase - -class T(testbase.ANTLRTest): - def walkerClass(self, base): - class TWalker(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.buf = "" - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
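The @header and @decorate blocks in t050decorate.g above are plain Python that ANTLR pastes into the generated module: logme() lands at module level and the @decorate action wraps the generated document() rule method. The decorator itself has no ANTLR dependency and can be exercised standalone:

    # Same decorator as the @header block above, applied to an ordinary
    # class instead of a generated parser.
    def logme(func):
        def decorated(self, *args, **kwargs):
            self.events.append('before')
            try:
                return func(self, *args, **kwargs)
            finally:
                self.events.append('after')
        return decorated

    class Demo:
        def __init__(self):
            self.events = []

        @logme
        def document(self):
            return 'parsed'

    d = Demo()
    d.document()
    assert d.events == ['before', 'after']   # mirrors the test's expectation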
- raise - - return TWalker - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - r = getattr(walker, treeEntry)() - - if r.tree: - return r.tree.toStringTree() - - return "" - - - def testFlatList(self): - grammar = textwrap.dedent( - r''' - grammar T1; - options { - language=Python3; - output=AST; - } - a : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP1; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T1; - } - - a : ID INT -> INT ID; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.assertEqual("34 abc", found) - - - def testSimpleTree(self): - grammar = textwrap.dedent( - r''' - grammar T2; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP2; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T2; - } - a : ^(ID INT) -> ^(INT ID); - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.assertEqual("(34 abc)", found) - - - def testCombinedRewriteAndAuto(self): - grammar = textwrap.dedent( - r''' - grammar T3; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT) | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP3; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T3; - } - a : ^(ID INT) -> ^(INT ID) | INT; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.assertEqual("(34 abc)", found) - - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "34" - ) - - self.assertEqual("34", found) - - - def testAvoidDup(self): - grammar = textwrap.dedent( - r''' - grammar T4; - options { - language=Python3; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP4; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T4; - } - a : ID -> ^(ID ID); - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc" - ) - - self.assertEqual("(abc abc)", found) - - - def testLoop(self): - grammar = textwrap.dedent( - r''' - grammar T5; - options { - language=Python3; - output=AST; - } - a : ID+ INT+ -> (^(ID INT))+ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP5; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T5; - } - a : (^(ID INT))+ -> INT+ ID+; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a b c 3 4 5" - ) - - self.assertEqual("3 4 5 a b c", found) - - - def testAutoDup(self): - 
grammar = textwrap.dedent( - r''' - grammar T6; - options { - language=Python3; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP6; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T6; - } - a : ID; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc" - ) - - self.assertEqual("abc", found) - - - def testAutoDupRule(self): - grammar = textwrap.dedent( - r''' - grammar T7; - options { - language=Python3; - output=AST; - } - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP7; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T7; - } - a : b c ; - b : ID ; - c : INT ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 1" - ) - - self.assertEqual("a 1", found) - - - def testAutoWildcard(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3;output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - a : ID . - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - self.assertEqual("abc 34", found) - - - def testAutoWildcard2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3;output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - a : ^(ID .) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - self.assertEqual("(abc 34)", found) - - - def testAutoWildcardWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3;output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - a : ID c=. - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - self.assertEqual("abc 34", found) - - - def testAutoWildcardWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3;output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - a : ID c+=. 
- ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - self.assertEqual("abc 34", found) - - - def testAutoDupMultiple(self): - grammar = textwrap.dedent( - r''' - grammar T8; - options { - language=Python3; - output=AST; - } - a : ID ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP8; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T8; - } - a : ID ID INT - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a b 3" - ) - - self.assertEqual("a b 3", found) - - - def testAutoDupTree(self): - grammar = textwrap.dedent( - r''' - grammar T9; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP9; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T9; - } - a : ^(ID INT) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.assertEqual("(a 3)", found) - - - def testAutoDupTreeWithLabels(self): - grammar = textwrap.dedent( - r''' - grammar T10; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP10; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T10; - } - a : ^(x=ID y=INT) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.assertEqual("(a 3)", found) - - - def testAutoDupTreeWithListLabels(self): - grammar = textwrap.dedent( - r''' - grammar T11; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP11; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T11; - } - a : ^(x+=ID y+=INT) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.assertEqual("(a 3)", found) - - - def testAutoDupTreeWithRuleRoot(self): - grammar = textwrap.dedent( - r''' - grammar T12; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP12; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T12; - } - a : ^(b INT) ; - b : ID ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.assertEqual("(a 3)", found) - - - def testAutoDupTreeWithRuleRootAndLabels(self): - grammar = textwrap.dedent( - r''' - grammar T13; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP13; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T13; - } - a : ^(x=b INT) ; - b : ID ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.assertEqual("(a 3)", found) - - - def testAutoDupTreeWithRuleRootAndListLabels(self): 
- grammar = textwrap.dedent( - r''' - grammar T14; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP14; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T14; - } - a : ^(x+=b y+=c) ; - b : ID ; - c : INT ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a 3" - ) - - self.assertEqual("(a 3)", found) - - - def testAutoDupNestedTree(self): - grammar = textwrap.dedent( - r''' - grammar T15; - options { - language=Python3; - output=AST; - } - a : x=ID y=ID INT -> ^($x ^($y INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP15; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T15; - } - a : ^(ID ^(ID INT)) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "a b 3" - ) - - self.assertEqual("(a (b 3))", found) - - - def testDelete(self): - grammar = textwrap.dedent( - r''' - grammar T16; - options { - language=Python3; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP16; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T16; - } - a : ID -> - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc" - ) - - self.assertEqual("", found) - - def testSetMatchNoRewrite(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : b INT; - b : ID | INT; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.assertEqual("abc 34", found) - - - def testSetOptionalMatchNoRewrite(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : (ID|INT)? 
INT ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34") - - self.assertEqual("abc 34", found) - - - def testSetMatchNoRewriteLevel2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - a : x=ID INT -> ^($x INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : ^(ID (ID | INT) ) ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.assertEqual("(abc 34)", found) - - - def testSetMatchNoRewriteLevel2Root(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - a : x=ID INT -> ^($x INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : ^((ID | INT) INT) ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.assertEqual("(abc 34)", found) - - - ## REWRITE MODE - - def testRewriteModeCombinedRewriteAndAuto(self): - grammar = textwrap.dedent( - r''' - grammar T17; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID INT) | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP17; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T17; - rewrite=true; - } - a : ^(ID INT) -> ^(ID["ick"] INT) - | INT // leaves it alone, returning $a.start - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc 34" - ) - - self.assertEqual("(ick 34)", found) - - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "34" - ) - - self.assertEqual("34", found) - - - def testRewriteModeFlatTree(self): - grammar = textwrap.dedent( - r''' - grammar T18; - options { - language=Python3; - output=AST; - } - a : ID INT -> ID INT | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP18; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T18; - rewrite=true; - } - s : ID a ; - a : INT -> INT["1"] - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34" - ) - self.assertEqual("abc 1", found) - - - def testRewriteModeChainRuleFlatTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID INT -> ID INT | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : a ; - a : b ; - b : ID INT -> INT ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEqual("34 abc", found) - - - def testRewriteModeChainRuleTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID INT -> ^(ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree 
grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : a ; - a : b ; // a.tree must become b.tree - b : ^(ID INT) -> INT - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEqual("34", found) - - - def testRewriteModeChainRuleTree2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID INT -> ^(ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - tokens { X; } - s : a* b ; // only b contributes to tree, but it's after a*; s.tree = b.tree - a : X ; - b : ^(ID INT) -> INT - ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEqual("34", found) - - - def testRewriteModeChainRuleTree3(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : 'boo' ID INT -> 'boo' ^(ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - tokens { X; } - s : 'boo' a* b ; // don't reset s.tree to b.tree due to 'boo' - a : X ; - b : ^(ID INT) -> INT - ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "boo abc 34") - self.assertEqual("boo 34", found) - - - def testRewriteModeChainRuleTree4(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python3; output=AST;} - a : 'boo' ID INT -> ^('boo' ^(ID INT)) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - tokens { X; } - s : ^('boo' a* b) ; // don't reset s.tree to b.tree due to 'boo' - a : X ; - b : ^(ID INT) -> INT - ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "boo abc 34") - self.assertEqual("(boo 34)", found) - - - def testRewriteModeChainRuleTree5(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python3; output=AST;} - a : 'boo' ID INT -> ^('boo' ^(ID INT)) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - tokens { X; } - s : ^(a b) ; // s.tree is a.tree - a : 'boo' ; - b : ^(ID INT) -> INT - ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "boo abc 34") - self.assertEqual("(boo 34)", found) - - - def testRewriteOfRuleRef(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python3; output=AST;} - a : ID INT -> ID INT | INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : a -> a ; - a : ID INT -> ID INT ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEqual("abc 34", found) - - - def testRewriteOfRuleRefRoot(self): - grammar 
= textwrap.dedent( - r""" - grammar T; - options {language=Python3; output=AST;} - a : ID INT INT -> ^(INT ^(ID INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(a ^(ID INT)) -> a ; - a : INT ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 12 34") - # emits whole tree when you ref the root since I can't know whether - # you want the children or not. You might be returning a whole new - # tree. Hmm...still seems weird. oh well. - self.assertEqual("(12 (abc 34))", found) - - - def testRewriteOfRuleRefRootLabeled(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python3; output=AST;} - a : ID INT INT -> ^(INT ^(ID INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(label=a ^(ID INT)) -> a ; - a : INT ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 12 34") - # emits whole tree when you ref the root since I can't know whether - # you want the children or not. You might be returning a whole new - # tree. Hmm...still seems weird. oh well. - self.assertEqual("(12 (abc 34))", found) - - - def testRewriteOfRuleRefRootListLabeled(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python3; output=AST;} - a : ID INT INT -> ^(INT ^(ID INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(label+=a ^(ID INT)) -> a ; - a : INT ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 12 34") - # emits whole tree when you ref the root since I can't know whether - # you want the children or not. You might be returning a whole new - # tree. Hmm...still seems weird. oh well. 
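Every expected string in these assertions is CommonTree.toStringTree() notation: a lone node prints as its token text, a subtree as (root child ...). The shape can be reproduced directly with the runtime's tree classes; the constructor usage below is a sketch from the runtime's public API and the token types are arbitrary placeholders:

    import antlr3
    import antlr3.tree

    # Build ^(abc 34) by hand and print it the way the assertions read it.
    root = antlr3.tree.CommonTree(antlr3.CommonToken(type=4, text='abc'))
    root.addChild(antlr3.tree.CommonTree(antlr3.CommonToken(type=5, text='34')))
    print(root.toStringTree())   # -> (abc 34)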
- self.assertEqual("(12 (abc 34))", found) - - - def testRewriteOfRuleRefChild(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python3; output=AST;} - a : ID INT -> ^(ID ^(INT INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(ID a) -> a ; - a : ^(INT INT) ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEqual("(34 34)", found) - - - def testRewriteOfRuleRefLabel(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python3; output=AST;} - a : ID INT -> ^(ID ^(INT INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(ID label=a) -> a ; - a : ^(INT INT) ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEqual("(34 34)", found) - - - def testRewriteOfRuleRefListLabel(self): - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python3; output=AST;} - a : ID INT -> ^(ID ^(INT INT)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - """) - - treeGrammar = textwrap.dedent( - r""" - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(ID label+=a) -> a ; - a : ^(INT INT) ; - """) - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEqual("(34 34)", found) - - - - def testRewriteModeWithPredicatedRewrites(self): - grammar = textwrap.dedent( - r''' - grammar T19; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID["root"] ^(ID INT)) | INT -> ^(ID["root"] INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP19; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T19; - rewrite=true; - } - s : ^(ID a) { self.buf += $s.start.toStringTree() }; - a : ^(ID INT) -> {True}? ^(ID["ick"] INT) - -> INT - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34" - ) - - self.assertEqual("(root (ick 34))", found) - - - def testWildcardSingleNode(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - a : ID INT -> ^(ID["root"] INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - s : ^(ID c=.) -> $c - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34" - ) - - self.assertEqual("34", found) - - def testWildcardUnlabeledSingleNode(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID INT -> ^(ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - s : ^(ID .) 
-> ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 34") - self.assertEqual("abc", found) - - - def testWildcardGrabsSubtree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID x=INT y=INT z=INT -> ^(ID[\"root\"] ^($x $y $z)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - s : ^(ID c=.) -> $c - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 1 2 3") - self.assertEqual("(1 2 3)", found) - - - def testWildcardGrabsSubtree2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : ID x=INT y=INT z=INT -> ID ^($x $y $z); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - s : ID c=. -> $c - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "abc 1 2 3") - self.assertEqual("(1 2 3)", found) - - - def testWildcardListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST;} - a : INT INT INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T;} - s : (c+=.)+ -> $c+ - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "1 2 3") - self.assertEqual("1 2 3", found) - - - def testWildcardListLabel2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3; output=AST; ASTLabelType=CommonTree;} - a : x=INT y=INT z=INT -> ^($x ^($y $z) ^($y $z)); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;} - s : ^(INT (c+=.)+) -> $c+ - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 's', - "1 2 3") - self.assertEqual("(2 3) (2 3)", found) - - - def testRuleResultAsRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - a : ID '=' INT -> ^('=' ID INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - COLON : ':' ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python3; - output=AST; - rewrite=true; - ASTLabelType=CommonTree; - tokenVocab=T; - } - a : ^(eq e1=ID e2=.) -> ^(eq $e2 $e1) ; - eq : '=' | ':' {pass} ; // bug in set match, doesn't add to tree!! booh. force nonset. 
- ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - "abc = 34") - self.assertEqual("(= 34 abc)", found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t052import.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t052import.py deleted file mode 100644 index d6de6ef4..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t052import.py +++ /dev/null @@ -1,431 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys - -class T(testbase.ANTLRTest): - def setUp(self): - self.oldPath = sys.path[:] - sys.path.insert(0, self.baseDir) - - - def tearDown(self): - sys.path = self.oldPath - - - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input): - # no error recovery yet, just crash! - raise - - return TLexer - - - def execParser(self, grammar, grammarEntry, slaves, input): - for slave in slaves: - parserName = self.writeInlineGrammar(slave)[0] - # slave parsers are imported as normal python modules - # to force reloading current version, purge module from sys.modules - if parserName + 'Parser' in sys.modules: - del sys.modules[parserName + 'Parser'] - - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - getattr(parser, grammarEntry)() - - return parser._output - - - def execLexer(self, grammar, slaves, input): - for slave in slaves: - parserName = self.writeInlineGrammar(slave)[0] - # slave parsers are imported as normal python modules - # to force reloading current version, purge module from sys.modules - if parserName + 'Parser' in sys.modules: - del sys.modules[parserName + 'Parser'] - - lexerCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - - while True: - token = lexer.nextToken() - if token is None or token.type == antlr3.EOF: - break - - lexer._output += token.text - - return lexer._output - - - def testDelegatorInvokesDelegateRule(self): - slave = textwrap.dedent( - r''' - parser grammar S1; - options { - language=Python3; - } - @members { - def capture(self, t): - self.gM1.capture(t) - - } - - a : B { self.capture("S.a") } ; - ''') - - master = textwrap.dedent( - r''' - grammar M1; - options { - language=Python3; - } - import S1; - s : a ; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave], - input="b" - ) - - self.assertEqual("S.a", found) - - - def testDelegatorInvokesDelegateRuleWithArgs(self): - slave = textwrap.dedent( - r''' - 
parser grammar S2; - options { - language=Python3; - } - @members { - def capture(self, t): - self.gM2.capture(t) - } - a[x] returns [y] : B {self.capture("S.a"); $y="1000";} ; - ''') - - master = textwrap.dedent( - r''' - grammar M2; - options { - language=Python3; - } - import S2; - s : label=a[3] {self.capture($label.y);} ; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave], - input="b" - ) - - self.assertEqual("S.a1000", found) - - - def testDelegatorAccessesDelegateMembers(self): - slave = textwrap.dedent( - r''' - parser grammar S3; - options { - language=Python3; - } - @members { - def capture(self, t): - self.gM3.capture(t) - - def foo(self): - self.capture("foo") - } - a : B ; - ''') - - master = textwrap.dedent( - r''' - grammar M3; // uses no rules from the import - options { - language=Python3; - } - import S3; - s : 'b' {self.gS3.foo();} ; // gS is import pointer - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave], - input="b" - ) - - self.assertEqual("foo", found) - - - def testDelegatorInvokesFirstVersionOfDelegateRule(self): - slave = textwrap.dedent( - r''' - parser grammar S4; - options { - language=Python3; - } - @members { - def capture(self, t): - self.gM4.capture(t) - } - a : b {self.capture("S.a");} ; - b : B ; - ''') - - slave2 = textwrap.dedent( - r''' - parser grammar T4; - options { - language=Python3; - } - @members { - def capture(self, t): - self.gM4.capture(t) - } - a : B {self.capture("T.a");} ; // hidden by S.a - ''') - - master = textwrap.dedent( - r''' - grammar M4; - options { - language=Python3; - } - import S4,T4; - s : a ; - B : 'b' ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave, slave2], - input="b" - ) - - self.assertEqual("S.a", found) - - - def testDelegatesSeeSameTokenType(self): - slave = textwrap.dedent( - r''' - parser grammar S5; // A, B, C token type order - options { - language=Python3; - } - tokens { A; B; C; } - @members { - def capture(self, t): - self.gM5.capture(t) - } - x : A {self.capture("S.x ");} ; - ''') - - slave2 = textwrap.dedent( - r''' - parser grammar T5; - options { - language=Python3; - } - tokens { C; B; A; } /// reverse order - @members { - def capture(self, t): - self.gM5.capture(t) - } - y : A {self.capture("T.y");} ; - ''') - - master = textwrap.dedent( - r''' - grammar M5; - options { - language=Python3; - } - import S5,T5; - s : x y ; // matches AA, which should be "aa" - B : 'b' ; // another order: B, A, C - A : 'a' ; - C : 'c' ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 's', - slaves=[slave, slave2], - input="aa" - ) - - self.assertEqual("S.x T.y", found) - - - def testDelegatorRuleOverridesDelegate(self): - slave = textwrap.dedent( - r''' - parser grammar S6; - options { - language=Python3; - } - @members { - def capture(self, t): - self.gM6.capture(t) - } - a : b {self.capture("S.a");} ; - b : B ; - ''') - - master = textwrap.dedent( - r''' - grammar M6; - options { - language=Python3; - } - import S6; - b : 'b'|'c' ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execParser( - master, 'a', - slaves=[slave], - input="c" - ) - - self.assertEqual("S.a", found) - - - # LEXER INHERITANCE - - def testLexerDelegatorInvokesDelegateRule(self): - slave = textwrap.dedent( - r''' - lexer grammar S7; - options { - language=Python3; - } - @members { - def capture(self, 
t): - self.gM7.capture(t) - } - A : 'a' {self.capture("S.A ");} ; - C : 'c' ; - ''') - - master = textwrap.dedent( - r''' - lexer grammar M7; - options { - language=Python3; - } - import S7; - B : 'b' ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execLexer( - master, - slaves=[slave], - input="abc" - ) - - self.assertEqual("S.A abc", found) - - - def testLexerDelegatorRuleOverridesDelegate(self): - slave = textwrap.dedent( - r''' - lexer grammar S8; - options { - language=Python3; - } - @members { - def capture(self, t): - self.gM8.capture(t) - } - A : 'a' {self.capture("S.A")} ; - ''') - - master = textwrap.dedent( - r''' - lexer grammar M8; - options { - language=Python3; - } - import S8; - A : 'a' {self.capture("M.A ");} ; - WS : (' '|'\n') {self.skip()} ; - ''') - - found = self.execLexer( - master, - slaves=[slave], - input="a" - ) - - self.assertEqual("M.A a", found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t053hetero.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t053hetero.py deleted file mode 100644 index e85c0388..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t053hetero.py +++ /dev/null @@ -1,939 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys - -class T(testbase.ANTLRTest): - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
- raise - - return TLexer - - - def execParser(self, grammar, grammarEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - - if r: - return r.tree.toStringTree() - - return "" - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - r = getattr(walker, treeEntry)() - - if r: - return r.tree.toStringTree() - - return "" - - - # PARSERS -- AUTO AST - - def testToken(self): - grammar = textwrap.dedent( - r''' - grammar T1; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.assertEqual("a", found) - - - def testTokenCommonTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a") - - self.assertEqual("a", found) - - - def testTokenWithQualifiedType(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - @members { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - } - a : ID ; // TParser.V is qualified name - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.assertEqual("a", found) - - - def testNamedType(self): - grammar = textwrap.dedent( - r""" - grammar $T; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - } - a : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - """) - - found = self.execParser(grammar, 'a', input="a") - self.assertEqual("a", found) - - - def testTokenWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T2; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : x=ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.assertEqual("a", found) - - - def testTokenWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T3; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : x+=ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.assertEqual("a", found) - - - def testTokenRoot(self): - grammar = textwrap.dedent( - r''' - grammar T4; - options { - language=Python3; - output=AST; - } - @header { - 
class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID^ ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.assertEqual("a", found) - - - def testTokenRootWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T5; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : x+=ID^ ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.assertEqual("a", found) - - - def testString(self): - grammar = textwrap.dedent( - r''' - grammar T6; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : 'begin' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="begin" - ) - - self.assertEqual("begin", found) - - - def testStringRoot(self): - grammar = textwrap.dedent( - r''' - grammar T7; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : 'begin'^ ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="begin" - ) - - self.assertEqual("begin", found) - - - # PARSERS -- REWRITE AST - - def testRewriteToken(self): - grammar = textwrap.dedent( - r''' - grammar T8; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID -> ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.assertEqual("a", found) - - - def testRewriteTokenWithArgs(self): - grammar = textwrap.dedent( - r''' - grammar T9; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def __init__(self, *args): - if len(args) == 4: - ttype = args[0] - x = args[1] - y = args[2] - z = args[3] - token = CommonToken(type=ttype, text="") - - elif len(args) == 3: - ttype = args[0] - token = args[1] - x = args[2] - y, z = 0, 0 - - else: - raise TypeError("Invalid args {!r}".format(args)) - - super().__init__(token) - self.x = x - self.y = y - self.z = z - - def toString(self): - txt = "" - if self.token: - txt += self.token.text - txt +=";{0.x}{0.y}{0.z}".format(self) - return txt - __str__ = toString - - } - a : ID -> ID[42,19,30] ID[$ID,99]; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a" - ) - - self.assertEqual(";421930 a;9900", found) - - - def testRewriteTokenRoot(self): - grammar = textwrap.dedent( - r''' - grammar T10; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID INT -> ^(ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a 2" - ) - - self.assertEqual("(a 2)", found) - - - def testRewriteString(self): - grammar = textwrap.dedent( - r''' - grammar T11; - options { - language=Python3; - output=AST; - } - @header { - class 
V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : 'begin' -> 'begin' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="begin" - ) - - self.assertEqual("begin", found) - - - def testRewriteStringRoot(self): - grammar = textwrap.dedent( - r''' - grammar T12; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : 'begin' INT -> ^('begin' INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="begin 2" - ) - - self.assertEqual("(begin 2)", found) - - def testRewriteRuleResults(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - tokens {LIST;} - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - class W(CommonTree): - def __init__(self, tokenType, txt): - super().__init__( - CommonToken(type=tokenType, text=txt)) - - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : id (',' id)* -> ^(LIST["LIST"] id+); - id : ID -> ID; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="a,b,c") - - self.assertEqual("(LIST a b c)", found) - - def testCopySemanticsWithHetero(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - @header { - class V(CommonTree): - def dupNode(self): - return V(self) - - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : type ID (',' ID)* ';' -> ^(type ID)+; - type : 'int' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\\n') {$channel=HIDDEN;} ; - ''') - - found = self.execParser( - grammar, 'a', - input="int a, b, c;") - self.assertEqual("(int a) (int b) (int c)", found) - - # TREE PARSERS -- REWRITE AST - - def testTreeParserRewriteFlatList(self): - grammar = textwrap.dedent( - r''' - grammar T13; - options { - language=Python3; - output=AST; - } - a : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP13; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T13; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - class W(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID INT -> INT ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc 34" - ) - - self.assertEqual("34 abc", found) - - - def testTreeParserRewriteTree(self): - grammar = textwrap.dedent( - r''' - grammar T14; - options { - language=Python3; - output=AST; - } - a : ID INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP14; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T14; - } - @header { - class V(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - class W(CommonTree): - def toString(self): - return self.token.text + "" - __str__ = toString - - } - a : ID INT -> ^(INT ID) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - 
treeGrammar, 'a', - input="abc 34" - ) - - self.assertEqual("(34 abc)", found) - - - def testTreeParserRewriteImaginary(self): - grammar = textwrap.dedent( - r''' - grammar T15; - options { - language=Python3; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP15; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T15; - } - tokens { ROOT; } - @header { - class V(CommonTree): - def __init__(self, tokenType): - super().__init__(CommonToken(tokenType)) - - def toString(self): - return tokenNames[self.token.type] + "" - __str__ = toString - - - } - a : ID -> ROOT ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc" - ) - - self.assertEqual("ROOT abc", found) - - - def testTreeParserRewriteImaginaryWithArgs(self): - grammar = textwrap.dedent( - r''' - grammar T16; - options { - language=Python3; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP16; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T16; - } - tokens { ROOT; } - @header { - class V(CommonTree): - def __init__(self, tokenType, x): - super().__init__(CommonToken(tokenType)) - self.x = x - - def toString(self): - return tokenNames[self.token.type] + ";" + str(self.x) - __str__ = toString - - } - a : ID -> ROOT[42] ID - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc" - ) - - self.assertEqual("ROOT;42 abc", found) - - - def testTreeParserRewriteImaginaryRoot(self): - grammar = textwrap.dedent( - r''' - grammar T17; - options { - language=Python3; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP17; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T17; - } - tokens { ROOT; } - @header { - class V(CommonTree): - def __init__(self, tokenType): - super().__init__(CommonToken(tokenType)) - - def toString(self): - return tokenNames[self.token.type] + "" - __str__ = toString - - } - a : ID -> ^(ROOT ID) - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc" - ) - - self.assertEqual("(ROOT abc)", found) - - - def testTreeParserRewriteImaginaryFromReal(self): - grammar = textwrap.dedent( - r''' - grammar T18; - options { - language=Python3; - output=AST; - } - a : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP18; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T18; - } - tokens { ROOT; } - @header { - class V(CommonTree): - def __init__(self, tokenType, tree=None): - if tree is None: - super().__init__(CommonToken(tokenType)) - else: - super().__init__(tree) - self.token.type = tokenType - - def toString(self): - return tokenNames[self.token.type]+"@"+str(self.token.line) - __str__ = toString - - } - a : ID -> ROOT[$ID] - ; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc" - ) - - self.assertEqual("ROOT@1", found) - - - def testTreeParserAutoHeteroAST(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - output=AST; - } - a : ID ';' ; - ID : 
'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN;} ; - ''') - - treeGrammar = textwrap.dedent( - r''' - tree grammar TP; - options { - language=Python3; - output=AST; - ASTLabelType=CommonTree; - tokenVocab=T; - } - tokens { ROOT; } - @header { - class V(CommonTree): - def toString(self): - return CommonTree.toString(self) + "" - __str__ = toString - - } - - a : ID ';'; - ''') - - found = self.execTreeParser( - grammar, 'a', - treeGrammar, 'a', - input="abc;" - ) - - self.assertEqual("abc ;", found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t054main.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t054main.py deleted file mode 100644 index e81d2539..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t054main.py +++ /dev/null @@ -1,309 +0,0 @@ - -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys -from io import StringIO - -class T(testbase.ANTLRTest): - def setUp(self): - self.oldPath = sys.path[:] - sys.path.insert(0, self.baseDir) - - - def tearDown(self): - sys.path = self.oldPath - - - def testOverrideMain(self): - grammar = textwrap.dedent( - r"""lexer grammar T3; - options { - language = Python3; - } - - @main { - def main(argv): - raise RuntimeError("no") - } - - ID: ('a'..'z' | '\u00c0'..'\u00ff')+; - WS: ' '+ { $channel = HIDDEN }; - """) - - - stdout = StringIO() - - lexerMod = self.compileInlineGrammar(grammar, returnModule=True) - self.assertRaises(RuntimeError, lexerMod.main, ['lexer.py']) - - - def testLexerFromFile(self): - input = "foo bar" - inputPath = self.writeFile("input.txt", input) - - grammar = textwrap.dedent( - r"""lexer grammar T1; - options { - language = Python3; - } - - ID: 'a'..'z'+; - WS: ' '+ { $channel = HIDDEN }; - """) - - - stdout = StringIO() - - lexerMod = self.compileInlineGrammar(grammar, returnModule=True) - lexerMod.main( - ['lexer.py', inputPath], - stdout=stdout - ) - - self.assertEqual(len(stdout.getvalue().splitlines()), 3) - - - def testLexerFromStdIO(self): - input = "foo bar" - - grammar = textwrap.dedent( - r"""lexer grammar T2; - options { - language = Python3; - } - - ID: 'a'..'z'+; - WS: ' '+ { $channel = HIDDEN }; - """) - - - stdout = StringIO() - - lexerMod = self.compileInlineGrammar(grammar, returnModule=True) - lexerMod.main( - ['lexer.py'], - stdin=StringIO(input), - stdout=stdout - ) - - self.assertEqual(len(stdout.getvalue().splitlines()), 3) - - - def testLexerEncoding(self): - input = "föö bär" - - grammar = textwrap.dedent( - r"""lexer grammar T3; - options { - language = Python3; - } - - ID: ('a'..'z' | '\u00c0'..'\u00ff')+; - WS: ' '+ { $channel = HIDDEN }; - """) - - - stdout = StringIO() - - lexerMod = self.compileInlineGrammar(grammar, returnModule=True) - lexerMod.main( - ['lexer.py'], - stdin=StringIO(input), - stdout=stdout - ) - - self.assertEqual(len(stdout.getvalue().splitlines()), 3) - - - def testCombined(self): - input = "foo bar" - - grammar = textwrap.dedent( - r"""grammar T4; - options { - language = Python3; - } - - r returns [res]: (ID)+ EOF { $res = $text }; - - ID: 'a'..'z'+; - WS: ' '+ { $channel = HIDDEN }; - """) - - - stdout = StringIO() - - lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True) - parserMod.main( - ['combined.py', '--rule', 'r'], - stdin=StringIO(input), - stdout=stdout - ) - - stdout = stdout.getvalue() - self.assertEqual(len(stdout.splitlines()), 1, stdout) - - - def testCombinedOutputAST(self): - input = "foo 
+ bar" - - grammar = textwrap.dedent( - r"""grammar T5; - options { - language = Python3; - output = AST; - } - - r: ID OP^ ID EOF!; - - ID: 'a'..'z'+; - OP: '+'; - WS: ' '+ { $channel = HIDDEN }; - """) - - - stdout = StringIO() - - lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True) - parserMod.main( - ['combined.py', '--rule', 'r'], - stdin=StringIO(input), - stdout=stdout - ) - - stdout = stdout.getvalue().strip() - self.assertEqual(stdout, "(+ foo bar)") - - - def testTreeParser(self): - grammar = textwrap.dedent( - r'''grammar T6; - options { - language = Python3; - output = AST; - } - - r: ID OP^ ID EOF!; - - ID: 'a'..'z'+; - OP: '+'; - WS: ' '+ { $channel = HIDDEN }; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar T6Walker; - options { - language=Python3; - ASTLabelType=CommonTree; - tokenVocab=T6; - } - r returns [res]: ^(OP a=ID b=ID) - { $res = "{} {} {}".format($a.text, $OP.text, $b.text) } - ; - ''') - - lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True) - walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True) - - stdout = StringIO() - walkerMod.main( - ['walker.py', '--rule', 'r', '--parser', 'T6Parser', '--parser-rule', 'r', '--lexer', 'T6Lexer'], - stdin=StringIO("a+b"), - stdout=stdout - ) - - stdout = stdout.getvalue().strip() - self.assertEqual(stdout, "'a + b'") - - - def testTreeParserRewrite(self): - grammar = textwrap.dedent( - r'''grammar T7; - options { - language = Python3; - output = AST; - } - - r: ID OP^ ID EOF!; - - ID: 'a'..'z'+; - OP: '+'; - WS: ' '+ { $channel = HIDDEN }; - ''') - - treeGrammar = textwrap.dedent( - r'''tree grammar T7Walker; - options { - language=Python3; - ASTLabelType=CommonTree; - tokenVocab=T7; - output=AST; - } - tokens { - ARG; - } - r: ^(OP a=ID b=ID) -> ^(OP ^(ARG ID) ^(ARG ID)); - ''') - - lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True) - walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True) - - stdout = StringIO() - walkerMod.main( - ['walker.py', '--rule', 'r', '--parser', 'T7Parser', '--parser-rule', 'r', '--lexer', 'T7Lexer'], - stdin=StringIO("a+b"), - stdout=stdout - ) - - stdout = stdout.getvalue().strip() - self.assertEqual(stdout, "(+ (ARG a) (ARG b))") - - - - def testGrammarImport(self): - slave = textwrap.dedent( - r''' - parser grammar T8S; - options { - language=Python3; - } - - a : B; - ''') - - parserName = self.writeInlineGrammar(slave)[0] - # slave parsers are imported as normal python modules - # to force reloading current version, purge module from sys.modules - if parserName + 'Parser' in sys.modules: - del sys.modules[parserName+'Parser'] - - master = textwrap.dedent( - r''' - grammar T8M; - options { - language=Python3; - } - import T8S; - s returns [res]: a { $res = $a.text }; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') {self.skip()} ; - ''') - - stdout = StringIO() - - lexerMod, parserMod = self.compileInlineGrammar(master, returnModule=True) - parserMod.main( - ['import.py', '--rule', 's'], - stdin=StringIO("b"), - stdout=stdout - ) - - stdout = stdout.getvalue().strip() - self.assertEqual(stdout, "'b'") - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t057autoAST.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t057autoAST.py deleted file mode 100644 index 63ce05a9..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t057autoAST.py +++ /dev/null @@ -1,1005 +0,0 @@ -import 
unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys - -class TestAutoAST(testbase.ANTLRTest): - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._errors = [] - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def emitErrorMessage(self, msg): - self._errors.append(msg) - - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! - raise - - return TLexer - - - def execParser(self, grammar, grammarEntry, input, expectErrors=False): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - - if not expectErrors: - self.assertEqual(len(parser._errors), 0, parser._errors) - - result = "" - - if r: - if hasattr(r, 'result'): - result += r.result - - if r.tree: - result += r.tree.toStringTree() - - if not expectErrors: - return result - - else: - return result, parser._errors - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - r = getattr(walker, treeEntry)() - - if r: - return r.tree.toStringTree() - - return "" - - - def testTokenList(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a : ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN}; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEqual("abc 34", found); - - - def testTokenListInSingleAltBlock(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a : (ID INT) ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar,"a", "abc 34") - self.assertEqual("abc 34", found) - - - def testSimpleRootAtOuterLevel(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a : ID^ INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEqual("(abc 34)", found) - - - def testSimpleRootAtOuterLevelReverse(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : INT ID^ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "34 abc") - self.assertEqual("(abc 34)", found) - - - def 
testBang(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT! ID! INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc 34 dag 4532") - self.assertEqual("abc 4532", found) - - - def testOptionalThenRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ( ID INT )? ID^ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a 1 b") - self.assertEqual("(b a 1)", found) - - - def testLabeledStringRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : v='void'^ ID ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEqual("(void foo ;)", found) - - - def testWildcard(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : v='void'^ . ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEqual("(void foo ;)", found) - - - def testWildcardRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : v='void' .^ ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEqual("(foo void ;)", found) - - - def testWildcardRootWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : v='void' x=.^ ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEqual("(foo void ;)", found) - - - def testWildcardRootWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : v='void' x=.^ ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEqual("(foo void ;)", found) - - - def testWildcardBangWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : v='void' x=.! 
';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "void foo;") - self.assertEqual("void ;", found) - - - def testRootRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID^ INT^ ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a 34 c") - self.assertEqual("(34 a c)", found) - - - def testRootRoot2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT^ ID^ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a 34 c") - self.assertEqual("(c (34 a))", found) - - - def testRootThenRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID^ (INT '*'^ ID)+ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a 34 * b 9 * c") - self.assertEqual("(* (* (a 34) b 9) c)", found) - - - def testNestedSubrule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : 'void' (({pass}ID|INT) ID | 'null' ) ';' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "void a b;") - self.assertEqual("void a b ;", found) - - - def testInvokeRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : type ID ; - type : {pass}'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a") - self.assertEqual("int a", found) - - - def testInvokeRuleAsRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : type^ ID ; - type : {pass}'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a") - self.assertEqual("(int a)", found) - - - def testInvokeRuleAsRootWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : x=type^ ID ; - type : {pass}'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a") - self.assertEqual("(int a)", found) - - - def testInvokeRuleAsRootWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : x+=type^ ID ; - type : {pass}'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a") - self.assertEqual("(int a)", found) - - - def testRuleRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID ('+'^ ID)* ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a+b+c+d") - self.assertEqual("(+ (+ (+ a b) c) d)", found) - - - def testRuleInvocationRuleRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID (op^ ID)* ; - op : {pass}'+' | '-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = 
self.execParser(grammar, "a", "a+b+c-d") - self.assertEqual("(- (+ (+ a b) c) d)", found) - - - def testTailRecursion(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - s : a ; - a : atom ('exp'^ a)? ; - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "s", "3 exp 4 exp 5") - self.assertEqual("(exp 3 (exp 4 5))", found) - - - def testSet(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID|INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("abc", found) - - - def testSetRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ('+' | '-')^ ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "+abc") - self.assertEqual("(+ abc)", found) - - - @testbase.broken( - "FAILS until antlr.g rebuilt in v3", testbase.GrammarCompileError) - def testSetRootWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : x=('+' | '-')^ ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "+abc") - self.assertEqual("(+ abc)", found) - - - def testSetAsRuleRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID (('+'|'-')^ ID)* ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a+b-c") - self.assertEqual("(- (+ a b) c)", found) - - - def testNotSet(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ~ID '+' INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "34+2") - self.assertEqual("34 + 2", found) - - - def testNotSetWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : x=~ID '+' INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "34+2") - self.assertEqual("34 + 2", found) - - - def testNotSetWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : x=~ID '+' INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "34+2") - self.assertEqual("34 + 2", found) - - - def testNotSetRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ~'+'^ INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "34 55") - self.assertEqual("(34 55)", found) - - - def testNotSetRootWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ~'+'^ INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "34 55") - self.assertEqual("(34 55)", found) - - - def testNotSetRootWithListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ~'+'^ INT ; - ID : 
'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "34 55") - self.assertEqual("(34 55)", found) - - - def testNotSetRuleRootInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : INT (~INT^ INT)* ; - blort : '+' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "3+4+5") - self.assertEqual("(+ (+ 3 4) 5)", found) - - - @testbase.broken("FIXME: What happened to the semicolon?", AssertionError) - def testTokenLabelReuse(self): - # check for compilation problem due to multiple defines - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a returns [result] : id=ID id=ID {$result = "2nd id="+$id.text+";"} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("2nd id=b;a b", found) - - - def testTokenLabelReuse2(self): - # check for compilation problem due to multiple defines - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a returns [result]: id=ID id=ID^ {$result = "2nd id="+$id.text+','} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("2nd id=b,(b a)", found) - - - def testTokenListLabelReuse(self): - # check for compilation problem due to multiple defines - # make sure ids has both ID tokens - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a returns [result] : ids+=ID ids+=ID {$result = "id list=[{}],".format(",".join([t.text for t in $ids]))} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - expecting = "id list=[a,b],a b" - self.assertEqual(expecting, found) - - - def testTokenListLabelReuse2(self): - # check for compilation problem due to multiple defines - # make sure ids has both ID tokens - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a returns [result] : ids+=ID^ ids+=ID {$result = "id list=[{}],".format(",".join([t.text for t in $ids]))} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - expecting = "id list=[a,b],(a b)" - self.assertEqual(expecting, found) - - - def testTokenListLabelRuleRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : id+=ID^ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEqual("a", found) - - - def testTokenListLabelBang(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : id+=ID! 
; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEqual("", found) - - - def testRuleListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a returns [result]: x+=b x+=b { - t=$x[1] - $result = "2nd x="+t.toStringTree()+','; - }; - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("2nd x=b,a b", found) - - - def testRuleListLabelRuleRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a returns [result] : ( x+=b^ )+ { - $result = "x="+$x[1].toStringTree()+','; - } ; - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("x=(b a),(b a)", found) - - - def testRuleListLabelBang(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a returns [result] : x+=b! x+=b { - $result = "1st x="+$x[0].toStringTree()+','; - } ; - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("1st x=a,b", found) - - - def testComplicatedMelange(self): - # check for compilation problem - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : A b=B b=B c+=C c+=C D {s = $D.text} ; - A : 'a' ; - B : 'b' ; - C : 'c' ; - D : 'd' ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b b c c d") - self.assertEqual("a b b c c d", found) - - - def testReturnValueWithAST(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a returns [result] : ID b { $result = str($b.i) + '\n';} ; - b returns [i] : INT {$i=int($INT.text);} ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEqual("34\nabc 34", found) - - - def testSetLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { language=Python3;output=AST; } - r : (INT|ID)+ ; - ID : 'a'..'z' + ; - INT : '0'..'9' +; - WS: (' ' | '\n' | '\\t')+ {$channel = HIDDEN}; - ''') - - found = self.execParser(grammar, "r", "abc 34 d") - self.assertEqual("abc 34 d", found) - - - def testExtraTokenInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - decl : type^ ID '='! INT ';'! ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "decl", "int 34 x=1;", - expectErrors=True) - self.assertEqual(["line 1:4 extraneous input '34' expecting ID"], - errors) - self.assertEqual("(int x 1)", found) # tree gets correct x and 1 tokens - - - def testMissingIDInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - tokens {EXPR;} - decl : type^ ID '='! INT ';'! 
;
-        type : 'int' | 'float' ;
-        ID : 'a'..'z'+ ;
-        INT : '0'..'9'+;
-        WS : (' '|'\n') {$channel=HIDDEN} ;
-        ''')
-
-        found, errors = self.execParser(grammar, "decl", "int =1;",
-                                        expectErrors=True)
-        self.assertEqual(["line 1:4 missing ID at '='"], errors)
-        self.assertEqual("(int <missing ID> 1)", found) # tree gets invented ID token
-
-
-    def testMissingSetInSimpleDecl(self):
-        grammar = textwrap.dedent(
-            r'''
-            grammar foo;
-            options {language=Python3;output=AST;}
-            tokens {EXPR;}
-            decl : type^ ID '='! INT ';'! ;
-            type : 'int' | 'float' ;
-            ID : 'a'..'z'+ ;
-            INT : '0'..'9'+;
-            WS : (' '|'\n') {$channel=HIDDEN} ;
-            ''')
-
-        found, errors = self.execParser(grammar, "decl", "x=1;",
-                                        expectErrors=True)
-        self.assertEqual(["line 1:0 mismatched input 'x' expecting set None"], errors)
-        self.assertEqual("(<error: x> x 1)", found) # tree gets invented ID token
-
-
-    def testMissingTokenGivesErrorNode(self):
-        grammar = textwrap.dedent(
-            r'''
-            grammar foo;
-            options {language=Python3;output=AST;}
-            a : ID INT ; // follow is EOF
-            ID : 'a'..'z'+ ;
-            INT : '0'..'9'+;
-            WS : (' '|'\n') {$channel=HIDDEN} ;
-            ''')
-
-        found, errors = self.execParser(grammar, "a", "abc", expectErrors=True)
-        self.assertEqual(["line 1:3 missing INT at '<EOF>'"], errors)
-        self.assertEqual("abc <missing INT>", found)
-
-
-    def testMissingTokenGivesErrorNodeInInvokedRule(self):
-        grammar = textwrap.dedent(
-            r'''
-            grammar foo;
-            options {language=Python3;output=AST;}
-            a : b ;
-            b : ID INT ; // follow should see EOF
-            ID : 'a'..'z'+ ;
-            INT : '0'..'9'+;
-            WS : (' '|'\n') {$channel=HIDDEN} ;
-            ''')
-
-        found, errors = self.execParser(grammar, "a", "abc", expectErrors=True)
-        self.assertEqual(["line 1:3 mismatched input '<EOF>' expecting INT"], errors)
-        self.assertEqual("<mismatched token: [@1,3:3='<EOF>',<-1>,1:3], resync=abc>", found)
-
-
-    def testExtraTokenGivesErrorNode(self):
-        grammar = textwrap.dedent(
-            r'''
-            grammar foo;
-            options {language=Python3;output=AST;}
-            a : b c ;
-            b : ID ;
-            c : INT ;
-            ID : 'a'..'z'+ ;
-            INT : '0'..'9'+;
-            WS : (' '|'\n') {$channel=HIDDEN} ;
-            ''')
-
-        found, errors = self.execParser(grammar, "a", "abc ick 34",
-                                        expectErrors=True)
-        self.assertEqual(["line 1:4 extraneous input 'ick' expecting INT"],
-                         errors)
-        self.assertEqual("abc 34", found)
-
-
-    def testMissingFirstTokenGivesErrorNode(self):
-        grammar = textwrap.dedent(
-            r'''
-            grammar foo;
-            options {language=Python3;output=AST;}
-            a : ID INT ;
-            ID : 'a'..'z'+ ;
-            INT : '0'..'9'+;
-            WS : (' '|'\n') {$channel=HIDDEN} ;
-            ''')
-
-        found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
-        self.assertEqual(["line 1:0 missing ID at '34'"], errors)
-        self.assertEqual("<missing ID> 34", found)
-
-
-    def testMissingFirstTokenGivesErrorNode2(self):
-        grammar = textwrap.dedent(
-            r'''
-            grammar foo;
-            options {language=Python3;output=AST;}
-            a : b c ;
-            b : ID ;
-            c : INT ;
-            ID : 'a'..'z'+ ;
-            INT : '0'..'9'+;
-            WS : (' '|'\n') {$channel=HIDDEN} ;
-            ''')
-
-        found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
-
-        # finds an error at the first token, 34, and re-syncs.
-        # re-synchronizing does not consume a token because 34 follows
-        # ref to rule b (start of c). It then matches 34 in c.
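The comment above is the heart of these error-node tests: on a missing first token the parser reports the error once, inserts a <missing ID> node without consuming input, and then matches the remaining input normally. A minimal driver sketch for reproducing this outside the test harness follows; it assumes ANTLR 3.5 has already generated fooLexer.py and fooParser.py from the 'foo' grammar above (the fooLexer/fooParser names follow the generator's <grammar>Lexer/<grammar>Parser convention and are not part of this repository), and it uses only runtime calls that appear verbatim in these tests:

    import antlr3
    from fooLexer import fooLexer      # hypothetical generated module
    from fooParser import fooParser    # hypothetical generated module

    cStream = antlr3.StringStream("34")         # input missing the leading ID
    lexer = fooLexer(cStream)
    tStream = antlr3.CommonTokenStream(lexer)
    parser = fooParser(tStream)
    r = parser.a()                              # start rule: a : b c ;
    print(r.tree.toStringTree())                # '<missing ID> 34', matching the
                                                # assertions that follow

Unlike the TParser subclass built by parserClass above, this sketch does not override emitErrorMessage, so the single recovery message goes to stderr rather than being collected for assertion.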
- self.assertEqual(["line 1:0 missing ID at '34'"], errors) - self.assertEqual(" 34", found) - - - def testNoViableAltGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a : b | c ; - b : ID ; - c : INT ; - ID : 'a'..'z'+ ; - S : '*' ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "a", "*", expectErrors=True) - self.assertEqual(["line 1:0 no viable alternative at input '*'"], - errors) - self.assertEqual(",1:0], resync=*>", - found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t058rewriteAST.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t058rewriteAST.py deleted file mode 100644 index bb59b507..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t058rewriteAST.py +++ /dev/null @@ -1,1505 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import testbase -import sys - -class TestRewriteAST(testbase.ANTLRTest): - def parserClass(self, base): - class TParser(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._errors = [] - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def emitErrorMessage(self, msg): - self._errors.append(msg) - - - return TParser - - - def lexerClass(self, base): - class TLexer(base): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._output = "" - - - def capture(self, t): - self._output += t - - - def traceIn(self, ruleName, ruleIndex): - self.traces.append('>'+ruleName) - - - def traceOut(self, ruleName, ruleIndex): - self.traces.append('<'+ruleName) - - - def recover(self, input, re): - # no error recovery yet, just crash! 
- raise - - return TLexer - - - def execParser(self, grammar, grammarEntry, input, expectErrors=False): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - - if not expectErrors: - self.assertEqual(len(parser._errors), 0, parser._errors) - - result = "" - - if r: - if hasattr(r, 'result'): - result += r.result - - if r.tree: - result += r.tree.toStringTree() - - if not expectErrors: - return result - - else: - return result, parser._errors - - - def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): - lexerCls, parserCls = self.compileInlineGrammar(grammar) - walkerCls = self.compileInlineGrammar(treeGrammar) - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream) - r = getattr(parser, grammarEntry)() - nodes = antlr3.tree.CommonTreeNodeStream(r.tree) - nodes.setTokenStream(tStream) - walker = walkerCls(nodes) - r = getattr(walker, treeEntry)() - - if r: - return r.tree.toStringTree() - - return "" - - - def testDelete(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT -> ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEqual("", found) - - - def testSingleToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID -> ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("abc", found) - - - def testSingleTokenToNewNode(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID -> ID["x"]; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("x", found) - - - def testSingleTokenToNewNodeRoot(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID -> ^(ID["x"] INT); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("(x INT)", found) - - - def testSingleTokenToNewNode2(self): - # Allow creation of new nodes w/o args. 
- grammar = textwrap.dedent( - r''' - grammar TT; - options {language=Python3;output=AST;} - a : ID -> ID[ ]; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("ID", found) - - - def testSingleCharLiteral(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : 'c' -> 'c'; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "c") - self.assertEqual("c", found) - - - def testSingleStringLiteral(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : 'ick' -> 'ick'; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "ick") - self.assertEqual("ick", found) - - - def testSingleRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : b -> b; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("abc", found) - - - def testReorderTokens(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT -> INT ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEqual("34 abc", found) - - - def testReorderTokenAndRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : b INT -> INT b; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEqual("34 abc", found) - - - def testTokenTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT -> ^(INT ID); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEqual("(34 abc)", found) - - - def testTokenTreeAfterOtherStuff(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : 'void' ID INT -> 'void' ^(INT ID); - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "void abc 34") - self.assertEqual("void (34 abc)", found) - - - def testNestedTokenTreeWithOuterLoop(self): - # verify that ID and INT both iterate over outer index variable - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {DUH;} - a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )+ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a 1 b 2") - self.assertEqual("(DUH a (DUH 1)) (DUH b (DUH 2))", found) - - - def testOptionalSingleToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID -> ID? 
; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("abc", found) - - - def testClosureSingleToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID ID -> ID* ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testPositiveClosureSingleToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID ID -> ID+ ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testOptionalSingleRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : b -> b?; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("abc", found) - - - def testClosureSingleRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : b b -> b*; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testClosureOfLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : x+=b x+=b -> $x*; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testOptionalLabelNoListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : (x=ID)? -> $x?; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEqual("a", found) - - - def testPositiveClosureSingleRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : b b -> b+; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testSinglePredicateT(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID -> {True}? ID -> ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("abc", found) - - - def testSinglePredicateF(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID -> {False}? ID -> ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc") - self.assertEqual("", found) - - - def testMultiplePredicate(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT -> {False}? ID - -> {True}? 
INT - -> - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a 2") - self.assertEqual("2", found) - - - def testMultiplePredicateTrees(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID INT -> {False}? ^(ID INT) - -> {True}? ^(INT ID) - -> ID - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a 2") - self.assertEqual("(2 a)", found) - - - def testSimpleTree(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : op INT -> ^(op INT); - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "-34") - self.assertEqual("(- 34)", found) - - - def testSimpleTree2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : op INT -> ^(INT op); - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "+ 34") - self.assertEqual("(34 +)", found) - - - - def testNestedTrees(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)+) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "var a:int; b:float;") - self.assertEqual("(var (: a int) (: b float))", found) - - - def testImaginaryTokenCopy(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {VAR;} - a : ID (',' ID)*-> ^(VAR ID)+ ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a,b,c") - self.assertEqual("(VAR a) (VAR b) (VAR c)", found) - - - def testTokenUnreferencedOnLeftButDefined(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {VAR;} - a : b -> ID ; - b : ID ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEqual("ID", found) - - - def testImaginaryTokenCopySetText(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {VAR;} - a : ID (',' ID)*-> ^(VAR["var"] ID)+ ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a,b,c") - self.assertEqual("(var a) (var b) (var c)", found) - - - def testImaginaryTokenNoCopyFromToken(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "{a b c}") - self.assertEqual("({ a b c)", found) - - - def testImaginaryTokenNoCopyFromTokenSetText(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : lc='{' ID+ '}' -> ^(BLOCK[$lc,"block"] ID+) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = 
self.execParser(grammar, "a", "{a b c}") - self.assertEqual("(block a b c)", found) - - - def testMixedRewriteAndAutoAST(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : b b^ ; // 2nd b matches only an INT; can make it root - b : ID INT -> INT ID - | INT - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a 1 2") - self.assertEqual("(2 1 a)", found) - - - def testSubruleWithRewrite(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : b b ; - b : (ID INT -> INT ID | INT INT -> INT+ ) - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a 1 2 3") - self.assertEqual("1 a 2 3", found) - - - def testSubruleWithRewrite2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {TYPE;} - a : b b ; - b : 'int' - ( ID -> ^(TYPE 'int' ID) - | ID '=' INT -> ^(TYPE 'int' ID INT) - ) - ';' - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a; int b=3;") - self.assertEqual("(TYPE int a) (TYPE int b 3)", found) - - - def testNestedRewriteShutsOffAutoAST(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : b b ; - b : ID ( ID (last=ID -> $last)+ ) ';' // get last ID - | INT // should still get auto AST construction - ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b c d; 42") - self.assertEqual("d 42", found) - - - def testRewriteActions(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : atom -> ^({self.adaptor.create(INT,"9")} atom) ; - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "3") - self.assertEqual("(9 3)", found) - - - def testRewriteActions2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : atom -> {self.adaptor.create(INT,"9")} atom ; - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "3") - self.assertEqual("9 3", found) - - - def testRefToOldValue(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : (atom -> atom) (op='+' r=atom -> ^($op $a $r) )* ; - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "3+4+5") - self.assertEqual("(+ (+ 3 4) 5)", found) - - - def testCopySemanticsForRules(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : atom -> ^(atom atom) ; // NOT CYCLE! 
(dup atom) - atom : INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "3") - self.assertEqual("(3 3)", found) - - - def testCopySemanticsForRules2(self): - # copy type as a root for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : type ID (',' ID)* ';' -> ^(type ID)+ ; - type : 'int' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a,b,c;") - self.assertEqual("(int a) (int b) (int c)", found) - - - def testCopySemanticsForRules3(self): - # copy type *and* modifier even though it's optional - # for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ; - type : 'int' ; - modifier : 'public' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "public int a,b,c;") - self.assertEqual("(int public a) (int public b) (int public c)", found) - - - def testCopySemanticsForRules3Double(self): - # copy type *and* modifier even though it's optional - # for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ^(type modifier? ID)+ ; - type : 'int' ; - modifier : 'public' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "public int a,b,c;") - self.assertEqual("(int public a) (int public b) (int public c) (int public a) (int public b) (int public c)", found) - - - def testCopySemanticsForRules4(self): - # copy type *and* modifier even though it's optional - # for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {MOD;} - a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? 
ID)+ ; - type : 'int' ; - modifier : 'public' ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "public int a,b,c;") - self.assertEqual("(int (MOD public) a) (int (MOD public) b) (int (MOD public) c)", found) - - - def testCopySemanticsLists(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {MOD;} - a : ID (',' ID)* ';' -> ID+ ID+ ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a,b,c;") - self.assertEqual("a b c a b c", found) - - - def testCopyRuleLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x=b -> $x $x; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEqual("a a", found) - - - def testCopyRuleLabel2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x=b -> ^($x $x); - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEqual("(a a)", found) - - - def testQueueingOfTokens(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : 'int' ID (',' ID)* ';' -> ^('int' ID+) ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a,b,c;") - self.assertEqual("(int a b c)", found) - - - def testCopyOfTokens(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : 'int' ID ';' -> 'int' ID 'int' ID ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a;") - self.assertEqual("int a int a", found) - - - def testTokenCopyInLoop(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : 'int' ID (',' ID)* ';' -> ^('int' ID)+ ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a,b,c;") - self.assertEqual("(int a) (int b) (int c)", found) - - - def testTokenCopyInLoopAgainstTwoOthers(self): - # must smear 'int' copies across as root of multiple trees - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)+ ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "int a:1,b:2,c:3;") - self.assertEqual("(int a 1) (int b 2) (int c 3)", found) - - - def testListRefdOneAtATime(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID+ -> ID ID ID ; // works if 3 input IDs - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b c") - self.assertEqual("a b c", found) - - - def testSplitListWithLabels(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {VAR;} - a : first=ID others+=ID* -> $first VAR $others+ ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, 
"a", "a b c") - self.assertEqual("a VAR b c", found) - - - def testComplicatedMelange(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : A A b=B B b=B c+=C C c+=C D {s=$D.text} -> A+ B+ C+ D ; - type : 'int' | 'float' ; - A : 'a' ; - B : 'b' ; - C : 'c' ; - D : 'd' ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a a b b b c c c d") - self.assertEqual("a a b b b c c c d", found) - - - def testRuleLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x=b -> $x; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEqual("a", found) - - - def testAmbiguousRule(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID a -> a | INT ; - ID : 'a'..'z'+ ; - INT: '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, - "a", "abc 34") - self.assertEqual("34", found) - - - def testRuleListLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x+=b x+=b -> $x+; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testRuleListLabel2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x+=b x+=b -> $x $x*; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testOptional(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x=b (y=b)? -> $x $y?; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEqual("a", found) - - - def testOptional2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x=ID (y=b)? -> $x $y?; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testOptional3(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x=ID (y=b)? -> ($x $y)?; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testOptional4(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x+=ID (y=b)? -> ($x $y)?; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("a b", found) - - - def testOptional5(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : ID -> ID? 
; // match an ID to optional ID - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a") - self.assertEqual("a", found) - - - def testArbitraryExprType(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : x+=b x+=b -> {CommonTree(None)}; - b : ID ; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "a b") - self.assertEqual("", found) - - - def testSet(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a: (INT|ID)+ -> INT+ ID+ ; - INT: '0'..'9'+; - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "2 a 34 de") - self.assertEqual("2 34 a de", found) - - - def testSet2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a: (INT|ID) -> INT? ID? ; - INT: '0'..'9'+; - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "2") - self.assertEqual("2", found) - - - @testbase.broken("http://www.antlr.org:8888/browse/ANTLR-162", - antlr3.tree.RewriteEmptyStreamException) - def testSetWithLabel(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : x=(INT|ID) -> $x ; - INT: '0'..'9'+; - ID : 'a'..'z'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "2") - self.assertEqual("2", found) - - - def testRewriteAction(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens { FLOAT; } - r - : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text+".0"))} - ; - INT : '0'..'9'+; - WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN}; - ''') - - found = self.execParser(grammar, "r", "25") - self.assertEqual("25.0", found) - - - def testOptionalSubruleWithoutRealElements(self): - # copy type *and* modifier even though it's optional - # for each invocation of (...)+ in rewrite - grammar = textwrap.dedent( - r""" - grammar T; - options {language=Python3;output=AST;} - tokens {PARMS;} - - modulo - : 'modulo' ID ('(' parms+ ')')? -> ^('modulo' ID ^(PARMS parms+)?) - ; - parms : '#'|ID; - ID : ('a'..'z' | 'A'..'Z')+; - WS : (' '|'\n') {$channel=HIDDEN} ; - """) - - found = self.execParser(grammar, "modulo", "modulo abc (x y #)") - self.assertEqual("(modulo abc (PARMS x y #))", found) - - - ## C A R D I N A L I T Y I S S U E S - - def testCardinality(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - tokens {BLOCK;} - a : ID ID INT INT INT -> (ID INT)+; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - self.assertRaises(antlr3.tree.RewriteCardinalityException, - self.execParser, grammar, "a", "a b 3 4 5") - - - def testCardinality2(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID+ -> ID ID ID ; // only 2 input IDs - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - self.assertRaises(antlr3.tree.RewriteCardinalityException, - self.execParser, grammar, "a", "a b") - - - def testCardinality3(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID? 
INT -> ID INT ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - self.assertRaises(antlr3.tree.RewriteEmptyStreamException, - self.execParser, grammar, "a", "3") - - - def testLoopCardinality(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID? INT -> ID+ INT ; - op : '+'|'-' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - self.assertRaises(antlr3.tree.RewriteEarlyExitException, - self.execParser, grammar, "a", "3") - - - def testWildcard(self): - grammar = textwrap.dedent( - r''' - grammar T; - options {language=Python3;output=AST;} - a : ID c=. -> $c; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found = self.execParser(grammar, "a", "abc 34") - self.assertEqual("34", found) - - - # E R R O R S - - def testExtraTokenInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - tokens {EXPR;} - decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "decl", "int 34 x=1;", - expectErrors=True) - self.assertEqual(["line 1:4 extraneous input '34' expecting ID"], - errors) - self.assertEqual("(EXPR int x 1)", found) # tree gets correct x and 1 tokens - - - #@testbase.broken("FIXME", AssertionError) - def testMissingIDInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - tokens {EXPR;} - decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "decl", "int =1;", - expectErrors=True) - self.assertEqual(["line 1:4 missing ID at '='"], errors) - self.assertEqual("(EXPR int <missing ID> 1)", found) # tree gets invented ID token - - - def testMissingSetInSimpleDecl(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - tokens {EXPR;} - decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ; - type : 'int' | 'float' ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "decl", "x=1;", - expectErrors=True) - self.assertEqual(["line 1:0 mismatched input 'x' expecting set None"], - errors) - self.assertEqual("(EXPR <error: x> x 1)", found) # tree gets an error node where type failed - - - def testMissingTokenGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a : ID INT -> ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "a", "abc", - expectErrors=True) - self.assertEqual(["line 1:3 missing INT at '<EOF>'"], errors) - # doesn't do in-line recovery for sets (yet?)
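- # Recovery conjures the missing token via single-token insertion; its - # placeholder text (presumably built by BaseRecognizer.getMissingSymbol - # as "<missing " + tokenName + ">") lands verbatim in the flat tree - # string checked below.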
- self.assertEqual("abc ", found) - - - def testExtraTokenGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a : b c -> b c; - b : ID -> ID ; - c : INT -> INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "a", "abc ick 34", - expectErrors=True) - self.assertEqual(["line 1:4 extraneous input 'ick' expecting INT"], - errors) - self.assertEqual("abc 34", found) - - - #@testbase.broken("FIXME", AssertionError) - def testMissingFirstTokenGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a : ID INT -> ID INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "a", "34", expectErrors=True) - self.assertEqual(["line 1:0 missing ID at '34'"], errors) - self.assertEqual(" 34", found) - - - #@testbase.broken("FIXME", AssertionError) - def testMissingFirstTokenGivesErrorNode2(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a : b c -> b c; - b : ID -> ID ; - c : INT -> INT ; - ID : 'a'..'z'+ ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "a", "34", expectErrors=True) - # finds an error at the first token, 34, and re-syncs. - # re-synchronizing does not consume a token because 34 follows - # ref to rule b (start of c). It then matches 34 in c. - self.assertEqual(["line 1:0 missing ID at '34'"], errors) - self.assertEqual(" 34", found) - - - def testNoViableAltGivesErrorNode(self): - grammar = textwrap.dedent( - r''' - grammar foo; - options {language=Python3;output=AST;} - a : b -> b | c -> c; - b : ID -> ID ; - c : INT -> INT ; - ID : 'a'..'z'+ ; - S : '*' ; - INT : '0'..'9'+; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - found, errors = self.execParser(grammar, "a", "*", expectErrors=True) - # finds an error at the first token, 34, and re-syncs. - # re-synchronizing does not consume a token because 34 follows - # ref to rule b (start of c). It then matches 34 in c. 
- self.assertEqual(["line 1:0 no viable alternative at input '*'"], - errors); - self.assertEqual(",1:0], resync=*>", - found) - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t059debug.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t059debug.py deleted file mode 100644 index 8e129f73..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t059debug.py +++ /dev/null @@ -1,787 +0,0 @@ -import unittest -import textwrap -import antlr3 -import antlr3.tree -import antlr3.debug -import testbase -import sys -import threading -import socket -import errno -import time - -class Debugger(threading.Thread): - def __init__(self, port): - super().__init__() - self.events = [] - self.success = False - self.port = port - - def run(self): - # create listening socket - s = None - tstart = time.time() - while time.time() - tstart < 10: - try: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.connect(('127.0.0.1', self.port)) - break - except socket.error as exc: - if s: - s.close() - if exc.args[0] != errno.ECONNREFUSED: - raise - time.sleep(0.1) - - if s is None: - self.events.append(['nosocket']) - return - - s.setblocking(1) - s.settimeout(10.0) - - output = s.makefile('w', 1) - input = s.makefile('r', 1) - - try: - # handshake - l = input.readline().strip() - assert l == 'ANTLR 2' - l = input.readline().strip() - assert l.startswith('grammar "'), l - - output.write('ACK\n') - output.flush() - - while True: - event = input.readline().strip() - self.events.append(event.split('\t')) - - output.write('ACK\n') - output.flush() - - if event == 'terminate': - self.success = True - break - - except socket.timeout: - self.events.append(['timeout']) - except socket.error as exc: - self.events.append(['socketerror', exc.args]) - finally: - output.close() - input.close() - s.close() - - -class T(testbase.ANTLRTest): - def execParser(self, grammar, grammarEntry, input, listener, - parser_args={}): - if listener is None: - port = 49100 - debugger = Debugger(port) - debugger.start() - # TODO(pink): install alarm, so it doesn't hang forever in case of a bug - - else: - port = None - - try: - lexerCls, parserCls = self.compileInlineGrammar( - grammar, options='-debug') - - cStream = antlr3.StringStream(input) - lexer = lexerCls(cStream) - tStream = antlr3.CommonTokenStream(lexer) - parser = parserCls(tStream, dbg=listener, port=port, **parser_args) - getattr(parser, grammarEntry)() - - finally: - if listener is None: - debugger.join() - return debugger - - def testBasicParser(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ID EOF; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - listener = antlr3.debug.RecordDebugEventListener() - - self.execParser( - grammar, 'a', - input="a", - listener=listener) - - # We only check that some LT events are present. How many is subject - # to change (at the time of writing there are two, which is one too - # many). - lt_events = [event for event in listener.events - if event.startswith("LT ")] - self.assertNotEqual(lt_events, []) - - # For the rest, filter out LT events to get a reliable test. 
- expected = ["enterRule a", - "location 6:1", - "location 6:5", - "location 6:8", - "location 6:11", - "exitRule a"] - found = [event for event in listener.events - if not event.startswith("LT ")] - self.assertListEqual(found, expected) - - def testSocketProxy(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ID EOF; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '6', '8'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['consumeToken', '-1', '-1', '0', '1', '1', '"'], - ['location', '6', '11'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - def testRecognitionException(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ID EOF; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a b", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '5', '99', '1', '1', '"'], - ['location', '6', '8'], - ['LT', '1', '2', '4', '0', '1', '2', '"b'], - ['LT', '1', '2', '4', '0', '1', '2', '"b'], - ['LT', '2', '-1', '-1', '0', '1', '3', '"'], - ['LT', '1', '2', '4', '0', '1', '2', '"b'], - ['LT', '1', '2', '4', '0', '1', '2', '"b'], - ['beginResync'], - ['consumeToken', '2', '4', '0', '1', '2', '"b'], - ['endResync'], - ['exception', 'UnwantedTokenException', '2', '1', '2'], - ['LT', '1', '-1', '-1', '0', '1', '3', '"'], - ['consumeToken', '-1', '-1', '0', '1', '3', '"'], - ['location', '6', '11'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testSemPred(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : {True}? 
ID EOF; - ID : 'a'..'z'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['semanticPredicate', '1', 'True'], - ['location', '6', '13'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '6', '16'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['consumeToken', '-1', '-1', '0', '1', '1', '"'], - ['location', '6', '19'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testPositiveClosureBlock(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ID ( ID | INT )+ EOF; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a 1 b c 3", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'], - ['location', '6', '8'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['consumeToken', '2', '5', '0', '1', '2', '"1'], - ['consumeHiddenToken', '3', '6', '99', '1', '3', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '4', '4', '0', '1', '4', '"b'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '4', '4', '0', '1', '4', '"b'], - ['consumeToken', '4', '4', '0', '1', '4', '"b'], - ['consumeHiddenToken', '5', '6', '99', '1', '5', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '6', '4', '0', '1', '6', '"c'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '6', '4', '0', '1', '6', '"c'], - ['consumeToken', '6', '4', '0', '1', '6', '"c'], - ['consumeHiddenToken', '7', '6', '99', '1', '7', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '8', '5', '0', '1', '8', '"3'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '8', '5', '0', '1', '8', '"3'], - ['consumeToken', '8', '5', '0', '1', '8', '"3'], - ['enterDecision', '1', '0'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['exitDecision', '1'], - ['exitSubRule', '1'], - ['location', '6', '22'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['consumeToken', '-1', '-1', '0', '1', '9', '"'], - ['location', '6', '25'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testClosureBlock(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ID ( ID | INT )* EOF; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a 1 b c 3", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', 
'1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'], - ['location', '6', '8'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['consumeToken', '2', '5', '0', '1', '2', '"1'], - ['consumeHiddenToken', '3', '6', '99', '1', '3', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '4', '4', '0', '1', '4', '"b'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '4', '4', '0', '1', '4', '"b'], - ['consumeToken', '4', '4', '0', '1', '4', '"b'], - ['consumeHiddenToken', '5', '6', '99', '1', '5', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '6', '4', '0', '1', '6', '"c'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '6', '4', '0', '1', '6', '"c'], - ['consumeToken', '6', '4', '0', '1', '6', '"c'], - ['consumeHiddenToken', '7', '6', '99', '1', '7', '"'], - ['enterDecision', '1', '0'], - ['LT', '1', '8', '5', '0', '1', '8', '"3'], - ['exitDecision', '1'], - ['enterAlt', '1'], - ['location', '6', '8'], - ['LT', '1', '8', '5', '0', '1', '8', '"3'], - ['consumeToken', '8', '5', '0', '1', '8', '"3'], - ['enterDecision', '1', '0'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['exitDecision', '1'], - ['exitSubRule', '1'], - ['location', '6', '22'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['LT', '1', '-1', '-1', '0', '1', '9', '"'], - ['consumeToken', '-1', '-1', '0', '1', '9', '"'], - ['location', '6', '25'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testMismatchedSetException(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ID ( ID | INT ) EOF; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '6', '8'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['exception', 'MismatchedSetException', '1', '1', '1'], - ['exception', 'MismatchedSetException', '1', '1', '1'], - ['beginResync'], - ['LT', '1', '-1', '-1', '0', '1', '1', '"'], - ['endResync'], - ['location', '6', '24'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testBlock(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ID ( b | c ) EOF; - b : ID; - c : INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a 1", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', 
'4', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'], - ['location', '6', '8'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['exitDecision', '1'], - ['enterAlt', '2'], - ['location', '6', '14'], - ['enterRule', 'T.g', 'c'], - ['location', '8', '1'], - ['enterAlt', '1'], - ['location', '8', '5'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['LT', '1', '2', '5', '0', '1', '2', '"1'], - ['consumeToken', '2', '5', '0', '1', '2', '"1'], - ['location', '8', '8'], - ['exitRule', 'T.g', 'c'], - ['exitSubRule', '1'], - ['location', '6', '18'], - ['LT', '1', '-1', '-1', '0', '1', '3', '"'], - ['LT', '1', '-1', '-1', '0', '1', '3', '"'], - ['consumeToken', '-1', '-1', '0', '1', '3', '"'], - ['location', '6', '21'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testNoViableAlt(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ID ( b | c ) EOF; - b : ID; - c : INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - BANG : '!' ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a !", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['consumeToken', '0', '5', '0', '1', '0', '"a'], - ['consumeHiddenToken', '1', '7', '99', '1', '1', '"'], - ['location', '6', '8'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '2', '4', '0', '1', '2', '"!'], - ['LT', '1', '2', '4', '0', '1', '2', '"!'], - ['LT', '1', '2', '4', '0', '1', '2', '"!'], - ['exception', 'NoViableAltException', '2', '1', '2'], - ['exitDecision', '1'], - ['exitSubRule', '1'], - ['exception', 'NoViableAltException', '2', '1', '2'], - ['beginResync'], - ['LT', '1', '2', '4', '0', '1', '2', '"!'], - ['consumeToken', '2', '4', '0', '1', '2', '"!'], - ['LT', '1', '-1', '-1', '0', '1', '3', '"'], - ['endResync'], - ['location', '6', '21'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testRuleBlock(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : b | c; - b : ID; - c : INT; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="1", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterDecision', '1', '0'], - ['LT', '1', '0', '5', '0', '1', '0', '"1'], - ['exitDecision', '1'], - ['enterAlt', '2'], - ['location', '6', '9'], - ['enterRule', 'T.g', 'c'], - ['location', '8', '1'], - ['enterAlt', '1'], - ['location', '8', '5'], - ['LT', '1', '0', '5', '0', '1', '0', '"1'], - ['LT', '1', '0', '5', '0', '1', '0', '"1'], - ['consumeToken', '0', '5', '0', '1', '0', '"1'], - ['location', '8', '8'], - ['exitRule', 'T.g', 'c'], - ['location', '6', '10'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testRuleBlockSingleAlt(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : b; - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - 
input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['enterRule', 'T.g', 'b'], - ['location', '7', '1'], - ['enterAlt', '1'], - ['location', '7', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '7', '7'], - ['exitRule', 'T.g', 'b'], - ['location', '6', '6'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testBlockSingleAlt(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ( b ); - b : ID; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['enterAlt', '1'], - ['location', '6', '7'], - ['enterRule', 'T.g', 'b'], - ['location', '7', '1'], - ['enterAlt', '1'], - ['location', '7', '5'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['LT', '1', '0', '4', '0', '1', '0', '"a'], - ['consumeToken', '0', '4', '0', '1', '0', '"a'], - ['location', '7', '7'], - ['exitRule', 'T.g', 'b'], - ['location', '6', '10'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testDFA(self): - grammar = textwrap.dedent( - r''' - grammar T; - options { - language=Python3; - } - a : ( b | c ) EOF; - b : ID* INT; - c : ID+ BANG; - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - BANG : '!'; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - debugger = self.execParser( - grammar, 'a', - input="a!", - listener=None) - - self.assertTrue(debugger.success) - expected = [['enterRule', 'T.g', 'a'], - ['location', '6', '1'], - ['enterAlt', '1'], - ['location', '6', '5'], - ['enterSubRule', '1'], - ['enterDecision', '1', '0'], - ['mark', '0'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['consumeToken', '0', '5', '0', '1', '0', '"a'], - ['LT', '1', '1', '4', '0', '1', '1', '"!'], - ['consumeToken', '1', '4', '0', '1', '1', '"!'], - ['rewind', '0'], - ['exitDecision', '1'], - ['enterAlt', '2'], - ['location', '6', '11'], - ['enterRule', 'T.g', 'c'], - ['location', '8', '1'], - ['enterAlt', '1'], - ['location', '8', '5'], - ['enterSubRule', '3'], - ['enterDecision', '3', '0'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['exitDecision', '3'], - ['enterAlt', '1'], - ['location', '8', '5'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['LT', '1', '0', '5', '0', '1', '0', '"a'], - ['consumeToken', '0', '5', '0', '1', '0', '"a'], - ['enterDecision', '3', '0'], - ['LT', '1', '1', '4', '0', '1', '1', '"!'], - ['exitDecision', '3'], - ['exitSubRule', '3'], - ['location', '8', '9'], - ['LT', '1', '1', '4', '0', '1', '1', '"!'], - ['LT', '1', '1', '4', '0', '1', '1', '"!'], - ['consumeToken', '1', '4', '0', '1', '1', '"!'], - ['location', '8', '13'], - ['exitRule', 'T.g', 'c'], - ['exitSubRule', '1'], - ['location', '6', '15'], - ['LT', '1', '-1', '-1', '0', '1', '2', '"'], - ['LT', '1', '-1', '-1', '0', '1', '2', '"'], - ['consumeToken', '-1', '-1', '0', '1', '2', '"'], - ['location', '6', '18'], - ['exitRule', 'T.g', 'a'], - ['terminate']] - - self.assertListEqual(debugger.events, expected) - - - def testBasicAST(self): - grammar = textwrap.dedent( - r''' - grammar T; - 
options { - language=Python3; - output=AST; - } - a : ( b | c ) EOF!; - b : ID* INT -> ^(INT ID*); - c : ID+ BANG -> ^(BANG ID+); - ID : 'a'..'z'+ ; - INT : '0'..'9'+ ; - BANG : '!'; - WS : (' '|'\n') {$channel=HIDDEN} ; - ''') - - listener = antlr3.debug.RecordDebugEventListener() - - self.execParser( - grammar, 'a', - input="a!", - listener=listener) - - # don't check output for now (too dynamic), I'm satisfied if it - # doesn't crash - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t060leftrecursion.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t060leftrecursion.py deleted file mode 100644 index 05b5bc00..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t060leftrecursion.py +++ /dev/null @@ -1,468 +0,0 @@ -import unittest -import re -import textwrap -import antlr3 -import testbase - - -# Left-recursion resolution is not yet enabled in the tool. - -# class TestLeftRecursion(testbase.ANTLRTest): -# def parserClass(self, base): -# class TParser(base): -# def __init__(self, *args, **kwargs): -# super().__init__(*args, **kwargs) - -# self._output = "" - - -# def capture(self, t): -# self._output += str(t) - - -# def recover(self, input, re): -# # no error recovery yet, just crash! -# raise - -# return TParser - - -# def execParser(self, grammar, grammarEntry, input): -# lexerCls, parserCls = self.compileInlineGrammar(grammar) - -# cStream = antlr3.StringStream(input) -# lexer = lexerCls(cStream) -# tStream = antlr3.CommonTokenStream(lexer) -# parser = parserCls(tStream) -# getattr(parser, grammarEntry)() -# return parser._output - - -# def runTests(self, grammar, tests, grammarEntry): -# lexerCls, parserCls = self.compileInlineGrammar(grammar) - -# build_ast = re.search(r'output\s*=\s*AST', grammar) - -# for input, expecting in tests: -# cStream = antlr3.StringStream(input) -# lexer = lexerCls(cStream) -# tStream = antlr3.CommonTokenStream(lexer) -# parser = parserCls(tStream) -# r = getattr(parser, grammarEntry)() -# found = parser._output -# if build_ast: -# found += r.tree.toStringTree() - -# self.assertEqual( -# expecting, found, -# "{!r} != {!r} (for input {!r})".format(expecting, found, input)) - - -# def testSimple(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# } -# s : a { self.capture($a.text) } ; -# a : a ID -# | ID -# ; -# ID : 'a'..'z'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# found = self.execParser(grammar, 's', 'a b c') -# expecting = "abc" -# self.assertEqual(expecting, found) - - -# def testSemPred(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# } -# s : a { self.capture($a.text) } ; -# a : a {True}? ID -# | ID -# ; -# ID : 'a'..'z'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# found = self.execParser(grammar, "s", "a b c") -# expecting = "abc" -# self.assertEqual(expecting, found) - -# def testTernaryExpr(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# output=AST; -# } -# e : e '*'^ e -# | e '+'^ e -# | e '?'^ e ':'! e -# | e '='^ e -# | ID -# ; -# ID : 'a'..'z'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("a+b", "(+ a b)"), -# ("a*b", "(* a b)"), -# ("a?b:c", "(? a b c)"), -# ("a=b=c", "(= a (= b c))"), -# ("a?b+c:d", "(? a (+ b c) d)"), -# ("a?b=c:d", "(? a (= b c) d)"), -# ("a? b?c:d : e", "(? a (? b c d) e)"), -# ("a?b: c?d:e", "(? a b (? 
c d e))"), -# ] -# self.runTests(grammar, tests, "e") - - -# def testDeclarationsUsingASTOperators(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# output=AST; -# } -# declarator -# : declarator '['^ e ']'! -# | declarator '['^ ']'! -# | declarator '('^ ')'! -# | '*'^ declarator // binds less tight than suffixes -# | '('! declarator ')'! -# | ID -# ; -# e : INT ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("*a", "(* a)"), -# ("**a", "(* (* a))"), -# ("a[3]", "([ a 3)"), -# ("b[]", "([ b)"), -# ("(a)", "a"), -# ("a[]()", "(( ([ a))"), -# ("a[][]", "([ ([ a))"), -# ("*a[]", "(* ([ a))"), -# ("(*a)[]", "([ (* a))"), -# ] -# self.runTests(grammar, tests, "declarator") - - -# def testDeclarationsUsingRewriteOperators(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# output=AST; -# } -# declarator -# : declarator '[' e ']' -> ^('[' declarator e) -# | declarator '[' ']' -> ^('[' declarator) -# | declarator '(' ')' -> ^('(' declarator) -# | '*' declarator -> ^('*' declarator) // binds less tight than suffixes -# | '(' declarator ')' -> declarator -# | ID -> ID -# ; -# e : INT ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("*a", "(* a)"), -# ("**a", "(* (* a))"), -# ("a[3]", "([ a 3)"), -# ("b[]", "([ b)"), -# ("(a)", "a"), -# ("a[]()", "(( ([ a))"), -# ("a[][]", "([ ([ a))"), -# ("*a[]", "(* ([ a))"), -# ("(*a)[]", "([ (* a))"), -# ] -# self.runTests(grammar, tests, "declarator") - - -# def testExpressionsUsingASTOperators(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# output=AST; -# } -# e : e '.'^ ID -# | e '.'^ 'this' -# | '-'^ e -# | e '*'^ e -# | e ('+'^|'-'^) e -# | INT -# | ID -# ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("1", "1"), -# ("a+1", "(+ a 1)"), -# ("a*1", "(* a 1)"), -# ("a.b", "(. a b)"), -# ("a.this", "(. a this)"), -# ("a-b+c", "(+ (- a b) c)"), -# ("a+b*c", "(+ a (* b c))"), -# ("a.b+1", "(+ (. a b) 1)"), -# ("-a", "(- a)"), -# ("-a+b", "(+ (- a) b)"), -# ("-a.b", "(- (. a b))"), -# ] -# self.runTests(grammar, tests, "e") - - -# @testbase.broken( -# "Grammar compilation returns errors", testbase.GrammarCompileError) -# def testExpressionsUsingRewriteOperators(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# output=AST; -# } -# e : e '.' ID -> ^('.' e ID) -# | e '.' 'this' -> ^('.' e 'this') -# | '-' e -> ^('-' e) -# | e '*' b=e -> ^('*' e $b) -# | e (op='+'|op='-') b=e -> ^($op e $b) -# | INT -> INT -# | ID -> ID -# ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("1", "1"), -# ("a+1", "(+ a 1)"), -# ("a*1", "(* a 1)"), -# ("a.b", "(. a b)"), -# ("a.this", "(. a this)"), -# ("a+b*c", "(+ a (* b c))"), -# ("a.b+1", "(+ (. a b) 1)"), -# ("-a", "(- a)"), -# ("-a+b", "(+ (- a) b)"), -# ("-a.b", "(- (. 
a b))"), -# ] -# self.runTests(grammar, tests, "e") - - -# def testExpressionAssociativity(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# output=AST; -# } -# e -# : e '.'^ ID -# | '-'^ e -# | e '^'^ e -# | e '*'^ e -# | e ('+'^|'-'^) e -# | e ('='^ |'+='^) e -# | INT -# | ID -# ; -# ID : 'a'..'z'+ ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("1", "1"), -# ("a+1", "(+ a 1)"), -# ("a*1", "(* a 1)"), -# ("a.b", "(. a b)"), -# ("a-b+c", "(+ (- a b) c)"), -# ("a+b*c", "(+ a (* b c))"), -# ("a.b+1", "(+ (. a b) 1)"), -# ("-a", "(- a)"), -# ("-a+b", "(+ (- a) b)"), -# ("-a.b", "(- (. a b))"), -# ("a^b^c", "(^ a (^ b c))"), -# ("a=b=c", "(= a (= b c))"), -# ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"), -# ] -# self.runTests(grammar, tests, "e") - - -# def testJavaExpressions(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# output=AST; -# } -# expressionList -# : e (','! e)* -# ; -# e : '('! e ')'! -# | 'this' -# | 'super' -# | INT -# | ID -# | type '.'^ 'class' -# | e '.'^ ID -# | e '.'^ 'this' -# | e '.'^ 'super' '('^ expressionList? ')'! -# | e '.'^ 'new'^ ID '('! expressionList? ')'! -# | 'new'^ type ( '(' expressionList? ')'! | (options {k=1;}:'[' e ']'!)+) // ugly; simplified -# | e '['^ e ']'! -# | '('^ type ')'! e -# | e ('++'^ | '--'^) -# | e '('^ expressionList? ')'! -# | ('+'^|'-'^|'++'^|'--'^) e -# | ('~'^|'!'^) e -# | e ('*'^|'/'^|'%'^) e -# | e ('+'^|'-'^) e -# | e ('<'^ '<' | '>'^ '>' '>' | '>'^ '>') e -# | e ('<='^ | '>='^ | '>'^ | '<'^) e -# | e 'instanceof'^ e -# | e ('=='^ | '!='^) e -# | e '&'^ e -# | e '^'^ e -# | e '|'^ e -# | e '&&'^ e -# | e '||'^ e -# | e '?' e ':' e -# | e ('='^ -# |'+='^ -# |'-='^ -# |'*='^ -# |'/='^ -# |'&='^ -# |'|='^ -# |'^='^ -# |'>>='^ -# |'>>>='^ -# |'<<='^ -# |'%='^) e -# ; -# type: ID -# | ID '['^ ']'! -# | 'int' -# | 'int' '['^ ']'! -# ; -# ID : ('a'..'z'|'A'..'Z'|'_'|'$')+; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("a", "a"), -# ("1", "1"), -# ("a+1", "(+ a 1)"), -# ("a*1", "(* a 1)"), -# ("a.b", "(. a b)"), -# ("a-b+c", "(+ (- a b) c)"), -# ("a+b*c", "(+ a (* b c))"), -# ("a.b+1", "(+ (. a b) 1)"), -# ("-a", "(- a)"), -# ("-a+b", "(+ (- a) b)"), -# ("-a.b", "(- (. a b))"), -# ("a^b^c", "(^ a (^ b c))"), -# ("a=b=c", "(= a (= b c))"), -# ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"), -# ("a|b&c", "(| a (& b c))"), -# ("(a|b)&c", "(& (| a b) c)"), -# ("a > b", "(> a b)"), -# ("a >> b", "(> a b)"), # text is from one token -# ("a < b", "(< a b)"), -# ("(T)x", "(( T x)"), -# ("new A().b", "(. (new A () b)"), -# ("(T)t.f()", "(( (( T (. t f)))"), -# ("a.f(x)==T.c", "(== (( (. a f) x) (. T c))"), -# ("a.f().g(x,1)", "(( (. (( (. 
a f)) g) x 1)"), -# ("new T[((n-1) * x) + 1]", "(new T [ (+ (* (- n 1) x) 1))"), -# ] -# self.runTests(grammar, tests, "e") - - -# def testReturnValueAndActions(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# } -# s : e { self.capture($e.v) } ; -# e returns [v, ignored] -# : e '*' b=e {$v *= $b.v;} -# | e '+' b=e {$v += $b.v;} -# | INT {$v = int($INT.text);} -# ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("4", "4"), -# ("1+2", "3") -# ] -# self.runTests(grammar, tests, "s") - - -# def testReturnValueAndActionsAndASTs(self): -# grammar = textwrap.dedent( -# r""" -# grammar T; -# options { -# language=Python3; -# output=AST; -# } -# s : e { self.capture("v={}, ".format($e.v)) } ; -# e returns [v, ignored] -# : e '*'^ b=e {$v *= $b.v;} -# | e '+'^ b=e {$v += $b.v;} -# | INT {$v = int($INT.text);} -# ; -# INT : '0'..'9'+ ; -# WS : (' '|'\n') {self.skip()} ; -# """) - -# tests = [ -# ("4", "v=4, 4"), -# ("1+2", "v=3, (+ 1 2)"), -# ] -# self.runTests(grammar, tests, "s") - - -if __name__ == '__main__': - unittest.main() diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/testbase.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/testbase.py deleted file mode 100644 index b2a9223c..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/testbase.py +++ /dev/null @@ -1,425 +0,0 @@ -from distutils.errors import * -import errno -import glob -import hashlib -import imp -import inspect -import os -import re -import shutil -import sys -import tempfile -import unittest - -import antlr3 - -def unlink(path): - try: - os.unlink(path) - except OSError as exc: - if exc.errno != errno.ENOENT: - raise - - -class GrammarCompileError(Exception): - """Grammar failed to compile.""" - pass - - -# At least on MacOSX tempdir (/tmp) is a symlink. It's sometimes dereferences, -# sometimes not, breaking the inspect.getmodule() function. -testbasedir = os.path.join( - os.path.realpath(tempfile.gettempdir()), - 'antlr3-test') - - -class BrokenTest(unittest.TestCase.failureException): - def __repr__(self): - name, reason = self.args - return '{}: {}: {} works now'.format( - (self.__class__.__name__, name, reason)) - - -def broken(reason, *exceptions): - '''Indicates a failing (or erroneous) test case fails that should succeed. - If the test fails with an exception, list the exception type in args''' - def wrapper(test_method): - def replacement(*args, **kwargs): - try: - test_method(*args, **kwargs) - except exceptions or unittest.TestCase.failureException: - pass - else: - raise BrokenTest(test_method.__name__, reason) - replacement.__doc__ = test_method.__doc__ - replacement.__name__ = 'XXX_' + test_method.__name__ - replacement.todo = reason - return replacement - return wrapper - - -dependencyCache = {} -compileErrorCache = {} - -# setup java CLASSPATH -if 'CLASSPATH' not in os.environ: - cp = [] - - baseDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')) - libDir = os.path.join(baseDir, 'lib') - - jar = os.path.join(libDir, 'ST-4.0.5.jar') - if not os.path.isfile(jar): - raise DistutilsFileError( - "Missing file '{}'. Grab it from a distribution package.".format(jar) - ) - cp.append(jar) - - jar = os.path.join(libDir, 'antlr-3.4.1-SNAPSHOT.jar') - if not os.path.isfile(jar): - raise DistutilsFileError( - "Missing file '{}'. 
Grab it from a distribution package.".format(jar) - ) - cp.append(jar) - - jar = os.path.join(libDir, 'antlr-runtime-3.4.jar') - if not os.path.isfile(jar): - raise DistutilsFileError( - "Missing file '{}'. Grab it from a distribution package.".format(jar) - ) - cp.append(jar) - - cp.append(os.path.join(baseDir, 'runtime', 'Python', 'build')) - - classpath = '-cp "' + ':'.join([os.path.abspath(p) for p in cp]) + '"' - -else: - classpath = '' - - -class ANTLRTest(unittest.TestCase): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.moduleName = os.path.splitext(os.path.basename(sys.modules[self.__module__].__file__))[0] - self.className = self.__class__.__name__ - self._baseDir = None - - self.lexerModule = None - self.parserModule = None - - self.grammarName = None - self.grammarType = None - - - @property - def baseDir(self): - if self._baseDir is None: - testName = 'unknownTest' - for frame in inspect.stack(): - code = frame[0].f_code - codeMod = inspect.getmodule(code) - if codeMod is None: - continue - - # skip frames not in requested module - if codeMod is not sys.modules[self.__module__]: - continue - - # skip some unwanted names - if code.co_name in ('nextToken', ''): - continue - - if code.co_name.startswith('test'): - testName = code.co_name - break - - self._baseDir = os.path.join( - testbasedir, - self.moduleName, self.className, testName) - if not os.path.isdir(self._baseDir): - os.makedirs(self._baseDir) - - return self._baseDir - - - def _invokeantlr(self, dir, file, options, javaOptions=''): - cmd = 'cd {}; java {} {} org.antlr.Tool -o . {} {} 2>&1'.format( - dir, javaOptions, classpath, options, file - ) - fp = os.popen(cmd) - output = '' - failed = False - for line in fp: - output += line - - if line.startswith('error('): - failed = True - - rc = fp.close() - if rc: - failed = True - - if failed: - raise GrammarCompileError( - "Failed to compile grammar '{}':\n{}\n\n{}".format(file, cmd, output) - ) - - - def compileGrammar(self, grammarName=None, options='', javaOptions=''): - if grammarName is None: - grammarName = self.moduleName + '.g' - - self._baseDir = os.path.join( - testbasedir, - self.moduleName) - if not os.path.isdir(self._baseDir): - os.makedirs(self._baseDir) - - if self.grammarName is None: - self.grammarName = os.path.splitext(grammarName)[0] - - grammarPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), grammarName) - - # get type and name from first grammar line - with open(grammarPath, 'r') as fp: - grammar = fp.read() - m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE) - self.assertIsNotNone(m, grammar) - self.grammarType = m.group(2) or 'combined' - - self.assertIn(self.grammarType, ('lexer', 'parser', 'tree', 'combined')) - - # don't try to rebuild grammar, if it already failed - if grammarName in compileErrorCache: - return - - try: - # # get dependencies from antlr - # if grammarName in dependencyCache: - # dependencies = dependencyCache[grammarName] - - # else: - # dependencies = [] - # cmd = ('cd %s; java %s %s org.antlr.Tool -o . 
-depend %s 2>&1' - # % (self.baseDir, javaOptions, classpath, grammarPath)) - - # output = "" - # failed = False - - # fp = os.popen(cmd) - # for line in fp: - # output += line - - # if line.startswith('error('): - # failed = True - # elif ':' in line: - # a, b = line.strip().split(':', 1) - # dependencies.append( - # (os.path.join(self.baseDir, a.strip()), - # [os.path.join(self.baseDir, b.strip())]) - # ) - - # rc = fp.close() - # if rc is not None: - # failed = True - - # if failed: - # raise GrammarCompileError( - # "antlr -depend failed with code {} on grammar '{}':\n\n{}\n{}".format( - # rc, grammarName, cmd, output) - # ) - - # # add dependencies to my .stg files - # templateDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'tool', 'src', 'main', 'resources', 'org', 'antlr', 'codegen', 'templates', 'Python')) - # templates = glob.glob(os.path.join(templateDir, '*.stg')) - - # for dst, src in dependencies: - # src.extend(templates) - - # dependencyCache[grammarName] = dependencies - - # rebuild = False - # for dest, sources in dependencies: - # if not os.path.isfile(dest): - # rebuild = True - # break - - # for source in sources: - # if os.path.getmtime(source) > os.path.getmtime(dest): - # rebuild = True - # break - - - # if rebuild: - # self._invokeantlr(self.baseDir, grammarPath, options, javaOptions) - - self._invokeantlr(self.baseDir, grammarPath, options, javaOptions) - - except: - # mark grammar as broken - compileErrorCache[grammarName] = True - raise - - - def lexerClass(self, base): - """Optionally build a subclass of generated lexer class""" - - return base - - - def parserClass(self, base): - """Optionally build a subclass of generated parser class""" - - return base - - - def walkerClass(self, base): - """Optionally build a subclass of generated walker class""" - - return base - - - def __load_module(self, name): - modFile, modPathname, modDescription = imp.find_module(name, [self.baseDir]) - - with modFile: - return imp.load_module(name, modFile, modPathname, modDescription) - - - def getLexer(self, *args, **kwargs): - """Build lexer instance. Arguments are passed to lexer.__init__().""" - - if self.grammarType == 'lexer': - self.lexerModule = self.__load_module(self.grammarName) - cls = getattr(self.lexerModule, self.grammarName) - else: - self.lexerModule = self.__load_module(self.grammarName + 'Lexer') - cls = getattr(self.lexerModule, self.grammarName + 'Lexer') - - cls = self.lexerClass(cls) - - lexer = cls(*args, **kwargs) - - return lexer - - - def getParser(self, *args, **kwargs): - """Build parser instance. Arguments are passed to parser.__init__().""" - - if self.grammarType == 'parser': - self.lexerModule = self.__load_module(self.grammarName) - cls = getattr(self.lexerModule, self.grammarName) - else: - self.parserModule = self.__load_module(self.grammarName + 'Parser') - cls = getattr(self.parserModule, self.grammarName + 'Parser') - cls = self.parserClass(cls) - - parser = cls(*args, **kwargs) - - return parser - - - def getWalker(self, *args, **kwargs): - """Build walker instance. Arguments are passed to walker.__init__().""" - - self.walkerModule = self.__load_module(self.grammarName + 'Walker') - cls = getattr(self.walkerModule, self.grammarName + 'Walker') - cls = self.walkerClass(cls) - - walker = cls(*args, **kwargs) - - return walker - - - def writeInlineGrammar(self, grammar): - # Create a unique ID for this test and use it as the grammar name, - # to avoid class name reuse. This kinda sucks. 
Need to find a way so - # tests can use the same grammar name without messing up the namespace. - # Well, first I should figure out what the exact problem is... - id = hashlib.md5(self.baseDir.encode('utf-8')).hexdigest()[-8:] - grammar = grammar.replace('$TP', 'TP' + id) - grammar = grammar.replace('$T', 'T' + id) - - # get type and name from first grammar line - m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE) - self.assertIsNotNone(m, grammar) - grammarType = m.group(2) or 'combined' - grammarName = m.group(3) - - self.assertIn(grammarType, ('lexer', 'parser', 'tree', 'combined')) - - grammarPath = os.path.join(self.baseDir, grammarName + '.g') - - # dump temp grammar file - with open(grammarPath, 'w') as fp: - fp.write(grammar) - - return grammarName, grammarPath, grammarType - - - def writeFile(self, name, contents): - testDir = os.path.dirname(os.path.abspath(__file__)) - path = os.path.join(self.baseDir, name) - - with open(path, 'w') as fp: - fp.write(contents) - - return path - - - def compileInlineGrammar(self, grammar, options='', javaOptions='', - returnModule=False): - # write grammar file - grammarName, grammarPath, grammarType = self.writeInlineGrammar(grammar) - - # compile it - self._invokeantlr( - os.path.dirname(grammarPath), - os.path.basename(grammarPath), - options, - javaOptions - ) - - if grammarType == 'combined': - lexerMod = self.__load_module(grammarName + 'Lexer') - parserMod = self.__load_module(grammarName + 'Parser') - if returnModule: - return lexerMod, parserMod - - lexerCls = getattr(lexerMod, grammarName + 'Lexer') - lexerCls = self.lexerClass(lexerCls) - parserCls = getattr(parserMod, grammarName + 'Parser') - parserCls = self.parserClass(parserCls) - - return lexerCls, parserCls - - if grammarType == 'lexer': - lexerMod = self.__load_module(grammarName) - if returnModule: - return lexerMod - - lexerCls = getattr(lexerMod, grammarName) - lexerCls = self.lexerClass(lexerCls) - - return lexerCls - - if grammarType == 'parser': - parserMod = self.__load_module(grammarName) - if returnModule: - return parserMod - - parserCls = getattr(parserMod, grammarName) - parserCls = self.parserClass(parserCls) - - return parserCls - - if grammarType == 'tree': - walkerMod = self.__load_module(grammarName) - if returnModule: - return walkerMod - - walkerCls = getattr(walkerMod, grammarName) - walkerCls = self.walkerClass(walkerCls) - - return walkerCls diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testantlr3.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testantlr3.py deleted file mode 100644 index d4c67647..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testantlr3.py +++ /dev/null @@ -1,7 +0,0 @@ - -import unittest - -import antlr3 - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testbase.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testbase.py deleted file mode 100644 index c39243e5..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testbase.py +++ /dev/null @@ -1,27 +0,0 @@ -import unittest - -class BrokenTest(unittest.TestCase.failureException): - def __repr__(self): - name, reason = self.args - return '{}: {}: {} works now'.format( - self.__class__.__name__, name, reason) - - -def broken(reason, *exceptions): - '''Indicates a failing (or erroneous) test case fails that should succeed. 
- If the test fails with an exception, list the exception type in args''' - def wrapper(test_method): - def replacement(*args, **kwargs): - try: - test_method(*args, **kwargs) - except exceptions or unittest.TestCase.failureException: - pass - else: - raise BrokenTest(test_method.__name__, reason) - replacement.__doc__ = test_method.__doc__ - replacement.__name__ = 'XXX_' + test_method.__name__ - replacement.todo = reason - return replacement - return wrapper - - diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testdfa.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testdfa.py deleted file mode 100644 index 45e7a50e..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testdfa.py +++ /dev/null @@ -1,63 +0,0 @@ - -import unittest - -import antlr3 - - -class TestDFA(unittest.TestCase): - """Test case for the DFA class.""" - - def setUp(self): - """Setup test fixure. - - We need a Recognizer in order to instantiate a DFA. - - """ - - class TRecognizer(antlr3.BaseRecognizer): - api_version = 'HEAD' - - self.recog = TRecognizer() - - - def testInit(self): - """DFA.__init__() - - Just a smoke test. - - """ - - dfa = antlr3.DFA( - self.recog, 1, - eot=[], - eof=[], - min=[], - max=[], - accept=[], - special=[], - transition=[] - ) - - - def testUnpack(self): - """DFA.unpack()""" - - self.assertEqual( - antlr3.DFA.unpack( - "\1\3\1\4\2\uffff\1\5\22\uffff\1\2\31\uffff\1\6\6\uffff" - "\32\6\4\uffff\1\6\1\uffff\32\6" - ), - [ 3, 4, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 6, -1, -1, -1, -1, -1, -1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, -1, -1, -1, -1, 6, -1, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6 - ] - ) - - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testexceptions.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testexceptions.py deleted file mode 100644 index 2cdef632..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testexceptions.py +++ /dev/null @@ -1,96 +0,0 @@ -import unittest -import antlr3 -import testbase - - -class TestRecognitionException(unittest.TestCase): - """Tests for the antlr3.RecognitionException class""" - - def testInitNone(self): - """RecognitionException.__init__()""" - - exc = antlr3.RecognitionException() - - -class TestEarlyExitException(unittest.TestCase): - """Tests for the antlr3.EarlyExitException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """EarlyExitException.__init__()""" - - exc = antlr3.EarlyExitException() - - -class TestFailedPredicateException(unittest.TestCase): - """Tests for the antlr3.FailedPredicateException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """FailedPredicateException.__init__()""" - - exc = antlr3.FailedPredicateException() - - -class TestMismatchedNotSetException(unittest.TestCase): - """Tests for the antlr3.MismatchedNotSetException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """MismatchedNotSetException.__init__()""" - - exc = antlr3.MismatchedNotSetException() - - -class TestMismatchedRangeException(unittest.TestCase): - """Tests for the antlr3.MismatchedRangeException class""" - - @testbase.broken("FIXME", Exception) - 
def testInitNone(self): - """MismatchedRangeException.__init__()""" - - exc = antlr3.MismatchedRangeException() - - -class TestMismatchedSetException(unittest.TestCase): - """Tests for the antlr3.MismatchedSetException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """MismatchedSetException.__init__()""" - - exc = antlr3.MismatchedSetException() - - -class TestMismatchedTokenException(unittest.TestCase): - """Tests for the antlr3.MismatchedTokenException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """MismatchedTokenException.__init__()""" - - exc = antlr3.MismatchedTokenException() - - -class TestMismatchedTreeNodeException(unittest.TestCase): - """Tests for the antlr3.MismatchedTreeNodeException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """MismatchedTreeNodeException.__init__()""" - - exc = antlr3.MismatchedTreeNodeException() - - -class TestNoViableAltException(unittest.TestCase): - """Tests for the antlr3.NoViableAltException class""" - - @testbase.broken("FIXME", Exception) - def testInitNone(self): - """NoViableAltException.__init__()""" - - exc = antlr3.NoViableAltException() - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testrecognizers.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testrecognizers.py deleted file mode 100644 index c30c06c9..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testrecognizers.py +++ /dev/null @@ -1,67 +0,0 @@ -import sys -import unittest - -import antlr3 - - -class TestBaseRecognizer(unittest.TestCase): - """Tests for BaseRecognizer class""" - - def testGetRuleInvocationStack(self): - """BaseRecognizer._getRuleInvocationStack()""" - - rules = antlr3.BaseRecognizer._getRuleInvocationStack(__name__) - self.assertEqual( - rules, - ['testGetRuleInvocationStack'] - ) - - -class TestTokenSource(unittest.TestCase): - """Testcase to the antlr3.TokenSource class""" - - - def testIteratorInterface(self): - """TokenSource.next()""" - - class TrivialToken(object): - def __init__(self, type): - self.type = type - - class TestSource(antlr3.TokenSource): - def __init__(self): - self.tokens = [ - TrivialToken(1), - TrivialToken(2), - TrivialToken(3), - TrivialToken(4), - TrivialToken(antlr3.EOF), - ] - - def nextToken(self): - return self.tokens.pop(0) - - - src = TestSource() - tokens = [] - for token in src: - tokens.append(token.type) - - self.assertEqual(tokens, [1, 2, 3, 4]) - - - -class TestLexer(unittest.TestCase): - - def testInit(self): - """Lexer.__init__()""" - - class TLexer(antlr3.Lexer): - api_version = 'HEAD' - - stream = antlr3.StringStream('foo') - TLexer(stream) - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.input1 b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.input1 deleted file mode 100644 index a907ec3f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.input1 +++ /dev/null @@ -1,2 +0,0 @@ -foo -bar \ No newline at end of file diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.input2 b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.input2 deleted file mode 100644 index 49dccf4f..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.input2 +++ /dev/null 
@@ -1,2 +0,0 @@ -foo -bär \ No newline at end of file diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.py deleted file mode 100644 index 957fffaf..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.py +++ /dev/null @@ -1,659 +0,0 @@ - -from io import StringIO -import os -import unittest -import antlr3 - - -class TestStringStream(unittest.TestCase): - """Test case for the StringStream class.""" - - def testSize(self): - """StringStream.size()""" - - stream = antlr3.StringStream('foo') - - self.assertEqual(stream.size(), 3) - - - def testIndex(self): - """StringStream.index()""" - - stream = antlr3.StringStream('foo') - - self.assertEqual(stream.index(), 0) - - - def testConsume(self): - """StringStream.consume()""" - - stream = antlr3.StringStream('foo\nbar') - - stream.consume() # f - self.assertEqual(stream.index(), 1) - self.assertEqual(stream.charPositionInLine, 1) - self.assertEqual(stream.line, 1) - - stream.consume() # o - self.assertEqual(stream.index(), 2) - self.assertEqual(stream.charPositionInLine, 2) - self.assertEqual(stream.line, 1) - - stream.consume() # o - self.assertEqual(stream.index(), 3) - self.assertEqual(stream.charPositionInLine, 3) - self.assertEqual(stream.line, 1) - - stream.consume() # \n - self.assertEqual(stream.index(), 4) - self.assertEqual(stream.charPositionInLine, 0) - self.assertEqual(stream.line, 2) - - stream.consume() # b - self.assertEqual(stream.index(), 5) - self.assertEqual(stream.charPositionInLine, 1) - self.assertEqual(stream.line, 2) - - stream.consume() # a - self.assertEqual(stream.index(), 6) - self.assertEqual(stream.charPositionInLine, 2) - self.assertEqual(stream.line, 2) - - stream.consume() # r - self.assertEqual(stream.index(), 7) - self.assertEqual(stream.charPositionInLine, 3) - self.assertEqual(stream.line, 2) - - stream.consume() # EOF - self.assertEqual(stream.index(), 7) - self.assertEqual(stream.charPositionInLine, 3) - self.assertEqual(stream.line, 2) - - stream.consume() # EOF - self.assertEqual(stream.index(), 7) - self.assertEqual(stream.charPositionInLine, 3) - self.assertEqual(stream.line, 2) - - - def testReset(self): - """StringStream.reset()""" - - stream = antlr3.StringStream('foo') - - stream.consume() - stream.consume() - - stream.reset() - self.assertEqual(stream.index(), 0) - self.assertEqual(stream.line, 1) - self.assertEqual(stream.charPositionInLine, 0) - self.assertEqual(stream.LT(1), 'f') - - - def testLA(self): - """StringStream.LA()""" - - stream = antlr3.StringStream('foo') - - self.assertEqual(stream.LT(1), 'f') - self.assertEqual(stream.LT(2), 'o') - self.assertEqual(stream.LT(3), 'o') - - stream.consume() - stream.consume() - - self.assertEqual(stream.LT(1), 'o') - self.assertEqual(stream.LT(2), antlr3.EOF) - self.assertEqual(stream.LT(3), antlr3.EOF) - - - def testSubstring(self): - """StringStream.substring()""" - - stream = antlr3.StringStream('foobar') - - self.assertEqual(stream.substring(0, 0), 'f') - self.assertEqual(stream.substring(0, 1), 'fo') - self.assertEqual(stream.substring(0, 5), 'foobar') - self.assertEqual(stream.substring(3, 5), 'bar') - - - def testSeekForward(self): - """StringStream.seek(): forward""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - - self.assertEqual(stream.index(), 4) - self.assertEqual(stream.line, 2) - self.assertEqual(stream.charPositionInLine, 0) - self.assertEqual(stream.LT(1), 'b') - - -## # not yet implemented 
-## def testSeekBackward(self): -## """StringStream.seek(): backward""" - -## stream = antlr3.StringStream('foo\nbar') - -## stream.seek(4) -## stream.seek(1) - -## self.assertEqual(stream.index(), 1) -## self.assertEqual(stream.line, 1) -## self.assertEqual(stream.charPositionInLine, 1) -## self.assertEqual(stream.LA(1), 'o') - - - def testMark(self): - """StringStream.mark()""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - - marker = stream.mark() - self.assertEqual(marker, 1) - self.assertEqual(stream.markDepth, 1) - - stream.consume() - marker = stream.mark() - self.assertEqual(marker, 2) - self.assertEqual(stream.markDepth, 2) - - - def testReleaseLast(self): - """StringStream.release(): last marker""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.release() - self.assertEqual(stream.markDepth, 1) - - # release same marker again, nothing has changed - stream.release() - self.assertEqual(stream.markDepth, 1) - - - def testReleaseNested(self): - """StringStream.release(): nested""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.release(marker2) - self.assertEqual(stream.markDepth, 1) - - - def testRewindLast(self): - """StringStream.rewind(): last marker""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - - marker = stream.mark() - stream.consume() - stream.consume() - - stream.rewind() - self.assertEqual(stream.markDepth, 0) - self.assertEqual(stream.index(), 4) - self.assertEqual(stream.line, 2) - self.assertEqual(stream.charPositionInLine, 0) - self.assertEqual(stream.LT(1), 'b') - - - def testRewindNested(self): - """StringStream.rewind(): nested""" - - stream = antlr3.StringStream('foo\nbar') - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.assertEqual(stream.markDepth, 1) - self.assertEqual(stream.index(), 5) - self.assertEqual(stream.line, 2) - self.assertEqual(stream.charPositionInLine, 1) - self.assertEqual(stream.LT(1), 'a') - - -class TestFileStream(unittest.TestCase): - """Test case for the FileStream class.""" - - - def testNoEncoding(self): - path = os.path.join(os.path.dirname(__file__), 'teststreams.input1') - - stream = antlr3.FileStream(path) - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.assertEqual(stream.markDepth, 1) - self.assertEqual(stream.index(), 5) - self.assertEqual(stream.line, 2) - self.assertEqual(stream.charPositionInLine, 1) - self.assertEqual(stream.LT(1), 'a') - self.assertEqual(stream.LA(1), ord('a')) - - - def testEncoded(self): - path = os.path.join(os.path.dirname(__file__), 'teststreams.input2') - - stream = antlr3.FileStream(path) - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.assertEqual(stream.markDepth, 1) - self.assertEqual(stream.index(), 5) - self.assertEqual(stream.line, 2) - self.assertEqual(stream.charPositionInLine, 1) - self.assertEqual(stream.LT(1), 'ä') - self.assertEqual(stream.LA(1), ord('ä')) - - - -class TestInputStream(unittest.TestCase): - """Test case for the InputStream 
class.""" - - def testNoEncoding(self): - file = StringIO('foo\nbar') - - stream = antlr3.InputStream(file) - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.assertEqual(stream.markDepth, 1) - self.assertEqual(stream.index(), 5) - self.assertEqual(stream.line, 2) - self.assertEqual(stream.charPositionInLine, 1) - self.assertEqual(stream.LT(1), 'a') - self.assertEqual(stream.LA(1), ord('a')) - - - def testEncoded(self): - file = StringIO('foo\nbär') - - stream = antlr3.InputStream(file) - - stream.seek(4) - marker1 = stream.mark() - - stream.consume() - marker2 = stream.mark() - - stream.consume() - marker3 = stream.mark() - - stream.rewind(marker2) - self.assertEqual(stream.markDepth, 1) - self.assertEqual(stream.index(), 5) - self.assertEqual(stream.line, 2) - self.assertEqual(stream.charPositionInLine, 1) - self.assertEqual(stream.LT(1), 'ä') - self.assertEqual(stream.LA(1), ord('ä')) - - -class TestCommonTokenStream(unittest.TestCase): - """Test case for the StringStream class.""" - - def setUp(self): - """Setup test fixure - - The constructor of CommonTokenStream needs a token source. This - is a simple mock class providing just the nextToken() method. - - """ - - class MockSource(object): - def __init__(self): - self.tokens = [] - - def makeEOFToken(self): - return antlr3.CommonToken(type=antlr3.EOF) - - def nextToken(self): - if self.tokens: - return self.tokens.pop(0) - return None - - self.source = MockSource() - - - def testInit(self): - """CommonTokenStream.__init__()""" - - stream = antlr3.CommonTokenStream(self.source) - self.assertEqual(stream.index(), -1) - - - def testSetTokenSource(self): - """CommonTokenStream.setTokenSource()""" - - stream = antlr3.CommonTokenStream(None) - stream.setTokenSource(self.source) - self.assertEqual(stream.index(), -1) - self.assertEqual(stream.channel, antlr3.DEFAULT_CHANNEL) - - - def testLTEmptySource(self): - """CommonTokenStream.LT(): EOF (empty source)""" - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(1) - self.assertEqual(lt1.type, antlr3.EOF) - - - def testLT1(self): - """CommonTokenStream.LT(1)""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(1) - self.assertEqual(lt1.type, 12) - - - def testLT1WithHidden(self): - """CommonTokenStream.LT(1): with hidden tokens""" - - self.source.tokens.append( - antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(1) - self.assertEqual(lt1.type, 13) - - - def testLT2BeyondEnd(self): - """CommonTokenStream.LT(2): beyond end""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13, channel=antlr3.HIDDEN_CHANNEL) - ) - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(2) - self.assertEqual(lt1.type, antlr3.EOF) - - - # not yet implemented - def testLTNegative(self): - """CommonTokenStream.LT(-1): look back""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - stream.fillBuffer() - stream.consume() - - lt1 = stream.LT(-1) - self.assertEqual(lt1.type, 12) - - - def testLB1(self): - """CommonTokenStream.LB(1)""" - 
- self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - stream.fillBuffer() - stream.consume() - - self.assertEqual(stream.LB(1).type, 12) - - - def testLTZero(self): - """CommonTokenStream.LT(0)""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - - lt1 = stream.LT(0) - self.assertIsNone(lt1) - - - def testLBBeyondBegin(self): - """CommonTokenStream.LB(-1): beyond begin""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - stream = antlr3.CommonTokenStream(self.source) - self.assertIsNone(stream.LB(1)) - - stream.consume() - stream.consume() - self.assertIsNone(stream.LB(3)) - - - def testFillBuffer(self): - """CommonTokenStream.fillBuffer()""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=14) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=antlr3.EOF) - ) - - stream = antlr3.CommonTokenStream(self.source) - stream.fillBuffer() - - self.assertEqual(len(stream.tokens), 3) - self.assertEqual(stream.tokens[0].type, 12) - self.assertEqual(stream.tokens[1].type, 13) - self.assertEqual(stream.tokens[2].type, 14) - - - def testConsume(self): - """CommonTokenStream.consume()""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=antlr3.EOF) - ) - - stream = antlr3.CommonTokenStream(self.source) - self.assertEqual(stream.LA(1), 12) - - stream.consume() - self.assertEqual(stream.LA(1), 13) - - stream.consume() - self.assertEqual(stream.LA(1), antlr3.EOF) - - stream.consume() - self.assertEqual(stream.LA(1), antlr3.EOF) - - - def testSeek(self): - """CommonTokenStream.seek()""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=antlr3.EOF) - ) - - stream = antlr3.CommonTokenStream(self.source) - self.assertEqual(stream.LA(1), 12) - - stream.seek(2) - self.assertEqual(stream.LA(1), antlr3.EOF) - - stream.seek(0) - self.assertEqual(stream.LA(1), 12) - - - def testMarkRewind(self): - """CommonTokenStream.mark()/rewind()""" - - self.source.tokens.append( - antlr3.CommonToken(type=12) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13) - ) - - self.source.tokens.append( - antlr3.CommonToken(type=antlr3.EOF) - ) - - stream = antlr3.CommonTokenStream(self.source) - stream.fillBuffer() - - stream.consume() - marker = stream.mark() - - stream.consume() - stream.rewind(marker) - - self.assertEqual(stream.LA(1), 13) - - - def testToString(self): - """CommonTokenStream.toString()""" - - self.source.tokens.append( - antlr3.CommonToken(type=12, text="foo") - ) - - self.source.tokens.append( - antlr3.CommonToken(type=13, text="bar") - ) - - self.source.tokens.append( - antlr3.CommonToken(type=14, text="gnurz") - ) - - self.source.tokens.append( - 
antlr3.CommonToken(type=15, text="blarz") - ) - - stream = antlr3.CommonTokenStream(self.source) - - self.assertEqual(stream.toString(), "foobargnurzblarz") - self.assertEqual(stream.toString(1, 2), "bargnurz") - self.assertEqual(stream.toString(stream.tokens[1], stream.tokens[-2]), "bargnurz") - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testtree.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testtree.py deleted file mode 100644 index 83c38768..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testtree.py +++ /dev/null @@ -1,1334 +0,0 @@ - -from io import StringIO -import os -import unittest - -from antlr3.tree import (CommonTreeNodeStream, CommonTree, CommonTreeAdaptor, - TreeParser, TreeVisitor, TreeIterator) -from antlr3 import CommonToken, UP, DOWN, EOF -from antlr3.treewizard import TreeWizard - -class TestTreeNodeStream(unittest.TestCase): - """Test case for the TreeNodeStream class.""" - - def setUp(self): - self.adaptor = CommonTreeAdaptor() - - - def newStream(self, t): - """Build new stream; let's us override to test other streams.""" - return CommonTreeNodeStream(t) - - - def testSingleNode(self): - t = CommonTree(CommonToken(101)) - - stream = self.newStream(t) - expecting = "101" - found = self.toNodesOnlyString(stream) - self.assertEqual(expecting, found) - - expecting = "101" - found = str(stream) - self.assertEqual(expecting, found) - - - def testTwoChildrenOfNilRoot(self): - class V(CommonTree): - def __init__(self, token=None, ttype=None): - if token: - self.token = token - - elif ttype: - self.token = CommonToken(type=ttype) - - - def __str__(self): - if self.token: - txt = self.token.text - else: - txt = "" - - txt += "" - return txt - - root_0 = self.adaptor.nil() - t = V(ttype=101) - u = V(token=CommonToken(type=102, text="102")) - self.adaptor.addChild(root_0, t) - self.adaptor.addChild(root_0, u) - self.assertIsNone(root_0.parent) - self.assertEqual(-1, root_0.childIndex) - self.assertEqual(0, t.childIndex) - self.assertEqual(1, u.childIndex) - - - def test4Nodes(self): - # ^(101 ^(102 103) 104) - t = CommonTree(CommonToken(101)) - t.addChild(CommonTree(CommonToken(102))) - t.getChild(0).addChild(CommonTree(CommonToken(103))) - t.addChild(CommonTree(CommonToken(104))) - - stream = self.newStream(t) - expecting = "101 102 103 104" - found = self.toNodesOnlyString(stream) - self.assertEqual(expecting, found) - - expecting = "101 2 102 2 103 3 104 3" - found = str(stream) - self.assertEqual(expecting, found) - - - def testList(self): - root = CommonTree(None) - - t = CommonTree(CommonToken(101)) - t.addChild(CommonTree(CommonToken(102))) - t.getChild(0).addChild(CommonTree(CommonToken(103))) - t.addChild(CommonTree(CommonToken(104))) - - u = CommonTree(CommonToken(105)) - - root.addChild(t) - root.addChild(u) - - stream = CommonTreeNodeStream(root) - expecting = "101 102 103 104 105" - found = self.toNodesOnlyString(stream) - self.assertEqual(expecting, found) - - expecting = "101 2 102 2 103 3 104 3 105" - found = str(stream) - self.assertEqual(expecting, found) - - - def testFlatList(self): - root = CommonTree(None) - - root.addChild(CommonTree(CommonToken(101))) - root.addChild(CommonTree(CommonToken(102))) - root.addChild(CommonTree(CommonToken(103))) - - stream = CommonTreeNodeStream(root) - expecting = "101 102 103" - found = self.toNodesOnlyString(stream) - self.assertEqual(expecting, found) - - expecting = "101 
102 103" - found = str(stream) - self.assertEqual(expecting, found) - - - def testListWithOneNode(self): - root = CommonTree(None) - - root.addChild(CommonTree(CommonToken(101))) - - stream = CommonTreeNodeStream(root) - expecting = "101" - found = self.toNodesOnlyString(stream) - self.assertEqual(expecting, found) - - expecting = "101" - found = str(stream) - self.assertEqual(expecting, found) - - - def testAoverB(self): - t = CommonTree(CommonToken(101)) - t.addChild(CommonTree(CommonToken(102))) - - stream = self.newStream(t) - expecting = "101 102" - found = self.toNodesOnlyString(stream) - self.assertEqual(expecting, found) - - expecting = "101 2 102 3" - found = str(stream) - self.assertEqual(expecting, found) - - - def testLT(self): - # ^(101 ^(102 103) 104) - t = CommonTree(CommonToken(101)) - t.addChild(CommonTree(CommonToken(102))) - t.getChild(0).addChild(CommonTree(CommonToken(103))) - t.addChild(CommonTree(CommonToken(104))) - - stream = self.newStream(t) - self.assertEqual(101, stream.LT(1).getType()) - self.assertEqual(DOWN, stream.LT(2).getType()) - self.assertEqual(102, stream.LT(3).getType()) - self.assertEqual(DOWN, stream.LT(4).getType()) - self.assertEqual(103, stream.LT(5).getType()) - self.assertEqual(UP, stream.LT(6).getType()) - self.assertEqual(104, stream.LT(7).getType()) - self.assertEqual(UP, stream.LT(8).getType()) - self.assertEqual(EOF, stream.LT(9).getType()) - # check way ahead - self.assertEqual(EOF, stream.LT(100).getType()) - - - def testMarkRewindEntire(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - m = stream.mark() # MARK - for _ in range(13): # consume til end - stream.LT(1) - stream.consume() - - self.assertEqual(EOF, stream.LT(1).getType()) - self.assertEqual(UP, stream.LT(-1).getType()) #TODO: remove? - stream.rewind(m) # REWIND - - # consume til end again :) - for _ in range(13): # consume til end - stream.LT(1) - stream.consume() - - self.assertEqual(EOF, stream.LT(1).getType()) - self.assertEqual(UP, stream.LT(-1).getType()) #TODO: remove? 
- - - def testMarkRewindInMiddle(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - for _ in range(7): # consume til middle - #System.out.println(tream.LT(1).getType()) - stream.consume() - - self.assertEqual(107, stream.LT(1).getType()) - m = stream.mark() # MARK - stream.consume() # consume 107 - stream.consume() # consume UP - stream.consume() # consume UP - stream.consume() # consume 104 - stream.rewind(m) # REWIND - - self.assertEqual(107, stream.LT(1).getType()) - stream.consume() - self.assertEqual(UP, stream.LT(1).getType()) - stream.consume() - self.assertEqual(UP, stream.LT(1).getType()) - stream.consume() - self.assertEqual(104, stream.LT(1).getType()) - stream.consume() - # now we're past rewind position - self.assertEqual(105, stream.LT(1).getType()) - stream.consume() - self.assertEqual(UP, stream.LT(1).getType()) - stream.consume() - self.assertEqual(EOF, stream.LT(1).getType()) - self.assertEqual(UP, stream.LT(-1).getType()) - - - def testMarkRewindNested(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - m = stream.mark() # MARK at start - stream.consume() # consume 101 - stream.consume() # consume DN - m2 = stream.mark() # MARK on 102 - stream.consume() # consume 102 - stream.consume() # consume DN - stream.consume() # consume 103 - stream.consume() # consume 106 - stream.rewind(m2) # REWIND to 102 - self.assertEqual(102, stream.LT(1).getType()) - stream.consume() - self.assertEqual(DOWN, stream.LT(1).getType()) - stream.consume() - # stop at 103 and rewind to start - stream.rewind(m) # REWIND to 101 - self.assertEqual(101, stream.LT(1).getType()) - stream.consume() - self.assertEqual(DOWN, stream.LT(1).getType()) - stream.consume() - self.assertEqual(102, stream.LT(1).getType()) - stream.consume() - self.assertEqual(DOWN, stream.LT(1).getType()) - - - def testSeek(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - stream.consume() # consume 101 - stream.consume() # consume DN - stream.consume() # consume 102 - stream.seek(7) # seek to 107 - self.assertEqual(107, stream.LT(1).getType()) - stream.consume() # consume 107 - stream.consume() # consume UP - stream.consume() # consume UP - 
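-        # the first UP closes the ^(106 107) subtree, the second closes ^(102 ...); 104 is next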
self.assertEqual(104, stream.LT(1).getType()) - - - def testSeekFromStart(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - stream.seek(7) # seek to 107 - self.assertEqual(107, stream.LT(1).getType()) - stream.consume() # consume 107 - stream.consume() # consume UP - stream.consume() # consume UP - self.assertEqual(104, stream.LT(1).getType()) - - - def testReset(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - # stream has 7 real + 6 nav nodes - # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - v1 = self.toNodesOnlyString(stream) # scan all - stream.reset() - v2 = self.toNodesOnlyString(stream) # scan all - self.assertEqual(v1, v2) - - - def testIterator(self): - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - stream = CommonTreeNodeStream(r0) - - expecting = [ - 101, DOWN, 102, DOWN, 103, 106, DOWN, 107, UP, UP, 104, 105, UP] - found = [t.type for t in stream] - self.assertEqual(expecting, found) - - - def toNodesOnlyString(self, nodes): - buf = [] - for i in range(nodes.size()): - t = nodes.LT(i + 1) - type = nodes.getTreeAdaptor().getType(t) - if type not in {DOWN, UP}: - buf.append(str(type)) - - return ' '.join(buf) - - -class TestCommonTreeNodeStream(unittest.TestCase): - """Test case for the CommonTreeNodeStream class.""" - - def testPushPop(self): - # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109) - # stream has 9 real + 8 nav nodes - # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r1.addChild(CommonTree(CommonToken(103))) - r0.addChild(r1) - r2 = CommonTree(CommonToken(104)) - r2.addChild(CommonTree(CommonToken(105))) - r0.addChild(r2) - r3 = CommonTree(CommonToken(106)) - r3.addChild(CommonTree(CommonToken(107))) - r0.addChild(r3) - r0.addChild(CommonTree(CommonToken(108))) - r0.addChild(CommonTree(CommonToken(109))) - - stream = CommonTreeNodeStream(r0) - expecting = "101 2 102 2 103 3 104 2 105 3 106 2 107 3 108 109 3" - found = str(stream) - self.assertEqual(expecting, found) - - # Assume we want to hit node 107 and then "call 102" then return - - indexOf102 = 2 - indexOf107 = 12 - for _ in range(indexOf107):# consume til 107 node - stream.consume() - - # CALL 102 - self.assertEqual(107, stream.LT(1).getType()) - stream.push(indexOf102) - self.assertEqual(102, stream.LT(1).getType()) - stream.consume() # consume 102 - self.assertEqual(DOWN, stream.LT(1).getType()) - 
stream.consume() # consume DN - self.assertEqual(103, stream.LT(1).getType()) - stream.consume() # consume 103 - self.assertEqual(UP, stream.LT(1).getType()) - # RETURN - stream.pop() - self.assertEqual(107, stream.LT(1).getType()) - - - def testNestedPushPop(self): - # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109) - # stream has 9 real + 8 nav nodes - # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r1.addChild(CommonTree(CommonToken(103))) - r0.addChild(r1) - r2 = CommonTree(CommonToken(104)) - r2.addChild(CommonTree(CommonToken(105))) - r0.addChild(r2) - r3 = CommonTree(CommonToken(106)) - r3.addChild(CommonTree(CommonToken(107))) - r0.addChild(r3) - r0.addChild(CommonTree(CommonToken(108))) - r0.addChild(CommonTree(CommonToken(109))) - - stream = CommonTreeNodeStream(r0) - - # Assume we want to hit node 107 and then "call 102", which - # calls 104, then return - - indexOf102 = 2 - indexOf107 = 12 - for _ in range(indexOf107): # consume til 107 node - stream.consume() - - self.assertEqual(107, stream.LT(1).getType()) - # CALL 102 - stream.push(indexOf102) - self.assertEqual(102, stream.LT(1).getType()) - stream.consume() # consume 102 - self.assertEqual(DOWN, stream.LT(1).getType()) - stream.consume() # consume DN - self.assertEqual(103, stream.LT(1).getType()) - stream.consume() # consume 103 - - # CALL 104 - indexOf104 = 6 - stream.push(indexOf104) - self.assertEqual(104, stream.LT(1).getType()) - stream.consume() # consume 102 - self.assertEqual(DOWN, stream.LT(1).getType()) - stream.consume() # consume DN - self.assertEqual(105, stream.LT(1).getType()) - stream.consume() # consume 103 - self.assertEqual(UP, stream.LT(1).getType()) - # RETURN (to UP node in 102 subtree) - stream.pop() - - self.assertEqual(UP, stream.LT(1).getType()) - # RETURN (to empty stack) - stream.pop() - self.assertEqual(107, stream.LT(1).getType()) - - - def testPushPopFromEOF(self): - # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109) - # stream has 9 real + 8 nav nodes - # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r1.addChild(CommonTree(CommonToken(103))) - r0.addChild(r1) - r2 = CommonTree(CommonToken(104)) - r2.addChild(CommonTree(CommonToken(105))) - r0.addChild(r2) - r3 = CommonTree(CommonToken(106)) - r3.addChild(CommonTree(CommonToken(107))) - r0.addChild(r3) - r0.addChild(CommonTree(CommonToken(108))) - r0.addChild(CommonTree(CommonToken(109))) - - stream = CommonTreeNodeStream(r0) - - while stream.LA(1) != EOF: - stream.consume() - - indexOf102 = 2 - indexOf104 = 6 - self.assertEqual(EOF, stream.LT(1).getType()) - - # CALL 102 - stream.push(indexOf102) - self.assertEqual(102, stream.LT(1).getType()) - stream.consume() # consume 102 - self.assertEqual(DOWN, stream.LT(1).getType()) - stream.consume() # consume DN - self.assertEqual(103, stream.LT(1).getType()) - stream.consume() # consume 103 - self.assertEqual(UP, stream.LT(1).getType()) - # RETURN (to empty stack) - stream.pop() - self.assertEqual(EOF, stream.LT(1).getType()) - - # CALL 104 - stream.push(indexOf104) - self.assertEqual(104, stream.LT(1).getType()) - stream.consume() # consume 102 - self.assertEqual(DOWN, stream.LT(1).getType()) - stream.consume() # consume DN - self.assertEqual(105, stream.LT(1).getType()) - stream.consume() # consume 103 - self.assertEqual(UP, stream.LT(1).getType()) - # RETURN (to empty stack) - 
stream.pop() - self.assertEqual(EOF, stream.LT(1).getType()) - - -class TestCommonTree(unittest.TestCase): - """Test case for the CommonTree class.""" - - def setUp(self): - """Setup test fixure""" - - self.adaptor = CommonTreeAdaptor() - - - def testSingleNode(self): - t = CommonTree(CommonToken(101)) - self.assertIsNone(t.parent) - self.assertEqual(-1, t.childIndex) - - - def test4Nodes(self): - # ^(101 ^(102 103) 104) - r0 = CommonTree(CommonToken(101)) - r0.addChild(CommonTree(CommonToken(102))) - r0.getChild(0).addChild(CommonTree(CommonToken(103))) - r0.addChild(CommonTree(CommonToken(104))) - - self.assertIsNone(r0.parent) - self.assertEqual(-1, r0.childIndex) - - - def testList(self): - # ^(nil 101 102 103) - r0 = CommonTree(None) - c0=CommonTree(CommonToken(101)) - r0.addChild(c0) - c1=CommonTree(CommonToken(102)) - r0.addChild(c1) - c2=CommonTree(CommonToken(103)) - r0.addChild(c2) - - self.assertIsNone(r0.parent) - self.assertEqual(-1, r0.childIndex) - self.assertEqual(r0, c0.parent) - self.assertEqual(0, c0.childIndex) - self.assertEqual(r0, c1.parent) - self.assertEqual(1, c1.childIndex) - self.assertEqual(r0, c2.parent) - self.assertEqual(2, c2.childIndex) - - - def testList2(self): - # Add child ^(nil 101 102 103) to root 5 - # should pull 101 102 103 directly to become 5's child list - root = CommonTree(CommonToken(5)) - - # child tree - r0 = CommonTree(None) - c0=CommonTree(CommonToken(101)) - r0.addChild(c0) - c1=CommonTree(CommonToken(102)) - r0.addChild(c1) - c2=CommonTree(CommonToken(103)) - r0.addChild(c2) - - root.addChild(r0) - - self.assertIsNone(root.parent) - self.assertEqual(-1, root.childIndex) - # check children of root all point at root - self.assertEqual(root, c0.parent) - self.assertEqual(0, c0.childIndex) - self.assertEqual(root, c0.parent) - self.assertEqual(1, c1.childIndex) - self.assertEqual(root, c0.parent) - self.assertEqual(2, c2.childIndex) - - - def testAddListToExistChildren(self): - # Add child ^(nil 101 102 103) to root ^(5 6) - # should add 101 102 103 to end of 5's child list - root = CommonTree(CommonToken(5)) - root.addChild(CommonTree(CommonToken(6))) - - # child tree - r0 = CommonTree(None) - c0=CommonTree(CommonToken(101)) - r0.addChild(c0) - c1=CommonTree(CommonToken(102)) - r0.addChild(c1) - c2=CommonTree(CommonToken(103)) - r0.addChild(c2) - - root.addChild(r0) - - self.assertIsNone(root.parent) - self.assertEqual(-1, root.childIndex) - # check children of root all point at root - self.assertEqual(root, c0.parent) - self.assertEqual(1, c0.childIndex) - self.assertEqual(root, c0.parent) - self.assertEqual(2, c1.childIndex) - self.assertEqual(root, c0.parent) - self.assertEqual(3, c2.childIndex) - - - def testDupTree(self): - # ^(101 ^(102 103 ^(106 107) ) 104 105) - r0 = CommonTree(CommonToken(101)) - r1 = CommonTree(CommonToken(102)) - r0.addChild(r1) - r1.addChild(CommonTree(CommonToken(103))) - r2 = CommonTree(CommonToken(106)) - r2.addChild(CommonTree(CommonToken(107))) - r1.addChild(r2) - r0.addChild(CommonTree(CommonToken(104))) - r0.addChild(CommonTree(CommonToken(105))) - - dup = self.adaptor.dupTree(r0) - - self.assertIsNone(dup.parent) - self.assertEqual(-1, dup.childIndex) - dup.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot(self): - # 5 becomes root of ^(nil 101 102 103) - newRoot = CommonTree(CommonToken(5)) - - oldRoot = CommonTree(None) - oldRoot.addChild(CommonTree(CommonToken(101))) - oldRoot.addChild(CommonTree(CommonToken(102))) - oldRoot.addChild(CommonTree(CommonToken(103))) - - 
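-        # becomeRoot() discards the nil oldRoot and re-hangs its children 101 102 103 under 5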
self.adaptor.becomeRoot(newRoot, oldRoot) - newRoot.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot2(self): - # 5 becomes root of ^(101 102 103) - newRoot = CommonTree(CommonToken(5)) - - oldRoot = CommonTree(CommonToken(101)) - oldRoot.addChild(CommonTree(CommonToken(102))) - oldRoot.addChild(CommonTree(CommonToken(103))) - - self.adaptor.becomeRoot(newRoot, oldRoot) - newRoot.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot3(self): - # ^(nil 5) becomes root of ^(nil 101 102 103) - newRoot = CommonTree(None) - newRoot.addChild(CommonTree(CommonToken(5))) - - oldRoot = CommonTree(None) - oldRoot.addChild(CommonTree(CommonToken(101))) - oldRoot.addChild(CommonTree(CommonToken(102))) - oldRoot.addChild(CommonTree(CommonToken(103))) - - self.adaptor.becomeRoot(newRoot, oldRoot) - newRoot.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot5(self): - # ^(nil 5) becomes root of ^(101 102 103) - newRoot = CommonTree(None) - newRoot.addChild(CommonTree(CommonToken(5))) - - oldRoot = CommonTree(CommonToken(101)) - oldRoot.addChild(CommonTree(CommonToken(102))) - oldRoot.addChild(CommonTree(CommonToken(103))) - - self.adaptor.becomeRoot(newRoot, oldRoot) - newRoot.sanityCheckParentAndChildIndexes() - - - def testBecomeRoot6(self): - # emulates construction of ^(5 6) - root_0 = self.adaptor.nil() - root_1 = self.adaptor.nil() - root_1 = self.adaptor.becomeRoot(CommonTree(CommonToken(5)), root_1) - - self.adaptor.addChild(root_1, CommonTree(CommonToken(6))) - - self.adaptor.addChild(root_0, root_1) - - root_0.sanityCheckParentAndChildIndexes() - - - # Test replaceChildren - - def testReplaceWithNoChildren(self): - t = CommonTree(CommonToken(101)) - newChild = CommonTree(CommonToken(5)) - error = False - self.assertRaises(IndexError, t.replaceChildren, 0, 0, newChild) - - - def testReplaceWithOneChildren(self): - # assume token type 99 and use text - t = CommonTree(CommonToken(99, text="a")) - c0 = CommonTree(CommonToken(99, text="b")) - t.addChild(c0) - - newChild = CommonTree(CommonToken(99, text="c")) - t.replaceChildren(0, 0, newChild) - expecting = "(a c)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceInMiddle(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) # index 1 - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - t.replaceChildren(1, 1, newChild) - expecting = "(a b x d)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceAtLeft(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) # index 0 - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - t.replaceChildren(0, 0, newChild) - expecting = "(a x c d)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceAtRight(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) # index 2 - - newChild = CommonTree(CommonToken(99, text="x")) - t.replaceChildren(2, 2, newChild) - expecting = "(a b c x)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - 
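
The replaceChildren() tests in this block all follow one recipe: build a flat (a b c d) tree from type-99 tokens, splice a replacement into the child list, then check both the toStringTree() rendering and the parent/childIndex bookkeeping. A condensed sketch of the single-node case (mirroring testReplaceInMiddle above), assuming the antlr3 runtime from this tree is importable:

    from antlr3 import CommonToken
    from antlr3.tree import CommonTree

    # (a b c d): token type 99 with explicit text, as in the tests.
    t = CommonTree(CommonToken(99, text="a"))
    for text in ("b", "c", "d"):
        t.addChild(CommonTree(CommonToken(99, text=text)))

    # Replace the single child at index 1 ("c") with "x".
    t.replaceChildren(1, 1, CommonTree(CommonToken(99, text="x")))
    assert t.toStringTree() == "(a b x d)"
    t.sanityCheckParentAndChildIndexes()  # parent links and childIndex values stay consistent

The two-node variants that follow differ only in passing a nil-rooted child list (adaptor.nil() with two children) instead of a single replacement node.
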
def testReplaceOneWithTwoAtLeft(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChildren = self.adaptor.nil() - newChildren.addChild(CommonTree(CommonToken(99, text="x"))) - newChildren.addChild(CommonTree(CommonToken(99, text="y"))) - - t.replaceChildren(0, 0, newChildren) - expecting = "(a x y c d)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceOneWithTwoAtRight(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChildren = self.adaptor.nil() - newChildren.addChild(CommonTree(CommonToken(99, text="x"))) - newChildren.addChild(CommonTree(CommonToken(99, text="y"))) - - t.replaceChildren(2, 2, newChildren) - expecting = "(a b c x y)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceOneWithTwoInMiddle(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChildren = self.adaptor.nil() - newChildren.addChild(CommonTree(CommonToken(99, text="x"))) - newChildren.addChild(CommonTree(CommonToken(99, text="y"))) - - t.replaceChildren(1, 1, newChildren) - expecting = "(a b x y d)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceTwoWithOneAtLeft(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - - t.replaceChildren(0, 1, newChild) - expecting = "(a x d)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceTwoWithOneAtRight(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - - t.replaceChildren(1, 2, newChild) - expecting = "(a b x)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceAllWithOne(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChild = CommonTree(CommonToken(99, text="x")) - - t.replaceChildren(0, 2, newChild) - expecting = "(a x)" - self.assertEqual(expecting, t.toStringTree()) - t.sanityCheckParentAndChildIndexes() - - - def testReplaceAllWithTwo(self): - t = CommonTree(CommonToken(99, text="a")) - t.addChild(CommonTree(CommonToken(99, text="b"))) - t.addChild(CommonTree(CommonToken(99, text="c"))) - t.addChild(CommonTree(CommonToken(99, text="d"))) - - newChildren = self.adaptor.nil() - newChildren.addChild(CommonTree(CommonToken(99, text="x"))) - newChildren.addChild(CommonTree(CommonToken(99, text="y"))) - - t.replaceChildren(0, 2, newChildren) - expecting = "(a x y)" - self.assertEqual(expecting, t.toStringTree()) - 
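-        # all of b, c and d were spliced out; x and y are now children 0 and 1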
t.sanityCheckParentAndChildIndexes() - - -class TestTreeContext(unittest.TestCase): - """Test the TreeParser.inContext() method""" - - tokenNames = [ - "", "", "", "", "VEC", "ASSIGN", "PRINT", - "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'" - ] - - def testSimpleParent(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC") - self.assertEqual(expecting, found) - - - def testNoParent(self): - tree = "(PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(%x:PRINT (MULT ID (VEC INT INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC") - self.assertEqual(expecting, found) - - - def testParentWithWildcard(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ...") - self.assertEqual(expecting, found) - - - def testWildcardAtStartIgnored(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "...VEC") - self.assertEqual(expecting, found) - - - def testWildcardInBetween(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT...VEC") - self.assertEqual(expecting, found) - - - def testLotsOfWildcards(self): - tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "... PRINT ... 
VEC ...") - self.assertEqual(expecting, found) - - - def testDeep(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ...") - self.assertEqual(expecting, found) - - - def testDeepAndFindRoot(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ...") - self.assertEqual(expecting, found) - - - def testDeepAndFindRoot2(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ... VEC ...") - self.assertEqual(expecting, found) - - - def testChain(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = True - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT MULT VEC MULT") - self.assertEqual(expecting, found) - - - ## TEST INVALID CONTEXTS - - def testNotParent(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC") - self.assertEqual(expecting, found) - - - def testMismatch(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - ## missing MULT - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT VEC MULT") - self.assertEqual(expecting, found) - - - def testMismatch2(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT VEC ...") 
- self.assertEqual(expecting, found) - - - def testMismatch3(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - expecting = False - found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ... VEC MULT") - self.assertEqual(expecting, found) - - - def testDoubleEtc(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - self.assertRaisesRegex( - ValueError, r'invalid syntax: \.\.\. \.\.\.', - TreeParser._inContext, adaptor, self.tokenNames, node, "PRINT ... ... VEC") - - - def testDotDot(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - labels = {} - valid = wiz.parse( - t, - "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", - labels) - self.assertTrue(valid) - node = labels.get("x") - - self.assertRaisesRegex( - ValueError, r'invalid syntax: \.\.', - TreeParser._inContext, adaptor, self.tokenNames, node, "PRINT .. VEC") - - -class TestTreeVisitor(unittest.TestCase): - """Test of the TreeVisitor class.""" - - tokenNames = [ - "", "", "", "", "VEC", "ASSIGN", "PRINT", - "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'" - ] - - def testTreeVisitor(self): - tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokenNames) - t = wiz.create(tree) - - found = [] - def pre(t): - found.append("pre({})".format(t)) - return t - def post(t): - found.append("post({})".format(t)) - return t - - visitor = TreeVisitor(adaptor) - visitor.visit(t, pre, post) - - expecting = [ "pre(PRINT)", "pre(MULT)", "pre(x)", "post(x)", - "pre(VEC)", "pre(MULT)", "pre(9)", "post(9)", "pre(1)", - "post(1)", "post(MULT)", "pre(2)", "post(2)", "pre(3)", - "post(3)", "post(VEC)", "post(MULT)", "post(PRINT)" ] - - self.assertEqual(expecting, found) - - -class TestTreeIterator(unittest.TestCase): - tokens = [ - "", "", "", "", - "A", "B", "C", "D", "E", "F", "G" ] - - def testNode(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("A") - it = TreeIterator(t) - expecting = "A EOF" - found = self.toString(it) - self.assertEqual(expecting, found) - - - def testFlatAB(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(nil A B)") - it = TreeIterator(t) - expecting = "nil DOWN A B UP EOF" - found = self.toString(it) - self.assertEqual(expecting, found) - - - def testAB(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A B)") - it = TreeIterator(t) - expecting = "A DOWN B UP EOF" - found = self.toString(it) - self.assertEqual(expecting, found) - - - def testABC(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A B C)") - it = TreeIterator(t) - expecting = "A DOWN B C UP EOF" - found = self.toString(it) - self.assertEqual(expecting, found) - - - 
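The expected strings in these iterator tests spell out the traversal contract: TreeIterator flattens the tree into node order, bracketing every child list with imaginary DOWN/UP navigation nodes and terminating with EOF. A short sketch of driving it by hand, assuming TreeIterator is exported from antlr3.tree as the tests imply:

    from antlr3.tree import CommonTreeAdaptor, TreeIterator
    from antlr3.treewizard import TreeWizard

    tokens = ["", "", "", "", "A", "B", "C", "D", "E", "F", "G"]
    wiz = TreeWizard(CommonTreeAdaptor(), tokens)
    t = wiz.create("(A B (C D))")

    # DOWN/UP mark descent into and return from a child list; EOF ends it.
    print(' '.join(str(n) for n in TreeIterator(t)))
    # -> A DOWN B C DOWN D UP UP EOF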
def testVerticalList(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A (B C))") - it = TreeIterator(t) - expecting = "A DOWN B DOWN C UP UP EOF" - found = self.toString(it) - self.assertEqual(expecting, found) - - - def testComplex(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A (B (C D E) F) G)") - it = TreeIterator(t) - expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF" - found = self.toString(it) - self.assertEqual(expecting, found) - - - def testReset(self): - adaptor = CommonTreeAdaptor() - wiz = TreeWizard(adaptor, self.tokens) - t = wiz.create("(A (B (C D E) F) G)") - it = TreeIterator(t) - expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF" - found = self.toString(it) - self.assertEqual(expecting, found) - - it.reset() - expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF" - found = self.toString(it) - self.assertEqual(expecting, found) - - - def toString(self, it): - buf = [] - for n in it: - buf.append(str(n)) - - return ' '.join(buf) - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testtreewizard.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testtreewizard.py deleted file mode 100644 index 5ffaa4db..00000000 --- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testtreewizard.py +++ /dev/null @@ -1,689 +0,0 @@ - -from io import StringIO -import os -import unittest - -from antlr3.tree import CommonTreeAdaptor, CommonTree, INVALID_TOKEN_TYPE -from antlr3.treewizard import TreeWizard, computeTokenTypes, \ - TreePatternLexer, EOF, ID, BEGIN, END, PERCENT, COLON, DOT, ARG, \ - TreePatternParser, \ - TreePattern, WildcardTreePattern, TreePatternTreeAdaptor - - -class TestComputeTokenTypes(unittest.TestCase): - """Test case for the computeTokenTypes function.""" - - def testNone(self): - """computeTokenTypes(None) -> {}""" - - typeMap = computeTokenTypes(None) - self.assertIsInstance(typeMap, dict) - self.assertEqual(typeMap, {}) - - - def testList(self): - """computeTokenTypes(['a', 'b']) -> { 'a': 0, 'b': 1 }""" - - typeMap = computeTokenTypes(['a', 'b']) - self.assertIsInstance(typeMap, dict) - self.assertEqual(typeMap, { 'a': 0, 'b': 1 }) - - -class TestTreePatternLexer(unittest.TestCase): - """Test case for the TreePatternLexer class.""" - - def testBegin(self): - """TreePatternLexer(): '('""" - - lexer = TreePatternLexer('(') - type = lexer.nextToken() - self.assertEqual(type, BEGIN) - self.assertEqual(lexer.sval, '') - self.assertFalse(lexer.error) - - - def testEnd(self): - """TreePatternLexer(): ')'""" - - lexer = TreePatternLexer(')') - type = lexer.nextToken() - self.assertEqual(type, END) - self.assertEqual(lexer.sval, '') - self.assertFalse(lexer.error) - - - def testPercent(self): - """TreePatternLexer(): '%'""" - - lexer = TreePatternLexer('%') - type = lexer.nextToken() - self.assertEqual(type, PERCENT) - self.assertEqual(lexer.sval, '') - self.assertFalse(lexer.error) - - - def testDot(self): - """TreePatternLexer(): '.'""" - - lexer = TreePatternLexer('.') - type = lexer.nextToken() - self.assertEqual(type, DOT) - self.assertEqual(lexer.sval, '') - self.assertFalse(lexer.error) - - - def testColon(self): - """TreePatternLexer(): ':'""" - - lexer = TreePatternLexer(':') - type = lexer.nextToken() - self.assertEqual(type, COLON) - self.assertEqual(lexer.sval, '') - self.assertFalse(lexer.error) - - - def testEOF(self): - 
"""TreePatternLexer(): EOF""" - - lexer = TreePatternLexer(' \n \r \t ') - type = lexer.nextToken() - self.assertEqual(type, EOF) - self.assertEqual(lexer.sval, '') - self.assertFalse(lexer.error) - - - def testID(self): - """TreePatternLexer(): ID""" - - lexer = TreePatternLexer('_foo12_bar') - type = lexer.nextToken() - self.assertEqual(type, ID) - self.assertEqual(lexer.sval, '_foo12_bar') - self.assertFalse(lexer.error) - - - def testARG(self): - """TreePatternLexer(): ARG""" - - lexer = TreePatternLexer(r'[ \]bla\n]') - type = lexer.nextToken() - self.assertEqual(type, ARG) - self.assertEqual(lexer.sval, r' ]bla\n') - self.assertFalse(lexer.error) - - - def testError(self): - """TreePatternLexer(): error""" - - lexer = TreePatternLexer('1') - type = lexer.nextToken() - self.assertEqual(type, EOF) - self.assertEqual(lexer.sval, '') - self.assertTrue(lexer.error) - - -class TestTreePatternParser(unittest.TestCase): - """Test case for the TreePatternParser class.""" - - def setUp(self): - """Setup text fixure - - We need a tree adaptor, use CommonTreeAdaptor. - And a constant list of token names. - - """ - - self.adaptor = CommonTreeAdaptor() - self.tokens = [ - "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR" - ] - self.wizard = TreeWizard(self.adaptor, tokenNames=self.tokens) - - - def testSingleNode(self): - """TreePatternParser: 'ID'""" - lexer = TreePatternLexer('ID') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.assertIsInstance(tree, CommonTree) - self.assertEqual(tree.getType(), 10) - self.assertEqual(tree.getText(), 'ID') - - - def testSingleNodeWithArg(self): - """TreePatternParser: 'ID[foo]'""" - lexer = TreePatternLexer('ID[foo]') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.assertIsInstance(tree, CommonTree) - self.assertEqual(tree.getType(), 10) - self.assertEqual(tree.getText(), 'foo') - - - def testSingleLevelTree(self): - """TreePatternParser: '(A B)'""" - lexer = TreePatternLexer('(A B)') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.assertIsInstance(tree, CommonTree) - self.assertEqual(tree.getType(), 5) - self.assertEqual(tree.getText(), 'A') - self.assertEqual(tree.getChildCount(), 1) - self.assertEqual(tree.getChild(0).getType(), 6) - self.assertEqual(tree.getChild(0).getText(), 'B') - - - def testNil(self): - """TreePatternParser: 'nil'""" - lexer = TreePatternLexer('nil') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.assertIsInstance(tree, CommonTree) - self.assertEqual(tree.getType(), 0) - self.assertIsNone(tree.getText()) - - - def testWildcard(self): - """TreePatternParser: '(.)'""" - lexer = TreePatternLexer('(.)') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.assertIsInstance(tree, WildcardTreePattern) - - - def testLabel(self): - """TreePatternParser: '(%a:A)'""" - lexer = TreePatternLexer('(%a:A)') - parser = TreePatternParser(lexer, self.wizard, TreePatternTreeAdaptor()) - tree = parser.pattern() - self.assertIsInstance(tree, TreePattern) - self.assertEqual(tree.label, 'a') - - - def testError1(self): - """TreePatternParser: ')'""" - lexer = TreePatternLexer(')') - parser = TreePatternParser(lexer, self.wizard, self.adaptor) - tree = parser.pattern() - self.assertIsNone(tree) - - - def testError2(self): - """TreePatternParser: '()'""" - lexer = TreePatternLexer('()') - parser = TreePatternParser(lexer, 
-    def testError2(self):
-        """TreePatternParser: '()'"""
-        lexer = TreePatternLexer('()')
-        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
-        tree = parser.pattern()
-        self.assertIsNone(tree)
-
-
-    def testError3(self):
-        """TreePatternParser: '(A ])'"""
-        lexer = TreePatternLexer('(A ])')
-        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
-        tree = parser.pattern()
-        self.assertIsNone(tree)
-
-
-class TestTreeWizard(unittest.TestCase):
-    """Test case for the TreeWizard class."""
-
-    def setUp(self):
-        """Set up the test fixture.
-
-        We need a tree adaptor (CommonTreeAdaptor will do) and a
-        constant list of token names.
-
-        """
-
-        self.adaptor = CommonTreeAdaptor()
-        self.tokens = [
-            "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"
-        ]
-
-
-    def testInit(self):
-        """TreeWizard.__init__()"""
-
-        wiz = TreeWizard(
-            self.adaptor,
-            tokenNames=['a', 'b']
-        )
-
-        self.assertIs(wiz.adaptor, self.adaptor)
-        self.assertEqual(
-            wiz.tokenNameToTypeMap,
-            { 'a': 0, 'b': 1 }
-        )
-
-
-    def testGetTokenType(self):
-        """TreeWizard.getTokenType()"""
-
-        wiz = TreeWizard(
-            self.adaptor,
-            tokenNames=self.tokens
-        )
-
-        self.assertEqual(
-            wiz.getTokenType('A'),
-            5
-        )
-
-        self.assertEqual(
-            wiz.getTokenType('VAR'),
-            11
-        )
-
-        self.assertEqual(
-            wiz.getTokenType('invalid'),
-            INVALID_TOKEN_TYPE
-        )
-
-    def testSingleNode(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("ID")
-        found = t.toStringTree()
-        expecting = "ID"
-        self.assertEqual(expecting, found)
-
-
-    def testSingleNodeWithArg(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("ID[foo]")
-        found = t.toStringTree()
-        expecting = "foo"
-        self.assertEqual(expecting, found)
-
-
-    def testSingleNodeTree(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(A)")
-        found = t.toStringTree()
-        expecting = "A"
-        self.assertEqual(expecting, found)
-
-
-    def testSingleLevelTree(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(A B C D)")
-        found = t.toStringTree()
-        expecting = "(A B C D)"
-        self.assertEqual(expecting, found)
-
-
-    def testListTree(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(nil A B C)")
-        found = t.toStringTree()
-        expecting = "A B C"
-        self.assertEqual(expecting, found)
-
-
-    def testInvalidListTree(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("A B C")
-        self.assertIsNone(t)
-
-
-    def testDoubleLevelTree(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(A (B C) (B D) E)")
-        found = t.toStringTree()
-        expecting = "(A (B C) (B D) E)"
-        self.assertEqual(expecting, found)
-
-
-    def __simplifyIndexMap(self, indexMap):
-        return dict(  # stringify nodes for easy comparing
-            (ttype, [str(node) for node in nodes])
-            for ttype, nodes in indexMap.items()
-        )
-
-    def testSingleNodeIndex(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        tree = wiz.create("ID")
-        indexMap = wiz.index(tree)
-        found = self.__simplifyIndexMap(indexMap)
-        expecting = { 10: ["ID"] }
-        self.assertEqual(expecting, found)
-
-
-    def testNoRepeatsIndex(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        tree = wiz.create("(A B C D)")
-        indexMap = wiz.index(tree)
-        found = self.__simplifyIndexMap(indexMap)
-        expecting = { 8:['D'], 6:['B'], 7:['C'], 5:['A'] }
-        self.assertEqual(expecting, found)
-
-
-    def testRepeatsIndex(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        tree = wiz.create("(A B (A C B) B D D)")
-        indexMap = wiz.index(tree)
-        found = self.__simplifyIndexMap(indexMap)
-        expecting = { 8: ['D', 'D'], 6: ['B', 'B', 'B'], 7: ['C'], 5: ['A', 'A'] }
-        self.assertEqual(expecting, found)
-
-
-    def 
testNoRepeatsVisit(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B C D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append(str(node)) - - wiz.visit(tree, wiz.getTokenType("B"), visitor) - - expecting = ['B'] - self.assertEqual(expecting, elements) - - - def testNoRepeatsVisit2(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append(str(node)) - - wiz.visit(tree, wiz.getTokenType("C"), visitor) - - expecting = ['C'] - self.assertEqual(expecting, elements) - - - def testRepeatsVisit(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append(str(node)) - - wiz.visit(tree, wiz.getTokenType("B"), visitor) - - expecting = ['B', 'B', 'B'] - self.assertEqual(expecting, elements) - - - def testRepeatsVisit2(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append(str(node)) - - wiz.visit(tree, wiz.getTokenType("A"), visitor) - - expecting = ['A', 'A'] - self.assertEqual(expecting, elements) - - - def testRepeatsVisitWithContext(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append('{}@{}[{}]'.format(node, parent, childIndex)) - - wiz.visit(tree, wiz.getTokenType("B"), visitor) - - expecting = ['B@A[0]', 'B@A[1]', 'B@A[2]'] - self.assertEqual(expecting, elements) - - - def testRepeatsVisitWithNullParentAndContext(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B (A C B) B D D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append( - '{}@{}[{}]'.format( - node, parent or 'nil', childIndex) - ) - - wiz.visit(tree, wiz.getTokenType("A"), visitor) - - expecting = ['A@nil[0]', 'A@A[1]'] - self.assertEqual(expecting, elements) - - - def testVisitPattern(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B C (A B) D)") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append( - str(node) - ) - - wiz.visit(tree, '(A B)', visitor) - - expecting = ['A'] # shouldn't match overall root, just (A B) - self.assertEqual(expecting, elements) - - - def testVisitPatternMultiple(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B C (A B) (D (A B)))") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append( - '{}@{}[{}]'.format(node, parent or 'nil', childIndex) - ) - - wiz.visit(tree, '(A B)', visitor) - - expecting = ['A@A[2]', 'A@D[0]'] - self.assertEqual(expecting, elements) - - - def testVisitPatternMultipleWithLabels(self): - wiz = TreeWizard(self.adaptor, self.tokens) - tree = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))") - - elements = [] - def visitor(node, parent, childIndex, labels): - elements.append( - '{}@{}[{}]{}&{}'.format( - node, - parent or 'nil', - childIndex, - labels['a'], - labels['b'], - ) - ) - - wiz.visit(tree, '(%a:A %b:B)', visitor) - - expecting = ['foo@A[2]foo&bar', 'big@D[0]big&dog'] - self.assertEqual(expecting, elements) - - - def testParse(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C)") - valid = wiz.parse(t, "(A B C)") 
-        self.assertTrue(valid)
-
-
-    def testParseSingleNode(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("A")
-        valid = wiz.parse(t, "A")
-        self.assertTrue(valid)
-
-
-    def testParseSingleNodeFails(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("A")
-        valid = wiz.parse(t, "B")
-        self.assertFalse(valid)
-
-
-    def testParseFlatTree(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(nil A B C)")
-        valid = wiz.parse(t, "(nil A B C)")
-        self.assertTrue(valid)
-
-
-    def testParseFlatTreeFails(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(nil A B C)")
-        valid = wiz.parse(t, "(nil A B)")
-        self.assertFalse(valid)
-
-
-    def testParseFlatTreeFails2(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(nil A B C)")
-        valid = wiz.parse(t, "(nil A B A)")
-        self.assertFalse(valid)
-
-
-    def testWildcard(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(A B C)")
-        valid = wiz.parse(t, "(A . .)")
-        self.assertTrue(valid)
-
-
-    def testParseWithText(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(A B[foo] C[bar])")
-        # C pattern has no text arg so despite [bar] in t, no need
-        # to match text--check structure only.
-        valid = wiz.parse(t, "(A B[foo] C)")
-        self.assertTrue(valid)
-
-
-    def testParseWithText2(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(A B[T__32] (C (D E[a])))")
-        # Attempting the match must not mutate the tree itself,
-        # whether or not the pattern applies.
-        wiz.parse(t, "(A B[foo] C)")
-        self.assertEqual("(A T__32 (C (D a)))", t.toStringTree())
-
-
-    def testParseWithTextFails(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(A B C)")
-        valid = wiz.parse(t, "(A[foo] B C)")
-        self.assertFalse(valid)  # fails
-
-
-    def testParseLabels(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(A B C)")
-        labels = {}
-        valid = wiz.parse(t, "(%a:A %b:B %c:C)", labels)
-        self.assertTrue(valid)
-        self.assertEqual("A", str(labels["a"]))
-        self.assertEqual("B", str(labels["b"]))
-        self.assertEqual("C", str(labels["c"]))
-
-
-    def testParseWithWildcardLabels(self):
-        wiz = TreeWizard(self.adaptor, self.tokens)
-        t = wiz.create("(A B C)")
-        labels = {}
-        valid = wiz.parse(t, "(A %b:. 
%c:.)", labels) - self.assertTrue(valid) - self.assertEqual("B", str(labels["b"])) - self.assertEqual("C", str(labels["c"])) - - - def testParseLabelsAndTestText(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B[foo] C)") - labels = {} - valid = wiz.parse(t, "(%a:A %b:B[foo] %c:C)", labels) - self.assertTrue(valid) - self.assertEqual("A", str(labels["a"])) - self.assertEqual("foo", str(labels["b"])) - self.assertEqual("C", str(labels["c"])) - - - def testParseLabelsInNestedTree(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A (B C) (D E))") - labels = {} - valid = wiz.parse(t, "(%a:A (%b:B %c:C) (%d:D %e:E) )", labels) - self.assertTrue(valid) - self.assertEqual("A", str(labels["a"])) - self.assertEqual("B", str(labels["b"])) - self.assertEqual("C", str(labels["c"])) - self.assertEqual("D", str(labels["d"])) - self.assertEqual("E", str(labels["e"])) - - - def testEquals(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B C)") - t2 = wiz.create("(A B C)") - same = wiz.equals(t1, t2) - self.assertTrue(same) - - - def testEqualsWithText(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B[foo] C)") - t2 = wiz.create("(A B[foo] C)") - same = wiz.equals(t1, t2) - self.assertTrue(same) - - - def testEqualsWithMismatchedText(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B[foo] C)") - t2 = wiz.create("(A B C)") - same = wiz.equals(t1, t2) - self.assertFalse(same) - - - def testEqualsWithMismatchedList(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B C)") - t2 = wiz.create("(A B A)") - same = wiz.equals(t1, t2) - self.assertFalse(same) - - - def testEqualsWithMismatchedListLength(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t1 = wiz.create("(A B C)") - t2 = wiz.create("(A B)") - same = wiz.equals(t1, t2) - self.assertFalse(same) - - - def testFindPattern(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))") - subtrees = wiz.find(t, "(A B)") - found = [str(node) for node in subtrees] - expecting = ['foo', 'big'] - self.assertEqual(expecting, found) - - - def testFindTokenType(self): - wiz = TreeWizard(self.adaptor, self.tokens) - t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))") - subtrees = wiz.find(t, wiz.getTokenType('A')) - found = [str(node) for node in subtrees] - expecting = ['A', 'foo', 'big'] - self.assertEqual(expecting, found) - - - -if __name__ == "__main__": - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/thirdparty/plexxi_setup.sh b/thirdparty/plexxi_setup.sh deleted file mode 100644 index 919bd832..00000000 --- a/thirdparty/plexxi_setup.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -# Replace 2.0.2-sp2 with your respective plexxicore version -# This can be checked at http://PlexxiCoreURL:8080/PlexxiCore/ -pip install -v --index-url http://pypi.plexxi.com plexxi==2.0.2-sp2 diff --git a/tools/abandon_old_reviews.sh b/tools/abandon_old_reviews.sh deleted file mode 100755 index f32c4e05..00000000 --- a/tools/abandon_old_reviews.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# before you run this modify your .ssh/config to create a
-# review.openstack.org entry:
-#
-#   Host review.openstack.org
-#   User <your gerrit username>
-#   Port 29418
-#
-
-# Note: due to gerrit bug somewhere, this double posts messages. :(
-
-# first purge all the reviews that are more than 4w old and blocked by a core -2
-
-set -o errexit
-
-function abandon_review {
-    local gitid=$1
-    shift
-    local msg=$@
-    echo "Abandoning $gitid"
-    # echo ssh review.openstack.org gerrit review $gitid --abandon --message \"$msg\"
-    ssh review.openstack.org -p 29418 gerrit review $gitid --abandon --message \"$msg\"
-}
-
-PROJECTS="(project:openstack/congress OR project:openstack/python-openstackclient)"
-
-blocked_reviews=$(ssh review.openstack.org -p 29418 "gerrit query --current-patch-set --format json $PROJECTS status:open age:4w label:Code-Review<=-2" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g')
-
-blocked_msg=$(cat <<EOF
-
-This review is > 4 weeks without comment and currently blocked by a
-core reviewer with a -2. We are abandoning this for now.
-
-Feel free to reactivate the review by pressing the restore button and
-contacting the reviewer with the -2 on this review to ensure you
-address their concerns.
-
-EOF
-)
-
-# For testing, put in a git rev of something you own and uncomment
-# blocked_reviews="b6c4218ae4d75b86c33fa3d37c27bc23b46b6f0f"
-
-for review in $blocked_reviews; do
-    # echo ssh review.openstack.org gerrit review $review --abandon --message \"$msg\"
-    echo "Blocked review $review"
-    abandon_review $review $blocked_msg
-done
-
-# then purge all the reviews that are > 4w with no changes and Jenkins has -1ed
-
-failing_reviews=$(ssh review.openstack.org -p 29418 "gerrit query --current-patch-set --format json $PROJECTS status:open age:4w NOT label:Verified>=1,jenkins" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g')
-
-failing_msg=$(cat <<EOF
-
-This review is > 4 weeks without comment, and failed Jenkins the last
-time it was checked. We are abandoning this for now.
-
-Feel free to reactivate the review by pressing the restore button and
-leaving a 'recheck' comment to get fresh test results.
-
-EOF
-)
-
-for review in $failing_reviews; do
-    echo "Failing review $review"
-    abandon_review $review $failing_msg
-done
diff --git a/tools/install_venv.py b/tools/install_venv.py
deleted file mode 100644
index 42b155fb..00000000
--- a/tools/install_venv.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Copyright 2010 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import sys - -import install_venv_common as install_venv # noqa - - -def print_help(venv, root): - help = """ - OpenStack development environment setup is complete. - - OpenStack development uses virtualenv to track and manage Python - dependencies while in development and testing. - - To activate the OpenStack virtualenv for the extent of your current shell - session you can run: - - $ source %s/bin/activate - - Or, if you prefer, you can run commands in the virtualenv on a case by case - basis by running: - - $ %s/tools/with_venv.sh - - Also, make test will automatically use the virtualenv. - """ - print(help % (venv, root)) - - -def main(argv): - root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - - if os.environ.get('tools_path'): - root = os.environ['tools_path'] - venv = os.path.join(root, '.venv') - if os.environ.get('VENV'): - venv = os.environ['VENV'] - - pip_requires = os.path.join(root, 'requirements.txt') - test_requires = os.path.join(root, 'test-requirements.txt') - py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) - project = 'OpenStack' - install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, - py_version, project) - options = install.parse_args(argv) - install.check_python_version() - install.check_dependencies() - install.create_virtualenv(no_site_packages=options.no_site_packages) - install.install_dependencies() - print_help(venv, root) - -if __name__ == '__main__': - main(sys.argv) diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py deleted file mode 100644 index e279159a..00000000 --- a/tools/install_venv_common.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides methods needed by installation script for OpenStack development -virtual environments. - -Since this script is used to bootstrap a virtualenv from the system's Python -environment, it should be kept strictly compatible with Python 2.6. - -Synced in from openstack-common -""" - -from __future__ import print_function - -import optparse -import os -import subprocess -import sys - - -class InstallVenv(object): - - def __init__(self, root, venv, requirements, - test_requirements, py_version, - project): - self.root = root - self.venv = venv - self.requirements = requirements - self.test_requirements = test_requirements - self.py_version = py_version - self.project = project - - def die(self, message, *args): - print(message % args, file=sys.stderr) - sys.exit(1) - - def check_python_version(self): - if sys.version_info < (2, 6): - self.die("Need Python Version >= 2.6") - - def run_command_with_code(self, cmd, redirect_output=True, - check_exit_code=True): - """Runs a command in an out-of-process shell. - - Returns the output of that command. Working directory is self.root. 
- """ - if redirect_output: - stdout = subprocess.PIPE - else: - stdout = None - - proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) - output = proc.communicate()[0] - if check_exit_code and proc.returncode != 0: - self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) - return (output, proc.returncode) - - def run_command(self, cmd, redirect_output=True, check_exit_code=True): - return self.run_command_with_code(cmd, redirect_output, - check_exit_code)[0] - - def get_distro(self): - if (os.path.exists('/etc/fedora-release') or - os.path.exists('/etc/redhat-release')): - return Fedora( - self.root, self.venv, self.requirements, - self.test_requirements, self.py_version, self.project) - else: - return Distro( - self.root, self.venv, self.requirements, - self.test_requirements, self.py_version, self.project) - - def check_dependencies(self): - self.get_distro().install_virtualenv() - - def create_virtualenv(self, no_site_packages=True): - """Creates the virtual environment and installs PIP. - - Creates the virtual environment and installs PIP only into the - virtual environment. - """ - if not os.path.isdir(self.venv): - print('Creating venv...', end=' ') - if no_site_packages: - self.run_command(['virtualenv', '-q', '--no-site-packages', - self.venv]) - else: - self.run_command(['virtualenv', '-q', self.venv]) - print('done.') - else: - print("venv already exists...") - pass - - def pip_install(self, *args): - self.run_command(['tools/with_venv.sh', - 'pip', 'install', '--upgrade'] + list(args), - redirect_output=False) - - def install_dependencies(self): - print('Installing dependencies with pip (this can take a while)...') - - # First things first, make sure our venv has the latest pip and - # setuptools and pbr - self.pip_install('pip>=1.4') - self.pip_install('setuptools') - self.pip_install('pbr') - - self.pip_install('-r', self.requirements, '-r', self.test_requirements) - - def parse_args(self, argv): - """Parses command-line arguments.""" - parser = optparse.OptionParser() - parser.add_option('-n', '--no-site-packages', - action='store_true', - help="Do not inherit packages from global Python " - "install.") - return parser.parse_args(argv[1:])[0] - - -class Distro(InstallVenv): - - def check_cmd(self, cmd): - return bool(self.run_command(['which', cmd], - check_exit_code=False).strip()) - - def install_virtualenv(self): - if self.check_cmd('virtualenv'): - return - - if self.check_cmd('easy_install'): - print('Installing virtualenv via easy_install...', end=' ') - if self.run_command(['easy_install', 'virtualenv']): - print('Succeeded') - return - else: - print('Failed') - - self.die('ERROR: virtualenv not found.\n\n%s development' - ' requires virtualenv, please install it using your' - ' favorite package management tool' % self.project) - - -class Fedora(Distro): - """This covers all Fedora-based distributions. 
- - Includes: Fedora, RHEL, CentOS, Scientific Linux - """ - - def check_pkg(self, pkg): - return self.run_command_with_code(['rpm', '-q', pkg], - check_exit_code=False)[1] == 0 - - def install_virtualenv(self): - if self.check_cmd('virtualenv'): - return - - if not self.check_pkg('python-virtualenv'): - self.die("Please install 'python-virtualenv'.") - - super(Fedora, self).install_virtualenv() diff --git a/tools/pip-install-single-req.sh b/tools/pip-install-single-req.sh deleted file mode 100755 index b2ef4ffc..00000000 --- a/tools/pip-install-single-req.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -# install specific package $2 according to -# version specified in requirements file $1 -pip install -U `grep $2 $1 | sed 's/#.*//'` diff --git a/tools/with_venv.sh b/tools/with_venv.sh deleted file mode 100755 index 7303990b..00000000 --- a/tools/with_venv.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -TOOLS_PATH=${TOOLS_PATH:-$(dirname $0)} -VENV_PATH=${VENV_PATH:-${TOOLS_PATH}} -VENV_DIR=${VENV_NAME:-/../.venv} -TOOLS=${TOOLS_PATH} -VENV=${VENV:-${VENV_PATH}/${VENV_DIR}} -source ${VENV}/bin/activate && "$@" diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 2b9f0ee5..00000000 --- a/tox.ini +++ /dev/null @@ -1,79 +0,0 @@ -[tox] -minversion = 1.6 -skipsdist = True -envlist = py35,py27,pep8 - -[testenv] -usedevelop = True - -install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} -U {opts} {packages} -whitelist_externals = find - rm -setenv = - VIRTUAL_ENV={envdir} - LANG=en_US.UTF-8 - LANGUAGE=en_US - LC_ALL=en_US.utf-8 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - find . -type f -name "*.py[c|o]" -delete - python setup.py testr --slowest --testr-args='{posargs} --concurrency=1' - -[testenv:pep8] -usedevelop = False -deps = -commands = {toxinidir}/tools/pip-install-single-req.sh test-requirements.txt hacking - flake8 - -[testenv:venv] -commands = {posargs} - -[testenv:cover] -commands = - coverage erase - find . -type f -name "*.pyc" -delete - python setup.py testr --coverage --testr-args='^(?!congress\.tests\.haht\.test_congress_haht.*){posargs}' - coverage report - -[testenv:debug] -commands = oslo_debug_helper -t congress/tests {posargs} - -[testenv:bench] -setenv = - TEST_BENCHMARK=true - VIRTUAL_ENV={envdir} - LANG=en_US.UTF-8 - LANGUAGE=en_US - LC_ALL=en_US.utf-8 -commands = python setup.py testr --testr-args='test_benchmark {posargs} --concurrency=1' - testr slowest --all - -[flake8] -show-source = True -builtins = _ -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*thirdparty/*,CongressLexer.py,CongressParser.py,contrib/* - -[testenv:genconfig] -deps = -commands = {toxinidir}/tools/pip-install-single-req.sh requirements.txt oslo.config - oslo-config-generator --config-file=etc/congress-config-generator.conf - -[testenv:docs] -setenv = PYTHONHASHSEED=0 -commands = rm -rf doc/build doc/source/api - python setup.py build_sphinx - -[testenv:releasenotes] -commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[testenv:bindep] -# Do not install any requirements. We want this to be fast and work even if -# system dependencies are missing, since it's used to tell you what system -# dependencies are missing! 
This also means that bindep must be installed -# separately, outside of the requirements files, and develop mode disabled -# explicitly to avoid unnecessarily installing the checked-out repo too (this -# further relies on "tox.skipsdist = True" above). -deps = bindep -commands = bindep test -usedevelop = False