From d61398b5d91fb366c26e87e7b07b5cb6491ebd0b Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 10 May 2024 18:08:31 -0700 Subject: [PATCH] Retire ec2-api: remove repo content ec2-api project is retiring - https://review.opendev.org/c/openstack/governance/+/919394/1 this commit remove the content of this project repo Depends-On: https://review.opendev.org/c/openstack/project-config/+/919396/1 Change-Id: I671d27260e11ec0ae3488acf561bbdaa73a29a60 --- .gitignore | 24 - .stestr.conf | 3 - .zuul.yaml | 12 - CONTRIBUTING.rst | 19 - HACKING.rst | 43 - LICENSE | 176 -- README.rst | 706 +---- api-ref/source/conf.py | 221 -- api-ref/source/index.rst | 13 - api-ref/source/supported_features.inc | 503 ---- devstack/README.rst | 16 - devstack/create_config | 306 --- devstack/override-defaults | 2 - devstack/plugin.sh | 316 --- devstack/settings | 9 - doc/requirements.txt | 7 - doc/source/conf.py | 82 - .../_flagmappings/ec2api.flagmappings | 143 - .../_flagmappings/ec2api.headers | 7 - doc/source/configuration/api.rst | 18 - doc/source/configuration/index.rst | 19 - doc/source/configuration/metadata.rst | 16 - .../configuration/tables/ec2api-clients.inc | 37 - .../configuration/tables/ec2api-common.inc | 41 - .../configuration/tables/ec2api-database.inc | 25 - .../configuration/tables/ec2api-ec2.inc | 69 - .../configuration/tables/ec2api-metadata.inc | 76 - doc/source/configuration/tables/ec2api-s3.inc | 37 - .../configuration/tables/ec2api-service.inc | 49 - doc/source/contributor/contributing.rst | 47 - doc/source/hacking.rst | 1 - doc/source/index.rst | 69 - doc/source/install/configuration.rst | 66 - doc/source/install/credentials-creation.rst | 24 - doc/source/install/database-creation.rst | 31 - doc/source/install/endpoints-creation.rst | 13 - doc/source/install/index.rst | 27 - doc/source/install/install-devstack.rst | 10 - doc/source/install/install-manual.rst | 47 - doc/source/install/install-sh.rst | 38 - doc/source/install/install-ubuntu.rst | 19 - 
doc/source/install/metadata-configuration.rst | 29 - doc/source/install/next-steps.rst | 8 - doc/source/install/verify.rst | 52 - ec2api/__init__.py | 26 - ec2api/api/__init__.py | 398 --- ec2api/api/address.py | 459 ---- ec2api/api/apirequest.py | 101 - ec2api/api/auth.py | 47 - ec2api/api/availability_zone.py | 214 -- ec2api/api/cloud.py | 2115 --------------- ec2api/api/common.py | 534 ---- ec2api/api/customer_gateway.py | 88 - ec2api/api/dhcp_options.py | 186 -- ec2api/api/ec2utils.py | 560 ---- ec2api/api/faults.py | 69 - ec2api/api/image.py | 1100 -------- ec2api/api/instance.py | 1716 ------------ ec2api/api/internet_gateway.py | 147 -- ec2api/api/key_pair.py | 151 -- ec2api/api/network_interface.py | 586 ----- ec2api/api/opts.py | 39 - ec2api/api/route_table.py | 686 ----- ec2api/api/security_group.py | 589 ----- ec2api/api/snapshot.py | 155 -- ec2api/api/subnet.py | 209 -- ec2api/api/tag.py | 126 - ec2api/api/validator.py | 230 -- ec2api/api/volume.py | 252 -- ec2api/api/vpc.py | 200 -- ec2api/api/vpn_connection.py | 500 ---- ec2api/api/vpn_gateway.py | 225 -- ec2api/clients.py | 163 -- ec2api/cmd/__init__.py | 17 - ec2api/cmd/api.py | 40 - ec2api/cmd/api_metadata.py | 40 - ec2api/cmd/api_s3.py | 39 - ec2api/cmd/manage.py | 69 - ec2api/config.py | 44 - ec2api/context.py | 144 - ec2api/db/__init__.py | 19 - ec2api/db/api.py | 132 - ec2api/db/migration.py | 77 - ec2api/db/sqlalchemy/__init__.py | 0 ec2api/db/sqlalchemy/api.py | 337 --- ec2api/db/sqlalchemy/migrate_repo/README | 4 - ec2api/db/sqlalchemy/migrate_repo/__init__.py | 0 ec2api/db/sqlalchemy/migrate_repo/manage.py | 19 - ec2api/db/sqlalchemy/migrate_repo/migrate.cfg | 20 - .../migrate_repo/versions/001_juno.py | 59 - .../migrate_repo/versions/__init__.py | 0 ec2api/db/sqlalchemy/migration.py | 86 - ec2api/db/sqlalchemy/models.py | 62 - ec2api/exception.py | 498 ---- ec2api/hacking/__init__.py | 0 ec2api/hacking/checks.py | 51 - ec2api/i18n.py | 36 - ec2api/metadata/__init__.py | 284 -- 
ec2api/metadata/api.py | 310 --- ec2api/metadata/opts.py | 24 - ec2api/opts.py | 59 - ec2api/paths.py | 31 - ec2api/s3/__init__.py | 24 - ec2api/s3/opts.py | 24 - ec2api/s3/s3server.py | 387 --- ec2api/service.py | 173 -- ec2api/tests/__init__.py | 0 ec2api/tests/botocoreclient.py | 45 - ec2api/tests/unit/__init__.py | 31 - ec2api/tests/unit/abs.tar.gz | Bin 153 -> 0 bytes ec2api/tests/unit/base.py | 338 --- ec2api/tests/unit/fakes.py | 2307 ----------------- ec2api/tests/unit/fakes_request_response.py | 312 --- ec2api/tests/unit/matchers.py | 564 ---- ec2api/tests/unit/rel.tar.gz | Bin 165 -> 0 bytes ec2api/tests/unit/test_address.py | 679 ----- ec2api/tests/unit/test_api_init.py | 109 - ec2api/tests/unit/test_apirequest.py | 135 - ec2api/tests/unit/test_availability_zone.py | 95 - ec2api/tests/unit/test_clients.py | 128 - ec2api/tests/unit/test_common.py | 140 - ec2api/tests/unit/test_context.py | 64 - ec2api/tests/unit/test_customer_gateway.py | 126 - ec2api/tests/unit/test_db_api.py | 459 ---- ec2api/tests/unit/test_dhcp_options.py | 181 -- ec2api/tests/unit/test_ec2_validate.py | 275 -- ec2api/tests/unit/test_ec2utils.py | 610 ----- ec2api/tests/unit/test_faults.py | 57 - ec2api/tests/unit/test_hacking.py | 29 - ec2api/tests/unit/test_image.py | 985 ------- ec2api/tests/unit/test_instance.py | 2137 --------------- ec2api/tests/unit/test_integrated_scenario.py | 311 --- ec2api/tests/unit/test_internet_gateway.py | 260 -- ec2api/tests/unit/test_key_pair.py | 106 - ec2api/tests/unit/test_metadata.py | 410 --- ec2api/tests/unit/test_metadata_api.py | 346 --- ec2api/tests/unit/test_middleware.py | 201 -- ec2api/tests/unit/test_network_interface.py | 679 ----- ec2api/tests/unit/test_private_key.pem | 27 - ec2api/tests/unit/test_route_table.py | 1162 --------- ec2api/tests/unit/test_s3.py | 120 - ec2api/tests/unit/test_security_group.py | 571 ---- ec2api/tests/unit/test_snapshot.py | 152 -- ec2api/tests/unit/test_subnet.py | 318 --- ec2api/tests/unit/test_tag.py | 
207 -- ec2api/tests/unit/test_tools.py | 92 - ec2api/tests/unit/test_volume.py | 224 -- ec2api/tests/unit/test_vpc.py | 395 --- ec2api/tests/unit/test_vpn_connection.py | 832 ------ ec2api/tests/unit/test_vpn_gateway.py | 444 ---- ec2api/tests/unit/tools.py | 263 -- ec2api/utils.py | 55 - ec2api/version.py | 17 - ec2api/wsgi.py | 515 ---- etc/ec2api/README-ec2api.conf.txt | 4 - etc/ec2api/api-paste.ini | 39 - etc/ec2api/ec2api-config-generator.conf | 13 - install.sh | 369 --- .../notes/drop-py-2-7-a4b96d486289a772.yaml | 6 - ...-volumev3-by-default-fa726fed293d94bb.yaml | 6 - requirements.txt | 37 - setup.cfg | 51 - setup.py | 20 - test-requirements.txt | 14 - tools/colorizer.py | 329 --- tools/db/ec2api-db-setup | 318 --- tools/db/import-nova-ec2-data.sql | 33 - tools/db/schema_diff.py | 268 -- tools/update-from-global-requirements.sh | 35 - tox.ini | 65 - 170 files changed, 8 insertions(+), 37568 deletions(-) delete mode 100644 .gitignore delete mode 100644 .stestr.conf delete mode 100644 .zuul.yaml delete mode 100644 CONTRIBUTING.rst delete mode 100644 HACKING.rst delete mode 100644 LICENSE delete mode 100644 api-ref/source/conf.py delete mode 100644 api-ref/source/index.rst delete mode 100644 api-ref/source/supported_features.inc delete mode 100644 devstack/README.rst delete mode 100755 devstack/create_config delete mode 100644 devstack/override-defaults delete mode 100755 devstack/plugin.sh delete mode 100644 devstack/settings delete mode 100644 doc/requirements.txt delete mode 100644 doc/source/conf.py delete mode 100644 doc/source/configuration/_flagmappings/ec2api.flagmappings delete mode 100644 doc/source/configuration/_flagmappings/ec2api.headers delete mode 100644 doc/source/configuration/api.rst delete mode 100644 doc/source/configuration/index.rst delete mode 100644 doc/source/configuration/metadata.rst delete mode 100644 doc/source/configuration/tables/ec2api-clients.inc delete mode 100644 doc/source/configuration/tables/ec2api-common.inc delete mode 
100644 doc/source/configuration/tables/ec2api-database.inc delete mode 100644 doc/source/configuration/tables/ec2api-ec2.inc delete mode 100644 doc/source/configuration/tables/ec2api-metadata.inc delete mode 100644 doc/source/configuration/tables/ec2api-s3.inc delete mode 100644 doc/source/configuration/tables/ec2api-service.inc delete mode 100644 doc/source/contributor/contributing.rst delete mode 100644 doc/source/hacking.rst delete mode 100644 doc/source/index.rst delete mode 100644 doc/source/install/configuration.rst delete mode 100644 doc/source/install/credentials-creation.rst delete mode 100644 doc/source/install/database-creation.rst delete mode 100644 doc/source/install/endpoints-creation.rst delete mode 100644 doc/source/install/index.rst delete mode 100644 doc/source/install/install-devstack.rst delete mode 100644 doc/source/install/install-manual.rst delete mode 100644 doc/source/install/install-sh.rst delete mode 100644 doc/source/install/install-ubuntu.rst delete mode 100644 doc/source/install/metadata-configuration.rst delete mode 100644 doc/source/install/next-steps.rst delete mode 100644 doc/source/install/verify.rst delete mode 100644 ec2api/__init__.py delete mode 100644 ec2api/api/__init__.py delete mode 100644 ec2api/api/address.py delete mode 100644 ec2api/api/apirequest.py delete mode 100644 ec2api/api/auth.py delete mode 100644 ec2api/api/availability_zone.py delete mode 100644 ec2api/api/cloud.py delete mode 100644 ec2api/api/common.py delete mode 100644 ec2api/api/customer_gateway.py delete mode 100644 ec2api/api/dhcp_options.py delete mode 100644 ec2api/api/ec2utils.py delete mode 100644 ec2api/api/faults.py delete mode 100644 ec2api/api/image.py delete mode 100644 ec2api/api/instance.py delete mode 100644 ec2api/api/internet_gateway.py delete mode 100644 ec2api/api/key_pair.py delete mode 100644 ec2api/api/network_interface.py delete mode 100644 ec2api/api/opts.py delete mode 100644 ec2api/api/route_table.py delete mode 100644 
ec2api/api/security_group.py delete mode 100644 ec2api/api/snapshot.py delete mode 100644 ec2api/api/subnet.py delete mode 100644 ec2api/api/tag.py delete mode 100644 ec2api/api/validator.py delete mode 100644 ec2api/api/volume.py delete mode 100644 ec2api/api/vpc.py delete mode 100644 ec2api/api/vpn_connection.py delete mode 100644 ec2api/api/vpn_gateway.py delete mode 100644 ec2api/clients.py delete mode 100644 ec2api/cmd/__init__.py delete mode 100644 ec2api/cmd/api.py delete mode 100644 ec2api/cmd/api_metadata.py delete mode 100644 ec2api/cmd/api_s3.py delete mode 100644 ec2api/cmd/manage.py delete mode 100644 ec2api/config.py delete mode 100644 ec2api/context.py delete mode 100644 ec2api/db/__init__.py delete mode 100644 ec2api/db/api.py delete mode 100644 ec2api/db/migration.py delete mode 100644 ec2api/db/sqlalchemy/__init__.py delete mode 100644 ec2api/db/sqlalchemy/api.py delete mode 100644 ec2api/db/sqlalchemy/migrate_repo/README delete mode 100644 ec2api/db/sqlalchemy/migrate_repo/__init__.py delete mode 100644 ec2api/db/sqlalchemy/migrate_repo/manage.py delete mode 100644 ec2api/db/sqlalchemy/migrate_repo/migrate.cfg delete mode 100644 ec2api/db/sqlalchemy/migrate_repo/versions/001_juno.py delete mode 100644 ec2api/db/sqlalchemy/migrate_repo/versions/__init__.py delete mode 100644 ec2api/db/sqlalchemy/migration.py delete mode 100644 ec2api/db/sqlalchemy/models.py delete mode 100644 ec2api/exception.py delete mode 100644 ec2api/hacking/__init__.py delete mode 100644 ec2api/hacking/checks.py delete mode 100644 ec2api/i18n.py delete mode 100644 ec2api/metadata/__init__.py delete mode 100644 ec2api/metadata/api.py delete mode 100644 ec2api/metadata/opts.py delete mode 100644 ec2api/opts.py delete mode 100644 ec2api/paths.py delete mode 100644 ec2api/s3/__init__.py delete mode 100644 ec2api/s3/opts.py delete mode 100644 ec2api/s3/s3server.py delete mode 100644 ec2api/service.py delete mode 100644 ec2api/tests/__init__.py delete mode 100644 
ec2api/tests/botocoreclient.py delete mode 100644 ec2api/tests/unit/__init__.py delete mode 100644 ec2api/tests/unit/abs.tar.gz delete mode 100644 ec2api/tests/unit/base.py delete mode 100644 ec2api/tests/unit/fakes.py delete mode 100644 ec2api/tests/unit/fakes_request_response.py delete mode 100644 ec2api/tests/unit/matchers.py delete mode 100644 ec2api/tests/unit/rel.tar.gz delete mode 100644 ec2api/tests/unit/test_address.py delete mode 100644 ec2api/tests/unit/test_api_init.py delete mode 100644 ec2api/tests/unit/test_apirequest.py delete mode 100644 ec2api/tests/unit/test_availability_zone.py delete mode 100644 ec2api/tests/unit/test_clients.py delete mode 100644 ec2api/tests/unit/test_common.py delete mode 100644 ec2api/tests/unit/test_context.py delete mode 100644 ec2api/tests/unit/test_customer_gateway.py delete mode 100644 ec2api/tests/unit/test_db_api.py delete mode 100644 ec2api/tests/unit/test_dhcp_options.py delete mode 100644 ec2api/tests/unit/test_ec2_validate.py delete mode 100644 ec2api/tests/unit/test_ec2utils.py delete mode 100644 ec2api/tests/unit/test_faults.py delete mode 100644 ec2api/tests/unit/test_hacking.py delete mode 100644 ec2api/tests/unit/test_image.py delete mode 100644 ec2api/tests/unit/test_instance.py delete mode 100644 ec2api/tests/unit/test_integrated_scenario.py delete mode 100644 ec2api/tests/unit/test_internet_gateway.py delete mode 100644 ec2api/tests/unit/test_key_pair.py delete mode 100644 ec2api/tests/unit/test_metadata.py delete mode 100644 ec2api/tests/unit/test_metadata_api.py delete mode 100644 ec2api/tests/unit/test_middleware.py delete mode 100644 ec2api/tests/unit/test_network_interface.py delete mode 100644 ec2api/tests/unit/test_private_key.pem delete mode 100644 ec2api/tests/unit/test_route_table.py delete mode 100644 ec2api/tests/unit/test_s3.py delete mode 100644 ec2api/tests/unit/test_security_group.py delete mode 100644 ec2api/tests/unit/test_snapshot.py delete mode 100644 ec2api/tests/unit/test_subnet.py 
delete mode 100644 ec2api/tests/unit/test_tag.py delete mode 100644 ec2api/tests/unit/test_tools.py delete mode 100644 ec2api/tests/unit/test_volume.py delete mode 100644 ec2api/tests/unit/test_vpc.py delete mode 100644 ec2api/tests/unit/test_vpn_connection.py delete mode 100644 ec2api/tests/unit/test_vpn_gateway.py delete mode 100644 ec2api/tests/unit/tools.py delete mode 100644 ec2api/utils.py delete mode 100644 ec2api/version.py delete mode 100644 ec2api/wsgi.py delete mode 100644 etc/ec2api/README-ec2api.conf.txt delete mode 100644 etc/ec2api/api-paste.ini delete mode 100644 etc/ec2api/ec2api-config-generator.conf delete mode 100755 install.sh delete mode 100644 releasenotes/notes/drop-py-2-7-a4b96d486289a772.yaml delete mode 100644 releasenotes/notes/use-volumev3-by-default-fa726fed293d94bb.yaml delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 test-requirements.txt delete mode 100755 tools/colorizer.py delete mode 100755 tools/db/ec2api-db-setup delete mode 100644 tools/db/import-nova-ec2-data.sql delete mode 100755 tools/db/schema_diff.py delete mode 100755 tools/update-from-global-requirements.sh delete mode 100644 tox.ini diff --git a/.gitignore b/.gitignore deleted file mode 100644 index f28d6292..00000000 --- a/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -*.pyc -*~ -etc/ec2api/ec2api.conf.sample -.project -.pydevproject -ec2_api.egg-info -.tox -.stestr -*.log -*.egg -*.swp -*.swo -build -dist -.testrepository -/functional_tests.conf* -/buckets -.venv -.coverage* -!.coveragerc -cover/ -.idea -ec2api/tests/unit/test_cert.pem -.DS_Store diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 4ec78db8..00000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=./ec2api/tests/unit -top_dir=./ diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index 3cd448d4..00000000 --- a/.zuul.yaml +++ /dev/null @@ -1,12 +0,0 @@ -- project: - queue: ec2-api - 
templates: - - check-requirements - - openstack-python3-jobs - - publish-openstack-docs-pti - check: - jobs: - - ec2api-tempest-plugin-functional - gate: - jobs: - - ec2api-tempest-plugin-functional diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 7c50a3d6..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,19 +0,0 @@ -The source repository for this project can be found at: - - https://opendev.org/openstack/ec2-api - -Pull requests submitted through GitHub are not monitored. - -To start contributing to OpenStack, follow the steps in the contribution guide -to set up and use Gerrit: - - https://docs.openstack.org/contributors/code-and-documentation/quick-start.html - -Bugs should be filed on Launchpad: - - https://bugs.launchpad.net/ec2-api - -For more specific information about contributing to this repository, see the -ec2-api contributor guide: - - https://docs.openstack.org/ec2-api/latest/contributor/contributing.html diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 8dbfe564..00000000 --- a/HACKING.rst +++ /dev/null @@ -1,43 +0,0 @@ -Ec2api Style Commandments -========================= - -- Step 1: Read the OpenStack Style Commandments - https://github.com/openstack-dev/hacking/blob/master/doc/source/index.rst -- Step 2: Read on - -Ec2api Specific Commandments ----------------------------- - -General -------- -- Do not use locals(). Example:: - - LOG.debug("volume %(vol_name)s: creating size %(vol_size)sG" % - locals()) # BAD - - LOG.debug("volume %(vol_name)s: creating size %(vol_size)sG" % - {'vol_name': vol_name, - 'vol_size': vol_size}) # OKAY - -- Use 'raise' instead of 'raise e' to preserve original traceback or exception being reraised:: - - except Exception as e: - ... - raise e # BAD - - except Exception: - ... - raise # OKAY - - - -Creating Unit Tests -------------------- -For every new feature, unit tests should be created that both test and -(implicitly) document the usage of said feature. 
If submitting a patch for a -bug that had no unit test, a new passing unit test should be added. If a -submitted bug fix does have a unit test, be sure to add a new one that fails -without the patch and passes with the patch. - -For more information on creating unit tests and utilizing the testing -infrastructure in OpenStack Ec2api, please read ec2api/testing/README.rst. diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a0..00000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README.rst b/README.rst index 18be1eec..4ee2c5f1 100644 --- a/README.rst +++ b/README.rst @@ -1,700 +1,10 @@ -================= -OpenStack EC2 API -================= +This project is no longer maintained. -.. image:: https://governance.openstack.org/tc/badges/ec2-api.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -.. 
Change things from this point on - -Support of EC2 API for OpenStack. -This project provides a standalone EC2 API service which pursues two goals: - - 1. Implement VPC API - 2. Create a standalone service for EC2 API support. - -Installation ------------- - -For more detailed information, please see the `Installation Guide `_. - -Installation by install.sh -========================== - -Run install.sh - -The EC2 API service gets installed on port 8788 by default. It can be changed -before the installation in install.sh script. - -The services afterwards can be started as binaries: - -:: - - /usr/local/bin/ec2-api - /usr/local/bin/ec2-api-metadata - /usr/local/bin/ec2-api-s3 - -or set up as Linux services. - -Configuring OpenStack for EC2 API metadata service refering to section "EC2 metadata Configuration". - -Installation on devstack -======================== - -Installation in devstack: - -In order to install ec2-api with devstack the following should be added to -the local.conf or localrc the following line: - -:: - - enable_plugin ec2-api https://opendev.org/openstack/ec2-api - -Devstack installation with ec2-api and ec2api-tempest-plugin for development: - -1. install packages: awscli, git, python3, python3-devel, ruby -2. clone devstack repository - -:: - - git clone https://opendev.org/openstack/devstack - -3. grant all permissions for your user for directory: "/opt" -4. create folder "/opt/stack/logs/" -5. clone repository "ec2api-tempest-plugin" to stack folder: - -:: - - git clone https://github.com/openstack/ec2api-tempest-plugin /opt/stack/ec2api-tempest-plugin - -6. create local.conf: - -:: - - [[local|localrc]] - ADMIN_PASSWORD=secret - DATABASE_PASSWORD=$ADMIN_PASSWORD - RABBIT_PASSWORD=$ADMIN_PASSWORD - SERVICE_PASSWORD=$ADMIN_PASSWORD - enable_plugin ec2-api https://opendev.org/openstack/ec2-api - enable_plugin neutron-tempest-plugin https://github.com/openstack/neutron-tempest-plugin - TEMPEST_PLUGINS='/opt/stack/ec2api-tempest-plugin' - -7. 
go to devstack folder and start installation - -:: - - cd ~/devstack/ - ./stack.sh - -8. check installed devstack - -:: - - source ~/devstack/accrc/admin/admin - tempest list-plugins - ps -aux | grep "ec2" - aws --endpoint-url http:// --region --profile admin ec2 describe-images - openstack catalog list - openstack flavor list - openstack image list - sudo journalctl -u devstack@ec2-api.service - -9. run integration tests (ec2 tempest test) - -:: - - cd /opt/stack/tempest - tox -eall -- ec2api_tempest_plugin --concurrency 1 - tox -eall ec2api_tempest_plugin.api.test_network_interfaces.NetworkInterfaceTest.test_create_max_network_interface - -10. run ec2-api unit tests - -:: - - cd /opt/stack/ec2-api - tox -epy36 ec2api.tests.unit.test_security_group.SecurityGroupTestCase.test_describe_security_groups_no_default_vpc - -Configuring OpenStack for EC2 API metadata service refering to section "EC2 metadata Configuration". - -EC2 metadata Configuration -========================== - -To configure OpenStack for EC2 API metadata service: - -for Nova-network - add:: - - [DEFAULT] - metadata_port = 8789 - [neutron] - service_metadata_proxy = True - - to /etc/nova.conf - - then restart nova-metadata (can be run as part of nova-api service) and - nova-network services. - -for Neutron - add:: - - [DEFAULT] - nova_metadata_port = 8789 - - to /etc/neutron/metadata_agent.ini for legacy neutron or - to neutron_ovn_metadata_agent.ini for OVN - - then restart neutron-metadata service. - -S3 server is intended only to support EC2 operations which require S3 server -(e.g. CreateImage) in OpenStack deployments without regular object storage. -It must not be used as a substitution for all-purposes object storage server. -Do not start it if the deployment has its own object storage or uses a public -one (e.g. AWS S3). - -Usage ------ - -Download aws cli from Amazon. 
-Create configuration file for aws cli in your home directory ~/.aws/config: - -:: - - [default] - aws_access_key_id = 1b013f18d5ed47ae8ed0fbb8debc036b - aws_secret_access_key = 9bbc6f270ffd4dfdbe0e896947f41df3 - region = us-east-1 - -Change the aws_access_key_id and aws_secret_access_key above to the values -appropriate for your cloud (can be obtained by "openstack ec2 credentials list" -command). - -Run aws cli commands using new EC2 API endpoint URL (can be obtained from -openstack cli with the new port 8788) like this: - -aws --endpoint-url http://10.0.2.15:8788 ec2 describe-instances - - -Supported Features and Limitations ----------------------------------- - -General: - * DryRun option is not supported. - * Some exceptions are not exactly the same as reported by AWS. - -+----------+------------------------------------------+-----------------+----------------------------------------+ -| AWS | Command | Functionality | Limitations | -| Component| | group | | -+==========+==========================================+=================+========================================+ -| | **bold** - supported, normal - supported | | | -| | with limitations, *italic* -not supported| | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *AcceptVpcPeeringConnection* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **AllocateAddress** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *AllocateHosts* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *AssignIpv6Addresses* | network | not supported | -| | | interfaces | |
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | AssignPrivateIpAddresses | network | allowReassignment parameter | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **AssociateAddress** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AssociateDhcpOptions** | DHCP options | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AssociateRouteTable** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *AssociateSubnetCidrBlock* | subnets | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *AssociateVpcCidrBlock* | VPC | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *AttachClassicLinkVpc* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AttachInternetGateway** | internet | | -| | | gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AttachNetworkInterface** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, EBS | **AttachVolume** | volumes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AttachVpnGateway** | VPN | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | AuthorizeSecurityGroupEgress | security groups | EC2 classic way to pass cidr, protocol,| -| | | | sourceGroup, ports parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | AuthorizeSecurityGroupIngress | security groups | EC2 classic way to pass cidr, protocol,| -| | | | sourceGroup, ports parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *BundleInstance* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelBundleTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelConversionTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelExportTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelImportTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelReservedInstancesListing* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelSpotFleetRequests* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelSpotInstanceRequests* | market | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ConfirmProductInstance* | product codes | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *CopyImage* | image | not supported | -| | | provisioning | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *CopySnapshot* | snapshots,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateCustomerGateway | VPC gateways | BGP dynamic routing | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateDhcpOptions** | DHCP options | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateEgressOnlyInternetGateway* | VPC gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateFlowLogs* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | CreateImage | images | blockDeviceMapping parameter | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateInstanceExportTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateInternetGateway** | VPC gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **CreateKeyPair** | key pairs | |
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateNatGateway* | NAT gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *CreateNetworkAcl* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *CreateNetworkAclEntry* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateNetworkInterface** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreatePlacementGroup* | clusters | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateReservedInstancesListing* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateRoute | routes | vpcPeeringConnection parameter | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateRouteTable** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **CreateSecurityGroup** | security groups | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **CreateSnapshot** | snapshots | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateSpotDatafeedSubscription* | market | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateSubnet | subnets | availabilityZone parameter | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **CreateTags** | tags | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | CreateVolume | volumes | iops, encrypted, kmsKeyId parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateVpc** | VPC | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *CreateVpcEndpoint* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *CreateVpcPeeringConnection* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateVpnConnection | VPN | BGP dynamic routing | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateVpnConnectionRoute** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateVpnGateway | VPN | BGP dynamic routing | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteCustomerGateway** | VPC gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteDhcpOptions** | DHCP options | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DeleteEgressOnlyInternetGateway* | VPC gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DeleteFlowLogs* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteInternetGateway** | VPC gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DeleteKeyPair** | key pairs | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DeleteNatGateway* | NAT gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DeleteNetworkAcl* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DeleteNetworkAclEntry* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteNetworkInterface** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | *DeletePlacementGroup* | clusters | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteRoute** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteRouteTable** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ 
-| EC2, VPC | **DeleteSecurityGroup** | security groups | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DeleteSnapshot** | snapshots | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DeleteSpotDatafeedSubscription* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteSubnet** | subnets | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DeleteTags** | tags | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DeleteVolume** | volumes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteVpc** | VPC | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DeleteVpcEndpoints* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DeleteVpcPeeringConnection* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteVpnConnection** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteVpnConnectionRoute** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteVpnGateway** | VPN | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DeregisterImage** | images | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | DescribeAccountAttributes | infrastructural | vpc-max-security-groups-per-interface, | -| | | | max-elastic-ips, | -| | | | vpc-max-elastic-ips attributes | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **DescribeAddresses** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DescribeAvailabilityZones** | availability | | -| | | zones | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeBundleTasks* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeClassicLinkInstances* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeConversionTasks* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeCustomerGateways** | gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeDhcpOptions** | DHCP options | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeEgressOnlyInternetGateways* | VPC gateways | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeExportTasks* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeFlowLogs* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeHosts* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeIdentityIdFormat* | resource IDs | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeIdFormat* | resource IDs | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | DescribeImageAttribute | images | productCodes, sriovNetSupport | -| | | | attributes | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DescribeImages** | images | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeImportImageTasks* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeImportSnapshotTasks* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | DescribeInstanceAttribute | instances | same limitations as for | -| | | | ModifyInstanceAttribute | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, | **DescribeInstances** | instances | | -| EBS, 
VPC | | | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeInstanceStatus* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeInternetGateways** | gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DescribeKeyPairs** | key pairs | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeMovingAddresses* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeNatGateways* | NAT gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeNetworkAcls* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeNetworkInterfaceAttribute** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeNetworkInterfaces** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | *DescribePlacementGroups* | clusters | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribePrefixLists* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | DescribeRegions | availability | 
RegionName parameter | -| | | zones | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeReservedInstances* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeReservedInstancesListings* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeReservedInstancesModifications* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeReservedInstancesOfferings* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeRouteTables** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeScheduledInstanceAvailability* | scheduled | not supported | -| | | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeScheduledInstances* | scheduled | not supported | -| | | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSecurityGroupReferences* | security groups | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | DescribeSecurityGroups | security groups | cidr, protocol, port, sourceGroup | -| | | | parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *DescribeSnapshotAttribute* | snapshots | not supported |
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DescribeSnapshots** | snapshots | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotDatafeedSubscription* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotFleetInstances* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotFleetRequestHistory* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotFleetRequests* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotInstanceRequests* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotPriceHistory* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeStaleSecurityGroups* | security groups | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeSubnets** | subnets | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DescribeTags** | tags | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *DescribeVolumeAttribute* | volumes | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DescribeVolumes** | volumes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeVolumeStatus* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcAttribute* | VPC | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcClassicLink* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeVpcClassicLinkDnsSupport* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcEndpoints* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcEndpointServices* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcPeeringConnections* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeVpcs** | VPC | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeVpnConnections** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | 
**DescribeVpnGateways** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DetachClassicLinkVpc* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DetachInternetGateway** | VPC | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DetachNetworkInterface** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, EBS | DetachVolume | volumes | instance_id, device, force parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DetachVpnGateway** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DisableVgwRoutePropagation** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DisableVpcClassicLink* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DisableVpcClassicLinkDnsSupport* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **DisassociateAddress** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DisassociateRouteTable** | routes | | -| | *DisassociateSubnetCidrBlock* | subnets | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DisassociateVpcCidrBlock* | VPC | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **EnableVgwRoutePropagation** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *EnableVolumeIO* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *EnableVpcClassicLink* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *EnableVpcClassicLinkDnsSupport* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **GetConsoleOutput** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *GetConsoleScreenshot* | instances | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **GetPasswordData** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ImportImage* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ImportInstance* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **ImportKeyPair** | key pairs | |
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ImportSnapshot* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ImportVolume* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyHosts* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyIdentityIdFormat* | resource IDs | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyIdFormat* | resource IDs | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | ModifyImageAttribute | images | productCodes attribute | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | ModifyInstanceAttribute | instances | only disableApiTermination, | -| | | | sourceDestCheck,instanceType supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyInstancePlacement* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **ModifyNetworkInterfaceAttribute** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyReservedInstances* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *ModifySnapshotAttribute* | snapshots | 
not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifySpotFleetRequest* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ModifySubnetAttribute* | subnets | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *ModifyVolumeAttribute* | volumes | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ModifyVpcAttribute* | VPC | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ModifyVpcEndpoint* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyVpcPeeringConnectionOptions* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *MonitorInstances* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *MoveAddressToVpc* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *PurchaseReservedInstancesOffering* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *PurchaseScheduledInstances* | scheduled | not supported | -| | | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | 
**RebootInstances** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | RegisterImage | images | virtualizationType, sriovNetSupport | -| | | | parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *RejectVpcPeeringConnection* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **ReleaseAddress** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ReleaseHosts* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ReplaceNetworkAclAssociation* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ReplaceNetworkAclEntry* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **ReplaceRoute** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **ReplaceRouteTableAssociation** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ReportInstanceStatus* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *RequestSpotFleet* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *RequestSpotInstances* | market | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **ResetImageAttribute** | images | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | ResetInstanceAttribute | instances | same limitations as for | -| | | | ModifyInstanceAttribute | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **ResetNetworkInterfaceAttribute** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *ResetSnapshotAttribute* | snapshots | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *RestoreAddressToClassic* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | RevokeSecurityGroupEgress | security groups | EC2 classic way to pass cidr, protocol,| -| | | | sourceGroup, ports parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | RevokeSecurityGroupIngress | security groups | EC2 classic way to pass cidr, protocol,| -| | | | sourceGroup, ports parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, | RunInstances | instances | placement, block_device_mapping partial| -| VPC, EBS | | | support, monitoring, | -| | | | iamInstanceProfile, ebsOptimized, | -| | | | shutdownInitiatedInstanceBehavior | -| | | | parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *RunScheduledInstances* | scheduled | not supported | -| | | 
instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **StartInstances** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **StopInstances** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **TerminateInstances** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *UnassignIpv6Addresses* | network | not supported | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **UnassignPrivateIpAddresses** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *UnmonitorInstances* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ - - -References ----------- - -Documentation: -https://docs.openstack.org/ec2-api/latest/ - -Wiki: -https://wiki.openstack.org/wiki/EC2API - -Bugs: -https://launchpad.net/ec2-api - -Source: -https://opendev.org/openstack/ec2-api - -Blueprint: -https://blueprints.launchpad.net/nova/+spec/ec2-api - -Spec: -https://review.opendev.org/#/c/147882/ +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py deleted file mode 100644 index 6a0d93bc..00000000 --- a/api-ref/source/conf.py +++ /dev/null @@ -1,221 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# nova documentation build configuration file, created by -# sphinx-quickstart on Sat May 1 15:17:47 2010. -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import sys - - -extensions = [ - 'os_api_ref', - 'openstackdocstheme' -] - - -html_theme = 'openstackdocs' -html_theme_options = { - "sidebar_mode": "toc", -} -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/ec2-api' -openstackdocs_auto_name = False -openstackdocs_bug_project = 'ec2-api' -openstackdocs_bug_tag = '' - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. 
-master_doc = 'index' - -# General information about the project. -project = u'EC2 API Reference' -copyright = u'OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# - -# from ec2-api.version import version_info -# The full version, including alpha/beta/rc tags. -# release = version_info.release_string() -# The short X.Y version. -# version = version_info.version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. 
-# html_theme_path = ["."] -# html_theme = '_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. 
The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'ec2apidoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Ec2api.tex', u'OpenStack EC2 API Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst deleted file mode 100644 index 5c5e6edf..00000000 --- a/api-ref/source/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -:tocdepth: 2 - -======== -EC2 API -======== - -Provides a standalone EC2 API service. - -Amazon EC2 API Reference can be found `here -`_. - -.. include:: supported_features.inc - diff --git a/api-ref/source/supported_features.inc b/api-ref/source/supported_features.inc deleted file mode 100644 index 7147eb54..00000000 --- a/api-ref/source/supported_features.inc +++ /dev/null @@ -1,503 +0,0 @@ -.. -*- rst -*- - -Supported features and limitations ----------------------------------- - -General: - * DryRun option is not supported. 
- * Some exceptions are not exactly the same as reported by AWS. - -+----------+------------------------------------------+-----------------+----------------------------------------+ -| AWS | Command | Functionality | Limitations | -| Component| | group | | -+==========+==========================================+=================+========================================+ -| | **bold** - supported, normal - supported | | | -| | with limitations, *italic* -not supported| | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *AcceptVpcPeeringConnection* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **AllocateAddress** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *AllocateHosts* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *AssignIpv6Addresses* | network | not supported | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | AssignPrivateIpAddresses | network | allowReassignment parameter | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **AssociateAddress** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AssociateDhcpOptions** | DHCP options | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AssociateRouteTable** | routes | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *AssociateSubnetCidrBlock* | subnets | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *AssociateVpcCidrBlock* | VPC | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *AttachClassicLinkVpc* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AttachInternetGateway** | internet | | -| | | gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AttachNetworkInterface** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, EBS | **AttachVolume** | volumes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **AttachVpnGateway** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | AuthorizeSecurityGroupEgress | security groups | EC2 classic way to pass cidr, protocol,| -| | | | sourceGroup, ports parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | AuthorizeSecurityGroupIngress | security groups | EC2 classic way to pass cidr, protocol,| -| | | | sourceGroup, ports parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *BundleInstance* | tasks,s3 | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelBundleTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelConversionTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelExportTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelImportTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelReservedInstancesListing* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelSpotFleetRequests* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CancelSpotInstanceRequests* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ConfirmProductInstance* | product codes | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *CopyImage* | image | not supported | -| | | provisioning | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *CopySnapshot* | snapshots,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateCustomerGateway | VPC gateways | BGP dynamic routing | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateDhcpOptions** | DHCP options | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateEgressOnlyInternetGateway* | VPC gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateFlowLogs* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | CreateImage | images | blockDeviceMapping parameter | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateInstanceExportTask* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateInternetGateway** | VPC gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **CreateKeyPair** | key pairs | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateNatGateway* | NAT gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *CreateNetworkAcl* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *CreateNetworkAclEntry* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateNetworkInterface** | network | | -| | | interfaces | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreatePlacementGroup* | clusters | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateReservedInstancesListing* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateRoute | routes | vpcPeeringConnection parameter | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateRouteTable** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **CreateSecurityGroup** | security groups | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **CreateSnapshot** | snapshots | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *CreateSpotDatafeedSubscription* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateSubnet | subnets | availabilityZone parameter | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **CreateTags** | tags | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | CreateVolume | volumes | iops, encrypted, kmsKeyId parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateVpc** | VPC | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| 
VPC | *CreateVpcEndpoint* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *CreateVpcPeeringConnection* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateVpnConnection | VPN | BGP dynamic routing | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **CreateVpnConnectionRoute** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | CreateVpnGateway | VPN | BGP dynamic routing | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteCustomerGateway** | VPC gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteDhcpOptions** | DHCP options | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DeleteEgressOnlyInternetGateway* | VPC gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DeleteFlowLogs* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteInternetGateway** | VPC gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DeleteKeyPair** | key pairs | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DeleteNatGateway* | NAT gateways | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DeleteNetworkAcl* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DeleteNetworkAclEntry* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteNetworkInterface** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | *DeletePlacementGroup* | clusters | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteRoute** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteRouteTable** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **DeleteSecurityGroup** | security groups | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DeleteSnapshot** | snapshots | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DeleteSpotDatafeedSubscription* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteSubnet** | subnets | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DeleteTags** | tags | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DeleteVolume** | volumes | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteVpc** | VPC | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DeleteVpcEndpoints* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DeleteVpcPeeringConnection* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteVpnConnection** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteVpnConnectionRoute** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DeleteVpnGateway** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DeregisterImage** | images | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | DescribeAccountAttributes | infrastructural | vpc-max-security-groups-per-interface, | -| | | | max-elastic-ips, | -| | | | vpc-max-elastic-ips attributes | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **DescribeAddresses** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DescribeAvailabilityZones** | availability | | -| | | zones | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeBundleTasks* | tasks,s3 | not 
supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeClassicLinkInstances* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeConversionTasks* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeCustomerGateways** | gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeDhcpOptions** | DHCP options | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeEgressOnlyInternetGateways* | VPC gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeExportTasks* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeFlowLogs* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeHosts* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeIdentityIdFormat* | resource IDs | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeIdFormat* | resource IDs | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | DescribeImageAttribute | images | productCodes, sriovNetSupport | -| | | 
| attributes | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DescribeImages** | images | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeImportImageTasks* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeImportSnapshotTasks* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | DescribeInstanceAttribute | instances | same limitations as for | -| | | | ModifyInstanceAttribute | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, | **DescribeInstances** | instances | | -| EBS, VPC | | | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeInstanceStatus* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeInternetGateways** | gateways | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DescribeKeyPairs** | key pairs | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeMovingAddresses* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeNatGateways* | NAT gateways | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeNetworkAcls* | ACL | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeNetworkInterfaceAttribute** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeNetworkInterfaces** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | *DescribePlacementGroups* | clusters | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribePrefixLists* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | DescribeRegions | availability | RegionNameparameter | -| | | zones | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeReservedInstances* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeReservedInstancesListings* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeReservedInstancesModifications* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeReservedInstancesOfferings* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeRouteTables** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | 
*DescribeScheduledInstanceAvailability* | scheduled | not supported | -| | | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeScheduledInstances* | scheduled | not supported | -| | | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSecurityGroupReferences* | security groups | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | DescribeSecurityGroups | security groups | cidr, protocol, port, sourceGroup | -| | | | parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *DescribeSnapshotAttribute* | snapshots | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DescribeSnapshots** | snapshots | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotDatafeedSubscription* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotFleetInstances* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotFleetRequestHistory* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotFleetRequests* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotInstanceRequests* | market | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeSpotPriceHistory* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeStaleSecurityGroups* | security groups | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeSubnets** | subnets | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **DescribeTags** | tags | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *DescribeVolumeAttribute* | volumes | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **DescribeVolumes** | volumes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeVolumeStatus* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcAttribute* | VPC | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcClassicLink* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DescribeVpcClassicLinkDnsSupport* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcEndpoints* | cross-VPC | not supported | -| | | connectivity | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcEndpointServices* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DescribeVpcPeeringConnections* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeVpcs** | VPC | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeVpnConnections** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DescribeVpnGateways** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DetachClassicLinkVpc* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DetachInternetGateway** | VPC | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DetachNetworkInterface** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, EBS | DetachVolume | volumes | instance_id, device, force parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DetachVpnGateway** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DisableVgwRoutePropagation** | VPN | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *DisableVpcClassicLink* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DisableVpcClassicLinkDnsSupport* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **DisassociateAddress** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **DisassociateRouteTable** | routes | | -| | *DisassociateSubnetCidrBlock* | subnets | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *DisassociateVpcCidrBlock* | VPC | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **EnableVgwRoutePropagation** | VPN | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *EnableVolumeIO* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *EnableVpcClassicLink* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *EnableVpcClassicLinkDnsSupport* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **GetConsoleOutput** | instances | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *GetConsoleScreenshot* | instances | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **GetPasswordData** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ImportImage* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ImportInstance* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **ImportKeyPair** | keypairs | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ImportSnapshot* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ImportVolume* | tasks,s3 | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyHosts* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyIdentityIdFormat* | resource IDs | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyIdFormat* | resource IDs | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | ModifyImageAttribute | images | productCodes attribute | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | 
ModifyInstanceAttribute | instances | only disableApiTermination, | -| | | | sourceDestCheck,instanceType supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyInstancePlacement* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **ModifyNetworkInterfaceAttribute** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyReservedInstances* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *ModifySnapshotAttribute* | snapshots | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifySpotFleetRequest* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ModifySubnetAttribute* | subnets | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *ModifyVolumeAttribute* | volumes | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ModifyVpcAttribute* | VPC | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ModifyVpcEndpoint* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ModifyVpcPeeringConnectionOptions* | cross-VPC | not supported | -| | | connectivity | | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *MonitorInstances* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *MoveAddressToVpc* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *PurchaseReservedInstancesOffering* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *PurchaseScheduledInstances* | scheduled | not supported | -| | | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **RebootInstances** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | RegisterImage | images | virtualizationType, sriovNetSupport | -| | | | parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *RejectVpcPeeringConnection* | cross-VPC | not supported | -| | | connectivity | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | **ReleaseAddress** | addresses | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ReleaseHosts* | dedicated hosts | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ReplaceNetworkAclAssociation* | ACL | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *ReplaceNetworkAclEntry* | ACL | not 
supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **ReplaceRoute** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **ReplaceRouteTableAssociation** | routes | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *ReportInstanceStatus* | monitoring | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *RequestSpotFleet* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *RequestSpotInstances* | market | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | **ResetImageAttribute** | images | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | ResetInstanceAttribute | instances | same limitations as for | -| | | | ModifyInstanceAttribute | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **ResetNetworkInterfaceAttribute** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EBS | *ResetSnapshotAttribute* | snapshots | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | *RestoreAddressToClassic* | infrastructural | not supported | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | RevokeSecurityGroupEgress | security groups | EC2 classic way to pass 
cidr, protocol,| -| | | | sourceGroup, ports parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, VPC | RevokeSecurityGroupIngress | security groups | EC2 classic way to pass cidr, protocol,| -| | | | sourceGroup, ports parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2, | RunInstances | instances | placement, block_device_mapping partial| -| VPC, EBS | | | support, monitoring, | -| | | | iamInstanceProfile, ebsOptimized, | -| | | | shutdownInitiatedInstanceBehavior | -| | | | parameters | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *RunScheduledInstances* | scheduled | not supported | -| | | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **StartInstances** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **StopInstances** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| EC2 | **TerminateInstances** | instances | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *UnassignIpv6Addresses* | network | not supported | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| VPC | **UnassignPrivateIpAddresses** | network | | -| | | interfaces | | -+----------+------------------------------------------+-----------------+----------------------------------------+ -| | *UnmonitorInstances* | monitoring | not supported | 
-+----------+------------------------------------------+-----------------+----------------------------------------+ diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index 6c805167..00000000 --- a/devstack/README.rst +++ /dev/null @@ -1,16 +0,0 @@ -====================== - Enabling in Devstack -====================== - -1. Download DevStack - - git clone https://opendev.org/openstack/devstack - cd devstack - -2. Add this repo as an external repository:: - - > cat local.conf - [[local|localrc]] - enable_plugin ec2-api https://opendev.org/openstack/ec2-api - -3. run ``stack.sh`` diff --git a/devstack/create_config b/devstack/create_config deleted file mode 100755 index 25462cb3..00000000 --- a/devstack/create_config +++ /dev/null @@ -1,306 +0,0 @@ -#!/bin/bash -# -# create_config script for devstack plugin script -# Build config for run functional tests with or wuthout tempest - -set -o xtrace -set +o errexit - -TEST_CONFIG="$1" -if [[ -z "$TEST_CONFIG" ]]; then - die $LINENO "Please pass config name" -fi -sudo rm -f $EC2API_DIR/$TEST_CONFIG - -REGULAR_IMAGE_URL="https://cloud-images.ubuntu.com/precise/current/precise-server-cloudimg-i386-disk1.img" -REGULAR_IMAGE_FNAME="precise-server-cloudimg-i386-disk1.img" -REGULAR_IMAGE_NAME="precise" - -CIRROS_IMAGE_URL="http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img" -CIRROS_IMAGE_FNAME="cirros-0.3.4-x86_64-disk.img" -CIRROS_IMAGE_NAME="cirros" - -MAX_FAIL=20 -FLAVOR_NAME="m1.ec2api" -FLAVOR_NAME_ALT="m1.ec2api-alt" - -if [[ -n "$TOP_DIR" ]]; then - source $TOP_DIR/openrc admin admin - unset OS_CLOUD - #unset OS_AUTH_TYPE -fi - -openstack endpoint list -if [[ "$?" -ne "0" ]]; then - die $LINENO "OpenStack CLI doesn't work. Looks like credentials are absent." 
-fi - -EC2_URL=`openstack endpoint list --service ec2 --interface public --os-identity-api-version=3 -c URL -f value` -S3_URL=`openstack endpoint list --service s3 --interface public --os-identity-api-version=3 -c URL -f value` - -venv_dir="$(pwd)/.venv_awscli" -virtualenv "$venv_dir" -if [[ "$?" -ne "0" ]]; then - die $LINENO "Can't setup virtual env." -fi -source "$venv_dir/bin/activate" -pip install awscli -if [[ "$?" -ne "0" ]]; then - die $LINENO "Can't install awscli in virtual env." -fi -aws --version -if [[ "$?" -ne "0" ]]; then - die $LINENO "awscli doesn't work correctly." -fi -deactivate - -project_id=`openstack project show $OS_PROJECT_NAME -c id -f value` -openstack ec2 credentials create 1>&2 -line=`openstack ec2 credentials list | grep " $project_id "` -read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $2 " " $4 }'` -source "$venv_dir/bin/activate" -aws configure set aws_access_key_id $ec2_access_key --profile admin -aws configure set aws_secret_access_key $ec2_secret_key --profile admin -deactivate -AWS_PARAMS="--region $REGION_NAME --endpoint-url $EC2_URL" - -neutron_item=$(openstack service list | grep neutron) - -# prepare flavors -openstack flavor create --public --id 16 --ram 512 --disk 1 --vcpus 1 $FLAVOR_NAME -openstack flavor create --public --id 17 --ram 256 --disk 1 --vcpus 1 $FLAVOR_NAME_ALT - -# prepare cirros image for register_image test. uploading it to S3. -sudo rm /tmp/$CIRROS_IMAGE_FNAME -wget -nv -P /tmp $CIRROS_IMAGE_URL & -cirros_image_wget_pid=$! - -# find simple image -source "$venv_dir/bin/activate" -image_id=`aws $AWS_PARAMS --profile admin ec2 describe-images --filters Name=image-type,Values=machine Name=name,Values=cirros* --query 'Images[0].ImageId' --output text` -deactivate - -if [[ "$image_id" == 'None' || -z "$image_id" ]]; then - wait $cirros_image_wget_pid - if [[ "$?" 
-eq "0" ]]; then - openstack image create --disk-format raw --container-format bare --public --file /tmp/$CIRROS_IMAGE_FNAME $CIRROS_IMAGE_NAME - if [[ "$?" -ne "0" ]]; then - echo "Creation of openstack image failed." - fi - source "$venv_dir/bin/activate" - image_id=`aws $AWS_PARAMS --profile admin ec2 describe-images --filters Name=image-type,Values=machine Name=name,Values=cirros* --query 'Images[0].ImageId' --output text` - deactivate - fi -fi - -# prepare ubuntu image -if [[ $RUN_LONG_TESTS == "1" ]]; then - sudo rm /tmp/$REGULAR_IMAGE_FNAME - wget -nv -P /tmp $REGULAR_IMAGE_URL - if [[ "$?" -ne "0" ]]; then - echo "Downloading of precise image failed." - exit 1 - fi - openstack image create --disk-format raw --container-format bare --public --file /tmp/$REGULAR_IMAGE_FNAME $REGULAR_IMAGE_NAME - if [[ "$?" -ne "0" ]]; then - echo "Creation of precise image failed." - exit 1 - fi - # find this image - source "$venv_dir/bin/activate" - image_id_ubuntu=`aws $AWS_PARAMS --profile admin ec2 describe-images --filters Name=image-type,Values=machine Name=name,Values=$REGULAR_IMAGE_NAME --query 'Images[0].ImageId' --output text` - deactivate -fi - -# create separate user/project -project_name="project-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)" -eval $(openstack project create -f shell -c id $project_name) -project_id=$id -[[ -n "$project_id" ]] || { echo "Can't create project"; exit 1; } -user_name="user-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)" -eval $(openstack user create "$user_name" --project "$project_id" --password "password" --email "$user_name@example.com" -f shell -c id) -user_id=$id -[[ -n "$user_id" ]] || { echo "Can't create user"; exit 1; } -# add 'Member' role for swift access -role_id=$(openstack role show Member -c id -f value) -openstack role add --project $project_id --user $user_id $role_id -# create network -if [[ -n "$neutron_item" ]]; then - net_id=$(openstack network create --project $project_id private | grep ' id ' | awk 
'{print $4}') - [[ -n "$net_id" ]] || { echo "net-create failed"; exit 1; } - subnet_id=$(openstack subnet create --project $project_id --ip-version 4 --gateway 10.0.0.1 --network $net_id --subnet-range 10.0.0.0/24 private_subnet | grep ' id ' | awk '{print $4}') - [[ -n "$subnet_id" ]] || { echo "subnet-create failed"; exit 1; } - router_id=$(openstack router create --project $project_id private_router | grep ' id ' | awk '{print $4}') - [[ -n "$router_id" ]] || { echo "router-create failed"; exit 1; } - sleep 2 - openstack router add subnet $router_id $subnet_id - [[ "$?" -eq 0 ]] || { echo "router-interface-add failed"; exit 1; } - public_net_id=$(openstack network list | awk '/public/{print $2}') - [[ -n "$public_net_id" ]] || { echo "can't find public network"; exit 1; } - openstack router set --external-gateway $public_net_id $router_id - [[ "$?" -eq 0 ]] || { echo "router-gateway-set failed"; exit 1; } -fi -# populate credentials -openstack ec2 credentials create --user $user_id --project $project_id 1>&2 -line=`openstack ec2 credentials list --user $user_id | grep " $project_id "` -read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $2 " " $4 }'` -source "$venv_dir/bin/activate" -aws configure set aws_access_key_id $ec2_access_key --profile user -aws configure set aws_secret_access_key $ec2_secret_key --profile user -deactivate - -env|sort -auth="--os-project-name $project_name --os-username $user_name --os-password password" - -# create EBS image -volume_status() { openstack $auth volume show $1 | awk '/ status / {print $4}'; } -instance_status() { openstack $auth server show $1 | awk '/ status / {print $4}'; } - -openstack_image_id=$(openstack $auth image list --long | grep "cirros" | grep " bare " | head -1 | awk '{print $2}') -if [[ -n "$openstack_image_id" ]]; then - volume_name="vol-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)" - volume_id=$(openstack $auth volume create --image $openstack_image_id --size 1 $volume_name | awk '/ id / 
{print $4}') - [[ -n "$volume_id" ]] || { echo "can't create volume for EBS image creation"; exit 1; } - fail=0 - while [[ true ]] ; do - if ((fail >= MAX_FAIL)); then - die $LINENO "Volume creation fails (timeout)" - fi - echo "attempt "$fail" of "$MAX_FAIL - status=$(volume_status $volume_id) - if [[ $status == "available" ]]; then - break - fi - if [[ $status == "error" || -z "$status" ]]; then - openstack $auth volume show $volume_id - die $LINENO 'Volume creation error' - fi - sleep 10 - ((++fail)) - done - - instance_name="i-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)" - instance_id=$(nova $auth boot \ - --flavor "$FLAVOR_NAME" \ - --nic net-id=$net_id \ - --block-device "device=/dev/vda,id=$volume_id,shutdown=remove,source=volume,dest=volume,bootindex=0" \ - "$instance_name" | awk '/ id / {print $4}') -# TODO: find a way how to run with delete-on-terminate or set it after run with openstack client -# instance_id=$(openstack $auth server create \ -# --flavor "$FLAVOR_NAME" \ -# --volume $volume_id \ -# --nic net-id=$net_id \ -# "$instance_name" | awk '/ id / {print $4}') - [[ -n "$instance_id" ]] || { echo "can't boot EBS instance"; exit 1; } - fail=0 - while [[ true ]] ; do - if ((fail >= MAX_FAIL)); then - die $LINENO "Instance active status wait timeout occurred" - fi - echo "attempt "$fail" of "$MAX_FAIL - status=$(instance_status $instance_id) - if [[ "$status" == "ACTIVE" ]]; then - break - fi - if [[ "$status" == "ERROR" || -z "$status" ]]; then - openstack $auth server show $instance_id - die $LINENO 'Instance booting error' - fi - sleep 10 - ((++fail)) - done - - image_name="image-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)" - openstack $auth server image create --name $image_name --wait $instance_name - if [[ "$?" 
-ne "0" ]]; then - die $LINENO "Image creation from instance fails" - fi - source "$venv_dir/bin/activate" - ebs_image_id=`aws $AWS_PARAMS --profile user ec2 describe-images --filters Name=image-type,Values=machine Name=name,Values=$image_name --query 'Images[0].ImageId' --output text` - deactivate - openstack $auth server delete $instance_id -fi - -timeout="600" -run_long_tests="False" -if [[ $RUN_LONG_TESTS == "1" ]]; then - run_long_tests="True" -fi - -# right now nova-network is very unstable to run tests that want to ssh into instance -run_ssh="False" -if [[ -n "$neutron_item" ]]; then - run_ssh="True" -fi - -wait $cirros_image_wget_pid -if [[ "$?" -eq "0" && "$CA_CERT" && -e "$CA_CERT" ]]; then - sudo apt-get -fy install ruby - ID="$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)" - WORKING_DIR="/tmp/bi-$ID" - mkdir -p $WORKING_DIR - wget -t 2 -T 60 -q -P $WORKING_DIR http://s3.amazonaws.com/ec2-downloads/ec2-ami-tools.zip - unzip -d $WORKING_DIR $WORKING_DIR/ec2-ami-tools.zip - TOOLS_DIR="$WORKING_DIR/$(ls $WORKING_DIR | grep -Eo "ec2-ami-tools-[0-9\.]*")" - - IMAGES_DIR="$WORKING_DIR/images" - # IMPORTANT! bucket name should contain '.' - in this case ami-tools will not build s3 url with bucket name. 
- AWS_AMI_BUCKET="tmp-bundle.$ID" - - EC2_USER_ID=42424242424242 # ec2api does not use user id, but bundling requires it - EC2_PRIVATE_KEY="$WORKING_DIR/private/pk.pem" - EC2_CSR="$WORKING_DIR/cert.csr" - EC2_CERT="$WORKING_DIR/cert.pem" - - mkdir -p "$WORKING_DIR/private/" - - # generate user certificate - openssl genrsa -out "$EC2_PRIVATE_KEY" 2048 - openssl req -new -key "$EC2_PRIVATE_KEY" -subj "/C=RU/ST=Moscow/L=Moscow/O=Progmatic/CN=functional-tests" -out "$EC2_CSR" - openssl x509 -req -in "$EC2_CSR" -CA "$CA_CERT" -CAkey "$CA_KEY" -CAcreateserial -out "$EC2_CERT" -days 365 - - mkdir -p "$IMAGES_DIR" - $TOOLS_DIR/bin/ec2-bundle-image --cert $EC2_CERT --privatekey $EC2_PRIVATE_KEY --ec2cert $CA_CERT --image /tmp/$CIRROS_IMAGE_FNAME --prefix $CIRROS_IMAGE_FNAME --user $EC2_USER_ID --destination "$IMAGES_DIR" --arch x86_64 - if [[ "$?" -eq "0" ]]; then - $TOOLS_DIR/bin/ec2-upload-bundle --url "$S3_URL" --access-key $ec2_access_key --secret-key $ec2_secret_key --bucket "$AWS_AMI_BUCKET" --manifest "$IMAGES_DIR/$CIRROS_IMAGE_FNAME.manifest.xml" --acl "public-read" --sigv 2 - if [[ "$?" -eq "0" ]]; then - cirros_image_manifest="$AWS_AMI_BUCKET/$CIRROS_IMAGE_FNAME.manifest.xml" - else - warn $LINENO "Uploading of image $CIRROS_IMAGE_URL to S3 failed." - fi - else - warn $LINENO "Bundling of image $CIRROS_IMAGE_URL failed." - fi - # next line is example how to register this image in the cloud - #source "$venv_dir/bin/activate" - #aws --endpoint-url $EC2_URL --region RegionOne --profile admin ec2 register-image --image-location "$AWS_AMI_BUCKET/$CIRROS_IMAGE_FNAME.manifest.xml" --name "$CIRROS_IMAGE_FNAME" --architecture x86_64 - #deactivate -else - warn $LINENO "Downloading of image $CIRROS_IMAGE_URL failed." 
-fi - -vpnaas_enabled='False' -if openstack extension list | grep -q " vpnaas " ; then - vpnaas_enabled='True' -fi - -sudo bash -c "cat > $EC2API_DIR/$TEST_CONFIG <> $TEMPEST_CONFIG - fi -} - -# main dispatcher -if [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing ec2-api" - install_ec2api -elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring ec2-api" - configure_ec2api - create_ec2api_accounts -elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing ec2-api" - init_ec2api - start_ec2api -elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then - configure_functional_tests -fi - -if [[ "$1" == "unstack" ]]; then - stop_ec2api - cleanup_ec2api -fi - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index 8130495a..00000000 --- a/devstack/settings +++ /dev/null @@ -1,9 +0,0 @@ -# Devstack settings - -# we have to add ec2-api to enabled services for screen_it to work -enable_service ec2-api -enable_service ec2-api-metadata -enable_service ec2-api-s3 - -# Enable VPNAAS service and set type of ipsec package -IPSEC_PACKAGE=strongswan diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 5a6776d6..00000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -sphinx>=2.0.0,!=2.1.0 # BSD -openstackdocstheme>=2.2.1 # Apache-2.0 -os-api-ref>=1.5.0 # Apache-2.0 diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 85ed2b66..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,82 +0,0 @@ - -import os -import sys - -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) - -sys.path.insert(0, ROOT) -sys.path.insert(0, BASE_DIR) - -# This is required for ReadTheDocs.org, but isn't a bad idea anyway. -os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings' - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.doctest', - 'sphinx.ext.todo', - 'sphinx.ext.viewcode', - 'openstackdocstheme'] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/ec2-api' -openstackdocs_auto_name = False -openstackdocs_bug_project = 'ec2-api' -openstackdocs_bug_tag = '' - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'EC2API Service' -copyright = '2015, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. 
-pygments_style = 'native' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -# html_static_path = ['static'] -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = {"sidebar_mode": "toc"} - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - '%s Documentation' % project, - 'OpenStack Foundation', 'manual'), -] diff --git a/doc/source/configuration/_flagmappings/ec2api.flagmappings b/doc/source/configuration/_flagmappings/ec2api.flagmappings deleted file mode 100644 index 100a3dd4..00000000 --- a/doc/source/configuration/_flagmappings/ec2api.flagmappings +++ /dev/null @@ -1,143 +0,0 @@ -admin_password disable -admin_tenant_name disable -admin_user disable -api_paste_config common -api_rate_limit disable -bindir disable -buckets_path disable -cert_topic disable -cinder_service_type clients -debug disable -default_flavor ec2 -default_log_levels disable -disable_ec2_classic ec2 -ec2_host ec2 -ec2_path ec2 -ec2_port ec2 -ec2_private_dns_show_ip ec2 -ec2_scheme ec2 -ec2_timestamp_expiry clients -ec2api_listen service -ec2api_listen_port service -ec2api_use_ssl service -ec2api_workers service -external_network ec2 -fatal_deprecations disable -fatal_exception_format_errors disable -image_decryption_dir s3 -instance_format disable -instance_uuid_format disable -internal_service_availability_zone ec2 -keystone_ec2_tokens_url clients -keystone_url disable 
-log_config_append disable -log_date_format disable -log_dir disable -log_file disable -logging_context_format_string disable -logging_debug_format_suffix disable -logging_default_format_string disable -logging_exception_prefix disable -logging_user_identity_format disable -max_header_line common -metadata_listen metadata -metadata_listen_port metadata -metadata_use_ssl metadata -metadata_workers metadata -my_ip ec2 -network_device_mtu ec2 -nova_service_type clients -publish_errors disable -pybasedir disable -rate_limit_burst disable -rate_limit_except_level disable -rate_limit_interval disable -region_list ec2 -s3_listen disable -s3_listen_port disable -s3_region s3 -s3_url s3 -service_down_time disable -ssl_ca_file disable -ssl_cert_file service -ssl_insecure disable -ssl_key_file service -state_path disable -syslog_log_facility disable -tcp_keepidle common -tempdir disable -use_forwarded_for service -use_journal disable -use_stderr disable -use_syslog disable -watch_log_file disable -wsgi_default_pool_size common -wsgi_log_format common -x509_root_private_key s3 -cache/backend disable -cache/backend_argument disable -cache/config_prefix disable -cache/debug_cache_backend disable -cache/enabled disable -cache/expiration_time disable -cache/memcache_dead_retry disable -cache/memcache_pool_connection_get_timeout disable -cache/memcache_pool_maxsize disable -cache/memcache_pool_unused_timeout disable -cache/memcache_servers disable -cache/memcache_socket_timeout disable -cache/proxies disable -database/backend disable -database/connection disable -database/connection_debug disable -database/connection_recycle_time disable -database/connection_trace disable -database/db_inc_retry_interval disable -database/db_max_retries disable -database/db_max_retry_interval disable -database/db_retry_interval disable -database/max_overflow disable -database/max_pool_size disable -database/max_retries disable -database/min_pool_size disable -database/mysql_enable_ndb disable 
-database/mysql_sql_mode disable -database/pool_timeout disable -database/retry_interval disable -database/slave_connection disable -database/sqlite_synchronous disable -database/use_db_reconnect disable -database/use_tpool database -keystone_authtoken/admin_password disable -keystone_authtoken/admin_tenant_name disable -keystone_authtoken/admin_token disable -keystone_authtoken/admin_user disable -keystone_authtoken/auth_admin_prefix disable -keystone_authtoken/auth_host disable -keystone_authtoken/auth_port disable -keystone_authtoken/auth_protocol disable -keystone_authtoken/auth_section disable -keystone_authtoken/auth_type disable -keystone_authtoken/www_authenticate_uri disable -keystone_authtoken/auth_version disable -keystone_authtoken/cache disable -keystone_authtoken/cafile disable -keystone_authtoken/certfile disable -keystone_authtoken/delay_auth_decision disable -keystone_authtoken/http_connect_timeout disable -keystone_authtoken/http_request_max_retries disable -keystone_authtoken/identity_uri disable -keystone_authtoken/insecure disable -keystone_authtoken/keyfile disable -keystone_authtoken/timeout disable -metadata/auth_ca_cert metadata -metadata/cache_expiration metadata -metadata/metadata_proxy_shared_secret metadata -metadata/nova_client_cert metadata -metadata/nova_client_priv_key metadata -metadata/nova_metadata_insecure metadata -metadata/nova_metadata_ip metadata -metadata/nova_metadata_port metadata -metadata/nova_metadata_protocol metadata -oslo_concurrency/disable_process_locking disable -oslo_concurrency/lock_path disable diff --git a/doc/source/configuration/_flagmappings/ec2api.headers b/doc/source/configuration/_flagmappings/ec2api.headers deleted file mode 100644 index a492e50b..00000000 --- a/doc/source/configuration/_flagmappings/ec2api.headers +++ /dev/null @@ -1,7 +0,0 @@ -clients OpenStack Clients -common Common Service -database additional Database Client -ec2 EC2API -metadata Metadata -service EC2API Service -s3 S3 Client diff 
--git a/doc/source/configuration/api.rst b/doc/source/configuration/api.rst deleted file mode 100644 index 9cedb719..00000000 --- a/doc/source/configuration/api.rst +++ /dev/null @@ -1,18 +0,0 @@ -=============================== -EC2API configuration -=============================== - -Configuration options -~~~~~~~~~~~~~~~~~~~~~ - -The following options allow configuration that EC2API supports. - - -.. include:: ./tables/ec2api-clients.inc -.. include:: ./tables/ec2api-database.inc -.. include:: ./tables/ec2api-service.inc -.. include:: ./tables/ec2api-ec2.inc -.. include:: ./tables/ec2api-s3.inc -.. include:: ./tables/ec2api-common.inc - - diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index db48149d..00000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _configuring: - -=================== -Configuring EC2-API -=================== - -To configure your EC2API installation, you must define configuration options in these files: - -* ``ec2api.conf`` contains EC2API configuration options and resides in the ``/etc/ec2api`` directory. - -* ``api-paste.ini`` defines EC2API limits and resides in the ``/etc/ec2api`` directory. - -A list of config options based on different topics can be found below: - -.. toctree:: - :maxdepth: 1 - - api.rst - metadata.rst diff --git a/doc/source/configuration/metadata.rst b/doc/source/configuration/metadata.rst deleted file mode 100644 index 93446c79..00000000 --- a/doc/source/configuration/metadata.rst +++ /dev/null @@ -1,16 +0,0 @@ -============================== -EC2API Metadata configuration -============================== - -Configuration options -~~~~~~~~~~~~~~~~~~~~~ - -To configure Metadata Service for ec2api, EC2 API configuration_ common sections - (such as OpenStack Clients, Database Client, EC2API and Common sections) - must be configured as well. - -.. 
_configuration: ./api.html - -The additional configuration options for EC2 Metadata: - -.. include:: ./tables/ec2api-metadata.inc diff --git a/doc/source/configuration/tables/ec2api-clients.inc b/doc/source/configuration/tables/ec2api-clients.inc deleted file mode 100644 index e817cc1f..00000000 --- a/doc/source/configuration/tables/ec2api-clients.inc +++ /dev/null @@ -1,37 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _ec2api-clients: - -.. list-table:: Description of OpenStack Clients configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[DEFAULT]** - - - - * - ``cinder_service_type`` = ``volumev3`` - - - (String) Service type of Volume API, registered in Keystone catalog. - - * - ``ec2_timestamp_expiry`` = ``300`` - - - (Integer) Time in seconds before ec2 timestamp expires - - * - ``keystone_ec2_tokens_url`` = ``http://localhost:5000/v3/ec2tokens`` - - - (String) URL to authenticate token from ec2 request. - - * - ``nova_service_type`` = ``compute`` - - - (String) Service type of Compute API, registered in Keystone catalog. Should be v2.1 with microversion support. If it is obsolete v2, a lot of useful EC2 compliant instance properties will be unavailable. diff --git a/doc/source/configuration/tables/ec2api-common.inc b/doc/source/configuration/tables/ec2api-common.inc deleted file mode 100644 index 5f7cebb8..00000000 --- a/doc/source/configuration/tables/ec2api-common.inc +++ /dev/null @@ -1,41 +0,0 @@ -.. - Warning: Do not edit this file. 
It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _ec2api-common: - -.. list-table:: Description of Common Service configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[DEFAULT]** - - - - * - ``api_paste_config`` = ``api-paste.ini`` - - - (String) File name for the paste.deploy config for ec2api - - * - ``max_header_line`` = ``16384`` - - - (Integer) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs). - - * - ``tcp_keepidle`` = ``600`` - - - (Integer) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X. - - * - ``wsgi_default_pool_size`` = ``1000`` - - - (Integer) Size of the pool of greenthreads used by wsgi - - * - ``wsgi_log_format`` = ``%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f`` - - - (String) A python format string that is used as the template to generate log lines. The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds. diff --git a/doc/source/configuration/tables/ec2api-database.inc b/doc/source/configuration/tables/ec2api-database.inc deleted file mode 100644 index 7ec42b98..00000000 --- a/doc/source/configuration/tables/ec2api-database.inc +++ /dev/null @@ -1,25 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. 
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _ec2api-database: - -.. list-table:: Description of additional Database Client configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[database]** - - - - * - ``use_tpool`` = ``False`` - - - (Boolean) Enable the experimental use of thread pooling for all DB API calls diff --git a/doc/source/configuration/tables/ec2api-ec2.inc b/doc/source/configuration/tables/ec2api-ec2.inc deleted file mode 100644 index 942bc7e1..00000000 --- a/doc/source/configuration/tables/ec2api-ec2.inc +++ /dev/null @@ -1,69 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _ec2api-ec2: - -.. 
list-table:: Description of EC2API configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[DEFAULT]** - - - - * - ``default_flavor`` = ``m1.small`` - - - (String) A flavor to use as a default instance type - - * - ``disable_ec2_classic`` = ``None`` - - - (Boolean) True if server does not support EC2 Classic mode in favor of default VPC - - * - ``ec2_host`` = ``$my_ip`` - - - (String) The IP address of the EC2 API server - - * - ``ec2_path`` = ``/`` - - - (String) The path prefix used to call the ec2 API server - - * - ``ec2_port`` = ``8788`` - - - (Integer) The port of the EC2 API server - - * - ``ec2_private_dns_show_ip`` = ``False`` - - - (Boolean) Return the IP address as private dns hostname in describe instances - - * - ``ec2_scheme`` = ``http`` - - - (String) The protocol to use when connecting to the EC2 API server (http, https) - - * - ``external_network`` = ``None`` - - - (String) Name of the external network, which is used to connectVPCs to Internet and to allocate Elastic IPs. - - * - ``internal_service_availability_zone`` = ``internal`` - - - (String) The availability_zone to show internal services under - - * - ``my_ip`` = ``10.0.0.1`` - - - (String) IP address of this host - - * - ``network_device_mtu`` = ``1500`` - - - (Integer) MTU size to set by DHCP for instances. Corresponds with the network_device_mtu in ec2api.conf. - - * - ``region_list`` = - - - (List) List of region=fqdn pairs separated by commas diff --git a/doc/source/configuration/tables/ec2api-metadata.inc b/doc/source/configuration/tables/ec2api-metadata.inc deleted file mode 100644 index 58b04af0..00000000 --- a/doc/source/configuration/tables/ec2api-metadata.inc +++ /dev/null @@ -1,76 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. 
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _ec2api-metadata: - -.. list-table:: Description of Metadata configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[DEFAULT]** - - - - * - ``metadata_listen`` = ``0.0.0.0`` - - - (String) The IP address on which the metadata API will listen. - - * - ``metadata_listen_port`` = ``8789`` - - - (Integer) The port on which the metadata API will listen. - - * - ``metadata_use_ssl`` = ``False`` - - - (Boolean) Enable ssl connections or not for EC2 API Metadata - - * - ``metadata_workers`` = ``None`` - - - (Integer) Number of workers for metadata service. The default will be the number of CPUs available. - - * - **[metadata]** - - - - * - ``auth_ca_cert`` = ``None`` - - - (String) Certificate Authority public key (CA cert) file for ssl - - * - ``cache_expiration`` = ``15`` - - - (Integer) This option is the time (in seconds) to cache metadata. Increasing this setting should improve response times of the metadata API when under heavy load. Higher values may increase memory usage, and result in longer times for host metadata changes to take effect. - - * - ``metadata_proxy_shared_secret`` = - - - (String) Shared secret to sign instance-id request - - * - ``nova_client_cert`` = - - - (String) Client certificate for nova metadata api server. - - * - ``nova_client_priv_key`` = - - - (String) Private key of client certificate. - - * - ``nova_metadata_insecure`` = ``False`` - - - (Boolean) Allow to perform insecure SSL (https) requests to nova metadata - - * - ``nova_metadata_ip`` = ``127.0.0.1`` - - - (String) IP address used by Nova metadata server. 
- - * - ``nova_metadata_port`` = ``8775`` - - - (Integer) TCP Port used by Nova metadata server. - - * - ``nova_metadata_protocol`` = ``http`` - - - (String) Protocol to access nova metadata, http or https diff --git a/doc/source/configuration/tables/ec2api-s3.inc b/doc/source/configuration/tables/ec2api-s3.inc deleted file mode 100644 index 4bc4a35e..00000000 --- a/doc/source/configuration/tables/ec2api-s3.inc +++ /dev/null @@ -1,37 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _ec2api-s3: - -.. list-table:: Description of S3 Client configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[DEFAULT]** - - - - * - ``image_decryption_dir`` = ``/tmp`` - - - (String) Parent directory for tempdir used for image decryption - - * - ``s3_region`` = ``RegionOne`` - - - (String) Region of S3 server - - * - ``s3_url`` = ``http://$my_ip:3334`` - - - (String) URL to S3 server - - * - ``x509_root_private_key`` = ``None`` - - - (String) Path to ca private key file diff --git a/doc/source/configuration/tables/ec2api-service.inc b/doc/source/configuration/tables/ec2api-service.inc deleted file mode 100644 index d9a6dfd9..00000000 --- a/doc/source/configuration/tables/ec2api-service.inc +++ /dev/null @@ -1,49 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _ec2api-service: - -.. list-table:: Description of EC2API Service configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[DEFAULT]** - - - - * - ``ec2api_listen`` = ``0.0.0.0`` - - - (String) The IP address on which the EC2 API will listen. - - * - ``ec2api_listen_port`` = ``8788`` - - - (Integer) The port on which the EC2 API will listen. - - * - ``ec2api_use_ssl`` = ``False`` - - - (Boolean) Enable ssl connections or not for EC2 API - - * - ``ec2api_workers`` = ``None`` - - - (Integer) Number of workers for EC2 API service. The default will be equal to the number of CPUs available. - - * - ``ssl_cert_file`` = ``None`` - - - (String) SSL certificate of API server - - * - ``ssl_key_file`` = ``None`` - - - (String) SSL private key of API server - - * - ``use_forwarded_for`` = ``False`` - - - (Boolean) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst deleted file mode 100644 index 54e5680e..00000000 --- a/doc/source/contributor/contributing.rst +++ /dev/null @@ -1,47 +0,0 @@ -============================ -So You Want to Contribute... -============================ -For general information on contributing to OpenStack, please check out the -`contributor guide `_ to get started. -It covers all the basics that are common to all OpenStack projects: the accounts -you need, the basics of interacting with our Gerrit review system, how we -communicate as a community, etc. -Below will cover the more project specific information you need to get started -with ec2-api. 
- -Communication -~~~~~~~~~~~~~ -* IRC channel #openstack-ec2api at OFTC -* Mailing list (prefix subjects with ``[ec2-api]`` for faster responses) - http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss - -Contacting the Core Team -~~~~~~~~~~~~~~~~~~~~~~~~ -Please refer the `ec2-api Core Team -`_ contacts. - -New Feature Planning -~~~~~~~~~~~~~~~~~~~~ -ec2-api features are tracked on `Launchpad `_. - -Task Tracking -~~~~~~~~~~~~~ -We track our tasks in `Launchpad `_. -If you're looking for some smaller, easier work item to pick up and get started -on, search for the 'low-hanging-fruit' tag. - -Reporting a Bug -~~~~~~~~~~~~~~~ -You found an issue and want to make sure we are aware of it? You can do so on -`Launchpad `_. - -Getting Your Patch Merged -~~~~~~~~~~~~~~~~~~~~~~~~~ -All changes proposed to the ec2-api project require one +2 votes -from ec2-api core reviewers with approving patch by giving -``Workflow +1`` vote. - -Project Team Lead Duties -~~~~~~~~~~~~~~~~~~~~~~~~ -All common PTL duties are enumerated in the `PTL guide -`_. diff --git a/doc/source/hacking.rst b/doc/source/hacking.rst deleted file mode 100644 index a2bcf4fd..00000000 --- a/doc/source/hacking.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../HACKING.rst diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index e106f8f8..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,69 +0,0 @@ -OpenStack EC2 API -===================== - -Support of EC2 API for OpenStack. -This project provides a standalone EC2 API service which pursues two goals: - -1. Implement VPC API which is now absent in nova's EC2 API - -2. Create a standalone service for EC2 API support which accommodates - not only the VPC API but the rest of the EC2 API currently present in nova as - well. - -It doesn't replace existing nova EC2 API service in deployment, it gets -installed to a different port (8788 by default). 
- -The ec2-api service consists of the following components: - -``ec2-api`` service - Accepts and responds to end user EC2 and VPC API calls. - -``ec2-api-metadata`` service - Provides the OpenStack Metadata API to servers. The metadata is used to - configure the running servers. - - -Installing EC2API -================= - -.. toctree:: - :maxdepth: 1 - - install/index - -Configuring EC2API -================== - -.. toctree:: - :maxdepth: 1 - - configuration/index - -EC2API Reference -================== - -- `EC2-API Reference`_ - - .. _`EC2-API Reference`: https://docs.openstack.org/api-ref/ec2-api/ - - -.. toctree:: - :maxdepth: 1 - - hacking - -For Contributors -================ - -* If you are a new contributor to ec2-api please refer: :doc:`contributor/contributing` - - .. toctree:: - :hidden: - - contributor/contributing - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/doc/source/install/configuration.rst b/doc/source/install/configuration.rst deleted file mode 100644 index 2393deac..00000000 --- a/doc/source/install/configuration.rst +++ /dev/null @@ -1,66 +0,0 @@ -.. _configuration: - -To configure OpenStack for EC2 API service add to ``/etc/ec2api/ec2api.conf``: - -.. code-block:: ini - - [DEFAULT] - external_network = public - ec2_port = 8788 - ec2api_listen_port = 8788 - keystone_ec2_tokens_url = http://192.168.56.101/identity/v3/ec2tokens - api_paste_config = /etc/ec2api/api-paste.ini - disable_ec2_classic = True - -.. [*] - ``external_network`` option specifies the name of the external network, - which is used to Internet and to allocate Elastic IPs. It must be - specified to get access into VMs from outside of the cloud. - - - ``disable_ec2_classic`` option is not mandatory, but we strongly - recommend it to be specified. It turns off EC2 Classic mode and forces - objects to be created inside VPCs. 
- - With ``disable_ec2_classic`` = True, any user of the cloud must have - the only network (created with neutron directly and attached to a router - to provide outside access for that VMS), which is used for launch - ec2-classic instances. - - Keep in mind that an operator is not able to change - ``disable_ec2_classic`` setting seamlessly. - -In the *[keystone_authtoken]* section, configure Identity service access. - -.. code-block:: ini - - [keystone_authtoken] - project_domain_name = Default - project_name = service - user_domain_name = Default - password = password - username = ec2api - auth_type = password - -Also you need to configure database connection: - -.. code-block:: ini - - [database] - connection = mysql+pymysql://root:password@127.0.0.1/ec2api?charset=utf8 - -and you need to configure oslo_concurrency lock_path: - -.. code-block:: ini - - [oslo_concurrency] - lock_path = /path/to/oslo_concurrency_lock_dir - -and cache if you want to use it. - -.. code-block:: ini - - [cache] - enabled = True - -You can look for other configuration options in the `Configuration Reference`_ - -.. _`Configuration Reference`: ../configuration/api.html \ No newline at end of file diff --git a/doc/source/install/credentials-creation.rst b/doc/source/install/credentials-creation.rst deleted file mode 100644 index 9cf23e23..00000000 --- a/doc/source/install/credentials-creation.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. _credentials-creation: - -#. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - -#. To create the service credentials, complete these steps: - - * Create the ``ec2api`` user: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt ec2api - - * Add the ``admin`` role to the ``ec2api`` user: - - .. code-block:: console - - $ openstack role add --project service --user ec2api admin - - * Create the ec2api service entities: - - .. 
code-block:: console - - $ openstack service create --name ec2-api --description "ec2api" ec2api diff --git a/doc/source/install/database-creation.rst b/doc/source/install/database-creation.rst deleted file mode 100644 index 98cb6bf9..00000000 --- a/doc/source/install/database-creation.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _database-creation: - -* Use the database access client to connect to the database - server as the ``root`` user: - - .. code-block:: console - - $ mysql -u root -p - -* Create the ``ec2api`` database: - - .. code-block:: mysql - - CREATE DATABASE ec2api; - -* Grant proper access to the ``ec2api`` database: - - .. code-block:: ini - - GRANT ALL PRIVILEGES ON ec2api.* TO 'ec2api'@'localhost' \ - IDENTIFIED BY 'EC2-API_DBPASS'; - GRANT ALL PRIVILEGES ON ec2api.* TO 'ec2api'@'%' \ - IDENTIFIED BY 'EC2-API_DBPASS'; - - Replace ``EC2-API_DBPASS`` with a suitable password. - -* Exit the database access client. - - .. code-block:: mysql - - exit; diff --git a/doc/source/install/endpoints-creation.rst b/doc/source/install/endpoints-creation.rst deleted file mode 100644 index 53e605b9..00000000 --- a/doc/source/install/endpoints-creation.rst +++ /dev/null @@ -1,13 +0,0 @@ -Create the ec2api service API endpoints: - -.. 
code-block:: console - - $ openstack endpoint create --region RegionOne ec2api \ - public http://controller:XXXX/ - $ openstack endpoint create --region RegionOne ec2api \ - admin http://controller:XXXX/ - $ openstack endpoint create --region RegionOne ec2api \ - internal http://controller:XXXX/ - -- where 'controller' is address your ec2api is installed on -- and 'XXXX' is port (8788 by default) diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index 753ccab2..00000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,27 +0,0 @@ -===================== -Installing EC2-API -===================== - -This section describes how to install and configure the ec2-api service on the -controller node for Ubuntu (LTS). - -It assumes that you already have a working OpenStack environment with -at least the following components installed: Compute, Networking, Block Storage, -Identity, Image. - -.. toctree:: - :maxdepth: 1 - - install-sh.rst - install-manual.rst - install-devstack.rst - install-ubuntu.rst - -.. toctree:: - :maxdepth: 1 - - verify.rst - next-steps.rst - -This chapter assumes a working setup of OpenStack following the -`OpenStack Installation Tutorial `_. diff --git a/doc/source/install/install-devstack.rst b/doc/source/install/install-devstack.rst deleted file mode 100644 index 9bc20d6b..00000000 --- a/doc/source/install/install-devstack.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. _install-devstack: - -Installation on DevStack -~~~~~~~~~~~~~~~~~~~~~~~~~ - -In order to install ec2-api with devstack the following should be added to the local.conf or localrc the following line: - -.. code-block:: ini - - enable_plugin ec2-api https://opendev.org/openstack/ec2-api diff --git a/doc/source/install/install-manual.rst b/doc/source/install/install-manual.rst deleted file mode 100644 index 810ed4d2..00000000 --- a/doc/source/install/install-manual.rst +++ /dev/null @@ -1,47 +0,0 @@ -.. 
_install-manual: - -Manual Installation -~~~~~~~~~~~~~~~~~~~ - -Install and configure components --------------------------------- - -1. Install the packages in any way you prefer - (**github+setup.py** / **pip** / **packages**) - -2. Create the service credentials - - .. include:: credentials-creation.rst - -3. Create database - - .. include:: database-creation.rst - - There is a script creating 'ec2api' database that is accessible - only on localhost by user 'ec2api' with password 'ec2api'. - https://github.com/openstack/ec2-api/blob/master/tools/db/ec2api-db-setup - -4. Create endpoints: - - .. include:: endpoints-creation.rst - -5. Create configuration files ``/etc/ec2api/api-paste.ini`` - (can be copied from - https://github.com/openstack/ec2-api/blob/master/etc/ec2api/api-paste.ini) - - and ``/etc/ec2api/ec2api.conf`` - - .. include:: configuration.rst - -6. Configure metadata: - - .. include:: metadata-configuration.rst - -7. Start the services as binaries - - .. code-block:: console - - $ /usr/local/bin/ec2-api - $ /usr/local/bin/ec2-api-metadata - - or set up as Linux services. diff --git a/doc/source/install/install-sh.rst b/doc/source/install/install-sh.rst deleted file mode 100644 index 268b6c27..00000000 --- a/doc/source/install/install-sh.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. _install-sh: - -Installation by install.sh -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Install and configure components --------------------------------- - -Install the packages: - -.. code-block:: console - - # apt-get update - # git clone https://github.com/openstack/ec2-api.git - # cd ec2-api - -Run install.sh - -The EC2 API service gets installed on port 8788 by default. It can be changed -before the installation in ``/etc/ec2api/ec2api.conf`` configuration file. - -:ref:`configuring`. - -The services afterwards can be started as binaries: - -.. code-block:: console - - $ /usr/local/bin/ec2-api - $ /usr/local/bin/ec2-api-metadata - -or set up as Linux services. - -.. 
include:: endpoints-creation.rst - -Configuring OpenStack for EC2 API metadata service ---------------------------------------------------- - -.. include:: metadata-configuration.rst diff --git a/doc/source/install/install-ubuntu.rst b/doc/source/install/install-ubuntu.rst deleted file mode 100644 index 7e16c427..00000000 --- a/doc/source/install/install-ubuntu.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _install-ubuntu: - - -Install and configure -~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the ec2-api service on the -controller node for Ubuntu (LTS). - -It assumes that you already have a working OpenStack environment with -at least the following components installed: Compute, Networking, Block Storage, -Identity, Image. - -.. toctree:: - :maxdepth: 1 - - install-sh.rst - install-manual.rst - install-devstack.rst diff --git a/doc/source/install/metadata-configuration.rst b/doc/source/install/metadata-configuration.rst deleted file mode 100644 index 2f7ce046..00000000 --- a/doc/source/install/metadata-configuration.rst +++ /dev/null @@ -1,29 +0,0 @@ -EC2 metadata is built in between the nova-metadata and the neutron-metadata, -so we need to configure Neutron so that it sends requests to ec2-api-metadata, -not to the nova. - -To configure OpenStack for EC2 API metadata service for Neutron add: - -.. code-block:: ini - - [DEFAULT] - nova_metadata_port = 8789 - -to ``/etc/neutron/metadata_agent.ini`` for legacy neutron or -to ``neutron_ovn_metadata_agent.ini`` for OVN - -then restart neutron-metadata service. - -If you want to obtain metadata via SSL you need to configure neutron: - -.. code-block:: ini - - [DEFAULT] - nova_metadata_protocol = https - # in case of self-signed certs you may need to specify CA - auth_ca_cert = /path/to/root/cert/if/self/signed - # or skip certs checking - nova_metadata_insecure = True - -And then you'll be able to get EC2-API/Nova metadata from neutron via SSL. 
-Anyway metadata URL inside the server still be http://169.254.169.254 \ No newline at end of file diff --git a/doc/source/install/next-steps.rst b/doc/source/install/next-steps.rst deleted file mode 100644 index 1a14807e..00000000 --- a/doc/source/install/next-steps.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _next-steps: - -Next steps -~~~~~~~~~~ - -Your OpenStack environment now includes the ec2-api service. - -To add more services, see the additional documentation on installing OpenStack. diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst deleted file mode 100644 index 79be1f43..00000000 --- a/doc/source/install/verify.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. _verify: - -Verify operation -~~~~~~~~~~~~~~~~ - -Verify operation of the ec2-api service. - -.. note:: - - Perform these commands on the controller node. - -#. Source the ``admin`` project credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . openrc admin admin - -#. List service components to verify successful launch and registration - of each process: - - .. code-block:: console - - $ openstack service list - - -#. Install aws cli. - - .. code-block:: console - - # pip install awscli --upgrade --user - -#. Create configuration file for aws cli in your home directory - ``~/.aws/config`` or by "**aws configure**" command: - - .. code-block:: console - - [default] - aws_access_key_id = 1b013f18d5ed47ae8ed0fbb8debc036b - aws_secret_access_key = 9bbc6f270ffd4dfdbe0e896947f41df3 - region = RegionOne - - Change the aws_access_key_id and aws_secret_acces_key above to the values - appropriate for your cloud (can be obtained by - "**openstack ec2 credentials list**" command). - -#. Run aws cli commands using new EC2 API endpoint URL (can be obtained from - keystone with the new port 8788) like this: - - .. 
code-block:: console - - aws --endpoint-url http://10.0.2.15:8788 ec2 describe-images diff --git a/ec2api/__init__.py b/ec2api/__init__.py deleted file mode 100644 index 4f62dfdc..00000000 --- a/ec2api/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -:mod:`ec2api` -- Cloud IaaS Platform -=================================== - -.. automodule:: ec2api - :platform: Unix - :synopsis: Infrastructure-as-a-Service Cloud platform. -""" - -import gettext - -gettext.install('ec2api') diff --git a/ec2api/api/__init__.py b/ec2api/api/__init__.py deleted file mode 100644 index 6eb593ed..00000000 --- a/ec2api/api/__init__.py +++ /dev/null @@ -1,398 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Starting point for routing EC2 requests. 
-""" -import hashlib -import sys - -import botocore -from keystoneauth1 import session as keystone_session -from keystoneclient import access as keystone_access -from keystoneclient.auth.identity import access as keystone_identity_access -from oslo_config import cfg -from oslo_context import context as common_context -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import timeutils -import requests -import webob -import webob.dec -import webob.exc - -from ec2api.api import apirequest -from ec2api.api import ec2utils -from ec2api.api import faults -from ec2api import clients -from ec2api import context -from ec2api import exception -from ec2api.i18n import _ -from ec2api import wsgi - - -LOG = logging.getLogger(__name__) - -ec2_opts = [ - cfg.StrOpt('keystone_ec2_tokens_url', - default='http://localhost:5000/v3/ec2tokens', - help='URL to authenticate token from ec2 request.'), - cfg.IntOpt('ec2_timestamp_expiry', - default=300, - help='Time in seconds before ec2 timestamp expires'), -] - -CONF = cfg.CONF -CONF.register_opts(ec2_opts) -CONF.import_opt('use_forwarded_for', 'ec2api.api.auth') - - -# Fault Wrapper around all EC2 requests # -class FaultWrapper(wsgi.Middleware): - - """Calls the middleware stack, captures any exceptions into faults.""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - try: - return req.get_response(self.application) - except Exception: - LOG.exception("FaultWrapper catches error") - return faults.Fault(webob.exc.HTTPInternalServerError()) - - -class RequestLogging(wsgi.Middleware): - - """Access-Log akin logging for all EC2 API requests.""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - start = timeutils.utcnow() - rv = req.get_response(self.application) - self.log_request_completion(rv, req, start) - return rv - - def log_request_completion(self, response, request, start): - apireq = request.environ.get('ec2.request', None) - if 
apireq: - action = apireq.action - else: - action = None - ctxt = request.environ.get('ec2api.context', None) - delta = timeutils.utcnow() - start - seconds = delta.seconds - microseconds = delta.microseconds - LOG.info( - "%s.%ss %s %s %s %s %s [%s] %s %s", - seconds, - microseconds, - request.remote_addr, - request.method, - "%s%s" % (request.script_name, request.path_info), - action, - response.status_int, - request.user_agent, - request.content_type, - response.content_type, - context=ctxt) - - -class EC2KeystoneAuth(wsgi.Middleware): - - """Authenticate an EC2 request with keystone and convert to context.""" - - def _get_signature(self, req): - """Extract the signature from the request. - - This can be a get/post variable or for version 4 also in a header - called 'Authorization'. - - params['Signature'] == version 0,1,2,3 - - params['X-Amz-Signature'] == version 4 - - header 'Authorization' == version 4 - """ - sig = req.params.get('Signature') or req.params.get('X-Amz-Signature') - if sig is not None: - return sig - - if 'Authorization' not in req.headers: - return None - - auth_str = req.headers['Authorization'] - if not auth_str.startswith('AWS4-HMAC-SHA256'): - return None - - return auth_str.partition("Signature=")[2].split(',')[0] - - def _get_access(self, req): - """Extract the access key identifier. - - For version 0/1/2/3 this is passed as the AccessKeyId parameter, for - version 4 it is either an X-Amz-Credential parameter or a Credential= - field in the 'Authorization' header string. 
- """ - access = req.params.get('AWSAccessKeyId') - if access is not None: - return access - - cred_param = req.params.get('X-Amz-Credential') - if cred_param: - access = cred_param.split("/")[0] - if access is not None: - return access - - if 'Authorization' not in req.headers: - return None - auth_str = req.headers['Authorization'] - if not auth_str.startswith('AWS4-HMAC-SHA256'): - return None - cred_str = auth_str.partition("Credential=")[2].split(',')[0] - return cred_str.split("/")[0] - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - request_id = common_context.generate_request_id() - - # NOTE(alevine) We need to calculate the hash here because - # subsequent access to request modifies the req.body so the hash - # calculation will yield invalid results. - body_hash = hashlib.sha256(req.body).hexdigest() - - signature = self._get_signature(req) - if not signature: - msg = _("Signature not provided") - return faults.ec2_error_response(request_id, "AuthFailure", msg, - status=400) - access = self._get_access(req) - if not access: - msg = _("Access key not provided") - return faults.ec2_error_response(request_id, "AuthFailure", msg, - status=400) - - if 'X-Amz-Signature' in req.params or 'Authorization' in req.headers: - params = {} - else: - # Make a copy of args for authentication and signature verification - params = dict(req.params) - # Not part of authentication args - params.pop('Signature', None) - - cred_dict = { - 'access': access, - 'signature': signature, - 'host': req.host, - 'verb': req.method, - 'path': req.path, - 'params': params, - # python3 takes only keys for json from headers object - 'headers': {k: req.headers[k] for k in req.headers}, - 'body_hash': body_hash - } - - token_url = CONF.keystone_ec2_tokens_url - if "ec2" in token_url: - creds = {'ec2Credentials': cred_dict} - else: - creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}} - creds_json = jsonutils.dumps(creds) - headers = {'Content-Type': 
'application/json'} - params = {'data': creds_json, 'headers': headers} - clients.update_request_params_with_ssl(params) - response = requests.request('POST', token_url, **params) - status_code = response.status_code - if status_code != 200: - msg = response.reason - return faults.ec2_error_response(request_id, "AuthFailure", msg, - status=status_code) - - try: - auth_ref = keystone_access.AccessInfo.factory(resp=response, - body=response.json()) - except (NotImplementedError, KeyError): - LOG.exception("Keystone failure") - msg = _("Failure communicating with keystone") - return faults.ec2_error_response(request_id, "AuthFailure", msg, - status=400) - auth = keystone_identity_access.AccessInfoPlugin(auth_ref) - params = {'auth': auth} - clients.update_request_params_with_ssl(params) - session = keystone_session.Session(**params) - remote_address = req.remote_addr - if CONF.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', - remote_address) - - ctxt = context.RequestContext(auth_ref.user_id, auth_ref.project_id, - request_id=request_id, - user_name=auth_ref.username, - project_name=auth_ref.project_name, - remote_address=remote_address, - session=session, - api_version=req.params.get('Version')) - - req.environ['ec2api.context'] = ctxt - - return self.application - - -class Requestify(wsgi.Middleware): - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', - 'SignatureVersion', 'Version', 'Timestamp'] - args = dict(req.params) - try: - expired = ec2utils.is_ec2_timestamp_expired( - req.params, - expires=CONF.ec2_timestamp_expiry) - if expired: - msg = _("Timestamp failed validation.") - LOG.exception(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - - # Raise KeyError if omitted - action = req.params['Action'] - # Fix bug lp:720157 for older (version 1) clients - version = req.params.get('SignatureVersion') - if version and int(version) == 1: 
- non_args.remove('SignatureMethod') - if 'SignatureMethod' in args: - args.pop('SignatureMethod') - for non_arg in non_args: - args.pop(non_arg, None) - except KeyError: - raise webob.exc.HTTPBadRequest() - except exception.InvalidRequest as err: - raise webob.exc.HTTPBadRequest(explanation=err.format_message()) - - LOG.debug('action: %s', action) - for key, value in args.items(): - LOG.debug('arg: %(key)s\t\tval: %(value)s', - {'key': key, 'value': value}) - - # Success! - api_request = apirequest.APIRequest( - action, req.params['Version'], args) - req.environ['ec2.request'] = api_request - return self.application - - -def exception_to_ec2code(ex): - """Helper to extract EC2 error code from exception. - - For other than EC2 exceptions (those without ec2_code attribute), - use exception name. - """ - if hasattr(ex, 'ec2_code'): - code = ex.ec2_code - else: - code = type(ex).__name__ - return code - - -def ec2_error_ex(ex, req, unexpected=False): - """Return an EC2 error response. - - Return an EC2 error response based on passed exception and log - the exception on an appropriate log level: - - * DEBUG: expected errors - * ERROR: unexpected errors - - All expected errors are treated as client errors and 4xx HTTP - status codes are always returned for them. - - Unexpected 5xx errors may contain sensitive information, - suppress their messages for security. 
- """ - code = exception_to_ec2code(ex) - for status_name in ('code', 'status', 'status_code', 'http_status'): - status = getattr(ex, status_name, None) - if isinstance(status, int): - break - else: - status = 500 - - if unexpected: - log_fun = LOG.error - log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s") - exc_info = sys.exc_info() - else: - log_fun = LOG.debug - log_msg = _("%(ex_name)s raised: %(ex_str)s") - exc_info = None - - context = req.environ['ec2api.context'] - request_id = context.request_id - log_msg_args = { - 'ex_name': type(ex).__name__, - 'ex_str': ex - } - log_fun(log_msg % log_msg_args, context=context, exc_info=exc_info) - - if unexpected and status >= 500: - message = _('Unknown error occurred.') - elif getattr(ex, 'message', None): - message = str(ex.message) - elif ex.args and any(arg for arg in ex.args): - message = " ".join(map(str, ex.args)) - else: - message = str(ex) - if unexpected: - # Log filtered environment for unexpected errors. - env = req.environ.copy() - for k in list(env.keys()): - if not isinstance(env[k], str): - env.pop(k) - log_fun(_('Environment: %s') % jsonutils.dumps(env)) - return faults.ec2_error_response(request_id, code, message, status=status) - - -class Executor(wsgi.Application): - - """Execute an EC2 API request. - - Executes 'ec2.action', passing 'ec2api.context' and - 'ec2.action_args' (all variables in WSGI environ.) Returns an XML - response, or a 400 upon failure. 
- """ - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - context = req.environ['ec2api.context'] - api_request = req.environ['ec2.request'] - try: - result = api_request.invoke(context) - except botocore.exceptions.ClientError as ex: - error = ex.response.get('Error', {}) - code = ex.response.get('Code', error.get('Code')) - message = ex.response.get('Message', error.get('Message')) - # the early versions of botocore didn't provide HTTPStatusCode - # for 400 errors - status = ex.response.get('ResponseMetadata', {}).get( - 'HTTPStatusCode', 400) - if status < 400 or status > 499: - LOG.exception("Exception from remote server") - return faults.ec2_error_response( - context.request_id, code, message, status=status) - except Exception as ex: - return ec2_error_ex( - ex, req, unexpected=not isinstance(ex, exception.EC2Exception)) - else: - resp = webob.Response() - resp.status = 200 - resp.headers['Content-Type'] = 'text/xml' - resp.body = bytes(result) - - return resp diff --git a/ec2api/api/address.py b/ec2api/api/address.py deleted file mode 100644 index 66c92364..00000000 --- a/ec2api/api/address.py +++ /dev/null @@ -1,459 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -try: - from neutronclient.common import exceptions as neutron_exception -except ImportError: - pass # clients will log absense of neutronclient in this case -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api.api import internet_gateway as internet_gateway_api -from ec2api import clients -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -"""Address related API implementation -""" - - -Validator = common.Validator - - -def get_address_engine(): - return AddressEngineNeutron() - - -def allocate_address(context, domain=None): - if domain and domain not in ['vpc', 'standard']: - msg = _("Invalid value '%(domain)s' for domain.") % {'domain': domain} - raise exception.InvalidParameterValue(msg) - - address, os_floating_ip = address_engine.allocate_address(context, domain) - return _format_address(context, address, os_floating_ip) - - -def associate_address(context, public_ip=None, instance_id=None, - allocation_id=None, network_interface_id=None, - private_ip_address=None, allow_reassociation=False): - if not public_ip and not allocation_id: - msg = _('Either public IP or allocation id must be specified') - raise exception.MissingParameter(msg) - if public_ip and allocation_id: - msg = _('You may specify public IP or allocation id, ' - 'but not both in the same call') - raise exception.InvalidParameterCombination(msg) - if not instance_id and not network_interface_id: - msg = _('Either instance ID or network interface id must be specified') - raise exception.MissingParameter(msg) - associationId = address_engine.associate_address( - context, public_ip, instance_id, - allocation_id, network_interface_id, - private_ip_address, allow_reassociation) - if associationId: - return {'return': True, - 'associationId': associationId} - return {'return': True} - - -def 
disassociate_address(context, public_ip=None, association_id=None): - if not public_ip and not association_id: - msg = _('Either public IP or association id must be specified') - raise exception.MissingParameter(msg) - if public_ip and association_id: - msg = _('You may specify public IP or association id, ' - 'but not both in the same call') - raise exception.InvalidParameterCombination(msg) - address_engine.disassociate_address(context, public_ip, association_id) - return True - - -def release_address(context, public_ip=None, allocation_id=None): - if not public_ip and not allocation_id: - msg = _('Either public IP or allocation id must be specified') - raise exception.MissingParameter(msg) - if public_ip and allocation_id: - msg = _('You may specify public IP or allocation id, ' - 'but not both in the same call') - raise exception.InvalidParameterCombination(msg) - - address_engine.release_address(context, public_ip, allocation_id) - return True - - -class AddressDescriber(common.UniversalDescriber): - - KIND = 'eipalloc' - FILTER_MAP = {'allocation-id': 'allocationId', - 'association-id': 'associationId', - 'domain': 'domain', - 'instance-id': 'instanceId', - 'network-interface-id': 'networkInterfaceId', - 'network-interface-owner-id': 'networkInterfaceOwnerId', - 'private-ip-address': 'privateIpAddress', - 'public-ip': 'publicIp'} - - def __init__(self, os_ports, db_instances): - self.os_ports = os_ports - self.db_instances_dict = {i['os_id']: i for i in (db_instances or [])} - - def format(self, item=None, os_item=None): - return _format_address(self.context, item, os_item, self.os_ports, - self.db_instances_dict) - - def get_os_items(self): - return address_engine.get_os_floating_ips(self.context) - - def auto_update_db(self, item, os_item): - item = super(AddressDescriber, self).auto_update_db(item, os_item) - if (item and 'network_interface_id' in item and - (not os_item.get('port_id') or - os_item['fixed_ip_address'] != item['private_ip_address'])): - 
_disassociate_address_item(self.context, item) - return item - - def get_name(self, os_item): - return os_item['floating_ip_address'] - - -def describe_addresses(context, public_ip=None, allocation_id=None, - filter=None): - formatted_addresses = AddressDescriber( - address_engine.get_os_ports(context), - db_api.get_items(context, 'i')).describe( - context, allocation_id, public_ip, filter) - return {'addressesSet': formatted_addresses} - - -def _format_address(context, address, os_floating_ip, os_ports=[], - db_instances_dict=None): - ec2_address = {'publicIp': os_floating_ip['floating_ip_address']} - fixed_ip_address = os_floating_ip.get('fixed_ip_address') - if fixed_ip_address: - ec2_address['privateIpAddress'] = fixed_ip_address - os_instance_id = _get_os_instance_id(context, os_floating_ip, os_ports) - if os_instance_id: - ec2_address['instanceId'] = ( - _get_instance_ec2_id_by_os_id(context, os_instance_id, - db_instances_dict)) - if not address: - ec2_address['domain'] = 'standard' - else: - ec2_address['domain'] = 'vpc' - ec2_address['allocationId'] = address['id'] - if 'network_interface_id' in address: - ec2_address.update({ - 'associationId': ec2utils.change_ec2_id_kind( - ec2_address['allocationId'], 'eipassoc'), - 'networkInterfaceId': address['network_interface_id'], - 'networkInterfaceOwnerId': context.project_id}) - return ec2_address - - -def _get_instance_ec2_id_by_os_id(context, os_instance_id, db_instances_dict): - db_item = ec2utils.get_db_item_by_os_id(context, 'i', os_instance_id, - db_instances_dict) - return db_item['id'] - - -def _is_address_valid(context, neutron, address): - try: - neutron.show_floatingip(address['os_id']) - except neutron_exception.NotFound: - return False - else: - return True - - -def _associate_address_item(context, address, network_interface_id, - private_ip_address): - address['network_interface_id'] = network_interface_id - address['private_ip_address'] = private_ip_address - db_api.update_item(context, address) 
- - -def _disassociate_address_item(context, address): - address.pop('network_interface_id') - address.pop('private_ip_address') - db_api.update_item(context, address) - - -def _get_os_instance_id(context, os_floating_ip, os_ports=[]): - port_id = os_floating_ip.get('port_id') - os_instance_id = None - if port_id: - port = next((port for port in os_ports - if port['id'] == port_id), None) - if port and port.get('device_owner').startswith('compute:'): - os_instance_id = port.get('device_id') - return os_instance_id - - -class AddressEngineNeutron(object): - - def allocate_address(self, context, domain=None): - os_public_network = ec2utils.get_os_public_network(context) - neutron = clients.neutron(context) - - with common.OnCrashCleaner() as cleaner: - os_floating_ip = {'floating_network_id': os_public_network['id']} - try: - os_floating_ip = neutron.create_floatingip( - {'floatingip': os_floating_ip}) - except neutron_exception.OverQuotaClient: - raise exception.AddressLimitExceeded() - os_floating_ip = os_floating_ip['floatingip'] - if ((not domain or domain == 'standard') and - not CONF.disable_ec2_classic): - return None, os_floating_ip - cleaner.addCleanup(neutron.delete_floatingip, os_floating_ip['id']) - - address = {'os_id': os_floating_ip['id'], - 'public_ip': os_floating_ip['floating_ip_address']} - address = db_api.add_item(context, 'eipalloc', address) - return address, os_floating_ip - - def release_address(self, context, public_ip, allocation_id): - neutron = clients.neutron(context) - if public_ip: - # TODO(ft): implement search in DB layer - address = next((addr for addr in - db_api.get_items(context, 'eipalloc') - if addr['public_ip'] == public_ip), None) - if address and _is_address_valid(context, neutron, address): - msg = _('You must specify an allocation id when releasing a ' - 'VPC elastic IP address') - raise exception.InvalidParameterValue(msg) - os_floating_ip = self.get_os_floating_ip_by_public_ip(context, - public_ip) - try: - 
neutron.delete_floatingip(os_floating_ip['id']) - except neutron_exception.NotFound: - pass - return - - address = ec2utils.get_db_item(context, allocation_id) - if not _is_address_valid(context, neutron, address): - raise exception.InvalidAllocationIDNotFound( - id=allocation_id) - - if 'network_interface_id' in address: - if CONF.disable_ec2_classic: - network_interface_id = address['network_interface_id'] - network_interface = db_api.get_item_by_id(context, - network_interface_id) - default_vpc = ec2utils.check_and_create_default_vpc(context) - if default_vpc: - default_vpc_id = default_vpc['id'] - if (network_interface and - network_interface['vpc_id'] == default_vpc_id): - association_id = ec2utils.change_ec2_id_kind(address['id'], - 'eipassoc') - self.disassociate_address( - context, association_id=association_id) - else: - raise exception.InvalidIPAddressInUse( - ip_address=address['public_ip']) - else: - raise exception.InvalidIPAddressInUse( - ip_address=address['public_ip']) - - with common.OnCrashCleaner() as cleaner: - db_api.delete_item(context, address['id']) - cleaner.addCleanup(db_api.restore_item, context, - 'eipalloc', address) - try: - neutron.delete_floatingip(address['os_id']) - except neutron_exception.NotFound: - pass - - def associate_address(self, context, public_ip=None, instance_id=None, - allocation_id=None, network_interface_id=None, - private_ip_address=None, allow_reassociation=False): - instance_network_interfaces = [] - if instance_id: - # TODO(ft): implement search in DB layer - for eni in db_api.get_items(context, 'eni'): - if eni.get('instance_id') == instance_id: - instance_network_interfaces.append(eni) - - neutron = clients.neutron(context) - - if public_ip: - # TODO(ft): implement search in DB layer - address = next((addr for addr in db_api.get_items(context, - 'eipalloc') - if addr['public_ip'] == public_ip), None) - - if not CONF.disable_ec2_classic: - if instance_network_interfaces: - msg = _('You must specify an 
allocation id when mapping ' - 'an address to a VPC instance') - raise exception.InvalidParameterCombination(msg) - if address and _is_address_valid(context, neutron, address): - msg = _( - "The address '%(public_ip)s' does not belong to you.") - raise exception.AuthFailure(msg % {'public_ip': public_ip}) - - os_instance_id = ec2utils.get_db_item(context, - instance_id)['os_id'] - # NOTE(ft): check the public IP exists to raise AWS exception - # otherwise - self.get_os_floating_ip_by_public_ip(context, public_ip) - nova = clients.nova(context) - nova.servers.add_floating_ip(os_instance_id, public_ip) - return None - - if not address: - msg = _("The address '%(public_ip)s' does not belong to you.") - raise exception.AuthFailure(msg % {'public_ip': public_ip}) - allocation_id = address['id'] - - if instance_id: - if not instance_network_interfaces: - # NOTE(ft): check the instance exists - ec2utils.get_db_item(context, instance_id) - msg = _('You must specify an IP address when mapping ' - 'to a non-VPC instance') - raise exception.InvalidParameterCombination(msg) - if len(instance_network_interfaces) > 1: - raise exception.InvalidInstanceId(instance_id=instance_id) - network_interface = instance_network_interfaces[0] - else: - network_interface = ec2utils.get_db_item(context, - network_interface_id) - if not private_ip_address: - private_ip_address = network_interface['private_ip_address'] - - address = ec2utils.get_db_item(context, allocation_id) - if not _is_address_valid(context, neutron, address): - raise exception.InvalidAllocationIDNotFound( - id=allocation_id) - - if address.get('network_interface_id') == network_interface['id']: - # NOTE(ft): idempotent call - pass - elif address.get('network_interface_id') and not allow_reassociation: - msg = _('resource %(eipalloc_id)s is already associated with ' - 'associate-id %(eipassoc_id)s') - msg = msg % {'eipalloc_id': allocation_id, - 'eipassoc_id': ec2utils.change_ec2_id_kind( - address['id'], 'eipassoc')} - 
raise exception.ResourceAlreadyAssociated(msg) - else: - internet_gateways = ( - internet_gateway_api.describe_internet_gateways( - context, - filter=[{'name': 'attachment.vpc-id', - 'value': [network_interface['vpc_id']]}]) - ['internetGatewaySet']) - if len(internet_gateways) == 0: - msg = _('Network %(vpc_id)s is not attached to any internet ' - 'gateway') % {'vpc_id': network_interface['vpc_id']} - raise exception.GatewayNotAttached(msg) - - with common.OnCrashCleaner() as cleaner: - _associate_address_item(context, address, - network_interface['id'], - private_ip_address) - cleaner.addCleanup(_disassociate_address_item, context, - address) - - os_floating_ip = {'port_id': network_interface['os_id'], - 'fixed_ip_address': private_ip_address} - neutron.update_floatingip(address['os_id'], - {'floatingip': os_floating_ip}) - # TODO(ft): generate unique association id for each act of association - return ec2utils.change_ec2_id_kind(address['id'], 'eipassoc') - - def disassociate_address(self, context, public_ip=None, - association_id=None): - neutron = clients.neutron(context) - - if public_ip: - # TODO(ft): implement search in DB layer - address = next((addr for addr in db_api.get_items(context, - 'eipalloc') - if addr['public_ip'] == public_ip), None) - - if not CONF.disable_ec2_classic: - if address and _is_address_valid(context, neutron, address): - msg = _('You must specify an association id when ' - 'unmapping an address from a VPC instance') - raise exception.InvalidParameterValue(msg) - # NOTE(tikitavi): check the public IP exists to raise AWS - # exception otherwise - os_floating_ip = self.get_os_floating_ip_by_public_ip( - context, public_ip) - os_ports = self.get_os_ports(context) - os_instance_id = _get_os_instance_id(context, os_floating_ip, - os_ports) - if os_instance_id: - nova = clients.nova(context) - nova.servers.remove_floating_ip(os_instance_id, public_ip) - return None - - if not address: - msg = _("The address '%(public_ip)s' does not belong 
to you.") - raise exception.AuthFailure(msg % {'public_ip': public_ip}) - if 'network_interface_id' not in address: - msg = _('You must specify an association id when unmapping ' - 'an address from a VPC instance') - raise exception.InvalidParameterValue(msg) - association_id = ec2utils.change_ec2_id_kind(address['id'], - 'eipassoc') - - address = db_api.get_item_by_id( - context, ec2utils.change_ec2_id_kind(association_id, 'eipalloc')) - if address is None or not _is_address_valid(context, neutron, address): - raise exception.InvalidAssociationIDNotFound( - id=association_id) - if 'network_interface_id' in address: - with common.OnCrashCleaner() as cleaner: - network_interface_id = address['network_interface_id'] - private_ip_address = address['private_ip_address'] - _disassociate_address_item(context, address) - cleaner.addCleanup(_associate_address_item, context, address, - network_interface_id, private_ip_address) - - neutron.update_floatingip(address['os_id'], - {'floatingip': {'port_id': None}}) - - def get_os_floating_ips(self, context): - neutron = clients.neutron(context) - return neutron.list_floatingips( - tenant_id=context.project_id)['floatingips'] - - def get_os_ports(self, context): - neutron = clients.neutron(context) - return neutron.list_ports(tenant_id=context.project_id)['ports'] - - def get_os_floating_ip_by_public_ip(self, context, public_ip): - os_floating_ip = next((addr for addr in - self.get_os_floating_ips(context) - if addr['floating_ip_address'] == public_ip), - None) - if not os_floating_ip: - msg = _("The address '%(public_ip)s' does not belong to you.") - raise exception.AuthFailure(msg % {'public_ip': public_ip}) - return os_floating_ip - - -address_engine = get_address_engine() diff --git a/ec2api/api/apirequest.py b/ec2api/api/apirequest.py deleted file mode 100644 index 08805d47..00000000 --- a/ec2api/api/apirequest.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -APIRequest class -""" - -from lxml import etree -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api.api import cloud -from ec2api.api import ec2utils -from ec2api import exception - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def _underscore_to_camelcase(st): - return ''.join([x[:1].upper() + x[1:] for x in st.split('_')]) - - -def _database_to_isoformat(datetimeobj): - """Return a xs:dateTime parsable string from datatime.""" - return datetimeobj.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z' - - -class APIRequest(object): - - def __init__(self, action, version, args): - self.action = action - self.version = version - self.args = args - self.controller = cloud.VpcCloudController() - - def invoke(self, context): - try: - method = getattr(self.controller, - ec2utils.camelcase_to_underscore(self.action)) - except AttributeError: - LOG.exception('Unsupported API request: action = %(action)s', - {'action': self.action}) - raise exception.InvalidRequest() - - args = ec2utils.dict_from_dotted_str(self.args.items()) - - def convert_dicts_to_lists(args): - if not isinstance(args, dict): - return args - for key in args.keys(): - # NOTE(vish): Turn numeric dict keys into lists - # NOTE(Alex): Turn "value"-only dict keys into values - if isinstance(args[key], dict): - if args[key] == {}: - continue - first_subkey = next(iter(args[key].keys())) - if first_subkey.isdigit(): - s = args[key] 
- args[key] = [convert_dicts_to_lists(s[k]) - for k in sorted(s)] - elif (first_subkey == 'value' and - len(args[key]) == 1): - args[key] = args[key]['value'] - return args - - args = convert_dicts_to_lists(args) - result = method(context, **args) - return self._render_response(result, context.request_id) - - def _render_response(self, response_data, request_id): - response_el = ec2utils.dict_to_xml( - {'return': 'true'} if response_data is True else response_data, - self.action + 'Response') - response_el.attrib['xmlns'] = ('http://ec2.amazonaws.com/doc/%s/' - % self.version) - request_id_el = etree.Element('requestId') - request_id_el.text = request_id - response_el.insert(0, request_id_el) - - response = etree.tostring(response_el, pretty_print=True) - - # Don't write private key to log - if self.action != "CreateKeyPair": - LOG.debug(response) - else: - LOG.debug("CreateKeyPair: Return Private Key") - - return response diff --git a/ec2api/api/auth.py b/ec2api/api/auth.py deleted file mode 100644 index 1aec7653..00000000 --- a/ec2api/api/auth.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Common Auth Middleware. - -""" - -from oslo_config import cfg -from oslo_log import log as logging - - -auth_opts = [ - cfg.BoolOpt('use_forwarded_for', - default=False, - help='Treat X-Forwarded-For as the canonical remote address. 
' - 'Only enable this if you have a sanitizing proxy.'), -] - -CONF = cfg.CONF -CONF.register_opts(auth_opts) - -LOG = logging.getLogger(__name__) - - -def pipeline_factory(loader, global_conf, **local_conf): - """A paste pipeline replica that keys off of auth_strategy.""" - auth_strategy = "keystone" - pipeline = local_conf[auth_strategy] - pipeline = pipeline.split() - filters = [loader.get_filter(n) for n in pipeline[:-1]] - app = loader.get_app(pipeline[-1]) - filters.reverse() - for fltr in filters: - app = fltr(app) - return app diff --git a/ec2api/api/availability_zone.py b/ec2api/api/availability_zone.py deleted file mode 100644 index 31bd9f80..00000000 --- a/ec2api/api/availability_zone.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import functools - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import netutils - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api import clients -from ec2api import exception - - -availability_zone_opts = [ - cfg.StrOpt('internal_service_availability_zone', - default='internal', - help='The availability_zone to show internal services under'), - cfg.StrOpt('my_ip', - default=netutils.get_my_ipv4(), - help='IP address of this host'), - cfg.StrOpt('ec2_host', - default='$my_ip', - help='The IP address of the EC2 API server'), - cfg.IntOpt('ec2_port', - default=8788, - help='The port of the EC2 API server'), - cfg.StrOpt('ec2_scheme', - default='http', - help='The protocol to use when connecting to the EC2 API ' - 'server (http, https)'), - cfg.StrOpt('ec2_path', - default='/', - help='The path prefix used to call the ec2 API server'), - cfg.ListOpt('region_list', - default=[], - help='List of region=fqdn pairs separated by commas'), -] - -CONF = cfg.CONF -CONF.register_opts(availability_zone_opts) -LOG = logging.getLogger(__name__) - -"""Availability zones, regions, account attributes related API implementation -""" - - -Validator = common.Validator - - -def get_account_attribute_engine(): - return AccountAttributeEngineNeutron() - - -class AvailabilityZoneDescriber(common.UniversalDescriber): - - KIND = 'az' - FILTER_MAP = {'state': 'zoneState', - 'zone-name': 'zoneName'} - - def format(self, item=None, os_item=None): - return _format_availability_zone(os_item) - - def get_db_items(self): - return [] - - def get_os_items(self): - nova = clients.nova(self.context) - zones = nova.availability_zones.list(detailed=False) - for zone in zones: - if zone.zoneName == CONF.internal_service_availability_zone: - zones.remove(zone) - return zones - - def get_name(self, os_item): - return os_item.zoneName - - def get_id(self, os_item): - return '' - - def auto_update_db(self, item, os_item): - pass - - -def 
describe_availability_zones(context, zone_name=None, filter=None): - # NOTE(Alex): Openstack extension, AWS-incompability - # Checking for 'verbose' in zone_name. - if zone_name and 'verbose' in zone_name: - return _describe_verbose(context) - - formatted_availability_zones = AvailabilityZoneDescriber().describe( - context, names=zone_name, filter=filter) - return {'availabilityZoneInfo': formatted_availability_zones} - - -def describe_regions(context, region_name=None, filter=None): - # TODO(andrey-mp): collect regions from keystone catalog - if CONF.region_list: - regions = [] - for region in CONF.region_list: - name, _sep, host = region.partition('=') - if not host: - host = CONF.ec2_host - endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme, - host, - CONF.ec2_port, - CONF.ec2_path) - regions.append({'regionName': name, - 'regionEndpoint': endpoint}) - else: - # NOTE(andrey-mp): RegionOne is a default region name that is used - # in keystone, nova and some other projects - regions = [{'regionName': 'RegionOne', - 'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme, - CONF.ec2_host, - CONF.ec2_port, - CONF.ec2_path)}] - return {'regionInfo': regions} - - -def describe_account_attributes(context, attribute_name=None): - def get_max_instances(): - nova = clients.nova(context) - quotas = nova.quotas.get(context.project_id, context.user_id) - return quotas.instances - - attribute_getters = { - 'supported-platforms': ( - account_attribute_engine.get_supported_platforms), - 'default-vpc': functools.partial( - account_attribute_engine.get_default_vpc, context), - 'max-instances': get_max_instances, - } - - formatted_attributes = [] - for attribute in (attribute_name or attribute_getters): - if attribute not in attribute_getters: - raise exception.InvalidParameter(name=attribute) - formatted_attributes.append( - _format_account_attribute(attribute, - attribute_getters[attribute]())) - return {'accountAttributeSet': formatted_attributes} - - -def 
_format_availability_zone(zone): - return {'zoneName': zone.zoneName, - 'zoneState': ('available' - if zone.zoneState.get('available') - else 'unavailable') - } - - -def _format_account_attribute(attribute, value): - if not isinstance(value, list): - value = [value] - return {'attributeName': attribute, - 'attributeValueSet': [{'attributeValue': val} for val in value]} - - -# NOTE(Alex): Openstack extension, AWS-incompability -# The whole function and its result is incompatible with AWS. - -def _describe_verbose(context): - nova = clients.nova(context) - availability_zones = nova.availability_zones.list() - - formatted_availability_zones = [] - for availability_zone in availability_zones: - formatted_availability_zones.append( - _format_availability_zone(availability_zone)) - for host, services in availability_zone.hosts.items(): - formatted_availability_zones.append( - {'zoneName': '|- %s' % host, - 'zoneState': ''}) - for service, values in services.items(): - active = ":-)" if values['active'] else "XXX" - enabled = 'enabled' if values['available'] else 'disabled' - formatted_availability_zones.append( - {'zoneName': '| |- %s' % service, - 'zoneState': ('%s %s %s' % (enabled, active, - values['updated_at']))}) - - return {'availabilityZoneInfo': formatted_availability_zones} - - -class AccountAttributeEngineNeutron(object): - - def get_supported_platforms(self): - if CONF.disable_ec2_classic: - return ['VPC'] - else: - return ['EC2', 'VPC'] - - def get_default_vpc(self, context): - if CONF.disable_ec2_classic: - default_vpc = ec2utils.check_and_create_default_vpc(context) - if default_vpc: - return default_vpc['id'] - return 'none' - - -account_attribute_engine = get_account_attribute_engine() diff --git a/ec2api/api/cloud.py b/ec2api/api/cloud.py deleted file mode 100644 index 89dc4bd7..00000000 --- a/ec2api/api/cloud.py +++ /dev/null @@ -1,2115 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -""" -Cloud Controller: Implementation of EC2 REST API calls, which are -dispatched to other nodes via AMQP RPC. State is via distributed -datastore. -""" - -import collections - -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api.api import address -from ec2api.api import availability_zone -from ec2api.api import customer_gateway -from ec2api.api import dhcp_options -from ec2api.api import image -from ec2api.api import instance -from ec2api.api import internet_gateway -from ec2api.api import key_pair -from ec2api.api import network_interface -from ec2api.api import route_table -from ec2api.api import security_group -from ec2api.api import snapshot -from ec2api.api import subnet -from ec2api.api import tag -from ec2api.api import volume -from ec2api.api import vpc -from ec2api.api import vpn_connection -from ec2api.api import vpn_gateway -from ec2api import exception - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def module_and_param_types(module, *args, **kwargs): - """Decorator to check types and call function.""" - - param_types = args - - def wrapped(func): - - def func_wrapped(*args, **kwargs): - impl_func = getattr(module, func.__name__) - context = args[1] - params = collections.OrderedDict(zip( - func.__code__.co_varnames[2:], param_types)) - param_num = 0 - mandatory_params_num = (func.__code__.co_argcount - 2 - - len(func.__defaults__ or [])) - for param_name, param_type 
in params.items(): - param_value = kwargs.get(param_name) - if param_value is not None: - validator = module.Validator(param_name, func.__name__, - params) - validation_func = getattr(validator, param_type) - validation_func(param_value) - param_num += 1 - elif param_num < mandatory_params_num: - raise exception.MissingParameter(param=param_name) - return impl_func(context, **kwargs) - return func_wrapped - - return wrapped - - -class CloudController(object): - - """Cloud Controller - - Provides the critical dispatch between - inbound API calls through the endpoint and messages - sent to the other nodes. - """ - - def __init__(self): - pass - - def __str__(self): - return 'CloudController' - - @module_and_param_types(address, 'str255') - def allocate_address(self, context, domain=None): - """Acquires an Elastic IP address. - - Args: - context (RequestContext): The request context. - domain (str): Set to vpc to allocate the address for use with - instances in a VPC. - Default: The address is for use in EC2-Classic. - Valid values: vpc - - Returns: - The Elastic IP address information. - - An Elastic IP address is for use either in the EC2-Classic platform - or in a VPC. - """ - - @module_and_param_types(address, 'ip', 'i_id', - 'eipalloc_id', 'eni_id', - 'ip', 'bool') - def associate_address(self, context, public_ip=None, instance_id=None, - allocation_id=None, network_interface_id=None, - private_ip_address=None, allow_reassociation=False): - """Associates an Elastic IP with an instance or a network interface. - - Args: - context (RequestContext): The request context. - public_ip (str): The Elastic IP address. - Required for Elastic IP addresses for use with instances - in EC2-Classic. - instance_id (str): The ID of the instance. - The operation fails if you specify an instance ID unless - exactly one network interface is attached. - Required for EC2-Classic. - allocation_id (str): The allocation ID. - Required for EC2-VPC. 
- network_interface_id (str): The ID of the network interface. - private_ip_address (str): The primary or secondary private IP. - allow_reassociation (boolean): Allows an Elastic IP address that is - already associated to be re-associated. - Otherwise, the operation fails. - - Returns: - true if the request succeeds. - [EC2-VPC] The ID that represents the association of the Elastic IP. - - For a VPC, you can specify either instance_id or network_interface_id, - but not both. - If the instance has more than one network interface, you must specify - a network interface ID. - If no private IP address is specified, the Elastic IP address - is associated with the primary private IP address. - [EC2-Classic, default VPC] If the Elastic IP address is already - associated with a different instance, it is disassociated from that - instance and associated with the specified instance. - This is an idempotent operation. - """ - - @module_and_param_types(address, 'ip', - 'eipassoc_id') - def disassociate_address(self, context, public_ip=None, - association_id=None): - """Disassociates an Elastic IP address. - - Args: - context (RequestContext): The request context. - public_ip (str): The Elastic IP address. - Required for EC2-Classic. - assossiation_id (str): The association ID. - Required for EC2-VPC - - Returns: - true if the request succeeds. - - Disassociates an Elastic IP address from the instance or network - interface it's associated with. - This is an idempotent action. - """ - - @module_and_param_types(address, 'ip', - 'eipalloc_id') - def release_address(self, context, public_ip=None, allocation_id=None): - """Releases the specified Elastic IP address. - - Args: - context (RequestContext): The request context. - public_ip (str): The Elastic IP address. - allocation_id (str): The allocation ID. - - Returns: - true if the requests succeeds. 
- - If you attempt to release an Elastic IP address that you already - released, you'll get an AuthFailure error if the address is already - allocated to another AWS account. - [EC2-Classic, default VPC] Releasing an Elastic IP address - automatically disassociates it from any instance that it's associated - with. - [Nondefault VPC] You must use DisassociateAddress to disassociate the - Elastic IP address before you try to release it. - """ - - @module_and_param_types(address, 'ips', 'eipalloc_ids', - 'filter') - def describe_addresses(self, context, public_ip=None, allocation_id=None, - filter=None): - """Describes one or more of your Elastic IP addresses. - - Args: - context (RequestContext): The request context. - public_ip (list of str): One or more Elastic IP addresses. - allocation_id (list of str): One or more allocation IDs. - filter (list of filter dict): You can specify filters so that the - response includes information for only certain Elastic IP - addresses. - - Returns: - A list of Elastic IP addresses. - """ - - @module_and_param_types(security_group, 'security_group_strs', - 'sg_ids', 'filter') - def describe_security_groups(self, context, group_name=None, - group_id=None, filter=None): - """Describes one or more of your security groups. - - Args: - context (RequestContext): The request context. - group_name (list of str): One or more security group names. - group_id (list of str): One or more security group IDs. - filter (list of filter dict): You can specify filters so that the - response includes information for only certain security groups. - - Returns: - A list of security groups. - """ - - @module_and_param_types(security_group, 'security_group_str', - 'security_group_str', 'vpc_id') - def create_security_group(self, context, group_name, - group_description, vpc_id=None): - """Creates a security group. - - Args: - context (RequestContext): The request context. - group_name (str): The name of the security group. 
- group_description (str): A description for the security group. - vpc_id (str): [EC2-VPC] The ID of the VPC. - - Returns: - true if the requests succeeds. - The ID of the security group. - - You can have a security group for use in EC2-Classic with the same name - as a security group for use in a VPC. However, you can't have two - security groups for use in EC2-Classic with the same name or two - security groups for use in a VPC with the same name. - You have a default security group for use in EC2-Classic and a default - security group for use in your VPC. If you don't specify a security - group when you launch an instance, the instance is launched into the - appropriate default security group. A default security group includes - a default rule that grants instances unrestricted network access to - each other. - group_name and group_description restrictions: - up to 255 characters in length, - EC2-Classic: ASCII characters, - EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* - """ - - @module_and_param_types(security_group, 'security_group_str', 'sg_id') - def delete_security_group(self, context, group_name=None, group_id=None): - """Deletes a security group. - - Args: - context (RequestContext): The request context. - group_name (str): The name of the security group. - group_id (str): The ID of the security group. - - Returns: - true if the requests succeeds. - - [EC2-Classic, default VPC] You can specify either GroupName or GroupId - If you attempt to delete a security group that is associated with an - instance, or is referenced by another security group, the operation - fails. - """ - - @module_and_param_types(security_group, 'sg_id', - 'security_group_str', 'dummy') - def authorize_security_group_ingress(self, context, group_id=None, - group_name=None, ip_permissions=None): - """Adds one or more ingress rules to a security group. - - Args: - context (RequestContext): The request context. - group_id (str): The ID of the security group. 
- group_name (str): [EC2-Classic, default VPC] The name of the - security group. - ip_permissions (list of dicts): Dict can contain: - ip_protocol (str): The IP protocol name or number. - Use -1 to specify all. - For EC2-Classic, security groups can have rules only for - TCP, UDP, and ICMP. - from_port (str): The start of port range for the TCP and UDP - protocols, or an ICMP type number. For the ICMP type - number, you can use -1 to specify all ICMP types. - to_port (str): The end of port range for the TCP and UDP - protocols, or an ICMP code number. For the ICMP code - number, you can use -1 to specify all ICMP codes for the - ICMP type. - groups (list of dicts): Dict can contain: - group_id (str): The ID of the source security group. You - can't specify a source security group and a CIDR IP - address range. - user_id (str): [EC2-Classic] The ID of the AWS account that - owns the source security group, if it's not the current - AWS account. - cidr_ip (str): The CIDR IP address range. You can't specify - this parameter when specifying a source security group. - - Returns: - true if the requests succeeds. - """ - - @module_and_param_types(security_group, 'sg_id', - 'security_group_str', 'dummy') - def revoke_security_group_ingress(self, context, group_id=None, - group_name=None, ip_permissions=None): - """Removes one or more ingress rules from a security group. - - Args: - context (RequestContext): The request context. - group_id (str): The ID of the security group. - group_name (str): [EC2-Classic, default VPC] The name of the - security group. - ip_permissions (list of dicts): See - authorize_security_group_ingress - - Returns: - true if the requests succeeds. - - The values that you specify in the revoke request (for example, ports) - must match the existing rule's values for the rule to be removed. 
- """ - - @module_and_param_types(security_group, 'sg_id', 'dummy') - def authorize_security_group_egress(self, context, group_id, - ip_permissions=None): - """Adds one or more egress rules to a security group for use with a VPC. - - Args: - context (RequestContext): The request context. - group_id (str): The ID of the security group. - ip_permissions (list of dicts): See - authorize_security_group_ingress - - Returns: - true if the requests succeeds. - - This action doesn't apply to security groups for use in EC2-Classic. - """ - - @module_and_param_types(security_group, 'sg_id', 'dummy') - def revoke_security_group_egress(self, context, group_id, - ip_permissions=None): - """Removes one or more egress rules from a security group for EC2-VPC. - - Args: - context (RequestContext): The request context. - group_id (str): The ID of the security group. - ip_permissions (list of dicts): See - authorize_security_group_ingress - - Returns: - true if the requests succeeds. - - The values that you specify in the revoke request (for example, ports) - must match the existing rule's values for the rule to be revoked. - This action doesn't apply to security groups for use in EC2-Classic. - """ - - @module_and_param_types(instance, 'ami_id', 'int', 'int', - 'str255', 'sg_ids', - 'security_group_strs', 'str', 'str', - 'dummy', 'aki_id', 'ari_id', - 'dummy', 'dummy', - 'subnet_id', 'bool', - 'str', - 'ip', 'str64', - 'dummy', 'dummy', - 'bool') - def run_instances(self, context, image_id, min_count, max_count, - key_name=None, security_group_id=None, - security_group=None, user_data=None, instance_type=None, - placement=None, kernel_id=None, ramdisk_id=None, - block_device_mapping=None, monitoring=None, - subnet_id=None, disable_api_termination=None, - instance_initiated_shutdown_behavior=None, - private_ip_address=None, client_token=None, - network_interface=None, iam_instance_profile=None, - ebs_optimized=None): - """Launches the specified number of instances using an AMI. 
- - Args: - context (RequestContext): The request context. - image_id (str): The ID of the AMI. - min_count (int): The minimum number of instances to launch. - If you specify a minimum that is more instances than EC2 can - launch in the target Availability Zone, EC2 launches no - instances. - max_count (int): The maximum number of instances to launch. - If you specify more instances than EC2 can launch in the target - Availability Zone, EC2 launches the largest possible number - of instances above max_count. - key_name (str): The name of the key pair. - security_group_id (list of str): One or more security group IDs. - security_group (list of str): One or more security group names. - For VPC mode, you must use security_group_id. - user_data (str): Base64-encoded MIME user data for the instances. - instance_type (str): The instance type. - placement (dict): Dict can contain: - availability_zone (str): Availability Zone for the instance. - group_name (str): The name of an existing placement group. - Not used now. - tenancy (str): The tenancy of the instance. - Not used now. - kernel_id (str): The ID of the kernel. - ramdisk_id (str): The ID of the RAM disk. - block_device_mapping (list of dict): Dict can contain: - device_name (str): The device name exposed to the instance - (for example, /dev/sdh or xvdh). - virtual_name (str): The virtual device name (ephemeral[0..3]). - ebs (dict): Dict can contain: - volume_id (str): The ID of the volume (Nova extension). - snapshot_id (str): The ID of the snapshot. - volume_size (str): The size of the volume, in GiBs. - volume_type (str): The volume type. - Not used now. - delete_on_termination (bool): Indicates whether to delete - the volume on instance termination. - iops (int): he number of IOPS to provision for the volume. - Not used now. - encrypted (boolean): Whether the volume is encrypted. - Not used now. - no_device (str): Suppresses the device mapping. 
- monitoring (dict): Dict can contains: - enabled (boolean): Enables monitoring for the instance. - Not used now. - subnet_id (str): The ID of the subnet to launch the instance into. - disable_api_termination (boolean): If you set this parameter to - true, you can't terminate the instance using the GUI console, - CLI, or API. - Not used now. - instance_initiated_shutdown_behavior (str): Indicates whether an - instance stops or terminates when you initiate shutdown from - the instance. - Not used now. - private_ip_address (str): The primary IP address. - You must specify a value from the IP address range - of the subnet. - client_token (str): Unique, case-sensitive identifier you provide - to ensure idempotency of the request. - network_interface (list of dicts): Dict can contain: - network_interface_id (str): An existing interface to attach - to a single instance. Requires n=1 instances. - device_index (int): The device index. If you are specifying - a network interface in the request, you must provide the - device index. - subnet_id (str): The subnet ID. Applies only when creating - a network interface. - description (str): A description. Applies only when creating - a network interface. - private_ip_address (str): The primary private IP address. - Applies only when creating a network interface. - Requires n=1 network interfaces in launch. - security_group_id (str): The ID of the security group. - Applies only when creating a network interface. - delete_on_termination (str): Indicates whether to delete - the network interface on instance termination. - private_ip_addresses (list of dicts): Dict can contain: - private_ip_address (str): The private IP address. - primary (boolean): Indicates whether the private IP address - is the primary private IP address. - secondary_private_ip_address_count (int): The number of - private IP addresses to assign to the network - interface. 
For a single network interface, you can't - specify this option and specify more than one private - IP address using private_ip_address. - associate_public_ip_address (boolean): Indicates whether - to assign a public IP address to an instance in a VPC. - iam_instance_profile (dict): Dict can contains: - arn (str): ARN to associate with the instances. - Not used now. - name (str): Name of the IIP to associate with the instances. - Not used now. - ebs_optimized (boolean): Whether the instance is optimized for EBS. - Not used now. - - Returns: - The instance reservation that was created. - - If you don't specify a security group when launching an instance, EC2 - uses the default security group. - """ - - @module_and_param_types(instance, 'i_ids') - def terminate_instances(self, context, instance_id): - """Shuts down one or more instances. - - Args: - context (RequestContext): The request context. - instance_id (list of str): One or more instance IDs. - - Returns: - A list of instance state changes. - - This operation is idempotent; if you terminate an instance more than - once, each call succeeds. - """ - - @module_and_param_types(instance, 'i_ids', 'filter', - 'int', 'str') - def describe_instances(self, context, instance_id=None, filter=None, - max_results=None, next_token=None): - """Describes one or more of your instances. - - Args: - context (RequestContext): The request context. - instance_id (list of str): One or more instance IDs. - filter (list of filter dict): You can specify filters so that the - response includes information for only certain instances. - max_results (int): The maximum number of items to return. - next_token (str): The token for the next set of items to return. - - Returns: - A list of reservations. - - If you specify one or more instance IDs, Amazon EC2 returns information - for those instances. If you do not specify instance IDs, you receive - information for all relevant instances. 
If you specify an invalid - instance ID, you receive an error. If you specify an instance that you - don't own, we don't include it in the results. - """ - - @module_and_param_types(instance, 'i_ids') - def reboot_instances(self, context, instance_id): - """Requests a reboot of one or more instances. - - Args: - context (RequestContext): The request context. - instance_id (list of str): One or more instance IDs. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(instance, 'i_ids', 'bool') - def stop_instances(self, context, instance_id, force=False): - """Stops one or more instances. - - Args: - context (RequestContext): The request context. - instance_id (list of str): One or more instance IDs. - force (boolean): Forces the instances to stop. The instances do not - have an opportunity to flush file system caches or file system - metadata. - Not used now. Equivalent value is True. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(instance, 'i_ids') - def start_instances(self, context, instance_id): - """Starts one or more instances. - - Args: - context (RequestContext): The request context. - instance_id (list of str): One or more instance IDs. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(instance, 'i_id', 'str255') - def describe_instance_attribute(self, context, instance_id, attribute): - """Describes the specified attribute of the specified instance. - - Args: - context (RequestContext): The request context. - instance_id (str): The ID of the instance. - attribute (str): The instance attribute. - Valid values: blockDeviceMapping | disableApiTermination | - ebsOptimized (unsupported now) | groupSet | - instanceInitiatedShutdownBehavior | instanceType | kernel | - productCodes (unsupported now) | ramdisk | rootDeviceName | - sourceDestCheck (unsupported now) | - sriovNetSupport (unsupported now) | userData - - Returns: - Specified attribute. 
- """ - - @module_and_param_types(instance, 'i_id', 'str', - 'dummy', 'bool', - 'dummy', - 'bool', - 'bool', 'sg_ids', - 'str', - 'str', 'str', - 'str', 'str', - 'str') - def modify_instance_attribute(self, context, instance_id, attribute=None, - value=None, source_dest_check=None, - block_device_mapping=None, - disable_api_termination=None, - ebs_optimized=None, group_id=None, - instance_initiated_shutdown_behavior=None, - instance_type=None, kernel=None, - ramdisk=None, sriov_net_support=None, - user_data=None): - """Modifies the specified attribute of the specified instance. - - Args: - context (RequestContext): The request context. - instance_id (str): The ID of the instance. - attribute (str): The name of the attribute. - value: The value of the attribute being modified. - source_dest_check: Indicates whether source/destination checking is - enabled. A value of true means checking is enabled, and false - means checking is disabled. - This value must be false for a NAT instance to perform NAT. - Unsupported now. - block_device_mapping (list of dict): - Modifies the DeleteOnTermination attribute for volumes that are - currently attached. The volume must be owned by the caller. If - no value is specified for DeleteOnTermination, the default is - true and the volume is deleted when the instance is terminated. - Dict can contain: - device_name (str): The device name exposed to the instance - (for example, /dev/sdh or xvdh). - virtual_name (str): The virtual device name (ephemeral[0..3]). - ebs (dict): Dict can contain: - volume_id (str): The ID of the volume (Nova extension). - delete_on_termination (bool): Indicates whether to delete - the volume on instance termination. - no_device (str): Suppresses the device mapping. - Unsupported now. - disable_api_termination (boolean): If the value is true, you can't - terminate the instance using the Amazon EC2 console, CLI, or - API; otherwise, you can. - ebs_optimized (boolean): Whether the instance is optimized for EBS. 
- Unsupported now. - group_id (list of str): [EC2-VPC] Changes the security - groups of the instance. You must specify at least one security - group, even if it's just the default security group for the - VPC. You must specify the security group ID, not the security - group name. - Unsupported now. - instance_initiated_shutdown_behavior (str): Indicates whether an - instance stops or terminates when you initiate shutdown from - the instance. - Unsupported now. - instance_type (str): Changes the instance type to the specified - value. For more information, see Instance Types. If the - instance type is not valid, the error returned is - InvalidInstanceAttributeValue. - Unsupported now. - kernel (str): Changes the instance's kernel to the specified value. - Unsupported now. - ramdisk (str): Changes the instance's RAM disk. - Unsupported now. - sriov_net_support (str): SR-IOV mode for networking. - Unsupported now. - user_data (str): Changes the instance's user data. - Unsupported now. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(instance, 'i_id', 'str') - def reset_instance_attribute(self, context, instance_id, attribute): - """Resets an attribute of an instance to its default value. - - To reset the kernel or ramdisk, the instance must be in a stopped - state. To reset the SourceDestCheck, the instance can be either - running or stopped. - - Args: - context (RequestContext): The request context. - instance_id (str): The ID of the instance. - attribute (str): The attribute to reset. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(key_pair, 'str255s', 'filter') - def describe_key_pairs(self, context, key_name=None, filter=None): - """Describes one or more of your key pairs. - - Args: - context (RequestContext): The request context. - key_name (list of str): On or more keypair names. - filter (list of filter dict): On or more filters. - - Returns: - Specified keypairs. 
- """ - - @module_and_param_types(key_pair, 'str255') - def create_key_pair(self, context, key_name): - """Creates a 2048-bit RSA key pair with the specified name. - - Args: - context (RequestContext): The request context. - key_name (str): A unique name for the key pair. - - Returns: - Created keypair. - """ - - @module_and_param_types(key_pair, 'str255') - def delete_key_pair(self, context, key_name): - """Deletes the specified key pair. - - Args: - context (RequestContext): The request context. - key_name (str): Name of the keypair. - - Returns: - Returns true if the request succeeds. - """ - - @module_and_param_types(key_pair, 'str255', 'str') - def import_key_pair(self, context, key_name, public_key_material): - """Imports the public key from an existing RSA key pair. - - Args: - context (RequestContext): The request context. - key_name (str): A unique name for the key pair. - public_key_material (str): The public key. You must base64 encode - the public key material before sending it. - - Returns: - Imported keypair. - """ - - @module_and_param_types(availability_zone, 'strs', 'filter') - def describe_availability_zones(self, context, zone_name=None, - filter=None): - """Describes one or more of the available Availability Zones. - - Args: - context (RequestContext): The request context. - zone_name (list of str): On or more zone names. - filter (list of filter dict): On or more filters. - - Returns: - Specified availability zones. - """ - - @module_and_param_types(availability_zone, 'strs', 'filter') - def describe_regions(self, context, region_name=None, filter=None): - """Describes one or more regions that are currently available to you. - - Args: - context (RequestContext): The request context. - region_name (list of str): On or more region names. - filter (list of filter dict): On or more filters. - - Returns: - Specified regions. 
- """ - - @module_and_param_types(availability_zone, 'strs') - def describe_account_attributes(self, context, attribute_name=None): - """Describes attributes of your EC2 account. - - Args: - context (RequestContext): The request context. - attribute_name (list of str): One or more account attribute names. - The following are the supported account attributes: - supported-platforms | default-vpc | max-instances | - vpc-max-security-groups-per-interface (unsupported now) | - max-elastic-ips (unsupported now) | - vpc-max-elastic-ips (unsupported now) - - Returns: - Information about one or more account attributes. - """ - - @module_and_param_types(instance, 'i_id_or_ids') - def get_password_data(self, context, instance_id): - """Retrieves the encrypted administrator password for Windows instance. - - Args: - context (RequestContext): The request context. - instance_id (str): ID of the Windows instance - - Returns: - The password of the instance, timestamp and instance id. - - The password is encrypted using the key pair that you specified when - you launched the instance. - """ - - @module_and_param_types(instance, 'i_id_or_ids') - def get_console_output(self, context, instance_id): - """Gets the console output for the specified instance. - - Args: - context (RequestContext): The request context. - instance_id (str): ID of the instance - - Returns: - The console output of the instance, timestamp and instance id. - """ - - @module_and_param_types(volume, 'str', 'int', - 'snap_id', 'str', 'int', - 'bool', 'str', 'str') - def create_volume(self, context, availability_zone=None, size=None, - snapshot_id=None, volume_type=None, iops=None, - encrypted=None, kms_key_id=None, client_token=None): - """Creates an EBS volume. - - Args: - context (RequestContext): The request context. - availability_zone (str): The Availability Zone in which to create - the volume. - It's required by AWS but optional for legacy Nova EC2 API. - instance_id (str): The size of the volume, in GiBs. 
- Valid values: 1-1024 - If you're creating the volume from a snapshot and don't specify - a volume size, the default is the snapshot size. - snapshot_id (str): The snapshot from which to create the volume. - Required if you are creating a volume from a snapshot. - volume_type (str): The volume type. One of volume types created - in used Block Storage. - iops (int): The number of IOPS to provision for the volume. - Valid values: Range is 100 to 4,000. - Not used now. - encrypted (boolean): Whether the volume should be encrypted. - Not used now. - kms_key_id (str): The full ARN of AWS KMS master key to use when - creating the encrypted volume. - Not used now. - client_token (str): Unique, case-sensitive identifier that you - provide to ensure the idempotency of the request. - - Returns: - Information about the volume. - - You can create a new empty volume or restore a volume from an EBS - snapshot. - """ - - @module_and_param_types(volume, 'vol_id', 'i_id', 'str') - def attach_volume(self, context, volume_id, instance_id, device): - """Attaches an EBS volume to a running or stopped instance. - - Args: - context (RequestContext): The request context. - volume_id (str): The ID of the volume. - instance_id (str): The ID of the instance. - device_name (str): The device name to expose to the instance. - - Returns: - Information about the attachment. - - The instance and volume must be in the same Availability Zone. - """ - - @module_and_param_types(volume, 'vol_id', 'i_id', 'str') - def detach_volume(self, context, volume_id, instance_id=None, device=None, - force=None): - """Detaches an EBS volume from an instance. - - Args: - context (RequestContext): The request context. - volume_id (str): The ID of the volume. - instance_id (str): The ID of the instance. - Not used now. - device (str): The device name. - Not used now. - force (boolean): Forces detachment. - Not used now. - - Returns: - Information about the detachment. 
- """ - - @module_and_param_types(volume, 'vol_id') - def delete_volume(self, context, volume_id): - """Deletes the specified EBS volume. - - Args: - context (RequestContext): The request context. - volume_id (str): The ID of the volume. - - Returns: - Returns true if the request succeeds. - - The volume must be in the available state. - """ - - @module_and_param_types(volume, 'vol_ids', 'filter', - 'int', 'str') - def describe_volumes(self, context, volume_id=None, filter=None, - max_results=None, next_token=None): - """Describes the specified EBS volumes. - - Args: - context (RequestContext): The request context. - volume_id (list of str): One or more volume IDs. - filter (list of filter dict): You can specify filters so that the - response includes information for only certain volumes. - max_results (int): The maximum number of items to return. - next_token (str): The token for the next set of items to return. - - Returns: - A list of volumes. - """ - - @module_and_param_types(snapshot, 'vol_id', 'str') - def create_snapshot(self, context, volume_id, description=None): - """Creates a snapshot of an EBS volume. - - Args: - context (RequestContext): The request context. - volume_id (str): The ID of the volume. - description (str): A description for the snapshot. - - Returns: - Information about the snapshot. - """ - - @module_and_param_types(snapshot, 'snap_id') - def delete_snapshot(self, context, snapshot_id): - """Deletes the specified snapshot. - - Args: - context (RequestContext): The request context. - snapshot_id (str): The ID of the snapshot. - - Returns: - Returns true if the request succeeds. - """ - - @module_and_param_types(snapshot, 'snap_ids', 'strs', - 'strs', 'filter', - 'int', 'str') - def describe_snapshots(self, context, snapshot_id=None, owner=None, - restorable_by=None, filter=None, - max_results=None, next_token=None): - """Describes one or more of the snapshots available to you. - - Args: - context (RequestContext): The request context. 
- snapshot_id (list of str): One or more snapshot IDs. - owner (list of str): Returns the snapshots owned by the specified - owner. - Not used now. - restorable_by (list of str): One or more accounts IDs that can - create volumes from the snapshot. - Not used now. - filter (list of filter dict): You can specify filters so that the - response includes information for only certain snapshots. - max_results (int): The maximum number of items to return. - next_token (str): The token for the next set of items to return. - - Returns: - A list of snapshots. - """ - - @module_and_param_types(image, 'i_id', 'str', 'str', - 'bool', 'dummy') - def create_image(self, context, instance_id, name=None, description=None, - no_reboot=False, block_device_mapping=None): - """Creates an EBS-backed AMI from an EBS-backed instance. - - Args: - context (RequestContext): The request context. - instance_id (str): The ID of the instance. - name (str): A name for the new image. - It's required by AWS but optional for legacy Nova EC2 API. - description (str): A description for the new image. - Not used now. - no_reboot (boolean): When the parameter is set to false, EC2 - attempts to shut down the instance cleanly before image - creation and then reboots the instance. - block_device_mapping (list of dict): Dict can contain: - device_name (str): The device name exposed to the instance - (for example, /dev/sdh or xvdh). - virtual_name (str): The virtual device name (ephemeral[0..3]). - ebs (dict): Dict can contain: - volume_id (str): The ID of the volume (Nova extension). - snapshot_id (str): The ID of the snapshot. - volume_size (str): The size of the volume, in GiBs. - volume_type (str): The volume type. - Not used now. - delete_on_termination (bool): Indicates whether to delete - the volume on instance termination. - iops (int): he number of IOPS to provision for the volume. - Not used now. - encrypted (boolean): Whether the volume is encrypted. - Not used now. 
- no_device (str): Suppresses the device mapping. - - Returns: - The ID of the new AMI. - """ - return image.create_image(context, instance_id, name, description, - no_reboot, block_device_mapping) - - @module_and_param_types(image, 'str', 'str', - 'str', 'str', - 'str', 'dummy', - 'str', 'aki_id', - 'ari_id', 'str') - def register_image(self, context, name=None, image_location=None, - description=None, architecture=None, - root_device_name=None, block_device_mapping=None, - virtualization_type=None, kernel_id=None, - ramdisk_id=None, sriov_net_support=None): - """Registers an AMI. - - Args: - context (RequestContext): The request context. - name (str): A name for your AMI. - It's required by AWS but optional for legacy Nova EC2 API. - image_location (str): The full path to AMI manifest in S3 storage. - description (str): A description for your AMI. - Not used now. - architecture (str): The architecture of the AMI. - Not used now. - root_device_name (str): The name of the root device - block_device_mapping (list of dict): Dict can contain: - device_name (str): The device name exposed to the instance - (for example, /dev/sdh or xvdh). - virtual_name (str): The virtual device name (ephemeral[0..3]). - ebs (dict): Dict can contain: - volume_id (str): The ID of the volume (Nova extension). - snapshot_id (str): The ID of the snapshot. - volume_size (str): The size of the volume, in GiBs. - volume_type (str): The volume type. - Not used now. - delete_on_termination (bool): Indicates whether to delete - the volume on instance termination. - iops (int): he number of IOPS to provision for the volume. - Not used now. - encrypted (boolean): Whether the volume is encrypted. - Not used now. - no_device (str): Suppresses the device mapping. - virtualization_type (str): The type of virtualization. - Not used now. - kernel_id (str): The ID of the kernel. - Not used now. - ramdisk_id (str): The ID of the RAM disk. - Not used now. 
- sriov_net_support (str): SR-IOV mode for networking. - Not used now. - - Returns: - The ID of the new AMI. - """ - - @module_and_param_types(image, 'amiariaki_id') - def deregister_image(self, context, image_id): - """Deregisters the specified AMI. - - Args: - context (RequestContext): The request context. - image_id (str): The ID of the AMI. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(image, 'strs', 'amiariaki_ids', - 'strs', 'filter') - def describe_images(self, context, executable_by=None, image_id=None, - owner=None, filter=None): - """Describes one or more of the images available to you. - - Args: - context (RequestContext): The request context. - executable_by (list of str): Filters the images by users with - explicit launch permissions. - Not used now. - image_id (list of str): One or more image IDs. - owner (list of str): Filters the images by the owner. - Not used now. - filter (list of filter dict): You can specify filters so that the - response includes information for only certain images. - - Returns: - A list of images. - """ - - @module_and_param_types(image, 'amiariaki_id', 'str') - def describe_image_attribute(self, context, image_id, attribute): - """Describes the specified attribute of the specified AMI. - - Args: - context (RequestContext): The request context. - image_id (str): The ID of the image. - attribute (str): The attribute of the network interface. - Valid values: description (unsupported now)| kernel | ramdisk | - launchPermission | productCodes (unsupported now)| - blockDeviceMapping | rootDeviceName (Nova EC2 extension) - - Returns: - Specified attribute. 
- """ - return image.describe_image_attribute(context, image_id, attribute) - - @module_and_param_types(image, 'amiariaki_id', 'str', - 'strs', 'str', - 'str', 'dummy', - 'dummy', 'dummy', 'str') - def modify_image_attribute(self, context, image_id, attribute=None, - user_group=None, operation_type=None, - description=None, launch_permission=None, - product_code=None, user_id=None, value=None): - """Modifies the specified attribute of the specified AMI. - - Args: - context (RequestContext): The request context. - image_id (str): The ID of the image. - attribute (str): The name of the attribute to modify. - user_group (list of str): One or more user groups. - Only 'all' group is supported now. - operation_type (str): The operation type. - Only 'add' and 'remove' operation types are supported now. - description: A description for the AMI. - launch_permission: : A launch permission modification. - product_code: : Not supported now. - user_id: : Not supported now. - value: : The value of the attribute being modified. - This is only valid when modifying the description attribute. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(image, 'amiariaki_id', 'str') - def reset_image_attribute(self, context, image_id, attribute): - """Resets an attribute of an AMI to its default value. - - Args: - context (RequestContext): The request context. - image_id (str): The ID of the image. - attribute (str): The attribute to reset (currently you can only - reset the launch permission attribute). - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(tag, 'ec2_ids', 'key_value_dict_list') - def create_tags(self, context, resource_id, tag): - """Adds or overwrites one or more tags for the specified resources. - - Args: - context (RequestContext): The request context. - resource_id (list of str): The IDs of one or more resources to tag. - tag (list of dict): Dict can contain: - key (str): The key of the tag. 
- value (str): The value of the tag. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(tag, 'ec2_ids', 'dummy') - def delete_tags(self, context, resource_id, tag=None): - """Deletes the specified tags from the specified resources. - - Args: - context (RequestContext): The request context. - resource_id (list of str): The IDs of one or more resources to tag. - tag (list of dict): One or more tags to delete. - Dict can contain: - key (str): The key of the tag. - value (str): The value of the tag. - - Returns: - true if the request succeeds. - - If you omit the value in tag parameter, we delete the tag regardless of - its value. If you specify this parameter with an empty string as the - value, we delete the key only if its value is an empty string. - """ - - @module_and_param_types(tag, 'filter', 'int', - 'str') - def describe_tags(self, context, filter=None, max_results=None, - next_token=None): - """Describes one or more of the tags for your EC2 resources. - - Args: - context (RequestContext): The request context. - filter (list of filter dict): You can specify filters so that the - response includes information for only certain tags. - max_results (int): The maximum number of items to return. - next_token (str): The token for the next set of items to return. - - Returns: - A list of tags. - """ - - -class VpcCloudController(CloudController): - - """VPC Cloud Controller - - Adds full VPC functionality which requires Neutron to work. - """ - - @module_and_param_types(vpc, 'vpc_cidr', 'str255') - def create_vpc(self, context, cidr_block, instance_tenancy='default'): - """Creates a VPC with the specified CIDR block. - - Args: - context (RequestContext): The request context. - cidr_block (str): The CIDR block for the VPC - (for example, 10.0.0.0/16). - instance_tenancy (str): The supported tenancy options for - instances launched into the VPC. - Valid values: default | dedicated - Not used now. - - Returns: - Information about the VPC. 
- - The smallest VPC you can create uses a /28 netmask (16 IP addresses), - and the largest uses a /16 netmask. - """ - - @module_and_param_types(vpc, 'vpc_id') - def delete_vpc(self, context, vpc_id): - """Deletes the specified VPC. - - Args: - context (RequestContext): The request context. - vpc_id (str): The ID of the VPC. - - Returns: - true if the request succeeds. - - You must detach or delete all gateways and resources that are - associated with the VPC before you can delete it. For example, you must - terminate all instances running in the VPC, delete all security groups - associated with the VPC (except the default one), delete all route - tables associated with the VPC (except the default one), and so on. - """ - return vpc.delete_vpc(context, vpc_id) - - @module_and_param_types(vpc, 'vpc_ids', 'filter') - def describe_vpcs(self, context, vpc_id=None, filter=None): - """Describes one or more of your VPCs. - - Args: - context (RequestContext): The request context. - vpc_id (list of str): One or more VPC IDs. - Default: Describes all your VPCs. - filter (list of filter dict): You can specify filters so that - the response includes information for only certain VPCs. - - Returns: - A list of VPCs. - """ - return vpc.describe_vpcs(context, vpc_id, filter) - - @module_and_param_types(internet_gateway) - def create_internet_gateway(self, context): - """Creates an Internet gateway for use with a VPC. - - Args: - context (RequestContext): The request context. - - Returns: - Information about the Internet gateway. - """ - - @module_and_param_types(internet_gateway, 'igw_id', 'vpc_id') - def attach_internet_gateway(self, context, internet_gateway_id, vpc_id): - """Attaches an Internet gateway to a VPC. - - Args: - context (RequestContext): The request context. - internet_gateway_id (str): The ID of the Internet gateway. - vpc_id (str): The ID of the VPC. - - Returns: - Returns true if the request succeeds. 
- - Attaches an Internet gateway to a VPC, enabling connectivity between - the Internet and the VPC. - """ - - @module_and_param_types(internet_gateway, 'igw_id', 'vpc_id') - def detach_internet_gateway(self, context, internet_gateway_id, vpc_id): - """Detaches an Internet gateway from a VPC. - - Args: - context (RequestContext): The request context. - internet_gateway_id (str): The ID of the Internet gateway. - vpc_id (str): The ID of the VPC. - - Returns: - Returns true if the request succeeds. - - Detaches an Internet gateway from a VPC, disabling connectivity between - the Internet and the VPC. The VPC must not contain any running - instances with Elastic IP addresses. - """ - - @module_and_param_types(internet_gateway, 'igw_id') - def delete_internet_gateway(self, context, internet_gateway_id): - """Deletes the specified Internet gateway. - - Args: - context (RequestContext): The request context. - internet_gateway_id (str): The ID of the Internet gateway. - - Returns: - Returns true if the request succeeds. - - You must detach the Internet gateway from the VPC before you can - delete it. - """ - - @module_and_param_types(internet_gateway, 'igw_ids', - 'filter') - def describe_internet_gateways(self, context, internet_gateway_id=None, - filter=None): - """Describes one or more of your Internet gateways. - - Args: - context (RequestContext): The request context. - internet_gateway_id (list of str): One or more Internet gateway - IDs. - Default: Describes all your Internet gateways. - filter (list of filter dict): You can specify filters so that the - response includes information for only certain Internet - gateways. - - Returns: - A list of Internet gateways. - """ - - @module_and_param_types(subnet, 'vpc_id', 'subnet_cidr', - 'str255') - def create_subnet(self, context, vpc_id, cidr_block, - availability_zone=None): - """Creates a subnet in an existing VPC. - - Args: - context (RequestContext): The request context. - vpc_id (str): The ID of the VPC. 
- cidr_block (str): The CIDR block for the subnet. - For example, 10.0.0.0/24. - availability_zone (str): The Availability Zone for the subnet. - If None or empty EC2 selects one for you. - - Returns: - Information about the subnet. - - The subnet's CIDR block can be the same as the VPC's CIDR block, - or a subset of the VPC's CIDR block. If you create more than one subnet - in a VPC, the subnets' CIDR blocks must not overlap. The smallest - subnet you can create uses a /28 netmask (16 IP addresses), - and the largest uses a /16 netmask. - - EC2 reserves both the first four and the last IP address - in each subnet's CIDR block. They're not available for use. - - If you add more than one subnet to a VPC, they're set up - in a star topology with a logical router in the middle. - """ - - @module_and_param_types(subnet, 'subnet_id') - def delete_subnet(self, context, subnet_id): - """Deletes the specified subnet. - - Args: - context (RequestContext): The request context. - subnet_id (str): The ID of the subnet. - - Returns: - true if the request succeeds. - - You must terminate all running instances in the subnet before - you can delete the subnet. - """ - - @module_and_param_types(subnet, 'subnet_ids', 'filter') - def describe_subnets(self, context, subnet_id=None, filter=None): - """Describes one or more of your subnets. - - - Args: - context (RequestContext): The request context. - subnet_id (list of str): One or more subnet IDs. - Default: Describes all your subnets. - filter (list of filter dict): You can specify filters so that - the response includes information for only certain subnets. - - Returns: - A list of subnets. - """ - - @module_and_param_types(route_table, 'vpc_id') - def create_route_table(self, context, vpc_id): - """Creates a route table for the specified VPC. - - Args: - context (RequestContext): The request context. - vpc_id (str): The ID of the VPC. - - Returns: - Information about the route table. 
- - After you create a route table, you can add routes and associate the - table with a subnet. - """ - - @module_and_param_types(route_table, 'rtb_id', 'cidr', - 'igw_or_vgw_id', 'i_id', - 'eni_id', - 'dummy') - def create_route(self, context, route_table_id, destination_cidr_block, - gateway_id=None, instance_id=None, - network_interface_id=None, - vpc_peering_connection_id=None): - """Creates a route in a route table within a VPC. - - Args: - context (RequestContext): The request context. - route_table_id (str): The ID of the route table for the route. - destination_cidr_block (str): The CIDR address block used for the - destination match. Routing decisions are based on the most - specific match. - gateway_id (str): The ID of an Internet gateway or virtual private - gateway attached to your VPC. - instance_id (str): The ID of a NAT instance in your VPC. - The operation fails if you specify an instance ID unless - exactly one network interface is attached. - network_interface_id (str): The ID of a network interface. - vpc_peering_connection_id (str): The ID of a VPC peering - connection. - - Returns: - true if the requests succeeds. - - The route's target can be an Internet gateway or virtual private - gateway attached to the VPC, a VPC peering connection, or a NAT - instance in the VPC. - """ - - @module_and_param_types(route_table, 'rtb_id', 'cidr', - 'igw_or_vgw_id', 'i_id', - 'eni_id', - 'dummy') - def replace_route(self, context, route_table_id, destination_cidr_block, - gateway_id=None, instance_id=None, - network_interface_id=None, - vpc_peering_connection_id=None): - """Replaces an existing route within a route table in a VPC. - - Args: - context (RequestContext): The request context. - route_table_id (str): The ID of the route table for the route. - destination_cidr_block (str): The CIDR address block used for the - destination match. Routing decisions are based on the most - specific match. 
- gateway_id (str): The ID of an Internet gateway or virtual private - gateway attached to your VPC. - instance_id (str): The ID of a NAT instance in your VPC. - The operation fails if you specify an instance ID unless - exactly one network interface is attached. - network_interface_id (str): The ID of a network interface. - vpc_peering_connection_id (str): The ID of a VPC peering - connection. - - Returns: - true if the requests succeeds. - """ - - @module_and_param_types(route_table, 'rtb_id', 'cidr') - def delete_route(self, context, route_table_id, destination_cidr_block): - """Deletes the specified route from the specified route table. - - Args: - context (RequestContext): The request context. - route_table_id (str): The ID of the route table. - destination_cidr_block (str): The CIDR range for the route. - The value you specify must match the CIDR for the route - exactly. - - Returns: - true if the requests succeeds. - """ - - @module_and_param_types(route_table, 'rtb_id', - 'vgw_id') - def enable_vgw_route_propagation(self, context, route_table_id, - gateway_id): - """Enables a VGW to propagate routes to the specified route table. - - Args: - context (RequestContext): The request context. - route_table_id (str): The ID of the route table. - gateway_id (str): The ID of the virtual private gateway. - - Returns: - true if the requests succeeds. - """ - - @module_and_param_types(route_table, 'rtb_id', - 'vgw_id') - def disable_vgw_route_propagation(self, context, route_table_id, - gateway_id): - """Disables a (VGW) from propagating routes to a specified route table. - - Args: - context (RequestContext): The request context. - route_table_id (str): The ID of the route table. - gateway_id (str): The ID of the virtual private gateway. - - Returns: - true if the requests succeeds. - """ - - @module_and_param_types(route_table, 'rtb_id', 'subnet_id') - def associate_route_table(self, context, route_table_id, subnet_id): - """Associates a subnet with a route table. 
- - Args: - context (RequestContext): The request context. - route_table_id (str): The ID of the route table. - subnet_id (str): The ID of the subnet. - - Returns: - The route table association ID - - The subnet and route table must be in the same VPC. This association - causes traffic originating from the subnet to be routed according to - the routes in the route table. The action returns an association ID, - which you need in order to disassociate the route table from the subnet - later. A route table can be associated with multiple subnets. - """ - - @module_and_param_types(route_table, 'rtbassoc_id', - 'rtb_id') - def replace_route_table_association(self, context, association_id, - route_table_id): - """Changes the route table associated with a given subnet in a VPC. - - Args: - context (RequestContext): The request context. - association_id (str): The association ID. - route_table_id (str): The ID of the new route table to associate - with the subnet. - - Returns: - The ID of the new association. - - After the operation completes, the subnet uses the routes in the new - route table it's associated with. - You can also use this action to change which table is the main route - table in the VPC. - """ - - @module_and_param_types(route_table, 'rtbassoc_id') - def disassociate_route_table(self, context, association_id): - """Disassociates a subnet from a route table. - - Args: - context (RequestContext): The request context. - association_id (str): The association ID. - - Returns: - true if the requests succeeds. - - After you perform this action, the subnet no longer uses the routes in - the route table. Instead, it uses the routes in the VPC's main route - table. - """ - - @module_and_param_types(route_table, 'rtb_id') - def delete_route_table(self, context, route_table_id): - """Deletes the specified route table. - - Args: - context (RequestContext): The request context. - route_table_id (str): The ID of the route table. 
- - You must disassociate the route table from any subnets before you can - delete it. You can't delete the main route table. - - Returns: - true if the requests succeeds. - """ - - @module_and_param_types(route_table, 'rtb_ids', 'filter') - def describe_route_tables(self, context, route_table_id=None, filter=None): - """Describes one or more of your route tables. - - Args: - context (RequestContext): The request context. - route_table_id (str): One or more route table IDs. - filter (list of filter dict): You can specify filters so that the - response includes information for only certain tables. - - Returns: - A list of route tables - """ - - @module_and_param_types(dhcp_options, 'key_value_dict_list') - def create_dhcp_options(self, context, dhcp_configuration): - """Creates a set of DHCP options for your VPC. - - Args: - context (RequestContext): The request context. - dhcp_configuration (list of dict): Dict can contain - 'key' (str) and - 'value' (str) for each option. - You can specify the following options: - - domain-name-servers: up to 4 DNS servers, - IPs are in value separated by commas - - domain-name: domain name - - ntp-servers: up to 4 NTP servers - - netbios-name-servers: up to 4 NetBIOS name servers - - netbios-node-type: the NetBIOS node type (1,2,4 or 8) - Returns: - A set of DHCP options - - """ - - @module_and_param_types(dhcp_options, 'dopt_ids', - 'filter') - def describe_dhcp_options(self, context, dhcp_options_id=None, - filter=None): - """Describes the specified DHCP options. - - - Args: - context (RequestContext): The request context. - dhcp_options_id (list of str): DHCP options id. - filter (list of filter dict): You can specify filters so that - the response includes information for only certain DHCP - options. - - Returns: - DHCP options. 
- """ - - @module_and_param_types(dhcp_options, 'dopt_id') - def delete_dhcp_options(self, context, dhcp_options_id): - """Deletes the specified set of DHCP options - - Args: - context (RequestContext): The request context. - dhcp_options_id (str): DHCP options id - - Returns: - true if the request succeeds - - You must disassociate the set of DHCP options before you can delete it. - You can disassociate the set of DHCP options by associating either a - new set of options or the default set of options with the VPC. - """ - - @module_and_param_types(dhcp_options, 'dopt_id_or_default', 'vpc_id') - def associate_dhcp_options(self, context, dhcp_options_id, vpc_id): - """Associates a set of DHCP options with the specified VPC. - - Args: - context (RequestContext): The request context. - dhcp_options_id (str): DHCP options id or "default" to associate no - DHCP options with the VPC - - Returns: - true if the request succeeds - """ - - @module_and_param_types(network_interface, 'subnet_id', - 'ip', - 'dummy', - 'int', - 'str', - 'sg_ids', - 'str') - def create_network_interface(self, context, subnet_id, - private_ip_address=None, - private_ip_addresses=None, - secondary_private_ip_address_count=None, - description=None, - security_group_id=None, - client_token=None): - """Creates a network interface in the specified subnet. - - Args: - subnet_id (str): The ID of the subnet to associate with the - network interface. - private_ip_address (str): The primary private IP address of the - network interface. If you don't specify an IP address, - EC2 selects one for you from the subnet range. - private_ip_addresses (list of dict): Dict can contain - 'private_ip_address' (str) and - 'primary' (boolean) for each address. - The private IP addresses of the specified network interface and - indicators which one is primary. Only one private IP address - can be designated as primary. 
- You can't specify this parameter when - private_ip_addresses['primary'] is true if you specify - private_ip_address. - secondary_private_ip_address_count (integer): The number of - secondary private IP addresses to assign to a network - interface. EC2 selects these IP addresses within the subnet - range. For a single network interface, you can't specify this - option and specify more than one private IP address using - private_ip_address and/or private_ip_addresses. - description (str): A description for the network interface. - security_group_id (list of str): The list of security group IDs - for the network interface. - client_token (str): Unique, case-sensitive identifier that you - provide to ensure the idempotency of the request. - - Returns: - The network interface that was created. - """ - - @module_and_param_types(network_interface, 'eni_id') - def delete_network_interface(self, context, network_interface_id): - """Deletes the specified network interface. - - - Args: - context (RequestContext): The request context. - network_interface_id (str): The ID of the network interface. - - Returns: - true if the request succeeds. - - You must detach the network interface before you can delete it. - """ - - @module_and_param_types(network_interface, 'eni_ids', - 'filter') - def describe_network_interfaces(self, context, network_interface_id=None, - filter=None): - """Describes one or more of your network interfaces. - - - Args: - context (RequestContext): The request context. - network_interface_id (list of str): One or more network interface - IDs. - Default: Describes all your network interfaces. - filter (list of filter dict): You can specify filters so that - the response includes information for only certain interfaces. - - Returns: - A list of network interfaces. 
- """ - return network_interface.describe_network_interfaces( - context, network_interface_id, filter) - - @module_and_param_types(network_interface, 'eni_id', - 'str') - def describe_network_interface_attribute(self, context, - network_interface_id, - attribute=None): - """Describes the specified attribute of the specified network interface. - - - Args: - context (RequestContext): The request context. - network_interface_id: Network interface ID. - attribute: The attribute of the network interface. - - Returns: - Specified attribute. - - You can specify only one attribute at a time. - """ - return network_interface.describe_network_interface_attribute( - context, network_interface_id, attribute) - - @module_and_param_types(network_interface, 'eni_id', - 'str', - 'bool', - 'sg_ids', - 'dummy') - def modify_network_interface_attribute(self, context, - network_interface_id, - description=None, - source_dest_check=None, - security_group_id=None, - attachment=None): - """Modifies the specified attribute of the specified network interface. - - - Args: - context (RequestContext): The request context. - network_interface_id: Network interface ID. - description: New description. - source_dest_check: Indicates whether source/destination checking is - enabled. A value of true means checking is enabled, and false - means checking is disabled. - This value must be false for a NAT instance to perform NAT. - security_group_id [list of str]: List of secuirity groups to attach - attachment: Information about the interface attachment. If - modifying the 'delete on termination' attribute, you must - specify the ID of the interface attachment. - - Returns: - true if the request succeeds. - - You can specify only one attribute at a time. - """ - - @module_and_param_types(network_interface, 'eni_id', - 'str') - def reset_network_interface_attribute(self, context, - network_interface_id, - attribute): - """Resets the specified attribute of the specified network interface. 
- - - Args: - context (RequestContext): The request context. - network_interface_id: Network interface ID. - attribute: The attribute to reset. Valid values "SourceDestCheck" - (reset to True) - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(network_interface, 'eni_id', - 'i_id', 'int') - def attach_network_interface(self, context, network_interface_id, - instance_id, device_index): - """Attach a network interface to an instance. - - Args: - context (RequestContext): The request context. - network_interface_id (str): The ID of the network interface. - instance_id (str): The ID of the instance. - device_index (int): The index of the device for the network - interface attachment. - - Returns: - Attachment Id - """ - - @module_and_param_types(network_interface, 'eni_attach_id', - 'bool') - def detach_network_interface(self, context, attachment_id, - force=None): - """Detach a network interface from an instance. - - Args: - context (RequestContext): The request context. - attachment_id (str): The ID of the attachment. - force (boolean): Specifies whether to force a detachment - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(network_interface, 'eni_id', - 'ips', - 'int', - 'bool') - def assign_private_ip_addresses(self, context, network_interface_id, - private_ip_address=None, - secondary_private_ip_address_count=None, - allow_reassignment=False): - """Assigns secondary private IP addresses to the network interface. - - Args: - network_interface_id (str): The ID of the network interface. - private_ip_address (list of str): List of IP addresses to assign. - secondary_private_ip_address_count (integer): The number of - secondary private IP addresses to assign. EC2 selects these - IP addresses within the subnet range. - - Returns: - true if the request succeeds. 
- """ - - @module_and_param_types(network_interface, 'eni_id', - 'ips') - def unassign_private_ip_addresses(self, context, network_interface_id, - private_ip_address=None): - """Unassigns secondary IP addresses from the network interface. - - Args: - network_interface_id (str): The ID of the network interface. - private_ip_address (list of str): List of secondary private IP - addresses to unassign. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(vpn_gateway, 'vpn_connection_type', 'str') - def create_vpn_gateway(self, context, type, availability_zone=None): - """Creates a virtual private gateway. - - Args: - context (RequestContext): The request context. - type (str): The type of VPN connection this virtual private - gateway supports (ipsec.1). - availability_zone (str): The Availability Zone for the virtual - private gateway. - - Returns: - Information about the virtual private gateway. - """ - - @module_and_param_types(vpn_gateway, 'vpc_id', 'vgw_id') - def attach_vpn_gateway(self, context, vpc_id, vpn_gateway_id): - """Attaches a virtual private gateway to a VPC. - - Args: - context (RequestContext): The request context. - vpc_id (str): The ID of the VPC. - vpn_gateway_id (str): he ID of the virtual private gateway. - - Returns: - Information about the attachment. - """ - - @module_and_param_types(vpn_gateway, 'vpc_id', 'vgw_id') - def detach_vpn_gateway(self, context, vpc_id, vpn_gateway_id): - """Detaches a virtual private gateway from a VPC. - - Args: - context (RequestContext): The request context. - vpc_id (str): The ID of the VPC. - vpn_gateway_id (str): he ID of the virtual private gateway. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(vpn_gateway, 'vgw_id') - def delete_vpn_gateway(self, context, vpn_gateway_id): - """Deletes the specified virtual private gateway. - - Args: - context (RequestContext): The request context. - vpn_gateway_id (str): The ID of the virtual private gateway. 
- - Returns: - true if the request succeeds. - """ - - @module_and_param_types(vpn_gateway, 'vgw_ids', 'filter') - def describe_vpn_gateways(self, context, vpn_gateway_id=None, filter=None): - """Describes one or more of your virtual private gateways. - - Args: - context (RequestContext): The request context. - vpn_gateway_id (list of str): One or more virtual private gateway - IDs. - filter (list of filter dict): One or more filters. - - Returns: - Information about one or more virtual private gateways. - """ - - @module_and_param_types(customer_gateway, 'vpn_connection_type', - 'int', 'ip', 'ip') - def create_customer_gateway(self, context, type, - bgp_asn=None, ip_address=None, public_ip=None): - """Provides information to EC2 API about VPN customer gateway device. - - Args: - context (RequestContext): The request context. - ip_address (str): The Internet-routable IP address for the - customer gateway's outside interface. - type (str): The type of VPN connection that this customer gateway - supports (ipsec.1). - bgp_asn (int): For devices that support BGP, - the customer gateway's BGP ASN (65000 otherwise). - - Returns: - Information about the customer gateway. - - You cannot create more than one customer gateway with the same VPN - type, IP address, and BGP ASN parameter values. If you run an - identical request more than one time, subsequent requests return - information about the existing customer gateway. - """ - - @module_and_param_types(customer_gateway, 'cgw_id') - def delete_customer_gateway(self, context, customer_gateway_id): - """Deletes the specified customer gateway. - - Args: - context (RequestContext): The request context. - customer_gateway_id (str): The ID of the customer gateway. - - Returns: - true if the request succeeds. - - You must delete the VPN connection before you can delete the customer - gateway. 
- """ - - @module_and_param_types(customer_gateway, 'cgw_ids', - 'filter') - def describe_customer_gateways(self, context, customer_gateway_id=None, - filter=None): - """Describes one or more of your VPN customer gateways. - - Args: - context (RequestContext): The request context. - customer_gateway_id (list of str): One or more customer gateway - IDs. - filter (list of filter dict): One or more filters. - - Returns: - Information about one or more customer gateways. - """ - - @module_and_param_types(vpn_connection, 'cgw_id', - 'vgw_id', 'vpn_connection_type', 'dummy') - def create_vpn_connection(self, context, customer_gateway_id, - vpn_gateway_id, type, options=None): - """Creates a VPN connection. - - Args: - context (RequestContext): The request context. - customer_gateway_id (str): The ID of the customer gateway. - vpn_gateway_id (str): The ID of the virtual private gateway. - type (str): The type of VPN connection (ipsec.1). - options (dict of options): Indicates whether the VPN connection - requires static routes. - - Returns: - Information about the VPN connection. - - Creates a VPN connection between an existing virtual private gateway - and a VPN customer gateway. - """ - - @module_and_param_types(vpn_connection, 'vpn_id', - 'cidr') - def create_vpn_connection_route(self, context, vpn_connection_id, - destination_cidr_block): - """Creates a static route associated with a VPN connection. - - Args: - context (RequestContext): The request context. - vpn_connection_id (str): The ID of the VPN connection. - destination_cidr_block (str): The CIDR block associated with the - local subnet of the customer network. - - Returns: - true if the request succeeds. - - The static route allows traffic to be routed from the virtual private - gateway to the VPN customer gateway. 
- """ - - @module_and_param_types(vpn_connection, 'vpn_id', - 'cidr') - def delete_vpn_connection_route(self, context, vpn_connection_id, - destination_cidr_block): - """Deletes the specified static route associated with a VPN connection. - - Args: - context (RequestContext): The request context. - vpn_connection_id (str): The ID of the VPN connection. - destination_cidr_block (str): The CIDR block associated with the - local subnet of the customer network. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(vpn_connection, 'vpn_id') - def delete_vpn_connection(self, context, vpn_connection_id): - """Deletes the specified VPN connection. - - Args: - context (RequestContext): The request context. - vpn_connection_id (str): The ID of the VPN connection. - - Returns: - true if the request succeeds. - """ - - @module_and_param_types(vpn_connection, 'vpn_ids', - 'filter') - def describe_vpn_connections(self, context, vpn_connection_id=None, - filter=None): - """Describes one or more of your VPN connections. - - Args: - context (RequestContext): The request context. - vpn_connection_id (list of str): One or more VPN connection IDs. - filter (list of filter dict): One or more filters. - - Returns: - Information about one or more VPN connections. - """ diff --git a/ec2api/api/common.py b/ec2api/api/common.py deleted file mode 100644 index 24eeb49e..00000000 --- a/ec2api/api/common.py +++ /dev/null @@ -1,534 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 -import collections -import fnmatch -import inspect -import operator - -from ec2api.api import ec2utils -from ec2api.api import validator -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ -from oslo_config import cfg -from oslo_log import log as logging - - -ec2_opts = [ - cfg.BoolOpt('disable_ec2_classic', - help='True if server does not support EC2 Classic mode ' - 'in favor of default VPC'), -] - -CONF = cfg.CONF -CONF.register_opts(ec2_opts) -LOG = logging.getLogger(__name__) - - -class OnCrashCleaner(object): - - def __init__(self): - self._cleanups = [] - self._suppress_exception = False - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - if exc_type is None: - return - self._run_cleanups(self._cleanups) - return self._suppress_exception - - def addCleanup(self, function, *args, **kwargs): - self._cleanups.append((function, args, kwargs)) - - def approveChanges(self): - del self._cleanups[:] - self._suppress_exception = True - - def _run_cleanups(self, cleanups): - for function, args, kwargs in reversed(cleanups): - try: - function(*args, **kwargs) - except Exception: - if inspect.ismethod(function): - cmodule = function.__self__.__class__.__module__ - cname = function.__self__.__class__.__name__ - name = '%s.%s.%s' % (cmodule, cname, function.__name__) - elif inspect.isfunction(function): - name = '%s.%s' % (function.__module__, function.__name__) - else: - name = '%s.%s' % (function.__class__.__module__, - function.__class__.__name__) - formatted_args = '' - args_string = ', '.join([repr(arg) for arg in args]) - kwargs_string = ', '.join([ - '%s=%r' % (key, value) for key, value in kwargs.items() - ]) - if args_string: - formatted_args = args_string - if kwargs_string: - if formatted_args: - formatted_args += ', ' - formatted_args += kwargs_string - 
LOG.warning( - 'Error cleaning up %(name)s(%(args)s)' % - {'name': name, 'args': formatted_args}, - exc_info=True) - pass - - -class Validator(object): - - def __init__(self, param_name="", action="", params=[]): - self.param_name = param_name - self.action = action - self.params = params - - def multi(self, items, validation_func): - validator.validate_list(items, self.param_name) - for item in items: - validation_func(item) - - def dummy(self, value): - pass - - def bool(self, value): - validator.validate_bool(value, self.param_name) - - def int(self, value): - validator.validate_int(value, self.param_name) - - def str(self, value): - validator.validate_str(value, self.param_name) - - def strs(self, values): - self.multi(values, self.str) - - def str64(self, value): - validator.validate_str(value, self.param_name, 64) - - def str255(self, value): - validator.validate_str(value, self.param_name, 255) - - def str255s(self, values): - self.multi(values, self.str255) - - def ip(self, ip): - validator.validate_ipv4(ip, self.param_name) - - def ips(self, ips): - self.multi(ips, self.ip) - - def cidr(self, cidr): - validator.validate_cidr(cidr, self.param_name) - - def subnet_cidr(self, cidr): - validator.validate_subnet_cidr(cidr) - - def vpc_cidr(self, cidr): - validator.validate_vpc_cidr(cidr) - - def filter(self, filter): - validator.validate_filter(filter) - - def key_value_dict_list(self, dict_list): - validator.validate_key_value_dict_list(dict_list, self.param_name) - - def ec2_id(self, id, prefices=None): - validator.validate_ec2_id(id, self.param_name, prefices) - - def ec2_ids(self, ids): - self.multi(ids, self.ec2_id) - - def i_id(self, id): - self.ec2_id(id, ['i']) - - def i_ids(self, ids): - self.multi(ids, self.i_id) - - def ami_id(self, id): - self.ec2_id(id, ['ami']) - - def aki_id(self, id): - self.ec2_id(id, ['aki']) - - def ari_id(self, id): - self.ec2_id(id, ['ari']) - - def amiariaki_id(self, id): - self.ec2_id(id, ['ami', 'ari', 'aki']) - - def 
amiariaki_ids(self, ids): - self.multi(ids, self.amiariaki_id) - - def sg_id(self, id): - self.ec2_id(id, ['sg']) - - def sg_ids(self, ids): - self.multi(ids, self.sg_id) - - def subnet_id(self, id): - self.ec2_id(id, ['subnet']) - - def subnet_ids(self, ids): - self.multi(ids, self.subnet_id) - - def igw_id(self, id): - self.ec2_id(id, ['igw']) - - def igw_ids(self, ids): - self.multi(ids, self.igw_id) - - def rtb_id(self, id): - self.ec2_id(id, ['rtb']) - - def rtb_ids(self, ids): - self.multi(ids, self.rtb_id) - - def eni_id(self, id): - self.ec2_id(id, ['eni']) - - def eni_ids(self, ids): - self.multi(ids, self.eni_id) - - def vpc_id(self, id): - self.ec2_id(id, ['vpc']) - - def vpc_ids(self, ids): - self.multi(ids, self.vpc_id) - - def eipalloc_id(self, id): - self.ec2_id(id, ['eipalloc']) - - def eipalloc_ids(self, ids): - self.multi(ids, self.eipalloc_id) - - def eipassoc_id(self, id): - self.ec2_id(id, ['eipassoc']) - - def rtbassoc_id(self, id): - self.ec2_id(id, ['rtbassoc']) - - def eni_attach_id(self, id): - self.ec2_id(id, ['eni-attach']) - - def snap_id(self, id): - self.ec2_id(id, ['snap']) - - def snap_ids(self, ids): - self.multi(ids, self.snap_id) - - def vol_id(self, id): - self.ec2_id(id, ['vol']) - - def vol_ids(self, ids): - self.multi(ids, self.vol_id) - - def dopt_id(self, id): - self.ec2_id(id, ['dopt']) - - def dopt_ids(self, ids): - self.multi(ids, self.dopt_id) - - def vgw_id(self, id): - self.ec2_id(id, ['vgw']) - - def vgw_ids(self, ids): - self.multi(ids, self.vgw_id) - - def cgw_id(self, id): - self.ec2_id(id, ['cgw']) - - def cgw_ids(self, ids): - self.multi(ids, self.cgw_id) - - def vpn_id(self, id): - self.ec2_id(id, ['vpn']) - - def vpn_ids(self, ids): - self.multi(ids, self.vpn_id) - - def security_group_str(self, value): - validator.validate_security_group_str(value, self.param_name, - self.params.get('vpc_id')) - - def security_group_strs(self, values): - self.multi(values, self.security_group_str) - - def 
vpn_connection_type(self, value): - validator.validate_vpn_connection_type(value) - - -VPC_KINDS = ['vpc', 'igw', 'subnet', 'eni', 'dopt', 'eipalloc', 'rtb', - 'vgw', 'cgw', 'vpn'] - - -class UniversalDescriber(object): - """Abstract Describer class for various Describe implementations.""" - - KIND = '' - SORT_KEY = '' - FILTER_MAP = {} - - def format(self, item=None, os_item=None): - pass - - def post_format(self, formatted_item, item): - pass - - def get_db_items(self): - return ec2utils.get_db_items(self.context, self.KIND, self.ids) - - def get_os_items(self): - return [] - - def auto_update_db(self, item, os_item): - if item is None and self.KIND not in VPC_KINDS: - item = ec2utils.auto_create_db_item(self.context, self.KIND, - self.get_id(os_item)) - LOG.info( - 'Item %(item)s was updated to %(os_item)s.', - {'item': str(item), 'os_item': str(os_item)}) - return item - - def get_id(self, os_item): - return os_item['id'] if isinstance(os_item, dict) else os_item.id - - def get_name(self, os_item): - return os_item['name'] - - def delete_obsolete_item(self, item): - LOG.info('Deleting obsolete item %(item)s', {'item': str(item)}) - db_api.delete_item(self.context, item['id']) - - def is_filtering_value_found(self, filter_value, value): - if fnmatch.fnmatch(str(value), str(filter_value)): - return True - - def filtered_out(self, item, filters): - if filters is None: - return False - for filter in filters: - filter_name = self.FILTER_MAP.get(filter['name']) - if filter_name is None: - raise exception.InvalidParameterValue( - value=filter['name'], parameter='filter', - reason='invalid filter') - values = self.get_values_by_filter(filter_name, item) - if not values: - return True - filter_values = filter['value'] - for filter_value in filter_values: - if any(self.is_filtering_value_found(filter_value, value) - for value in values): - break - else: - return True - return False - - def get_values_by_filter(self, filter_name, item): - if isinstance(filter_name, list): 
- values = [] - value_set = item.get(filter_name[0], []) - for value in value_set: - vals = self.get_values_by_filter(filter_name[1], value) - if vals: - values += vals - else: - if isinstance(filter_name, tuple): - value = item.get(filter_name[0], {}).get(filter_name[1]) - else: - value = item.get(filter_name) - values = [value] if value is not None else [] - return values - - def get_paged(self, formatted_items, max_results, next_token): - self.next_token = None - if not max_results and not next_token: - return formatted_items - - if max_results and max_results > 1000: - max_results = 1000 - formatted_items = sorted(formatted_items, - key=operator.itemgetter(self.SORT_KEY)) - - next_item = 0 - if next_token: - next_item = int(base64.b64decode(next_token).decode()) - if next_item: - formatted_items = formatted_items[next_item:] - if max_results and max_results < len(formatted_items): - self.next_token = base64.b64encode( - str(next_item + max_results).encode()) - formatted_items = formatted_items[:max_results] - - return formatted_items - - def is_selected_item(self, context, os_item_name, item): - return (os_item_name in self.names or - (item and item['id'] in self.ids)) - - def handle_unpaired_item(self, item): - self.delete_obsolete_item(item) - - def describe(self, context, ids=None, names=None, filter=None, - max_results=None, next_token=None): - if max_results and max_results < 5: - msg = (_('Value ( %s ) for parameter maxResults is invalid. 
' - 'Expecting a value greater than 5.') % max_results) - raise exception.InvalidParameterValue(msg) - - self.context = context - self.selective_describe = ids is not None or names is not None - self.ids = set(ids or []) - self.names = set(names or []) - self.items = self.get_db_items() - self.os_items = self.get_os_items() - formatted_items = [] - - self.items_dict = {i['os_id']: i for i in (self.items or [])} - paired_items_ids = set() - for os_item in self.os_items: - os_item_name = self.get_name(os_item) - os_item_id = self.get_id(os_item) - item = self.items_dict.get(os_item_id, None) - if item: - paired_items_ids.add(item['id']) - # NOTE(Alex): Filter out items not requested in names or ids - if (self.selective_describe and - not self.is_selected_item(context, os_item_name, item)): - continue - # NOTE(Alex): Autoupdate DB for autoupdatable items - item = self.auto_update_db(item, os_item) - # NOTE(andrey-mp): save item id again - # (if item has created by auto update) - if item: - paired_items_ids.add(item['id']) - formatted_item = self.format(item, os_item) - self.post_format(formatted_item, item) - if os_item_name in self.names: - self.names.remove(os_item_name) - if item and item['id'] in self.ids: - self.ids.remove(item['id']) - if (formatted_item and - not self.filtered_out(formatted_item, filter)): - formatted_items.append(formatted_item) - # NOTE(Alex): delete obsolete items - for item in self.items: - if item['id'] in paired_items_ids: - continue - formatted_item = self.handle_unpaired_item(item) - if formatted_item: - if not self.filtered_out(formatted_item, filter): - formatted_items.append(formatted_item) - if item['id'] in self.ids: - self.ids.remove(item['id']) - # NOTE(Alex): some requested items are not found - if self.ids or self.names: - params = {'id': next(iter(self.ids or self.names))} - raise ec2utils.NOT_FOUND_EXCEPTION_MAP[self.KIND](**params) - - return self.get_paged(formatted_items, max_results, next_token) - - -class 
TaggableItemsDescriber(UniversalDescriber): - - tags = None - - def __init__(self): - super(TaggableItemsDescriber, self).__init__() - self.FILTER_MAP['tag-key'] = ['tagSet', 'key'] - self.FILTER_MAP['tag-value'] = ['tagSet', 'value'] - self.FILTER_MAP['tag'] = 'tagSet' - - def get_tags(self): - return db_api.get_tags(self.context, (self.KIND,), self.ids) - - def post_format(self, formatted_item, item): - if not item or not formatted_item: - return - - if self.tags is None: - tags = collections.defaultdict(list) - for tag in self.get_tags(): - tags[tag['item_id']].append(tag) - self.tags = tags - - formatted_tags = [] - for tag in self.tags[item['id']]: - formatted_tags.append({'key': tag['key'], - 'value': tag['value']}) - if formatted_tags: - # NOTE(ft): AWS returns tagSet element for all objects (there are - # errors in AWS docs) - formatted_item['tagSet'] = formatted_tags - - def describe(self, context, ids=None, names=None, filter=None, - max_results=None, next_token=None): - if filter: - for f in filter: - if f['name'].startswith('tag:'): - tag_key = f['name'].split(':')[1] - tag_values = f['value'] - f['name'] = 'tag' - f['value'] = [{'key': tag_key, - 'value': tag_values}] - return super(TaggableItemsDescriber, self).describe( - context, ids=ids, names=names, filter=filter, - max_results=max_results, next_token=next_token) - - def is_filtering_value_found(self, filter_value, value): - if isinstance(filter_value, dict): - for tag_pair in value: - if (not isinstance(tag_pair, dict) or - filter_value.get('key') != tag_pair.get('key')): - continue - for filter_dict_value in filter_value.get('value'): - if super(TaggableItemsDescriber, - self).is_filtering_value_found( - filter_dict_value, - tag_pair.get('value')): - return True - return False - return super(TaggableItemsDescriber, - self).is_filtering_value_found(filter_value, value) - - -class NonOpenstackItemsDescriber(UniversalDescriber): - """Describer class for non-Openstack items Describe 
implementations.""" - - def describe(self, context, ids=None, names=None, filter=None, - max_results=None, next_token=None): - if max_results and max_results < 5: - msg = (_('Value ( %s ) for parameter maxResults is invalid. ' - 'Expecting a value greater than 5.') % max_results) - raise exception.InvalidParameterValue(msg) - - self.context = context - self.ids = ids - self.items = self.get_db_items() - formatted_items = [] - - for item in self.items: - formatted_item = self.format(item) - self.post_format(formatted_item, item) - if (formatted_item and - not self.filtered_out(formatted_item, filter)): - formatted_items.append(formatted_item) - - return self.get_paged(formatted_items, max_results, next_token) diff --git a/ec2api/api/customer_gateway.py b/ec2api/api/customer_gateway.py deleted file mode 100644 index f9c572b7..00000000 --- a/ec2api/api/customer_gateway.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - - -"""Customer gateways related API implementation -""" - - -Validator = common.Validator - - -DEFAULT_BGP_ASN = 65000 - - -def create_customer_gateway(context, type, bgp_asn=None, - ip_address=None, public_ip=None): - if ip_address: - ip_addr = ip_address - elif (ip_address == None) and public_ip: - ip_addr = public_ip - elif (ip_address == None) and (public_ip == None): - raise exception.Unsupported("GW without ip not supported") - if bgp_asn and bgp_asn != DEFAULT_BGP_ASN: - raise exception.Unsupported("BGP dynamic routing is unsupported") - # testing output to get ec2 failures - customer_gateway = next((cgw for cgw in db_api.get_items(context, 'cgw') - if cgw['ip_address'] == ip_addr), None) - if not customer_gateway: - customer_gateway = db_api.add_item(context, 'cgw', - {'ip_address': ip_addr}) - return {'customerGateway': _format_customer_gateway(customer_gateway)} - - -def delete_customer_gateway(context, customer_gateway_id): - customer_gateway = ec2utils.get_db_item(context, customer_gateway_id) - vpn_connections = db_api.get_items(context, 'vpn') - if any(vpn['customer_gateway_id'] == customer_gateway['id'] - for vpn in vpn_connections): - raise exception.IncorrectState( - reason=_('The customer gateway is in use.')) - db_api.delete_item(context, customer_gateway['id']) - return True - - -def describe_customer_gateways(context, customer_gateway_id=None, - filter=None): - formatted_cgws = CustomerGatewayDescriber().describe( - context, ids=customer_gateway_id, filter=filter) - return {'customerGatewaySet': formatted_cgws} - - -class CustomerGatewayDescriber(common.TaggableItemsDescriber, - common.NonOpenstackItemsDescriber): - - KIND = 'cgw' - FILTER_MAP = {'bgp-asn': 'bgpAsn', - 'customer-gateway-id': 'customerGatewayId', - 'ip-address': 'ipAddress', - 'state': 'state', - 'type': 'type'} - - def 
format(self, customer_gateway): - return _format_customer_gateway(customer_gateway) - - -def _format_customer_gateway(customer_gateway): - return {'customerGatewayId': customer_gateway['id'], - 'ipAddress': customer_gateway['ip_address'], - 'state': 'available', - 'type': 'ipsec.1', - 'bgpAsn': DEFAULT_BGP_ASN} diff --git a/ec2api/api/dhcp_options.py b/ec2api/api/dhcp_options.py deleted file mode 100644 index fcf92c04..00000000 --- a/ec2api/api/dhcp_options.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import netaddr -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api import clients -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - - -LOG = logging.getLogger(__name__) - -ec2_opts = [ - cfg.IntOpt('network_device_mtu', - default=1500, - help='MTU size to set by DHCP for instances. 
Corresponds ' - 'with the network_device_mtu in ec2api.conf.') -] - -CONF = cfg.CONF -CONF.register_opts(ec2_opts) - - -"""DHCP options related API implementation -""" - - -class Validator(common.Validator): - - def dopt_id_or_default(self, id): - if id == 'default': - return - self.ec2_id(id, ['dopt']) - - -DHCP_OPTIONS_MAP = {'domain-name-servers': 'dns-server', - 'domain-name': 'domain-name', - 'ntp-servers': 'ntp-server', - 'netbios-name-servers': 'netbios-ns', - 'netbios-node-type': 'netbios-nodetype'} - - -def create_dhcp_options(context, dhcp_configuration): - dhcp_options = {} - for dhcp_option in dhcp_configuration: - key = dhcp_option['key'] - values = dhcp_option['value'] - if key not in DHCP_OPTIONS_MAP: - raise exception.InvalidParameterValue( - value=values, - parameter=key, - reason='Unrecognized key is specified') - if not type(values) is list: - raise exception.InvalidParameterValue( - value=values, - parameter=key, - reason='List of values is expected') - if key not in ['domain-name', 'netbios-node-type']: - ips = [] - for ip in values: - ip_address = netaddr.IPAddress(ip) - if not ip_address: - raise exception.InvalidParameterValue( - value=ip, - parameter=key, - reason='Invalid list of IPs is specified') - ips.append(ip) - dhcp_options[key] = ips - else: - dhcp_options[key] = values - dhcp_options = db_api.add_item(context, 'dopt', - {'dhcp_configuration': dhcp_options}) - return {'dhcpOptions': _format_dhcp_options(context, dhcp_options)} - - -def delete_dhcp_options(context, dhcp_options_id): - if not dhcp_options_id: - raise exception.MissingParameter( - _('DHCP options ID must be specified')) - dhcp_options = ec2utils.get_db_item(context, dhcp_options_id) - vpcs = db_api.get_items(context, 'vpc') - for vpc in vpcs: - if dhcp_options['id'] == vpc.get('dhcp_options_id'): - raise exception.DependencyViolation( - obj1_id=dhcp_options['id'], - obj2_id=vpc['id']) - db_api.delete_item(context, dhcp_options['id']) - return True - - -class 
DhcpOptionsDescriber(common.TaggableItemsDescriber, - common.NonOpenstackItemsDescriber): - - KIND = 'dopt' - FILTER_MAP = {'dhcp_options_id': 'dhcpOptionsId', - 'key': ['dhcpConfigurationSet', 'key'], - 'value': ['dhcpConfigurationSet', ['valueSet', 'value']]} - - def format(self, dhcp_options): - return _format_dhcp_options(self.context, dhcp_options) - - -def describe_dhcp_options(context, dhcp_options_id=None, - filter=None): - formatted_dhcp_options = DhcpOptionsDescriber().describe( - context, ids=dhcp_options_id, filter=filter) - return {'dhcpOptionsSet': formatted_dhcp_options} - - -def associate_dhcp_options(context, dhcp_options_id, vpc_id): - vpc = ec2utils.get_db_item(context, vpc_id) - rollback_dhcp_options_id = vpc.get('dhcp_options_id') - if dhcp_options_id == 'default': - dhcp_options_id = None - dhcp_options = None - else: - dhcp_options = ec2utils.get_db_item(context, dhcp_options_id) - dhcp_options_id = dhcp_options['id'] - neutron = clients.neutron(context) - os_ports = neutron.list_ports(tenant_id=context.project_id)['ports'] - network_interfaces = db_api.get_items(context, 'eni') - rollback_dhcp_options_object = ( - db_api.get_item_by_id(context, rollback_dhcp_options_id) - if dhcp_options_id is not None else - None) - with common.OnCrashCleaner() as cleaner: - _associate_vpc_item(context, vpc, dhcp_options_id) - cleaner.addCleanup(_associate_vpc_item, context, vpc, - rollback_dhcp_options_id) - for network_interface in network_interfaces: - os_port = next((p for p in os_ports - if p['id'] == network_interface['os_id']), None) - if not os_port: - continue - _add_dhcp_opts_to_port(context, dhcp_options, - network_interface, os_port, neutron) - cleaner.addCleanup(_add_dhcp_opts_to_port, context, - rollback_dhcp_options_object, network_interface, - os_port, neutron) - return True - - -def _add_dhcp_opts_to_port(context, dhcp_options, network_interface, os_port, - neutron=None): - dhcp_opts = [{'opt_name': 'mtu', - 'opt_value': 
str(CONF.network_device_mtu)}] - if dhcp_options is not None: - for key, values in dhcp_options['dhcp_configuration'].items(): - strvalues = [str(v) for v in values] - dhcp_opts.append({'opt_name': DHCP_OPTIONS_MAP[key], - 'opt_value': ','.join(strvalues)}) - if not neutron: - neutron = clients.neutron(context) - neutron.update_port(os_port['id'], - {'port': {'extra_dhcp_opts': dhcp_opts}}) - - -def _format_dhcp_options(context, dhcp_options): - dhcp_configuration = [] - for key, values in dhcp_options['dhcp_configuration'].items(): - items = [{'value': v} for v in values] - dhcp_configuration.append({'key': key, 'valueSet': items}) - return {'dhcpOptionsId': dhcp_options['id'], - 'dhcpConfigurationSet': dhcp_configuration} - - -def _associate_vpc_item(context, vpc, dhcp_options_id): - vpc['dhcp_options_id'] = dhcp_options_id - db_api.update_item(context, vpc) diff --git a/ec2api/api/ec2utils.py b/ec2api/api/ec2utils.py deleted file mode 100644 index 4e43b75b..00000000 --- a/ec2api/api/ec2utils.py +++ /dev/null @@ -1,560 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import re - -from glanceclient.common import exceptions as glance_exception -from lxml import etree -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import timeutils - -from ec2api import clients -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - -LOG = logging.getLogger(__name__) - -ec2_opts = [ - cfg.StrOpt('external_network', - default=None, - help='Name of the external network, which is used to connect' - 'VPCs to Internet and to allocate Elastic IPs.'), -] - -CONF = cfg.CONF -CONF.register_opts(ec2_opts) - -LEGACY_BDM_FIELDS = set(['device_name', 'delete_on_termination', 'snapshot_id', - 'volume_id', 'volume_size', 'no_device']) - -_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') - - -def camelcase_to_underscore(str): - return _c2u.sub(r'_\1', str).lower().strip('_') - - -def _try_convert(value): - """Return a non-string from a string or unicode, if possible. 
- - ============= ===================================================== - When value is returns - ============= ===================================================== - zero-length '' - 'None' None - 'True' True case insensitive - 'False' False case insensitive - '0', '-0' 0 - 0xN, -0xN int from hex (positive) (N is any number) - 0bN, -0bN int from binary (positive) (N is any number) - * try conversion to int, float, complex, fallback value - - """ - def _negative_zero(value): - epsilon = 1e-7 - return 0 if abs(value) < epsilon else value - - if len(value) == 0: - return '' - if value == 'None': - return None - lowered_value = value.lower() - if lowered_value == 'true': - return True - if lowered_value == 'false': - return False - for prefix, base in [('0x', 16), ('0b', 2), ('0', 8), ('', 10)]: - try: - if lowered_value.startswith((prefix, "-" + prefix)): - return int(lowered_value, base) - except ValueError: - pass - try: - return _negative_zero(float(value)) - except ValueError: - return value - - -def dict_from_dotted_str(items): - """parse multi dot-separated argument into dict. 
- - EBS boot uses multi dot-separated arguments like - BlockDeviceMapping.1.DeviceName=snap-id - Convert the above into - {'block_device_mapping': {'1': {'device_name': snap-id}}} - """ - args = {} - for key, value in items: - parts = key.split(".") - key = str(camelcase_to_underscore(parts[0])) - if isinstance(value, str): - # NOTE(vish): Automatically convert strings back - # into their respective values - value = _try_convert(value) - - if len(parts) > 1: - d = args.get(key, {}) - args[key] = d - for k in parts[1:-1]: - k = camelcase_to_underscore(k) - v = d.get(k, {}) - d[k] = v - d = v - d[camelcase_to_underscore(parts[-1])] = value - else: - args[key] = value - - return args - - -def _render_dict(el, data): - try: - for key, val in data.items(): - sub_el = etree.SubElement(el, key) - _render_data(sub_el, val) - except Exception: - LOG.debug(data) - raise - - -def _render_data(el, data): - if isinstance(data, list): - for item in data: - sub_el = etree.SubElement(el, 'item') - _render_data(sub_el, item) - elif isinstance(data, dict): - _render_dict(el, data) - elif hasattr(data, '__dict__'): - _render_dict(el, data.__dict__) - elif isinstance(data, bool): - el.text = str(data).lower() - elif isinstance(data, datetime.datetime): - el.text = _database_to_isoformat(data) - elif isinstance(data, bytes): - el.text = data.decode("utf-8") - elif data is not None: - el.text = str(data) - - -def _database_to_isoformat(datetimeobj): - """Return a xs:dateTime parsable string from datatime.""" - return datetimeobj.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z' - - -def dict_to_xml(data_dict, root_tag): - root = etree.Element(root_tag) - _render_dict(root, data_dict) - return root - - -_ms_time_regex = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3,6}Z$') - - -def is_ec2_timestamp_expired(request, expires=None): - """Checks the timestamp or expiry time included in an EC2 request - - and returns true if the request is expired - """ - query_time = None - timestamp = 
request.get('Timestamp') - expiry_time = request.get('Expires') - - def parse_strtime(strtime): - if _ms_time_regex.match(strtime): - # NOTE(MotoKen): time format for aws-sdk-java contains millisecond - time_format = "%Y-%m-%dT%H:%M:%S.%fZ" - else: - time_format = "%Y-%m-%dT%H:%M:%SZ" - return timeutils.parse_strtime(strtime, time_format) - - try: - if timestamp and expiry_time: - msg = _("Request must include either Timestamp or Expires," - " but cannot contain both") - LOG.error(msg) - raise exception.InvalidRequest(msg) - elif expiry_time: - query_time = parse_strtime(expiry_time) - return timeutils.is_older_than(query_time, -1) - elif timestamp: - query_time = parse_strtime(timestamp) - - # Check if the difference between the timestamp in the request - # and the time on our servers is larger than 5 minutes, the - # request is too old (or too new). - if query_time and expires: - return (timeutils.is_older_than(query_time, expires) or - timeutils.is_newer_than(query_time, expires)) - return False - except ValueError: - LOG.exception("Timestamp is invalid: ") - return True - - -# NOTE(ft): extra functions to use in vpc specific code or instead of -# malformed existed functions - - -def get_ec2_id_kind(obj_id): - return obj_id.split('-')[0] - - -def change_ec2_id_kind(obj_id, new_kind): - return '%(kind)s-%(id)s' % {'kind': new_kind, - 'id': obj_id.split('-')[-1]} - - -NOT_FOUND_EXCEPTION_MAP = { - 'vpc': exception.InvalidVpcIDNotFound, - 'igw': exception.InvalidInternetGatewayIDNotFound, - 'subnet': exception.InvalidSubnetIDNotFound, - 'eni': exception.InvalidNetworkInterfaceIDNotFound, - 'dopt': exception.InvalidDhcpOptionsIDNotFound, - 'eipalloc': exception.InvalidAllocationIDNotFound, - 'sg': exception.InvalidGroupNotFound, - 'rtb': exception.InvalidRouteTableIDNotFound, - 'i': exception.InvalidInstanceIDNotFound, - 'kp': exception.InvalidKeypairNotFound, - 'az': exception.InvalidAvailabilityZoneNotFound, - 'vol': exception.InvalidVolumeNotFound, - 'snap': 
exception.InvalidSnapshotNotFound, - 'ami': exception.InvalidAMIIDNotFound, - 'aki': exception.InvalidAMIIDNotFound, - 'ari': exception.InvalidAMIIDNotFound, - 'vgw': exception.InvalidVpnGatewayIDNotFound, - 'cgw': exception.InvalidCustomerGatewayIDNotFound, - 'vpn': exception.InvalidVpnConnectionIDNotFound, -} - - -def get_db_item(context, ec2_id, expected_kind=None): - """Get an DB item, raise AWS compliant exception if it's not found. - - Args: - context (RequestContext): The request context. - ec2_id (str): The ID of the requested item. - expected_kind (str): The expected kind of the requested item. - It should be specified for a kind of ec2_id to be validated, - if you need it. - - Returns: - The DB item. - """ - item = db_api.get_item_by_id(context, ec2_id) - if (item is None or - expected_kind and get_ec2_id_kind(ec2_id) != expected_kind): - kind = expected_kind or get_ec2_id_kind(ec2_id) - params = {'id': ec2_id} - raise NOT_FOUND_EXCEPTION_MAP[kind](**params) - return item - - -def get_db_items(context, kind, ec2_ids): - if not ec2_ids: - return db_api.get_items(context, kind) - - if not isinstance(ec2_ids, set): - ec2_ids = set(ec2_ids) - items = db_api.get_items_by_ids(context, ec2_ids) - if len(items) < len(ec2_ids): - missed_ids = ec2_ids - set((item['id'] for item in items)) - params = {'id': next(iter(missed_ids))} - raise NOT_FOUND_EXCEPTION_MAP[kind](**params) - return items - - -_auto_create_db_item_extensions = {} - - -def register_auto_create_db_item_extension(kind, extension): - _auto_create_db_item_extensions[kind] = extension - - -def auto_create_db_item(context, kind, os_id, **extension_kwargs): - item = {'os_id': os_id} - extension = _auto_create_db_item_extensions.get(kind) - if extension: - extension(context, item, **extension_kwargs) - return db_api.add_item(context, kind, item) - - -def get_db_item_by_os_id(context, kind, os_id, items_by_os_id=None, - **extension_kwargs): - """Get DB item by OS id (create if it doesn't exist). 
- - Args: - context (RequestContext): The request context. - kind (str): The kind of item. - os_id (str): OS id of an object. - items_by_os_id (dict of items): The dict of known DB items, - OS id is used as a key. - extension_kwargs (dict): Additional parameters passed to - a registered extension at creating item. - - Returns: - A found or created item. - - Search item in passed dict. If it's not found - create a new item, and - add it to the dict (if it's passed). - If an extension is registered on corresponding item kind, call it - passing extension_kwargs to it. - """ - if os_id is None: - return None - if items_by_os_id is not None: - item = items_by_os_id.get(os_id) - if item: - return item - else: - item = next((i for i in db_api.get_items(context, kind) - if i['os_id'] == os_id), None) - if not item: - item = auto_create_db_item(context, kind, os_id, **extension_kwargs) - if items_by_os_id is not None: - items_by_os_id[os_id] = item - return item - - -# TODO(Alex): The project_id passing mechanism can be potentially -# reconsidered in future. 
-def os_id_to_ec2_id(context, kind, os_id, items_by_os_id=None, - ids_by_os_id=None, project_id=None): - if os_id is None: - return None - if ids_by_os_id is not None: - item_id = ids_by_os_id.get(os_id) - if item_id: - return item_id - if items_by_os_id is not None: - item = items_by_os_id.get(os_id) - if item: - return item['id'] - ids = db_api.get_items_ids(context, kind, item_os_ids=(os_id,)) - if len(ids): - item_id, _os_id = ids[0] - else: - item_id = db_api.add_item_id(context, kind, os_id, - project_id=project_id) - if ids_by_os_id is not None: - ids_by_os_id[os_id] = item_id - return item_id - - -def get_os_image(context, ec2_image_id): - kind = get_ec2_id_kind(ec2_image_id) - ids = db_api.get_items_ids(context, kind, item_ids=(ec2_image_id,)) - if not ids: - raise exception.InvalidAMIIDNotFound(id=ec2_image_id) - _id, os_id = ids[0] - if not os_id: - return None - glance = clients.glance(context) - try: - return glance.images.get(os_id) - except glance_exception.HTTPNotFound: - raise exception.InvalidAMIIDNotFound(id=ec2_image_id) - - -def deserialize_os_image_properties(os_image): - def prepare_property(property_name): - if property_name in os_image_dict: - os_image_dict[property_name] = jsonutils.loads( - os_image_dict[property_name]) - - os_image_dict = dict(os_image) - prepare_property('mappings') - prepare_property('block_device_mapping') - return os_image_dict - - -def create_virtual_bdm(device_name, virtual_name): - bdm = {'device_name': device_name, - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'virtual_name': virtual_name} - if virtual_name == 'swap': - bdm['guest_format'] = 'swap' - return bdm - - -def get_os_image_mappings(os_image_properties): - mappings = [] - names = set() - # TODO(ft): validate device names for both virtual and block device - # mappings - - def is_virtual(virtual_name): - return virtual_name == 'swap' or (virtual_name and - 
_ephemeral.match(virtual_name)) - - # NOTE(ft): substitute mapping if the same device name is specified - def add_mapping(mapping): - device_name = block_device_strip_dev(mapping.get('device_name')) - if device_name in names: - for i, m in enumerate(mappings): - if (device_name == - block_device_strip_dev(m.get('device_name'))): - mappings[i] = mapping - break - else: - if device_name: - names.add(device_name) - mappings.append(mapping) - - # TODO(ft): From Juno virtual device mapping has precedence of block one - # in boot logic. This function should do the same, despite Nova EC2 - # behavior. - - # NOTE(ft): Nova EC2 prepended device names for virtual device mappings. - # But AWS doesn't do it. - for vdm in os_image_properties.get('mappings', []): - if is_virtual(vdm.get('virtual')): - add_mapping(create_virtual_bdm( - block_device_prepend_dev(vdm.get('device')), vdm['virtual'])) - - legacy_mapping = not os_image_properties.get('bdm_v2', False) - for bdm in os_image_properties.get('block_device_mapping', []): - if legacy_mapping: - virtual_name = bdm.get('virtual_name') - if is_virtual(virtual_name): - new_bdm = create_virtual_bdm(bdm.get('device_name'), - virtual_name) - else: - new_bdm = {key: val for key, val in bdm.items() - if key in LEGACY_BDM_FIELDS} - if bdm.get('snapshot_id'): - new_bdm.update({'source_type': 'snapshot', - 'destination_type': 'volume'}) - elif bdm.get('volume_id'): - new_bdm.update({'source_type': 'volume', - 'destination_type': 'volume'}) - bdm = new_bdm - - bdm.setdefault('delete_on_termination', False) - add_mapping(bdm) - - return mappings - - -def get_os_public_network(context): - neutron = clients.neutron(context) - search_opts = {'router:external': True, 'name': CONF.external_network} - os_networks = neutron.list_networks(**search_opts)['networks'] - if len(os_networks) != 1: - if CONF.external_network: - if len(os_networks) == 0: - msg = "No external network with name '%s' is found" - else: - msg = "More than one external network 
with name '%s' is found" - LOG.error(msg, CONF.external_network) - else: - if len(os_networks) == 0: - msg = 'No external network is found' - else: - msg = 'More than one external network is found' - LOG.error(msg) - raise exception.Unsupported(_('Feature is restricted by OS admin')) - return os_networks[0] - - -def get_attached_gateway(context, vpc_id, gateway_kind): - # TODO(ft): move search by vpc_id to DB api - return next((gw for gw in db_api.get_items(context, gateway_kind) - if gw['vpc_id'] == vpc_id), None) - - -_check_and_create_default_vpc = None - - -def check_and_create_default_vpc(context): - return _check_and_create_default_vpc(context) - - -def set_check_and_create_default_vpc(check_and_create_default_vpc): - global _check_and_create_default_vpc - _check_and_create_default_vpc = check_and_create_default_vpc - - -def get_default_vpc(context): - default_vpc = check_and_create_default_vpc(context) - if not default_vpc: - raise exception.VPCIdNotSpecified() - return default_vpc - - -# NOTE(ft): following functions are copied from various parts of Nova - -_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$') - -_dev = re.compile(r'^/dev/') - - -def block_device_strip_dev(device_name): - """remove leading '/dev/'.""" - return _dev.sub('', device_name) if device_name else device_name - - -def block_device_prepend_dev(device_name): - """Make sure there is a leading '/dev/'.""" - return device_name and '/dev/' + block_device_strip_dev(device_name) - - -def block_device_properties_root_device_name(properties): - """get root device name from image meta data. - - If it isn't specified, return None. 
- """ - if 'root_device_name' in properties: - return properties.get('root_device_name') - elif 'mappings' in properties: - return next((bdm['device'] for bdm in properties['mappings'] - if bdm['virtual'] == 'root'), None) - else: - return None - - -_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' -_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' - - -def isotime(at=None, subsecond=False): - """Stringify time in ISO 8601 format.""" - - # Python provides a similar instance method for datetime.datetime objects - # called isoformat(). The format of the strings generated by isoformat() - # have a couple of problems: - # 1) The strings generated by isotime are used in tokens and other public - # APIs that we can't change without a deprecation period. The strings - # generated by isoformat are not the same format, so we can't just - # change to it. - # 2) The strings generated by isoformat do not include the microseconds if - # the value happens to be 0. This will likely show up as random failures - # as parsers may be written to always expect microseconds, and it will - # parse correctly most of the time. - - if not at: - at = timeutils.utcnow() - st = at.strftime(_ISO8601_TIME_FORMAT - if not subsecond - else _ISO8601_TIME_FORMAT_SUBSECOND) - tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' - st += ('Z' if tz == 'UTC' else tz) - return st diff --git a/ec2api/api/faults.py b/ec2api/api/faults.py deleted file mode 100644 index 43dd292b..00000000 --- a/ec2api/api/faults.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -from oslo_context import context as common_context -from oslo_log import log as logging -import webob.dec -import webob.exc - -import ec2api.api -from ec2api import utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def ec2_error_response(request_id, code, message, status=500): - """Helper to construct an EC2 compatible error response.""" - LOG.debug('EC2 error response: %(code)s: %(message)s', - {'code': code, 'message': message}) - resp = webob.Response() - resp.status = status - resp.headers['Content-Type'] = 'text/xml' - resp.body = ( - '\n' - '%s' - '%s' - '%s' % - (utils.xhtml_escape(code), - utils.xhtml_escape(message), - utils.xhtml_escape(request_id))).encode() - return resp - - -class Fault(webob.exc.HTTPException): - - """Captures exception and return REST Response.""" - - def __init__(self, exception): - """Create a response for the given webob.exc.exception.""" - self.wrapped_exc = exception - - @webob.dec.wsgify - def __call__(self, req): - """Generate a WSGI response based on the exception passed to ctor.""" - code = ec2api.api.exception_to_ec2code(self.wrapped_exc) - status = self.wrapped_exc.status_int - message = self.wrapped_exc.explanation - - if status == 501: - message = "The requested function is not supported" - - if 'AWSAccessKeyId' not in req.params: - raise webob.exc.HTTPBadRequest() - - resp = ec2_error_response(common_context.generate_request_id(), code, - message=message, status=status) - return resp diff --git a/ec2api/api/image.py b/ec2api/api/image.py deleted file mode 100644 index 
305eaf1e..00000000 --- a/ec2api/api/image.py +++ /dev/null @@ -1,1100 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import binascii -import os -import shutil -import tarfile -import tempfile -import time - -import botocore.client -import botocore.config -import botocore.session -from cinderclient import exceptions as cinder_exception -from cryptography.hazmat import backends -from cryptography.hazmat.primitives.asymmetric import padding -from cryptography.hazmat.primitives import serialization -import eventlet -from glanceclient.common import exceptions as glance_exception -from lxml import etree -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api.api import instance as instance_api -from ec2api import clients -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ -import urllib.parse as parse - -LOG = logging.getLogger(__name__) - -s3_opts = [ - cfg.StrOpt('image_decryption_dir', - default='/tmp', - help='Parent directory for tempdir used for image decryption'), - cfg.StrOpt('s3_url', - default='http://$my_ip:3334', - help='URL to S3 server'), - # TODO(andrey-mp): this should be reworked with all region`s logic - cfg.StrOpt('s3_region', - default='RegionOne', - help='Region of S3 server'), - 
cfg.StrOpt('x509_root_private_key', - help='Path to ca private key file'), -] - -CONF = cfg.CONF -CONF.register_opts(s3_opts) - -rpcapi_opts = [ - cfg.StrOpt('cert_topic', - default='cert', - deprecated_for_removal=True, - deprecated_reason='"nova_cert" service is removed', - help='The topic cert nodes listen on'), -] - -CONF.register_opts(rpcapi_opts) - - -"""Images related API implementation -""" - - -Validator = common.Validator - - -CONTAINER_TO_KIND = {'aki': 'aki', - 'ari': 'ari', - 'ami': 'ami', - # NOTE(ft): this mappings are ported from legacy Nova EC2 - # There is no idea about its actuality - 'kernel': 'aki', - 'ramdisk': 'ari'} -IMAGE_TYPES = {'aki': 'kernel', - 'ari': 'ramdisk', - 'ami': 'machine'} -GLANCE_STATUS_TO_EC2 = {'queued': 'pending', - 'saving': 'pending', - 'active': 'available', - 'killed': 'deregistered', - 'pending_delete': 'deregistered', - 'deleted': 'deregistered', - 'deactivated': 'invalid'} -EPHEMERAL_PREFIX_LEN = len('ephemeral') - - -# TODO(yamahata): race condition -# At the moment there is no way to prevent others from -# manipulating instances/volumes/snapshots. -# As other code doesn't take it into consideration, here we don't -# care of it for now. 
Ostrich algorithm -def create_image(context, instance_id, name=None, description=None, - no_reboot=False, block_device_mapping=None): - instance = ec2utils.get_db_item(context, instance_id) - - if not instance_api._is_ebs_instance(context, instance['os_id']): - msg = _('Instance does not have a volume attached at root (null).') - raise exception.InvalidParameterValue(value=instance_id, - parameter='InstanceId', - reason=msg) - - nova = clients.nova(context) - os_instance = nova.servers.get(instance['os_id']) - restart_instance = False - if not no_reboot and os_instance.status != 'SHUTOFF': - if os_instance.status != 'ACTIVE': - # TODO(ft): Change the error code and message with the real AWS - # ones - msg = _('Instance must be run or stopped') - raise exception.IncorrectState(reason=msg) - - restart_instance = True - - # meaningful image name - name_map = dict(instance=instance['os_id'], now=ec2utils.isotime()) - name = name or _('image of %(instance)s at %(now)s') % name_map - - def delayed_create(context, image, name, os_instance): - try: - os_instance.stop() - - # wait instance for really stopped - start_time = time.time() - while os_instance.status != 'SHUTOFF': - time.sleep(1) - os_instance.get() - # NOTE(yamahata): timeout and error. 1 hour for now for safety. - # Is it too short/long? - # Or is there any better way? 
- timeout = 1 * 60 * 60 - if time.time() > start_time + timeout: - err = (_("Couldn't stop instance within %d sec") % timeout) - raise exception.EC2Exception(message=err) - - # NOTE(ft): create an image with ec2_id metadata to let other code - # link os and db objects in race conditions - os_image_id = os_instance.create_image( - name, metadata={'ec2_id': image['id']}) - image['os_id'] = os_image_id - db_api.update_item(context, image) - except Exception: - LOG.exception('Failed to complete image %s creation', image['id']) - try: - image['state'] = 'failed' - db_api.update_item(context, image) - except Exception: - LOG.warning("Couldn't set 'failed' state for db image %s", - image['id'], exc_info=True) - - try: - os_instance.start() - except Exception: - LOG.warning('Failed to start instance %(i_id)s after ' - 'completed creation of image %(image_id)s', - {'i_id': instance['id'], - 'image_id': image['id']}, - exc_info=True) - - image = {'is_public': False, - 'description': description} - if restart_instance: - # NOTE(ft): image type is hardcoded, because we don't know it now, - # but cannot change it later. 
But Nova doesn't specify container format - # for snapshots of volume backed instances, so that it is 'ami' in fact - image = db_api.add_item(context, 'ami', image) - eventlet.spawn_n(delayed_create, context, image, name, os_instance) - else: - glance = clients.glance(context) - with common.OnCrashCleaner() as cleaner: - os_image_id = os_instance.create_image(name) - cleaner.addCleanup(glance.images.delete, os_image_id) - # TODO(andrey-mp): snapshot and volume also must be deleted in case - # of error - os_image = glance.images.get(os_image_id) - image['os_id'] = os_image_id - image = db_api.add_item(context, _get_os_image_kind(os_image), - image) - return {'imageId': image['id']} - - -def register_image(context, name=None, image_location=None, - description=None, architecture=None, - root_device_name=None, block_device_mapping=None, - virtualization_type=None, kernel_id=None, - ramdisk_id=None, sriov_net_support=None): - - # Setup default flags - is_s3_import = False - is_url_import = False - - # Process the input arguments - if not image_location and not root_device_name: - # NOTE(ft): for backward compatibility with a hypothetical code - # which uses name as image_location - image_location = name - if not image_location and not root_device_name: - msg = _("Either imageLocation or rootDeviceName must be set.") - raise exception.InvalidParameterCombination(msg) - if not image_location and not name: - msg = _('The request must contain the parameter name') - raise exception.MissingParameter(msg) - - # TODO(ft): check parameters - metadata = {} - if name: - # TODO(ft): check the name is unique (at least for EBS image case) - metadata['name'] = name - if image_location: - - # Resolve the import type - metadata['image_location'] = image_location - parsed_url = parse.urlparse(image_location) - is_s3_import = (parsed_url.scheme == '') or (parsed_url.scheme == 's3') - is_url_import = not is_s3_import - - # Check if the name is in the metadata - if 'name' not in metadata: 
- # NOTE(ft): it's needed for backward compatibility - metadata['name'] = image_location - if root_device_name: - metadata['root_device_name'] = root_device_name - cinder = clients.cinder(context) - if block_device_mapping: - mappings = instance_api._parse_block_device_mapping( - context, block_device_mapping) - # TODO(ft): merge with image manifets's virtual device mappings - short_root_device_name = ( - ec2utils.block_device_strip_dev(root_device_name)) - for bdm in mappings: - instance_api._populate_parsed_bdm_parameter( - bdm, short_root_device_name) - if 'volume_size' in bdm: - continue - try: - if bdm['source_type'] == 'snapshot': - snapshot = cinder.volume_snapshots.get(bdm['snapshot_id']) - bdm['volume_size'] = snapshot.size - elif bdm['source_type'] == 'volume': - volume = cinder.volumes.get(bdm['volume_id']) - bdm['volume_size'] = volume.size - except cinder_exception.NotFound: - pass - metadata['bdm_v2'] = 'True' - metadata['block_device_mapping'] = jsonutils.dumps(mappings) - if architecture is not None: - metadata['architecture'] = architecture - if kernel_id: - metadata['kernel_id'] = ec2utils.get_os_image(context, - kernel_id).id - if ramdisk_id: - metadata['ramdisk_id'] = ec2utils.get_os_image(context, - ramdisk_id).id - - # Begin the import/registration process - with common.OnCrashCleaner() as cleaner: - - # Setup the glance client - glance = clients.glance(context) - - # Check if this is an S3 import - if is_s3_import: - os_image = _s3_create(context, metadata) - - # Condition for all non-S3 imports - else: - - # Create the image in glance - metadata.update({'visibility': 'private', - 'container_format': 'bare', - 'disk_format': 'raw'}) - os_image = glance.images.create(**metadata) - - # Kick-off the URL image import if from URL - if is_url_import: - glance.images.image_import(os_image.id, method='web-download', - uri=metadata['image_location']) - - # Otherwise, use the default method - else: - glance.images.upload(os_image.id, '', image_size=0) 
- - # Add cleanups and complete the registration process - cleaner.addCleanup(glance.images.delete, os_image.id) - kind = _get_os_image_kind(os_image) - image = db_api.add_item(context, kind, {'os_id': os_image.id, - 'is_public': False, - 'description': description}) - - # Return the image ID for the registration process - return {'imageId': image['id']} - - -def deregister_image(context, image_id): - os_image = ec2utils.get_os_image(context, image_id) - if not os_image: - image = db_api.get_item_by_id(context, image_id) - if image.get('state') != 'failed': - # TODO(ft): figure out corresponding AWS error - raise exception.IncorrectState( - reason='Image is still being created') - else: - _check_owner(context, os_image) - - glance = clients.glance(context) - try: - glance.images.delete(os_image.id) - except glance_exception.HTTPNotFound: - pass - db_api.delete_item(context, image_id) - return True - - -class ImageDescriber(common.TaggableItemsDescriber): - - KIND = 'ami' - FILTER_MAP = {'architecture': 'architecture', - 'block-device-mapping.device-name': ['blockDeviceMapping', - 'deviceName'], - 'block-device-mapping.snapshot-id': ['blockDeviceMapping', - ('ebs', 'snapshotId')], - 'block-device-mapping.volume-size': ['blockDeviceMapping', - ('ebs', 'volumeSize')], - 'description': 'description', - 'image-id': 'imageId', - 'image-type': 'imageType', - 'is-public': 'isPublic', - 'kernel_id': 'kernelId', - 'name': 'name', - 'owner-id': 'imageOwnerId', - 'ramdisk-id': 'ramdiskId', - 'root-device-name': 'rootDeviceName', - 'root-device-type': 'rootDeviceType', - 'state': 'imageState', - } - - def format(self, image, os_image): - return _format_image(self.context, image, os_image, self.items_dict, - self.ids_dict, self.snapshot_ids) - - def get_db_items(self): - # TODO(ft): we can't get all images from DB per one request due - # different kinds. 
It's need to refactor DB API and ec2utils functions - # to work with kind smarter - if self.ids: - local_images = db_api.get_items_by_ids(self.context, self.ids) - else: - local_images = sum((db_api.get_items(self.context, kind) - for kind in ('ami', 'ari', 'aki')), []) - public_images = sum((db_api.get_public_items(self.context, kind, - self.ids) - for kind in ('ami', 'ari', 'aki')), []) - - mapped_ids = [] - if self.ids: - mapped_ids = [{'id': item_id, - 'os_id': os_id} - for kind in ('ami', 'ari', 'aki') - for item_id, os_id in db_api.get_items_ids( - self.context, kind, item_ids=self.ids)] - - # NOTE(ft): mapped_ids must be the first to let complete items from - # next lists to override mappings, which do not have item body data - images = sum((mapped_ids, local_images, public_images), []) - if self.ids: - # NOTE(ft): public images, owned by a current user, appear in both - # local and public lists of images. Therefore it's not enough to - # just compare length of requested and retrieved lists to make sure - # that all requested images are retrieved. 
- images_ids = set(i['id'] for i in images) - if len(images_ids) < len(self.ids): - missed_ids = self.ids - images_ids - raise exception.InvalidAMIIDNotFound(id=next(iter(missed_ids))) - self.pending_images = {i['id']: i for i in local_images - if not i['os_id']} - self.snapshot_ids = dict( - (s['os_id'], s['id']) - for s in db_api.get_items(self.context, 'snap')) - self.local_images_os_ids = set(i['os_id'] for i in local_images) - self.ids_dict = {} - return images - - def get_os_items(self): - os_images = list(clients.glance(self.context).images.list()) - self.ec2_created_os_images = { - os_image.ec2_id: os_image - for os_image in os_images - if (hasattr(os_image, 'ec2_id') and - self.context.project_id == os_image.owner)} - return os_images - - def auto_update_db(self, image, os_image): - if not image: - kind = _get_os_image_kind(os_image) - if self.context.project_id == os_image.owner: - if getattr(os_image, 'ec2_id', None) in self.pending_images: - # NOTE(ft): the image is being creating, Glance had created - # image, but creating thread doesn't yet update db item - image = self.pending_images[os_image.ec2_id] - image['os_id'] = os_image.id - image['is_public'] = os_image.visibility == 'public' - db_api.update_item(self.context, image) - else: - image = ec2utils.get_db_item_by_os_id( - self.context, kind, os_image.id, self.items_dict, - os_image=os_image) - else: - image_id = ec2utils.os_id_to_ec2_id( - self.context, kind, os_image.id, - items_by_os_id=self.items_dict, ids_by_os_id=self.ids_dict) - image = {'id': image_id, - 'os_id': os_image.id} - elif (self.context.project_id == os_image.owner and - image.get('is_public') != os_image.visibility == 'public'): - image['is_public'] = os_image.visibility == 'public' - if image['id'] in self.local_images_os_ids: - db_api.update_item(self.context, image) - else: - # TODO(ft): currently update_item can not update id mapping, - # because its project_id is None. Instead of upgrade db_api, - # we use add_item. 
But its execution leads to a needless - # DB call. This should be reworked in the future. - kind = ec2utils.get_ec2_id_kind(image['id']) - db_api.add_item(self.context, kind, image) - return image - - def get_name(self, os_item): - return '' - - def delete_obsolete_item(self, image): - if image['os_id'] in self.local_images_os_ids: - db_api.delete_item(self.context, image['id']) - - def get_tags(self): - return db_api.get_tags(self.context, ('ami', 'ari', 'aki'), self.ids) - - def handle_unpaired_item(self, item): - if item['os_id']: - return super(ImageDescriber, self).handle_unpaired_item(item) - - if 'is_public' not in item: - return None - - # NOTE(ft): process creating images, ignoring ids mapping - # NOTE(ft): the image is being creating, Glance had created - # image, but creating thread doesn't yet update db item - os_image = self.ec2_created_os_images.get(item['id']) - if os_image: - item['os_id'] = os_image.id - item['is_public'] = os_image.visibility == 'public' - db_api.update_item(self.context, item) - image = self.format(item, os_image) - else: - # NOTE(ft): Glance image is not yet created, but DB item - # exists. So that we adds EC2 image to output results - # with all data we have. 
- # TODO(ft): check if euca2ools can process such result - image = {'imageId': item['id'], - 'imageOwnerId': self.context.project_id, - 'imageType': IMAGE_TYPES[ - ec2utils.get_ec2_id_kind(item['id'])], - 'isPublic': item['is_public']} - if 'description' in item: - image['description'] = item['description'] - image['imageState'] = item.get('state', 'pending') - return image - - -def describe_images(context, executable_by=None, image_id=None, - owner=None, filter=None): - formatted_images = ImageDescriber().describe( - context, ids=image_id, filter=filter) - return {'imagesSet': formatted_images} - - -def describe_image_attribute(context, image_id, attribute): - def _block_device_mapping_attribute(os_image, image, result): - properties = ec2utils.deserialize_os_image_properties(os_image) - mappings = _format_mappings(context, properties) - if mappings: - result['blockDeviceMapping'] = mappings - - def _description_attribute(os_image, image, result): - result['description'] = {'value': image.get('description')} - - def _launch_permission_attribute(os_image, image, result): - result['launchPermission'] = [] - if os_image.visibility == 'public': - result['launchPermission'].append({'group': 'all'}) - - def _kernel_attribute(os_image, image, result): - kernel_id = getattr(os_image, 'kernel_id', None) - if kernel_id: - result['kernel'] = { - 'value': ec2utils.os_id_to_ec2_id(context, 'aki', kernel_id) - } - - def _ramdisk_attribute(os_image, image, result): - ramdisk_id = getattr(os_image, 'ramdisk_id', None) - if ramdisk_id: - result['ramdisk'] = { - 'value': ec2utils.os_id_to_ec2_id(context, 'ari', ramdisk_id) - } - - # NOTE(ft): Openstack extension, AWS-incompability - def _root_device_name_attribute(os_image, image, result): - properties = ec2utils.deserialize_os_image_properties(os_image) - result['rootDeviceName'] = ( - ec2utils.block_device_properties_root_device_name(properties)) - - supported_attributes = { - 'blockDeviceMapping': 
_block_device_mapping_attribute, - 'description': _description_attribute, - 'launchPermission': _launch_permission_attribute, - 'kernel': _kernel_attribute, - 'ramdisk': _ramdisk_attribute, - # NOTE(ft): Openstack extension, AWS-incompability - 'rootDeviceName': _root_device_name_attribute, - } - - fn = supported_attributes.get(attribute) - if fn is None: - raise exception.InvalidRequest() - - os_image = ec2utils.get_os_image(context, image_id) - if not os_image: - # TODO(ft): figure out corresponding AWS error - raise exception.IncorrectState( - reason='Image is still being created or failed') - _check_owner(context, os_image) - image = ec2utils.get_db_item(context, image_id) - - result = {'imageId': image_id} - fn(os_image, image, result) - return result - - -def modify_image_attribute(context, image_id, attribute=None, - user_group=None, operation_type=None, - description=None, launch_permission=None, - product_code=None, user_id=None, value=None): - os_image = ec2utils.get_os_image(context, image_id) - if not os_image: - # TODO(ft): figure out corresponding AWS error - raise exception.IncorrectState( - reason='Image is still being created or failed') - - attributes = set() - - # NOTE(andrey-mp): launchPermission structure is converted here - # to plain parameters: attribute, user_group, operation_type, user_id - if launch_permission is not None: - attributes.add('launchPermission') - user_group = list() - user_id = list() - if len(launch_permission) == 0: - msg = _('No operation specified for launchPermission attribute.') - raise exception.InvalidParameterCombination(msg) - if len(launch_permission) > 1: - msg = _('Only one operation can be specified.') - raise exception.InvalidParameterCombination(msg) - operation_type, permissions = launch_permission.popitem() - for index_key in permissions: - permission = permissions[index_key] - if 'group' in permission: - user_group.append(permission['group']) - if 'user_id' in permission: - 
user_id.append(permission['user_id']) - if attribute == 'launchPermission': - attributes.add('launchPermission') - - if description is not None: - attributes.add('description') - value = description - if attribute == 'description': - attributes.add('description') - - # check attributes - if len(attributes) == 0: - if product_code is not None: - attribute = 'productCodes' - if attribute in ['kernel', 'ramdisk', 'productCodes', - 'blockDeviceMapping']: - raise exception.InvalidParameter( - _('Parameter %s is invalid. ' - 'The attribute is not supported.') % attribute) - raise exception.InvalidParameterCombination('No attributes specified.') - if len(attributes) > 1: - raise exception.InvalidParameterCombination( - _('Fields for multiple attribute types specified: %s') - % str(attributes)) - - if 'launchPermission' in attributes: - if not user_group: - msg = _('No operation specified for launchPermission attribute.') - raise exception.InvalidParameterCombination(msg) - if len(user_group) != 1 and user_group[0] != 'all': - msg = _('only group "all" is supported') - raise exception.InvalidParameterValue(parameter='UserGroup', - value=user_group, - reason=msg) - if operation_type not in ['add', 'remove']: - msg = _('operation_type must be add or remove') - raise exception.InvalidParameterValue(parameter='OperationType', - value='operation_type', - reason=msg) - - _check_owner(context, os_image) - glance = clients.glance(context) - visibility = 'public' if operation_type == 'add' else 'private' - glance.images.update(os_image.id, visibility=visibility) - return True - - if 'description' in attributes: - if not value: - raise exception.MissingParameter( - 'The request must contain the parameter description') - - _check_owner(context, os_image) - image = ec2utils.get_db_item(context, image_id) - image['description'] = value - db_api.update_item(context, image) - return True - - -def reset_image_attribute(context, image_id, attribute): - if attribute != 'launchPermission': - 
raise exception.InvalidRequest() - - os_image = ec2utils.get_os_image(context, image_id) - _check_owner(context, os_image) - glance = clients.glance(context) - glance.images.update(os_image.id, visibility='private') - return True - - -def _check_owner(context, os_image): - if os_image.owner != context.project_id: - raise exception.AuthFailure(_('Not authorized for image:%s') - % os_image.id) - - -def _format_image(context, image, os_image, images_dict, ids_dict, - snapshot_ids=None): - ec2_image = {'imageId': image['id'], - 'imageOwnerId': os_image.owner, - 'imageType': IMAGE_TYPES[ - ec2utils.get_ec2_id_kind(image['id'])], - 'isPublic': os_image.visibility == 'public', - 'architecture': getattr(os_image, 'architecture', None), - 'creationDate': os_image.created_at - } - if 'description' in image: - ec2_image['description'] = image['description'] - if 'state' in image: - state = image['state'] - else: - state = GLANCE_STATUS_TO_EC2.get(os_image.status, 'error') - if state in ('available', 'pending'): - state = _s3_image_state_map.get(getattr(os_image, 'image_state', None), - state) - ec2_image['imageState'] = state - - kernel_id = getattr(os_image, 'kernel_id', None) - if kernel_id: - ec2_image['kernelId'] = ec2utils.os_id_to_ec2_id( - context, 'aki', kernel_id, - items_by_os_id=images_dict, ids_by_os_id=ids_dict) - ramdisk_id = getattr(os_image, 'ramdisk_id', None) - if ramdisk_id: - ec2_image['ramdiskId'] = ec2utils.os_id_to_ec2_id( - context, 'ari', ramdisk_id, - items_by_os_id=images_dict, ids_by_os_id=ids_dict) - - name = os_image.name - img_loc = getattr(os_image, 'image_location', None) - if img_loc: - ec2_image['imageLocation'] = img_loc - else: - ec2_image['imageLocation'] = "%s (%s)" % (img_loc, name) - if not name and img_loc: - # This should only occur for images registered with ec2 api - # prior to that api populating the glance name - ec2_image['name'] = img_loc - else: - ec2_image['name'] = name - - properties = 
ec2utils.deserialize_os_image_properties(os_image) - root_device_name = ( - ec2utils.block_device_properties_root_device_name(properties)) - mappings = _format_mappings(context, properties, root_device_name, - snapshot_ids, os_image.owner) - if mappings: - ec2_image['blockDeviceMapping'] = mappings - - root_device_type = 'instance-store' - if root_device_name: - ec2_image['rootDeviceName'] = root_device_name - - short_root_device_name = ec2utils.block_device_strip_dev( - root_device_name) - if any((short_root_device_name == - ec2utils.block_device_strip_dev(bdm.get('deviceName'))) - for bdm in mappings): - root_device_type = 'ebs' - ec2_image['rootDeviceType'] = root_device_type - - return ec2_image - - -def _format_mappings(context, os_image_properties, root_device_name=None, - snapshot_ids=None, project_id=None): - formatted_mappings = [] - bdms = ec2utils.get_os_image_mappings(os_image_properties) - ephemeral_numbers = _ephemeral_free_number_generator(bdms) - for bdm in bdms: - # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? - # TODO(ft): figure out AWS and Nova behaviors - if bdm.get('no_device'): - continue - item = {} - if bdm.get('boot_index') == 0 and root_device_name: - item['deviceName'] = root_device_name - elif 'device_name' in bdm: - item['deviceName'] = bdm['device_name'] - if bdm.get('destination_type') == 'volume': - ebs = _format_volume_mapping( - context, bdm, snapshot_ids=snapshot_ids, project_id=project_id) - if not ebs: - # TODO(ft): what to do with the wrong bdm? - continue - item['ebs'] = ebs - elif bdm.get('destination_type') == 'local': - virtual_name = _format_virtual_name(bdm, ephemeral_numbers) - if not virtual_name: - # TODO(ft): what to do with the wrong bdm? - continue - item['virtualName'] = virtual_name - else: - # TODO(ft): what to do with the wrong bdm? 
- continue - formatted_mappings.append(item) - - return formatted_mappings - - -def _format_volume_mapping(context, bdm, snapshot_ids=None, project_id=None): - ebs = {'deleteOnTermination': bdm['delete_on_termination']} - # TODO(ft): set default volumeSize from the source - if bdm.get('volume_size') is not None: - ebs['volumeSize'] = bdm['volume_size'] - if bdm.get('source_type') == 'snapshot': - if bdm.get('snapshot_id'): - ebs['snapshotId'] = ec2utils.os_id_to_ec2_id( - context, 'snap', bdm['snapshot_id'], - ids_by_os_id=snapshot_ids, project_id=project_id) - # NOTE(ft): Openstack extension, AWS-incompability - elif bdm.get('source_type') == 'volume': - if bdm.get('volume_id'): - ebs['snapshotId'] = ec2utils.os_id_to_ec2_id( - context, 'vol', bdm['volume_id'], project_id=project_id) - # NOTE(ft): extension, AWS-incompability - elif bdm.get('source_type') == 'image': - if bdm.get('image_id'): - ebs['snapshotId'] = ec2utils.os_id_to_ec2_id( - context, 'ami', bdm['image_id']) - if ebs.get('snapshotId') or bdm.get('source_type') == 'blank': - return ebs - - -def _format_virtual_name(bdm, ephemeral_numbers): - if bdm.get('source_type') == 'blank': - if bdm.get('guest_format') == 'swap': - return 'swap' - else: - return (bdm.get('virtual_name') or - 'ephemeral%s' % next(ephemeral_numbers)) - - -def _ephemeral_free_number_generator(bdms): - named_ephemeral_nums = set( - int(bdm['virtual_name'][EPHEMERAL_PREFIX_LEN:]) - for bdm in bdms - if (bdm.get('destination_type') == 'local' and - bdm.get('source_type') == 'blank' and - bdm.get('guest_format') != 'swap' and - bdm.get('virtual_name'))) - ephemeral_free_num = 0 - while True: - if ephemeral_free_num not in named_ephemeral_nums: - yield ephemeral_free_num - ephemeral_free_num += 1 - - -def _get_os_image_kind(os_image): - # NOTE(ft): for 'get' operation Glance image doesn't have an attribute - # if it isn't sent by Glance. 
But Glance doesn't send null-value - # attributes, and the attributes above are null for volume-backed images. - if not hasattr(os_image, 'container_format'): - return 'ami' - return CONTAINER_TO_KIND.get(os_image.container_format, 'ami') - - -def _auto_create_image_extension(context, image, os_image): - image['is_public'] = os_image.visibility == 'public' - - -ec2utils.register_auto_create_db_item_extension( - 'ami', _auto_create_image_extension) -ec2utils.register_auto_create_db_item_extension( - 'ari', _auto_create_image_extension) -ec2utils.register_auto_create_db_item_extension( - 'aki', _auto_create_image_extension) - - -# NOTE(ft): following functions are copied from various parts of Nova - -# translate our internal state to states valid by the EC2 API documentation -_s3_image_state_map = {'downloading': 'pending', - 'failed_download': 'failed', - 'decrypting': 'pending', - 'failed_decrypt': 'failed', - 'untarring': 'pending', - 'failed_untar': 'failed', - 'uploading': 'pending', - 'failed_upload': 'failed', - 'available': 'available'} - - -def _s3_create(context, metadata): - """Gets a manifest from s3 and makes an image.""" - - # Parse the metadata into bucket and manifest path - parsed_url = parse.urlparse(metadata['image_location']) - if parsed_url.hostname is not None: - # Handle s3:/// case - bucket_name = parsed_url.hostname - manifest_path = parsed_url.path[1:] - else: - # Handle / case - bucket_name = parsed_url.path.split('/')[0] - manifest_path = '/'.join(parsed_url.path.split('/')[1:]) - - # Continue with S3 import - s3_client = _s3_conn(context) - image_location = '/'.join([bucket_name, manifest_path]) - key = s3_client.get_object(Bucket=bucket_name, Key=manifest_path) - body = key['Body'] - if isinstance(body, str): - manifest = body - else: - # TODO(andrey-mp): check big objects - manifest = body.read() - - (image_metadata, image_parts, - encrypted_key, encrypted_iv) = _s3_parse_manifest(context, manifest) - metadata.update(image_metadata) - 
metadata.update({'image_state': 'pending', - 'visibility': 'private'}) - - # TODO(bcwaldon): right now, this removes user-defined ids - # We need to re-enable this. - metadata.pop('id', None) - - glance = clients.glance(context) - image = glance.images.create(**metadata) - - def _update_image_state(image_state): - glance.images.update(image.id, image_state=image_state) - - def delayed_create(): - """This handles the fetching and decrypting of the part files.""" - context.update_store() - try: - image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir) - log_vars = {'image_location': image_location, - 'image_path': image_path} - - _update_image_state('downloading') - try: - parts = [] - for part_name in image_parts: - part = _s3_download_file(s3_client, bucket_name, - part_name, image_path) - parts.append(part) - - # NOTE(vish): this may be suboptimal, should we use cat? - enc_filename = os.path.join(image_path, 'image.encrypted') - with open(enc_filename, 'wb') as combined: - for filename in parts: - with open(filename, "rb") as part: - combined.write(part.read()) - - except Exception: - LOG.exception('Failed to download %(image_location)s ' - 'to %(image_path)s', log_vars) - _update_image_state('failed_download') - return - - _update_image_state('decrypting') - try: - dec_filename = os.path.join(image_path, 'image.tar.gz') - _s3_decrypt_image(context, enc_filename, encrypted_key, - encrypted_iv, dec_filename) - except Exception: - LOG.exception('Failed to decrypt %(image_location)s ' - 'to %(image_path)s', log_vars) - _update_image_state('failed_decrypt') - return - - _update_image_state('untarring') - try: - unz_filename = _s3_untarzip_image(image_path, dec_filename) - except Exception: - LOG.exception('Failed to untar %(image_location)s ' - 'to %(image_path)s', log_vars) - _update_image_state('failed_untar') - return - - _update_image_state('uploading') - try: - with open(unz_filename, "rb") as image_file: - glance.images.upload(image.id, image_file) - except 
Exception: - LOG.exception('Failed to upload %(image_location)s ' - 'to %(image_path)s', log_vars) - _update_image_state('failed_upload') - return - - _update_image_state('available') - - shutil.rmtree(image_path) - except glance_exception.HTTPNotFound: - LOG.info('Image %swas deleted underneath us', image.id) - except Exception: - LOG.exception('Failed to complete image %s creation', image.id) - - eventlet.spawn_n(delayed_create) - - return image - - -def _s3_parse_manifest(context, manifest): - manifest = etree.fromstring(manifest) - - try: - arch = manifest.find('machine_configuration/architecture').text - except Exception: - arch = 'x86_64' - - metadata = {'architecture': arch} - - mappings = [] - try: - block_device_mapping = manifest.findall('machine_configuration/' - 'block_device_mapping/' - 'mapping') - for bdm in block_device_mapping: - mappings.append({'virtual': bdm.find('virtual').text, - 'device': bdm.find('device').text}) - except Exception: - mappings = [] - - if mappings: - metadata['mappings'] = mappings - - def set_dependent_image_id(image_key): - try: - image_key_path = ('machine_configuration/%(image_key)s' % - {'image_key': image_key}) - image_id = manifest.find(image_key_path).text - except Exception: - return - if image_id == 'true': - return True - os_image = ec2utils.get_os_image(context, image_id) - metadata[image_key] = os_image.id - - image_format = 'ami' - if set_dependent_image_id('kernel_id'): - image_format = 'aki' - if set_dependent_image_id('ramdisk_id'): - image_format = 'ari' - - metadata.update({'disk_format': image_format, - 'container_format': image_format}) - image_parts = [ - fn_element.text - for fn_element in manifest.find('image').iter('filename')] - encrypted_key = manifest.find('image/ec2_encrypted_key').text - encrypted_iv = manifest.find('image/ec2_encrypted_iv').text - - return metadata, image_parts, encrypted_key, encrypted_iv - - -def _s3_download_file(s3_client, bucket_name, filename, local_dir): - s3_object = 
s3_client.get_object(Bucket=bucket_name, Key=filename) - local_filename = os.path.join(local_dir, os.path.basename(filename)) - body = s3_object['Body'] - with open(local_filename, 'wb') as f: - f.write(body.read()) - return local_filename - - -def _s3_decrypt_image(context, encrypted_filename, encrypted_key, - encrypted_iv, decrypted_filename): - encrypted_key = binascii.a2b_hex(encrypted_key) - encrypted_iv = binascii.a2b_hex(encrypted_iv) - try: - key = _decrypt_text(encrypted_key).decode() - except Exception as exc: - msg = _('Failed to decrypt private key: %s') % exc - raise exception.EC2Exception(msg) - try: - iv = _decrypt_text(encrypted_iv).decode() - except Exception as exc: - msg = _('Failed to decrypt initialization vector: %s') % exc - raise exception.EC2Exception(msg) - - try: - processutils.execute('openssl', 'enc', - '-d', '-aes-128-cbc', - '-in', '%s' % (encrypted_filename,), - '-K', '%s' % (key,), - '-iv', '%s' % (iv,), - '-out', '%s' % (decrypted_filename,)) - except processutils.ProcessExecutionError as exc: - raise exception.EC2Exception(_('Failed to decrypt image file ' - '%(image_file)s: %(err)s') % - {'image_file': encrypted_filename, - 'err': exc.stdout}) - - -def _s3_untarzip_image(path, filename): - _s3_test_for_malicious_tarball(path, filename) - tar_file = tarfile.open(filename, 'r|gz') - tar_file.extractall(path) - image_file = tar_file.getnames()[0] - tar_file.close() - return os.path.join(path, image_file) - - -def _s3_test_for_malicious_tarball(path, filename): - """Raises exception if extracting tarball would escape extract path.""" - tar_file = tarfile.open(filename, 'r|gz') - for n in tar_file.getnames(): - if not os.path.abspath(os.path.join(path, n)).startswith(path): - tar_file.close() - # TODO(ft): figure out actual AWS exception - raise exception.EC2InvalidException(_('Unsafe filenames in image')) - tar_file.close() - - -def _s3_conn(context): - region = CONF.s3_region - ec2_creds = 
clients.keystone(context).ec2.list(context.user_id) - - # Here we a) disable user's default config to let ec2api works independetly - # of user's local settings; - # b) specify region to be used by botocore; - # c) do not change standard botocore keys to get these settings - # from environment - connection_data = { - 'config_file': (None, 'AWS_CONFIG_FILE', None, None), - 'region': ('region', 'AWS_DEFAULT_REGION', region, None), - } - session = botocore.session.get_session(connection_data) - return session.create_client( - 's3', region_name=region, endpoint_url=CONF.s3_url, - aws_access_key_id=ec2_creds[0].access, - aws_secret_access_key=ec2_creds[0].secret, - config=botocore.config.Config(signature_version='s3v4')) - - -def _decrypt_text(text): - private_key_file = CONF.x509_root_private_key - if not private_key_file: - msg = _("Path to ca private key isn't configured") - raise exception.EC2Exception(msg) - with open(private_key_file, 'rb') as f: - data = f.read() - priv_key = serialization.load_pem_private_key( - data, None, backends.default_backend()) - return priv_key.decrypt(text, padding.PKCS1v15()) diff --git a/ec2api/api/instance.py b/ec2api/api/instance.py deleted file mode 100644 index 8748e2e6..00000000 --- a/ec2api/api/instance.py +++ /dev/null @@ -1,1716 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -import collections -import copy -import itertools -import random -import time - -from novaclient import exceptions as nova_exception -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api.api import network_interface as network_interface_api -from ec2api.api import security_group as security_group_api -from ec2api import clients -from ec2api import context as ec2_context -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - -LOG = logging.getLogger(__name__) - -ec2_opts = [ - cfg.BoolOpt('ec2_private_dns_show_ip', - default=False, - help='Return the IP address as private dns hostname in ' - 'describe instances'), - cfg.StrOpt('default_flavor', - default='m1.small', - help='A flavor to use as a default instance type') -] - -CONF = cfg.CONF -CONF.register_opts(ec2_opts) - -"""Instance related API implementation -""" - - -class Validator(common.Validator): - - def i_id_or_ids(self, value): - # NOTE(ft): boto specifies an instance id to GetConsoleOutput as - # a list with the id. This is an AWS undocumented feature for all (?) - # parameters, but ec2api will support it in certain operations only. 
- if type(value) is list: - if len(value) != 1: - msg = ( - _("The parameter 'InstanceId' may only be specified once.") - if len(value) else - _('No instanceId specified')) - raise exception.InvalidParameterCombination(msg) - value = value[0] - self.i_id(value) - - -def get_instance_engine(): - return InstanceEngineNeutron() - - -def run_instances(context, image_id, min_count, max_count, - key_name=None, security_group_id=None, - security_group=None, user_data=None, instance_type=None, - placement=None, kernel_id=None, ramdisk_id=None, - block_device_mapping=None, monitoring=None, - subnet_id=None, disable_api_termination=None, - instance_initiated_shutdown_behavior=None, - private_ip_address=None, client_token=None, - network_interface=None, iam_instance_profile=None, - ebs_optimized=None): - - if ramdisk_id: - raise exception.InvalidAttribute(attr='ramdisk_id') - - if kernel_id: - raise exception.InvalidAttribute(attr='kernel_id') - - _check_min_max_count(min_count, max_count) - - if client_token: - reservations = describe_instances(context, - filter=[{'name': 'client-token', - 'value': [client_token]}]) - if reservations['reservationSet']: - if len(reservations['reservationSet']) > 1: - LOG.error('describe_instances has returned %s ' - 'reservations, but 1 is expected.', - len(reservations['reservationSet'])) - LOG.error('Requested instances client token: %s', client_token) - LOG.error('Result: %s', reservations) - return reservations['reservationSet'][0] - - os_image = _parse_image_parameters(context, image_id) - - nova = clients.nova(context) - os_flavor = _get_os_flavor(instance_type, nova) - - bdm = _build_block_device_mapping(context, block_device_mapping, os_image) - availability_zone = (placement or {}).get('availability_zone') - if user_data: - user_data = base64.b64decode(user_data) - - vpc_id, launch_context = instance_engine.get_vpc_and_build_launch_context( - context, security_group, - subnet_id, private_ip_address, security_group_id, 
network_interface, - multiple_instances=max_count > 1) - - ec2_reservation_id = _generate_reservation_id() - instance_ids = [] - with common.OnCrashCleaner() as cleaner: - # NOTE(ft): create Neutron's ports manually and run instances one - # by one to have a chance to: - # process individual network interface options like security_group - # or private_ip_addresses (Nova's create_instances receives only - # one fixed_ip for subnet) - # set dhcp options to port - # add corresponding OS ids of network interfaces to our DB - # TODO(ft): we should lock created network interfaces to prevent - # their usage or deleting - - # TODO(ft): do correct error messages on create failures. For - # example, overlimit, ip lack, ip overlapping, etc - for launch_index in range(max_count): - if launch_index >= min_count: - cleaner.approveChanges() - - extra_params = ( - instance_engine.get_launch_extra_parameters( - context, cleaner, launch_context)) - - os_instance = nova.servers.create( - '%s-%s' % (ec2_reservation_id, launch_index), - os_image.id, os_flavor, - min_count=1, max_count=1, - availability_zone=availability_zone, - block_device_mapping_v2=bdm, - key_name=key_name, userdata=user_data, - **extra_params) - cleaner.addCleanup(nova.servers.delete, os_instance.id) - - instance = {'os_id': os_instance.id, - 'vpc_id': vpc_id, - 'reservation_id': ec2_reservation_id, - 'launch_index': launch_index} - if client_token: - instance['client_token'] = client_token - if disable_api_termination: - instance['disable_api_termination'] = disable_api_termination - - instance = db_api.add_item(context, 'i', instance) - cleaner.addCleanup(db_api.delete_item, context, instance['id']) - instance_ids.append(instance['id']) - - nova.servers.update(os_instance, name=instance['id']) - - instance_engine.post_launch_action( - context, cleaner, launch_context, instance['id']) - - ec2_reservations = describe_instances(context, instance_ids) - reservation_count = len(ec2_reservations['reservationSet']) - if 
reservation_count != 1: - LOG.error('describe_instances has returned %s reservations, ' - 'but 1 is expected.', reservation_count) - LOG.error('Requested instances IDs: %s', instance_ids) - LOG.error('Result: %s', ec2_reservations) - return (ec2_reservations['reservationSet'][0] - if reservation_count else None) - - -def terminate_instances(context, instance_id): - instance_ids = set(instance_id) - instances = ec2utils.get_db_items(context, 'i', instance_ids) - - nova = clients.nova(context) - state_changes = [] - for instance in instances: - if instance.get('disable_api_termination'): - message = _("The instance '%s' may not be terminated. Modify its " - "'disableApiTermination' instance attribute and try " - "again.") % instance['id'] - raise exception.OperationNotPermitted(message=message) - for instance in instances: - try: - os_instance = nova.servers.get(instance['os_id']) - except nova_exception.NotFound: - os_instance = None - else: - os_instance.delete() - state_change = _format_state_change(instance, os_instance) - state_changes.append(state_change) - - # NOTE(ft): don't delete items from DB until they disappear from OS. 
- # They will be auto deleted by a describe operation - return {'instancesSet': state_changes} - - -class InstanceDescriber(common.TaggableItemsDescriber): - - KIND = 'i' - SORT_KEY = 'instanceId' - FILTER_MAP = { - 'availability-zone': ('placement', 'availabilityZone'), - 'block-device-mapping.delete-on-termination': [ - 'blockDeviceMapping', ('ebs', 'deleteOnTermination')], - 'block-device-mapping.device-name': ['blockDeviceMapping', - 'deviceName'], - 'block-device-mapping.status': ['blockDeviceMapping', - ('ebs', 'status')], - 'block-device-mapping.volume-id': ['blockDeviceMapping', - ('ebs', 'volumeId')], - 'client-token': 'clientToken', - 'dns-name': 'dnsName', - 'group-id': ['groupSet', 'groupId'], - 'group-name': ['groupSet', 'groupName'], - 'image-id': 'imageId', - 'instance-id': 'instanceId', - 'instance-state-code': ('instanceState', 'code'), - 'instance-state-name': ('instanceState', 'name'), - 'instance-type': 'instanceType', - 'instance.group-id': ['groupSet', 'groupId'], - 'instance.group-name': ['groupSet', 'groupName'], - 'ip-address': 'ipAddress', - 'kernel-id': 'kernelId', - 'key-name': 'keyName', - 'launch-index': 'amiLaunchIndex', - 'launch-time': 'launchTime', - 'private-dns-name': 'privateDnsName', - 'private-ip-address': 'privateIpAddress', - 'ramdisk-id': 'ramdiskId', - 'root-device-name': 'rootDeviceName', - 'root-device-type': 'rootDeviceType', - 'subnet-id': ['networkInterfaceSet', 'subnetId'], - 'vpc-id': ['networkInterfaceSet', 'vpcId'], - 'network-interface.description': ['networkInterfaceSet', - 'description'], - 'network-interface.subnet-id': ['networkInterfaceSet', 'subnetId'], - 'network-interface.vpc-id': ['networkInterfaceSet', 'vpcId'], - 'network-interface.network-interface.id': ['networkInterfaceSet', - 'networkInterfaceId'], - 'network-interface.owner-id': ['networkInterfaceSet', 'ownerId'], - 'network-interface.requester-managed': ['networkInterfaceSet', - 'requesterManaged'], - 'network-interface.status': 
['networkInterfaceSet', 'status'], - 'network-interface.mac-address': ['networkInterfaceSet', - 'macAddress'], - 'network-interface.source-destination-check': ['networkInterfaceSet', - 'sourceDestCheck'], - 'network-interface.group-id': ['networkInterfaceSet', - ['groupSet', 'groupId']], - 'network-interface.group-name': ['networkInterfaceSet', - ['groupSet', 'groupName']], - 'network-interface.attachment.attachment-id': - ['networkInterfaceSet', ('attachment', 'attachmentId')], - 'network-interface.attachment.instance-id': 'instanceId', - 'network-interface.addresses.private-ip-address': - ['networkInterfaceSet', ['privateIpAddressesSet', - 'privateIpAddress']], - 'network-interface.attachment.device-index': - ['networkInterfaceSet', ('attachment', 'deviceIndex')], - 'network-interface.attachment.status': - ['networkInterfaceSet', ('attachment', 'status')], - 'network-interface.attachment.attach-time': - ['networkInterfaceSet', ('attachment', 'attachTime')], - 'network-interface.attachment.delete-on-termination': - ['networkInterfaceSet', ('attachment', 'deleteOnTermination')], - 'network-interface.addresses.primary': - ['networkInterfaceSet', ['privateIpAddressesSet', 'primary']], - 'network-interface.addresses.association.public-ip': - ['networkInterfaceSet', ['privateIpAddressesSet', - ('association', 'publicIp')]], - 'network-interface.addresses.association.ip-owner-id': - ['networkInterfaceSet', ['privateIpAddressesSet', - ('association', 'ipOwnerId')]], - 'association.public-ip': ['networkInterfaceSet', - ('association', 'publicIp')], - 'association.ip-owner-id': ['networkInterfaceSet', - ('association', 'ipOwnerId')]} - - def __init__(self): - super(InstanceDescriber, self).__init__() - self.reservations = {} - self.reservation_instances = collections.defaultdict(list) - self.reservation_groups = {} - self.obsolete_instances = [] - - def format(self, instance, os_instance): - formatted_instance = _format_instance( - self.context, instance, os_instance, - 
self.ec2_network_interfaces.get(instance['id']), - self.image_ids, self.volumes, self.os_volumes, - self.os_flavors, self.groups_name_to_id) - - reservation_id = instance['reservation_id'] - if reservation_id in self.reservations: - reservation = self.reservations[reservation_id] - else: - reservation = {'id': reservation_id, - 'owner_id': os_instance.tenant_id} - self.reservations[reservation_id] = reservation - if not instance['vpc_id']: - self.reservation_groups[reservation_id] = ( - formatted_instance.get('groupSet')) - - self.reservation_instances[ - reservation['id']].append(formatted_instance) - - return formatted_instance - - def get_db_items(self): - instances = super(InstanceDescriber, self).get_db_items() - self.ec2_network_interfaces = ( - instance_engine.get_ec2_network_interfaces( - self.context, self.ids)) - self.groups_name_to_id = _get_groups_name_to_id(self.context) - self.volumes = {v['os_id']: v - for v in db_api.get_items(self.context, 'vol')} - self.image_ids = {i['os_id']: i['id'] - for i in itertools.chain( - db_api.get_items(self.context, 'ami'), - db_api.get_public_items(self.context, 'ami'))} - return instances - - def get_os_items(self): - self.os_volumes = _get_os_volumes(self.context) - self.os_flavors = _get_os_flavors(self.context) - nova = clients.nova(ec2_context.get_os_admin_context()) - if len(self.ids) == 1 and len(self.items) == 1: - try: - return [nova.servers.get(self.items[0]['os_id'])] - except nova_exception.NotFound: - return [] - else: - return nova.servers.list( - search_opts={'all_tenants': True, - 'project_id': self.context.project_id}) - - def auto_update_db(self, instance, os_instance): - if not instance: - instance = ec2utils.get_db_item_by_os_id( - self.context, 'i', os_instance.id, - os_instance=os_instance) - return instance - - def get_name(self, os_item): - return '' - - def delete_obsolete_item(self, instance): - self.obsolete_instances.append(instance) - - -class 
ReservationDescriber(common.NonOpenstackItemsDescriber): - - KIND = 'r' - FILTER_MAP = { - 'reservation-id': 'reservationId', - 'owner-id': 'ownerId', - 'network-interface.attachment.instance-owner-id': 'ownerId', - } - - def format(self, reservation): - formatted_instances = [i for i in self.instances[reservation['id']] - if i['instanceId'] in self.suitable_instances] - if not formatted_instances: - return None - return _format_reservation(self.context, reservation, - formatted_instances, - self.groups.get(reservation['id'], [])) - - def get_db_items(self): - return self.reservations - - def describe(self, context, ids=None, names=None, filter=None, - max_results=None, next_token=None): - reservation_filters = [] - instance_filters = [] - for f in filter or []: - if f.get('name') in self.FILTER_MAP: - reservation_filters.append(f) - else: - instance_filters.append(f) - # NOTE(ft): set empty filter sets to None because Describer - # requires None for no filter case - if not instance_filters: - instance_filters = None - if not reservation_filters: - reservation_filters = None - - try: - instance_describer = InstanceDescriber() - formatted_instances = instance_describer.describe( - context, ids=ids, filter=instance_filters, - max_results=max_results, next_token=next_token) - except exception.InvalidInstanceIDNotFound: - _remove_instances(context, instance_describer.obsolete_instances) - raise - - _remove_instances(context, instance_describer.obsolete_instances) - - self.reservations = instance_describer.reservations.values() - self.instances = instance_describer.reservation_instances - self.groups = instance_describer.reservation_groups - self.suitable_instances = set(i['instanceId'] - for i in formatted_instances) - - result = super(ReservationDescriber, self).describe( - context, filter=reservation_filters) - self.next_token = instance_describer.next_token - return result - - -def describe_instances(context, instance_id=None, filter=None, - max_results=None, 
next_token=None): - if instance_id and max_results: - msg = _('The parameter instancesSet cannot be used with the parameter ' - 'maxResults') - raise exception.InvalidParameterCombination(msg) - - reservation_describer = ReservationDescriber() - formatted_reservations = reservation_describer.describe( - context, ids=instance_id, filter=filter, - max_results=max_results, next_token=next_token) - - result = {'reservationSet': formatted_reservations} - if reservation_describer.next_token: - result['nextToken'] = reservation_describer.next_token - return result - - -def reboot_instances(context, instance_id): - return _foreach_instance(context, instance_id, - (vm_states_ALLOW_SOFT_REBOOT + - vm_states_ALLOW_HARD_REBOOT), - lambda instance: instance.reboot()) - - -def stop_instances(context, instance_id, force=False): - return _foreach_instance(context, instance_id, - [vm_states_ACTIVE, vm_states_RESCUED, - vm_states_ERROR], - lambda instance: instance.stop()) - - -def start_instances(context, instance_id): - return _foreach_instance(context, instance_id, [vm_states_STOPPED], - lambda instance: instance.start()) - - -def get_password_data(context, instance_id): - if type(instance_id) is list: - instance_id = instance_id[0] - instance = ec2utils.get_db_item(context, instance_id) - nova = clients.nova(context) - os_instance = nova.servers.get(instance['os_id']) - password = os_instance.get_password() - # NOTE(vish): this should be timestamp from the metadata fields - # but it isn't important enough to implement properly - now = timeutils.utcnow() - return {"instanceId": instance_id, - "timestamp": now, - "passwordData": base64.b64encode(password.encode())} - - -def get_console_output(context, instance_id): - if type(instance_id) is list: - instance_id = instance_id[0] - instance = ec2utils.get_db_item(context, instance_id) - nova = clients.nova(context) - os_instance = nova.servers.get(instance['os_id']) - console_output = os_instance.get_console_output() - now = 
timeutils.utcnow() - return {"instanceId": instance_id, - "timestamp": now, - "output": base64.b64encode(console_output.encode())} - - -def describe_instance_attribute(context, instance_id, attribute): - instance = ec2utils.get_db_item(context, instance_id) - nova = clients.nova(ec2_context.get_os_admin_context()) - os_instance = nova.servers.get(instance['os_id']) - - def _format_attr_block_device_mapping(result): - # TODO(ft): next call add 'rootDeviceType' to result, - # but AWS doesn't. This is legacy behavior of Nova EC2 - _cloud_format_instance_bdm(context, os_instance, result) - - def _format_source_dest_check(result): - if not instance.get('vpc_id'): - raise exception.InvalidParameterCombination( - _('You may only describe the sourceDestCheck attribute for ' - 'VPC instances')) - enis = network_interface_api.describe_network_interfaces( - context, filter=[{'name': 'attachment.instance-id', - 'value': [instance_id]}] - )['networkInterfaceSet'] - if len(enis) != 1: - raise exception.InvalidInstanceId(instance_id=instance_id) - result['sourceDestCheck'] = {'value': enis[0]['sourceDestCheck']} - - def _format_attr_group_set(result): - if instance.get('vpc_id'): - enis = network_interface_api.describe_network_interfaces( - context, filter=[{'name': 'attachment.instance-id', - 'value': [instance_id]}] - )['networkInterfaceSet'] - if len(enis) != 1: - raise exception.InvalidInstanceId(instance_id=instance_id) - result['groupSet'] = enis[0]['groupSet'] - else: - groups = _get_groups_name_to_id(context) - result['groupSet'] = _format_group_set( - context, getattr(os_instance, 'security_groups', []), groups) - - def _format_attr_instance_type(result): - result['instanceType'] = { - 'value': _cloud_format_instance_type(context, os_instance)} - - def _format_attr_kernel(result): - value = _cloud_format_kernel_id(context, os_instance) - result['kernel'] = {'value': value} - - def _format_attr_ramdisk(result): - value = _cloud_format_ramdisk_id(context, os_instance) - 
result['ramdisk'] = {'value': value} - - def _format_attr_root_device_name(result): - result['rootDeviceName'] = { - 'value': getattr(os_instance, - 'OS-EXT-SRV-ATTR:root_device_name', None)} - - def _format_attr_user_data(result): - user_data = getattr(os_instance, 'OS-EXT-SRV-ATTR:user_data', None) - if user_data: - result['userData'] = {'value': user_data} - - def _format_attr_disable_api_termination(result): - result['disableApiTermination'] = { - 'value': instance.get('disable_api_termination', False)} - - attribute_formatter = { - 'blockDeviceMapping': _format_attr_block_device_mapping, - 'disableApiTermination': _format_attr_disable_api_termination, - 'groupSet': _format_attr_group_set, - 'sourceDestCheck': _format_source_dest_check, - 'instanceType': _format_attr_instance_type, - 'kernel': _format_attr_kernel, - 'ramdisk': _format_attr_ramdisk, - 'rootDeviceName': _format_attr_root_device_name, - 'userData': _format_attr_user_data, - } - - fn = attribute_formatter.get(attribute) - if fn is None: - raise exception.InvalidParameterValue(value=attribute, - parameter='attribute', - reason='Unknown attribute.') - - result = {'instanceId': instance_id} - fn(result) - return result - - -def modify_instance_attribute(context, instance_id, attribute=None, - value=None, source_dest_check=None, - block_device_mapping=None, - disable_api_termination=None, - ebs_optimized=None, group_id=None, - instance_initiated_shutdown_behavior=None, - instance_type=None, kernel=None, - ramdisk=None, sriov_net_support=None, - user_data=None): - # NOTE(andrey-mp): other parameters can be added in same way - - if attribute is not None: - if attribute == 'disableApiTermination': - if disable_api_termination is not None: - raise exception.InvalidParameterCombination() - elif attribute == 'sourceDestCheck': - if source_dest_check is not None: - raise exception.InvalidParameterCombination() - elif attribute == 'instanceType': - if instance_type is not None: - raise 
exception.InvalidParameterCombination() - else: - raise exception.InvalidParameterValue(value=attribute, - parameter='attribute', - reason='Unknown attribute.') - if value is None: - raise exception.MissingParameter(param='value') - - params_count = ( - int(source_dest_check is not None) + - int(group_id is not None) + int(instance_type is not None) + - int(disable_api_termination is not None)) - if (params_count > 1 or - (attribute is not None and params_count == 1) or - (params_count == 0 and attribute is None)): - raise exception.InvalidParameterCombination() - - if attribute == 'disableApiTermination': - disable_api_termination = value - elif attribute == 'sourceDestCheck': - source_dest_check = value - elif attribute == 'instanceType': - instance_type = value - - instance = ec2utils.get_db_item(context, instance_id) - if disable_api_termination is not None: - instance['disable_api_termination'] = value - db_api.update_item(context, instance) - return True - elif group_id is not None: - _modify_group(context, instance, group_id) - return True - elif source_dest_check is not None: - _modify_source_dest_check(context, instance, source_dest_check) - return True - elif instance_type: - _modify_instance_type(context, instance, instance_type) - return True - - raise exception.InvalidParameterCombination() - - -def _modify_group(context, instance, group_id): - if not instance.get('vpc_id'): - raise exception.InvalidParameterCombination( - _('You may only modify the groupSet attribute for VPC instances')) - enis = network_interface_api.describe_network_interfaces( - context, filter=[{'name': 'attachment.instance-id', - 'value': [instance['id']]}] - )['networkInterfaceSet'] - if len(enis) != 1: - raise exception.InvalidInstanceId(instance_id=instance['id']) - network_interface_api.modify_network_interface_attribute( - context, enis[0]['networkInterfaceId'], security_group_id=group_id) - - -def _modify_source_dest_check(context, instance, source_dest_check): - if not 
instance.get('vpc_id'): - raise exception.InvalidParameterCombination( - _('You may only modify the sourceDestCheck attribute for ' - 'VPC instances')) - enis = network_interface_api.describe_network_interfaces( - context, filter=[{'name': 'attachment.instance-id', - 'value': [instance['id']]}] - )['networkInterfaceSet'] - if len(enis) != 1: - raise exception.InvalidInstanceId(instance_id=instance['id']) - network_interface_api.modify_network_interface_attribute( - context, enis[0]['networkInterfaceId'], - source_dest_check=source_dest_check) - - -def _modify_instance_type(context, instance, instance_type): - nova = clients.nova(context) - os_instance = nova.servers.get(instance['os_id']) - os_flavor = _get_os_flavor(instance_type, nova) - vm_state = getattr(os_instance, 'OS-EXT-STS:vm_state') - if vm_state != vm_states_STOPPED: - msg = (_("The instance %s is not in the 'stopped' state.") - % instance['id']) - raise exception.IncorrectInstanceState(message=msg) - - if os_instance.flavor['id'] == os_flavor.id: - return True - - os_instance.resize(os_flavor) - # NOTE(andrey-mp): if this operation will be too long (more than - # timeout) then we can add more code. For example: - # 1. current code returns HTTP 500 code if time is out. client retries - # query. code can detect that resizing in progress and wait again. - # 2. make this operation async by some way... 
- for dummy in range(45): - os_instance = nova.servers.get(os_instance) - vm_state = getattr(os_instance, 'OS-EXT-STS:vm_state') - if vm_state == vm_states_RESIZED: - break - time.sleep(1) - os_instance = nova.servers.get(os_instance) - vm_state = getattr(os_instance, 'OS-EXT-STS:vm_state') - if vm_state != vm_states_RESIZED: - raise exception.EC2APIException( - message=_('Time is out for instance resizing')) - os_instance.confirm_resize() - for dummy in range(15): - os_instance = nova.servers.get(os_instance) - vm_state = getattr(os_instance, 'OS-EXT-STS:vm_state') - if vm_state != vm_states_RESIZED: - break - time.sleep(1) - - -def reset_instance_attribute(context, instance_id, attribute): - if attribute == 'sourceDestCheck': - instance = ec2utils.get_db_item(context, instance_id) - _modify_source_dest_check(context, instance, True) - return True - - raise exception.InvalidParameterValue(value=attribute, - parameter='attribute', - reason='Unknown attribute.') - - -def _format_reservation(context, reservation, formatted_instances, groups): - return { - 'reservationId': reservation['id'], - 'ownerId': reservation['owner_id'], - 'instancesSet': sorted(formatted_instances, - key=lambda i: i['amiLaunchIndex']), - 'groupSet': groups - } - - -def _format_instance(context, instance, os_instance, ec2_network_interfaces, - image_ids, volumes, os_volumes, os_flavors, - groups_name_to_id): - ec2_instance = { - 'amiLaunchIndex': instance['launch_index'], - 'imageId': (ec2utils.os_id_to_ec2_id(context, 'ami', - os_instance.image['id'], - ids_by_os_id=image_ids) - if os_instance.image else None), - 'instanceId': instance['id'], - 'instanceType': os_flavors.get(os_instance.flavor['id'], 'unknown'), - 'keyName': os_instance.key_name, - 'launchTime': os_instance.created, - 'placement': { - 'availabilityZone': getattr(os_instance, - 'OS-EXT-AZ:availability_zone')}, - 'productCodesSet': None, - 'instanceState': _cloud_state_description( - getattr(os_instance, 
'OS-EXT-STS:vm_state')), - } - root_device_name = getattr(os_instance, - 'OS-EXT-SRV-ATTR:root_device_name', None) - if root_device_name: - ec2_instance['rootDeviceName'] = root_device_name - _cloud_format_instance_bdm(context, os_instance, ec2_instance, - volumes, os_volumes) - kernel_id = _cloud_format_kernel_id(context, os_instance, image_ids) - if kernel_id: - ec2_instance['kernelId'] = kernel_id - ramdisk_id = _cloud_format_ramdisk_id(context, os_instance, image_ids) - if ramdisk_id: - ec2_instance['ramdiskId'] = ramdisk_id - - if 'client_token' in instance: - ec2_instance['clientToken'] = instance['client_token'] - - if not ec2_network_interfaces: - fixed_ip, fixed_ip6, floating_ip = ( - _get_ip_info_for_instance(os_instance)) - if fixed_ip6: - ec2_instance['dnsNameV6'] = fixed_ip6 - dns_name = floating_ip - if getattr(os_instance, 'security_groups', None): - ec2_instance['groupSet'] = _format_group_set( - context, os_instance.security_groups, groups_name_to_id) - else: - primary_ec2_network_interface = None - for ec2_network_interface in ec2_network_interfaces: - ec2_network_interface['attachment'].pop('instanceId') - ec2_network_interface['attachment'].pop('instanceOwnerId') - ec2_network_interface.pop('tagSet') - ec2_addresses = ec2_network_interface['privateIpAddressesSet'] - for ec2_address in ec2_addresses: - association = ec2_address.get('association') - if association: - association.pop('associationId') - association.pop('allocationId') - association = ec2_network_interface.get('association') - if association: - association.pop('associationId', None) - association.pop('allocationId', None) - if ec2_network_interface['attachment']['deviceIndex'] == 0: - primary_ec2_network_interface = ec2_network_interface - ec2_instance.update({'vpcId': ec2_network_interface['vpcId'], - 'networkInterfaceSet': ec2_network_interfaces}) - fixed_ip = floating_ip = dns_name = None - if primary_ec2_network_interface: - ec2_instance.update({ - 'subnetId': 
primary_ec2_network_interface['subnetId'], - 'groupSet': primary_ec2_network_interface['groupSet'], - 'sourceDestCheck': - primary_ec2_network_interface['sourceDestCheck']}) - fixed_ip = primary_ec2_network_interface['privateIpAddress'] - if 'association' in primary_ec2_network_interface: - association = primary_ec2_network_interface['association'] - floating_ip = association['publicIp'] - dns_name = association['publicDnsName'] - ec2_instance.update({ - 'privateIpAddress': fixed_ip, - 'privateDnsName': (fixed_ip if CONF.ec2_private_dns_show_ip else - getattr(os_instance, 'OS-EXT-SRV-ATTR:hostname', - None)), - 'dnsName': dns_name, - }) - if floating_ip is not None: - ec2_instance['ipAddress'] = floating_ip - - if context.is_admin: - ec2_instance['keyName'] = '%s (%s, %s)' % ( - ec2_instance['keyName'], - os_instance.tenant_id, - getattr(os_instance, 'OS-EXT-SRV-ATTR:host')) - return ec2_instance - - -def _format_state_change(instance, os_instance): - if os_instance: - prev_state = _cloud_state_description(getattr(os_instance, - 'OS-EXT-STS:vm_state')) - try: - os_instance.get() - curr_state = _cloud_state_description( - getattr(os_instance, 'OS-EXT-STS:vm_state')) - except nova_exception.NotFound: - curr_state = _cloud_state_description(vm_states_WIPED_OUT) - else: - prev_state = curr_state = _cloud_state_description(vm_states_WIPED_OUT) - return { - 'instanceId': instance['id'], - 'previousState': prev_state, - 'currentState': curr_state, - } - - -def _remove_instances(context, instances): - if not instances: - return - ids = set([i['id'] for i in instances]) - network_interfaces = collections.defaultdict(list) - - # TODO(ft): implement search db items by os_id in DB layer - for eni in db_api.get_items(context, 'eni'): - if 'instance_id' in eni and eni['instance_id'] in ids: - network_interfaces[eni['instance_id']].append(eni) - - for instance_id in ids: - for eni in network_interfaces[instance_id]: - delete_on_termination = eni['delete_on_termination'] - 
network_interface_api._detach_network_interface_item(context, - eni) - if delete_on_termination: - network_interface_api.delete_network_interface(context, - eni['id']) - db_api.delete_item(context, instance_id) - - -def _check_min_max_count(min_count, max_count): - if min_count < 1: - msg = _('Minimum instance count must be greater than zero') - raise exception.InvalidParameterValue(msg) - elif max_count < 1: - msg = _('Maximum instance count must be greater than zero') - raise exception.InvalidParameterValue(msg) - elif min_count > max_count: - msg = _('Maximum instance count must not be smaller than ' - 'minimum instance count') - raise exception.InvalidParameterValue(msg) - - -def _parse_image_parameters(context, image_id): - os_image = ec2utils.get_os_image(context, image_id) - - if _cloud_get_image_state(os_image) != 'available': - # TODO(ft): Change the message with the real AWS message - msg = _('Image must be available') - raise exception.InvalidAMIIDUnavailable(message=msg) - - return os_image - - -def _parse_block_device_mapping(context, block_device_mapping): - # TODO(ft): check block_device_mapping structure - # TODO(ft): support virtual devices - # TODO(ft): support no_device - bdms = [] - for args_bd in (block_device_mapping or []): - bdm = { - 'device_name': args_bd['device_name'], - 'destination_type': 'volume', - } - - ebs = args_bd.get('ebs') - if ebs: - ec2_id = ebs.get('snapshot_id') - if ec2_id: - if ec2_id.startswith('snap-'): - bdm['source_type'] = 'snapshot' - snapshot = ec2utils.get_db_item(context, ec2_id) - bdm['snapshot_id'] = snapshot['os_id'] - # NOTE(ft): OpenStack extension, AWS incompatibility - elif ec2_id.startswith('vol-'): - bdm['source_type'] = 'volume' - volume = ec2utils.get_db_item(context, ec2_id) - bdm['volume_id'] = volume['os_id'] - else: - # NOTE(ft): AWS returns undocumented - # InvalidSnapshotID.NotFound - raise exception.InvalidSnapshotIDMalformed( - snapshot_id=ec2_id) - if 'volume_size' in ebs: - bdm['volume_size'] 
= ebs['volume_size'] - if 'delete_on_termination' in ebs: - bdm['delete_on_termination'] = ebs['delete_on_termination'] - - # substitute a previous bdm which has the same device name - short_device_name = ec2utils.block_device_strip_dev(bdm['device_name']) - first_bdm, index = next( - ((m, i) for i, m in enumerate(bdms) - if (ec2utils.block_device_strip_dev(m['device_name']) == - short_device_name)), - (None, None)) - if first_bdm: - if bdm['device_name'] == first_bdm['device_name']: - bdms.pop(index) - else: - msg = _("The device '%s' is used in more than one " - "block-device mapping") % short_device_name - raise exception.InvalidBlockDeviceMapping(msg) - - bdms.append(bdm) - - return bdms - - -def _build_block_device_mapping(context, block_device_mapping, os_image): - mappings = _parse_block_device_mapping(context, block_device_mapping) - properties = ec2utils.deserialize_os_image_properties(os_image) - image_bdms = ec2utils.get_os_image_mappings(properties) - root_device_name = ( - ec2utils.block_device_properties_root_device_name(properties)) - short_root_device_name = ec2utils.block_device_strip_dev(root_device_name) - - # build a dict of image bmds to make the merge easier - # set some default values to a root bdm to simplify checks in mapping loop - image_bdm_dict = {} - for bdm in image_bdms: - if bdm.get('device_name'): - key = ec2utils.block_device_strip_dev(bdm['device_name']) - if key == short_root_device_name: - bdm.setdefault('boot_index', 0) - elif bdm.get('boot_index') == 0: - key = short_root_device_name - bdm.setdefault('device_name', root_device_name) - else: - continue - image_bdm_dict[key] = bdm - result = [] - - # convert mappings to be ready to pass in nova.servers.create - # and merge to them a corresponding image bdm if existing - # (because Nova only supports an overloading, but not the merging) - for bdm in mappings: - short_device_name = ec2utils.block_device_strip_dev(bdm['device_name']) - if short_device_name not in image_bdm_dict: - 
_populate_parsed_bdm_parameter(bdm, short_root_device_name) - else: - image_bdm = image_bdm_dict[short_device_name] - if bdm['device_name'] != image_bdm['device_name']: - raise exception.InvalidBlockDeviceMapping( - _("The device '%s' is used in more than one " - "block-device mapping") % short_device_name) - if (image_bdm.get('boot_index') == 0 and 'snapshot_id' in bdm and - bdm['snapshot_id'] != image_bdm.get('snapshot_id')): - raise exception.InvalidBlockDeviceMapping( - _('snapshotId cannot be modified on root device')) - if ('volume_size' in bdm and 'volume_size' in image_bdm and - bdm['volume_size'] < image_bdm['volume_size']): - raise exception.InvalidBlockDeviceMapping( - _("Volume of size %(bdm_size)dGB is smaller than expected " - "size %(image_bdm_size)dGB for '(device_name)s'") % - {'bdm_size': bdm['volume_size'], - 'image_bdm_size': image_bdm['volume_size'], - 'device_name': bdm['device_name']}) - - if bdm.get('snapshot_id'): - if 'snapshot_id' not in image_bdm: - raise exception.InvalidBlockDeviceMapping( - _('snapshotId can only be modified on EBS devices')) - - _populate_parsed_bdm_parameter(bdm, short_root_device_name) - else: - image_bdm = {k: v for k, v in image_bdm.items() - if v is not None} - image_bdm.update(bdm) - bdm = image_bdm - - # move source id to nova.servers.create related parameter - # NOTE(ft): safely extract source id, because we do not validate - # v2 image bdm, thus the bdm may be invalid and do not contain - # mandatory keys - source_type = bdm.get('source_type') - if source_type and source_type != 'blank': - uuid = bdm.pop('_'.join([source_type, 'id']), None) - bdm['uuid'] = uuid - - result.append(bdm) - - return result - - -def _populate_parsed_bdm_parameter(bdm, short_root_device_name): - bdm.setdefault('delete_on_termination', True) - bdm.setdefault('source_type', 'blank') - if (short_root_device_name == - ec2utils.block_device_strip_dev(bdm['device_name'])): - bdm['boot_index'] = 0 - else: - bdm['boot_index'] = -1 - - -def 
_format_group_set(context, os_security_groups, groups): - if not os_security_groups: - return [] - return [{'groupName': sg['name'], - 'groupId': groups[sg['name']]} - for sg in os_security_groups - if sg['name'] in groups] - - -def _get_groups_name_to_id(context): - # TODO(andrey-mp): remove filtering by vpcId=None when fitering - # by None will be implemented - return {g['groupName']: g['groupId'] - for g in (security_group_api.describe_security_groups(context) - ['securityGroupInfo']) - if not g.get('vpcId')} - - -def _get_ip_info_for_instance(os_instance): - addresses = list(itertools.chain(*os_instance.addresses.values())) - fixed_ip = next((addr['addr'] for addr in addresses - if (addr['version'] == 4 and - addr['OS-EXT-IPS:type'] == 'fixed')), None) - fixed_ip6 = next((addr['addr'] for addr in addresses - if (addr['version'] == 6 and - addr['OS-EXT-IPS:type'] == 'fixed')), None) - floating_ip = next((addr['addr'] for addr in addresses - if addr['OS-EXT-IPS:type'] == 'floating'), None) - return fixed_ip, fixed_ip6, floating_ip - - -def _foreach_instance(context, instance_ids, valid_states, func): - instances = ec2utils.get_db_items(context, 'i', instance_ids) - os_instances = _get_os_instances_by_instances(context, instances, - exactly=True) - for os_instance in os_instances: - if getattr(os_instance, 'OS-EXT-STS:vm_state') not in valid_states: - raise exception.IncorrectInstanceState( - instance_id=next(inst['id'] for inst in instances - if inst['os_id'] == os_instance.id)) - for os_instance in os_instances: - func(os_instance) - return True - - -def _get_os_instances_by_instances(context, instances, exactly=False, - nova=None): - nova = nova or clients.nova(context) - os_instances = [] - obsolete_instances = [] - for instance in instances: - try: - os_instances.append(nova.servers.get(instance['os_id'])) - except nova_exception.NotFound: - obsolete_instances.append(instance) - if obsolete_instances: - _remove_instances(context, obsolete_instances) - if 
exactly: - raise exception.InvalidInstanceIDNotFound( - id=obsolete_instances[0]['id']) - - return os_instances - - -def _get_os_flavors(context): - os_flavors = clients.nova(context).flavors.list() - return dict((f.id, f.name) for f in os_flavors) - - -def _get_os_volumes(context): - search_opts = ({'all_tenants': True, - 'project_id': context.project_id} - if context.is_os_admin else None) - os_volumes = collections.defaultdict(list) - cinder = clients.cinder(context) - for os_volume in cinder.volumes.list(search_opts=search_opts): - os_attachment = next(iter(os_volume.attachments), {}) - os_instance_id = os_attachment.get('server_id') - if os_instance_id: - os_volumes[os_instance_id].append(os_volume) - return os_volumes - - -def _get_os_flavor(instance_type, nova): - try: - if instance_type is None: - instance_type = CONF.default_flavor - os_flavor = next(f for f in nova.flavors.list() - if f.name == instance_type) - except StopIteration: - raise exception.InvalidParameterValue(value=instance_type, - parameter='InstanceType') - return os_flavor - - -def _is_ebs_instance(context, os_instance_id): - nova = clients.nova(ec2_context.get_os_admin_context()) - os_instance = nova.servers.get(os_instance_id) - root_device_name = getattr(os_instance, - 'OS-EXT-SRV-ATTR:root_device_name', None) - if not root_device_name: - return False - root_device_short_name = ec2utils.block_device_strip_dev( - root_device_name) - if root_device_name == root_device_short_name: - root_device_name = ec2utils.block_device_prepend_dev( - root_device_name) - for os_volume in _get_os_volumes(context)[os_instance_id]: - os_attachment = next(iter(os_volume.attachments), {}) - device_name = os_attachment.get('device') - if (device_name == root_device_name or - device_name == root_device_short_name): - return True - return False - - -def _generate_reservation_id(): - return _utils_generate_uid('r') - - -class InstanceEngineNeutron(object): - - def get_vpc_and_build_launch_context( - self, 
context, security_group, - subnet_id, private_ip_address, security_group_id, - network_interface, multiple_instances): - # TODO(ft): support auto_assign_floating_ip - - (security_group, - vpc_network_parameters) = self.merge_network_interface_parameters( - context, security_group, - subnet_id, private_ip_address, security_group_id, - network_interface) - - self.check_network_interface_parameters(vpc_network_parameters, - multiple_instances) - - (vpc_id, network_data) = self.parse_network_interface_parameters( - context, vpc_network_parameters) - launch_context = {'vpc_id': vpc_id, - 'network_data': network_data, - 'security_groups': security_group} - - # NOTE(ft): workaround for Launchpad Bug #1384347 in Icehouse - if not security_group and vpc_network_parameters: - launch_context['security_groups'] = ( - self.get_vpc_default_security_group_id(context, vpc_id)) - - if not vpc_id: - neutron = clients.neutron(context) - launch_context['ec2_classic_nics'] = [ - {'net-id': self.get_ec2_classic_os_network(context, - neutron)['id']}] - - return vpc_id, launch_context - - def get_launch_extra_parameters(self, context, cleaner, launch_context): - if 'ec2_classic_nics' in launch_context: - nics = launch_context['ec2_classic_nics'] - else: - network_data = launch_context['network_data'] - self.create_network_interfaces(context, cleaner, network_data) - nics = [{'port-id': data['network_interface']['os_id']} - for data in network_data] - return {'security_groups': launch_context['security_groups'], - 'nics': nics} - - def post_launch_action(self, context, cleaner, launch_context, - instance_id): - for data in launch_context['network_data']: - # TODO(ft): implement update items in DB layer to prevent - # record by record modification - # Alternatively a create_network_interface sub-function can - # set attach_time at once - network_interface_api._attach_network_interface_item( - context, data['network_interface'], instance_id, - data['device_index'], - 
delete_on_termination=data['delete_on_termination']) - cleaner.addCleanup( - network_interface_api._detach_network_interface_item, - context, data['network_interface']) - - def get_ec2_network_interfaces(self, context, instance_ids=None): - # NOTE(ft): we would be glad to use filters with this describe - # operation, but: - # 1. A selective filter by network interface IDs is improper because - # it leads to rising NotFound exception if at least one of specified - # network interfaces is obsolete. This is the legal case of describing - # an instance after its terminating. - # 2. A general filter by instance ID is unsupported now. - # 3. A general filter by network interface IDs leads to additional - # call of DB here to get corresponding network interfaces, but doesn't - # lead to decrease DB and OS throughtput in called describe operation. - enis = network_interface_api.describe_network_interfaces( - context)['networkInterfaceSet'] - ec2_network_interfaces = collections.defaultdict(list) - for eni in enis: - if (eni['status'] == 'in-use' and - (not instance_ids or - eni['attachment']['instanceId'] in instance_ids)): - ec2_network_interfaces[ - eni['attachment']['instanceId']].append(eni) - return ec2_network_interfaces - - def merge_network_interface_parameters(self, - context, - security_group_names, - subnet_id, - private_ip_address, - security_group_ids, - network_interfaces): - - if ((subnet_id or private_ip_address or security_group_ids or - security_group_names) and network_interfaces): - msg = _(' Network interfaces and an instance-level subnet ID or ' - 'private IP address or security groups may not be ' - 'specified on the same request') - raise exception.InvalidParameterCombination(msg) - - if network_interfaces: - if (CONF.disable_ec2_classic and - len(network_interfaces) == 1 and - # NOTE(tikitavi): the case in AWS CLI when security_group_ids - # and/or private_ip_address parameters are set with - # network_interface parameter having - # 
associate_public_ip_address setting - # private_ip_address and security_group_ids in that case - # go to network_interface parameter - 'associate_public_ip_address' in network_interfaces[0] and - 'device_index' in network_interfaces[0] and - network_interfaces[0]['device_index'] == 0 and - ('subnet_id' not in network_interfaces[0] or - 'network_interface_id' not in network_interfaces[0])): - - subnet_id = self.get_default_subnet(context)['id'] - network_interfaces[0]['subnet_id'] = subnet_id - return None, network_interfaces - elif subnet_id: - if security_group_names: - msg = _('The parameter groupName cannot be used with ' - 'the parameter subnet') - raise exception.InvalidParameterCombination(msg) - param = {'device_index': 0, - 'subnet_id': subnet_id} - if private_ip_address: - param['private_ip_address'] = private_ip_address - if security_group_ids: - param['security_group_id'] = security_group_ids - return None, [param] - elif CONF.disable_ec2_classic: - subnet_id = self.get_default_subnet(context)['id'] - param = {'device_index': 0, - 'subnet_id': subnet_id} - if security_group_ids or security_group_names: - security_group_id = security_group_ids or [] - if security_group_names: - security_groups = ( - security_group_api.describe_security_groups( - context, group_name=security_group_names) - ['securityGroupInfo']) - security_group_id.extend(sg['groupId'] - for sg in security_groups) - - param['security_group_id'] = security_group_id - if private_ip_address: - param['private_ip_address'] = private_ip_address - return None, [param] - elif private_ip_address: - msg = _('Specifying an IP address is only valid for VPC instances ' - 'and thus requires a subnet in which to launch') - raise exception.InvalidParameterCombination(msg) - elif security_group_ids: - msg = _('VPC security groups may not be used for a non-VPC launch') - raise exception.InvalidParameterCombination(msg) - else: - return security_group_names, [] - - def get_default_subnet(self, context): - 
default_vpc = ec2utils.get_default_vpc(context) - subnet = next( - (subnet for subnet in db_api.get_items(context, 'subnet') - if subnet['vpc_id'] == default_vpc['id']), None) - if not subnet: - raise exception.MissingInput( - _("No subnets found for the default VPC '%s'. " - "Please specify a subnet.") % default_vpc['id']) - return subnet - - def check_network_interface_parameters(self, params, multiple_instances): - # NOTE(ft): we ignore associate_public_ip_address - device_indexes = set() - for param in params: - if 'device_index' not in param: - msg = _('Each network interface requires a device index.') - raise exception.InvalidParameterValue(msg) - elif param['device_index'] in device_indexes: - msg = _('Each network interface requires a unique ' - 'device index.') - raise exception.InvalidParameterValue(msg) - device_indexes.add(param['device_index']) - ni_exists = 'network_interface_id' in param - subnet_exists = 'subnet_id' in param - ip_exists = 'private_ip_address' in param - if not ni_exists and not subnet_exists: - msg = _('Each network interface requires either a subnet or ' - 'a network interface ID.') - raise exception.InvalidParameterValue(msg) - if ni_exists and (subnet_exists or ip_exists or - param.get('security_group_id') or - param.get('delete_on_termination')): - param = (_('a subnet') if subnet_exists else - _('a private IP address') if ip_exists else - _('security groups') if param.get('security_group_id') - else _('delete on termination as true')) - msg = _('A network interface may not specify both a network ' - 'interface ID and %(param)s') % {'param': param} - raise exception.InvalidParameterCombination(msg) - if multiple_instances and (ni_exists or ip_exists): - msg = _('Multiple instances creation is not compatible with ' - 'private IP address or network interface ID ' - 'parameters.') - raise exception.InvalidParameterCombination(msg) - if params and 0 not in device_indexes: - msg = _('When specifying network interfaces, you must 
include ' - 'a device at index 0.') - raise exception.UnsupportedOperation(msg) - - def parse_network_interface_parameters(self, context, params): - vpc_ids = set() - network_interface_ids = set() - busy_network_interfaces = [] - network_data = [] - for param in params: - # TODO(ft): OpenStack doesn't support more than one port in a - # subnet for an instance, but AWS does it. - # We should check this before creating any object in OpenStack - if 'network_interface_id' in param: - ec2_eni_id = param['network_interface_id'] - if ec2_eni_id in network_interface_ids: - msg = _("Network interface ID '%(id)s' " - "may not be specified on multiple interfaces.") - msg = msg % {'id': ec2_eni_id} - raise exception.InvalidParameterValue(msg) - network_interface = ec2utils.get_db_item(context, ec2_eni_id, - 'eni') - if 'instance_id' in network_interface: - busy_network_interfaces.append(ec2_eni_id) - vpc_ids.add(network_interface['vpc_id']) - network_interface_ids.add(ec2_eni_id) - network_data.append({'device_index': param['device_index'], - 'network_interface': network_interface, - 'delete_on_termination': False}) - else: - subnet = ec2utils.get_db_item(context, param['subnet_id'], - 'subnet') - vpc_ids.add(subnet['vpc_id']) - args = copy.deepcopy(param) - delete_on_termination = args.pop('delete_on_termination', True) - args.pop('associate_public_ip_address', None) - network_data.append( - {'device_index': args.pop('device_index'), - 'create_args': (args.pop('subnet_id'), args), - 'delete_on_termination': delete_on_termination}) - - if busy_network_interfaces: - raise exception.InvalidNetworkInterfaceInUse( - interface_ids=busy_network_interfaces) - - if len(vpc_ids) > 1: - msg = _('Network interface attachments may not cross ' - 'VPC boundaries.') - raise exception.InvalidParameterValue(msg) - - # TODO(ft): a race condition can occure like using a network - # interface for an instance in parallel run_instances, or even - # deleting a network interface. 
We should lock such operations - - network_data.sort(key=lambda data: data['device_index']) - return (next(iter(vpc_ids), None), network_data) - - def create_network_interfaces(self, context, cleaner, network_data): - for data in network_data: - if 'create_args' not in data: - continue - (subnet_id, args) = data['create_args'] - ec2_network_interface = ( - network_interface_api.create_network_interface( - context, subnet_id, **args)['networkInterface']) - ec2_network_interface_id = ( - ec2_network_interface['networkInterfaceId']) - cleaner.addCleanup(network_interface_api.delete_network_interface, - context, - network_interface_id=ec2_network_interface_id) - # TODO(ft): receive network_interface from a - # create_network_interface sub-function - network_interface = db_api.get_item_by_id(context, - ec2_network_interface_id) - data['network_interface'] = network_interface - - def get_vpc_default_security_group_id(self, context, vpc_id): - default_groups = security_group_api.describe_security_groups( - context, - filter=[{'name': 'vpc-id', 'value': [vpc_id]}, - {'name': 'group-name', 'value': ['default']}] - )['securityGroupInfo'] - security_groups = db_api.get_items_by_ids( - context, [sg['groupId'] for sg in default_groups]) - return [sg['os_id'] for sg in security_groups] - - def get_ec2_classic_os_network(self, context, neutron): - os_subnet_ids = [eni['os_id'] - for eni in db_api.get_items(context, 'subnet')] - if os_subnet_ids: - os_subnets = neutron.list_subnets( - id=os_subnet_ids, fields=['network_id'], - tenant_id=context.project_id)['subnets'] - vpc_os_network_ids = set( - sn['network_id'] for sn in os_subnets) - else: - vpc_os_network_ids = [] - os_networks = neutron.list_networks( - **{'router:external': False, 'fields': ['id', 'name'], - 'tenant_id': context.project_id})['networks'] - ec2_classic_os_networks = [n for n in os_networks - if n['id'] not in vpc_os_network_ids and - not n.get('name').startswith('subnet-')] - if len(ec2_classic_os_networks) == 
0: - raise exception.Unsupported( - reason=_('There are no available networks ' - 'for EC2 Classic mode')) - if len(ec2_classic_os_networks) > 1: - raise exception.Unsupported( - reason=_('There is more than one available network ' - 'for EC2 Classic mode')) - return ec2_classic_os_networks[0] - - -instance_engine = get_instance_engine() - - -def _auto_create_instance_extension(context, instance, os_instance=None): - if not os_instance: - nova = clients.nova(ec2_context.get_os_admin_context()) - os_instance = nova.servers.get(instance['os_id']) - if hasattr(os_instance, 'OS-EXT-SRV-ATTR:reservation_id'): - instance['reservation_id'] = getattr(os_instance, - 'OS-EXT-SRV-ATTR:reservation_id') - instance['launch_index'] = getattr(os_instance, - 'OS-EXT-SRV-ATTR:launch_index') - else: - # NOTE(ft): partial compatibility with pre Kilo OS releases - instance['reservation_id'] = _generate_reservation_id() - instance['launch_index'] = 0 - - -ec2utils.register_auto_create_db_item_extension( - 'i', _auto_create_instance_extension) - - -# NOTE(ft): following functions are copied from various parts of Nova - -def _cloud_get_image_state(image): - state = image.status - if state == 'active': - state = 'available' - return getattr(image, 'image_state', state) - - -def _cloud_format_kernel_id(context, os_instance, image_ids=None): - os_kernel_id = getattr(os_instance, 'OS-EXT-SRV-ATTR:kernel_id', None) - if os_kernel_id is None or os_kernel_id == '': - return - return ec2utils.os_id_to_ec2_id(context, 'aki', os_kernel_id, - ids_by_os_id=image_ids) - - -def _cloud_format_ramdisk_id(context, os_instance, image_ids=None): - os_ramdisk_id = getattr(os_instance, 'OS-EXT-SRV-ATTR:ramdisk_id', None) - if os_ramdisk_id is None or os_ramdisk_id == '': - return - return ec2utils.os_id_to_ec2_id(context, 'ari', os_ramdisk_id, - ids_by_os_id=image_ids) - - -def _cloud_format_instance_type(context, os_instance): - return clients.nova(context).flavors.get(os_instance.flavor['id']).name - - -def 
_cloud_state_description(vm_state): - """Map the vm state to the server status string.""" - # Note(maoy): We do not provide EC2 compatibility - # in shutdown_terminate flag behavior. So we ignore - # it here. - name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) - - return {'code': inst_state_name_to_code(name), - 'name': name} - - -def _cloud_format_instance_bdm(context, os_instance, result, - volumes=None, os_volumes=None): - """Format InstanceBlockDeviceMappingResponseItemType.""" - root_device_name = getattr(os_instance, - 'OS-EXT-SRV-ATTR:root_device_name', None) - if not root_device_name: - root_device_short_name = root_device_type = None - else: - root_device_type = 'instance-store' - root_device_short_name = ec2utils.block_device_strip_dev( - root_device_name) - if root_device_name == root_device_short_name: - root_device_name = ec2utils.block_device_prepend_dev( - root_device_name) - mapping = [] - if os_volumes is None: - os_volumes = _get_os_volumes(context) - # NOTE(ft): Attaching volumes are not reported, because Cinder - # volume doesn't yet contain attachment info at this stage, but Nova v2.3 - # instance volumes_attached doesn't contain a device name. - # But a bdm must contain the last one. 
- volumes_attached = getattr(os_instance, - 'os-extended-volumes:volumes_attached', []) - for os_volume in os_volumes[os_instance.id]: - os_attachment = next(iter(os_volume.attachments), {}) - device_name = os_attachment.get('device') - if not device_name: - continue - if (device_name == root_device_name or - device_name == root_device_short_name): - root_device_type = 'ebs' - - volume = ec2utils.get_db_item_by_os_id(context, 'vol', os_volume.id, - volumes) - # TODO(yamahata): volume attach time - ebs = {'volumeId': volume['id'], - 'status': _cloud_get_volume_attach_status(os_volume)} - volume_attached = next((va for va in volumes_attached - if va['id'] == os_volume.id), None) - if volume_attached and 'delete_on_termination' in volume_attached: - ebs['deleteOnTermination'] = ( - volume_attached['delete_on_termination']) - mapping.append({'deviceName': device_name, - 'ebs': ebs}) - - if mapping: - result['blockDeviceMapping'] = mapping - if root_device_type: - result['rootDeviceType'] = root_device_type - - -def _cloud_get_volume_attach_status(volume): - if volume.status == 'reserved': - # 'reserved' state means that volume will be attached later - return 'attaching' - if volume.status in ('attaching', 'detaching'): - return volume.status - elif volume.attachments: - return 'attached' - else: - return 'detached' - - -def _utils_generate_uid(topic, size=8): - characters = '01234567890abcdefghijklmnopqrstuvwxyz' - choices = [random.choice(characters) for _x in range(size)] - return '%s-%s' % (topic, ''.join(choices)) - - -# NOTE(ft): nova/compute/vm_states.py - -"""Possible vm states for instances. - -Compute instance vm states represent the state of an instance as it pertains to -a user or administrator. - -vm_state describes a VM's current stable (not transition) state. That is, if -there is no ongoing compute API calls (running tasks), vm_state should reflect -what the customer expect the VM to be. 
When combined with task states -(task_states.py), a better picture can be formed regarding the instance's -health and progress. - -See http://wiki.openstack.org/VMState -""" - -vm_states_ACTIVE = 'active' # VM is running -vm_states_BUILDING = 'building' # VM only exists in DB -vm_states_PAUSED = 'paused' -vm_states_SUSPENDED = 'suspended' # VM is suspended to disk. -vm_states_STOPPED = 'stopped' # VM is powered off, the disk image is still -# there. -vm_states_RESCUED = 'rescued' # A rescue image is running with the original VM -# image attached. -vm_states_RESIZED = 'resized' # a VM with the new size is active. The user is -# expected to manually confirm or revert. - -vm_states_SOFT_DELETED = 'soft-delete' # VM is marked as deleted but the disk -# images are still available to restore. -vm_states_DELETED = 'deleted' # VM is permanently deleted. - -vm_states_ERROR = 'error' - -vm_states_SHELVED = 'shelved' # VM is powered off, resources still on -# hypervisor -vm_states_SHELVED_OFFLOADED = 'shelved_offloaded' # VM and associated -# resources are not on hypervisor - -vm_states_WIPED_OUT = 'wiped_out' # Artificial state, added for state -# of VM which was just deleted and is not reported by OpenStack anymore. 
- -vm_states_ALLOW_SOFT_REBOOT = [vm_states_ACTIVE] # states we can soft reboot -# from -vm_states_ALLOW_HARD_REBOOT = ( - vm_states_ALLOW_SOFT_REBOOT + - [vm_states_STOPPED, vm_states_PAUSED, vm_states_SUSPENDED, - vm_states_ERROR]) -# states we allow hard reboot from - -# NOTE(ft): end of nova/compute/vm_states.py - -# NOTE(ft): nova/api/ec2/inst_states.py - -inst_state_PENDING_CODE = 0 -inst_state_RUNNING_CODE = 16 -inst_state_SHUTTING_DOWN_CODE = 32 -inst_state_TERMINATED_CODE = 48 -inst_state_STOPPING_CODE = 64 -inst_state_STOPPED_CODE = 80 - -inst_state_PENDING = 'pending' -inst_state_RUNNING = 'running' -inst_state_SHUTTING_DOWN = 'shutting-down' -inst_state_TERMINATED = 'terminated' -inst_state_STOPPING = 'stopping' -inst_state_STOPPED = 'stopped' - -# non-ec2 value -inst_state_MIGRATE = 'migrate' -inst_state_RESIZE = 'resize' -inst_state_PAUSE = 'pause' -inst_state_SUSPEND = 'suspend' -inst_state_RESCUE = 'rescue' - -# EC2 API instance status code -_NAME_TO_CODE = { - inst_state_PENDING: inst_state_PENDING_CODE, - inst_state_RUNNING: inst_state_RUNNING_CODE, - inst_state_SHUTTING_DOWN: inst_state_SHUTTING_DOWN_CODE, - inst_state_TERMINATED: inst_state_TERMINATED_CODE, - inst_state_STOPPING: inst_state_STOPPING_CODE, - inst_state_STOPPED: inst_state_STOPPED_CODE, - - # approximation - inst_state_MIGRATE: inst_state_RUNNING_CODE, - inst_state_RESIZE: inst_state_RUNNING_CODE, - inst_state_PAUSE: inst_state_STOPPED_CODE, - inst_state_SUSPEND: inst_state_STOPPED_CODE, - inst_state_RESCUE: inst_state_RUNNING_CODE, -} -_CODE_TO_NAMES = {code: [item[0] for item in _NAME_TO_CODE.items() - if item[1] == code] - for code in set(_NAME_TO_CODE.values())} - - -def inst_state_name_to_code(name): - return _NAME_TO_CODE.get(name, inst_state_PENDING_CODE) - - -# NOTE(ft): end of nova/api/ec2/inst_state.py - -# EC2 API can return the following values as documented in the EC2 API -# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ -# 
ApiReference-ItemType-InstanceStateType.html -# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 | -# stopped 80 -_STATE_DESCRIPTION_MAP = { - None: inst_state_PENDING, - vm_states_ACTIVE: inst_state_RUNNING, - vm_states_BUILDING: inst_state_PENDING, - vm_states_DELETED: inst_state_SHUTTING_DOWN, - vm_states_SOFT_DELETED: inst_state_SHUTTING_DOWN, - vm_states_STOPPED: inst_state_STOPPED, - vm_states_PAUSED: inst_state_PAUSE, - vm_states_SUSPENDED: inst_state_SUSPEND, - vm_states_RESCUED: inst_state_RESCUE, - vm_states_RESIZED: inst_state_RESIZE, - vm_states_WIPED_OUT: inst_state_TERMINATED -} diff --git a/ec2api/api/internet_gateway.py b/ec2api/api/internet_gateway.py deleted file mode 100644 index f4e4c1bf..00000000 --- a/ec2api/api/internet_gateway.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Cloud Controller: Implementation of EC2 REST API calls, which are -dispatched to other nodes via AMQP RPC. State is via distributed -datastore. 
-""" - -from neutronclient.common import exceptions as neutron_exception -from oslo_log import log as logging - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api import clients -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - -LOG = logging.getLogger(__name__) - -"""Internet gateway related API implementation -""" - - -Validator = common.Validator - - -def create_internet_gateway(context): - igw = db_api.add_item(context, 'igw', {}) - return {'internetGateway': _format_internet_gateway(igw)} - - -def attach_internet_gateway(context, internet_gateway_id, vpc_id): - igw = ec2utils.get_db_item(context, internet_gateway_id) - if igw.get('vpc_id'): - msg_params = {'igw_id': igw['id'], - 'vpc_id': igw['vpc_id']} - msg = _('resource %(igw_id)s is already attached to ' - 'network %(vpc_id)s') % msg_params - raise exception.ResourceAlreadyAssociated(msg) - vpc = ec2utils.get_db_item(context, vpc_id) - if ec2utils.get_attached_gateway(context, vpc['id'], 'igw'): - msg = _('Network %(vpc_id)s already has an internet gateway ' - 'attached') % {'vpc_id': vpc['id']} - raise exception.InvalidParameterValue(msg) - - external_network_id = None - if not ec2utils.get_attached_gateway(context, vpc['id'], 'vgw'): - external_network_id = ec2utils.get_os_public_network(context)['id'] - neutron = clients.neutron(context) - - # TODO(ft): set attaching state into db - with common.OnCrashCleaner() as cleaner: - _attach_internet_gateway_item(context, igw, vpc['id']) - cleaner.addCleanup(_detach_internet_gateway_item, context, igw) - if external_network_id: - neutron.add_gateway_router(vpc['os_id'], - {'network_id': external_network_id}) - return True - - -def detach_internet_gateway(context, internet_gateway_id, vpc_id): - igw = ec2utils.get_db_item(context, internet_gateway_id) - vpc = ec2utils.get_db_item(context, vpc_id) - if igw.get('vpc_id') != vpc['id']: - raise exception.GatewayNotAttached(gw_id=igw['id'], - 
vpc_id=vpc['id']) - - remove_os_gateway_router = ( - ec2utils.get_attached_gateway(context, vpc_id, 'vgw') is None) - neutron = clients.neutron(context) - # TODO(ft): set detaching state into db - with common.OnCrashCleaner() as cleaner: - _detach_internet_gateway_item(context, igw) - cleaner.addCleanup(_attach_internet_gateway_item, - context, igw, vpc['id']) - if remove_os_gateway_router: - try: - neutron.remove_gateway_router(vpc['os_id']) - except neutron_exception.NotFound: - pass - return True - - -def delete_internet_gateway(context, internet_gateway_id): - igw = ec2utils.get_db_item(context, internet_gateway_id) - if igw.get('vpc_id'): - msg = _("The internetGateway '%(igw_id)s' has dependencies and " - "cannot be deleted.") % {'igw_id': igw['id']} - raise exception.DependencyViolation(msg) - db_api.delete_item(context, igw['id']) - return True - - -class InternetGatewayDescriber(common.TaggableItemsDescriber, - common.NonOpenstackItemsDescriber): - - KIND = 'igw' - FILTER_MAP = {'internet-gateway-id': 'internetGatewayId', - 'attachment.state': ['attachmentSet', 'state'], - 'attachment.vpc-id': ['attachmentSet', 'vpcId']} - - def format(self, igw): - return _format_internet_gateway(igw) - - -def describe_internet_gateways(context, internet_gateway_id=None, - filter=None): - ec2utils.check_and_create_default_vpc(context) - formatted_igws = InternetGatewayDescriber().describe( - context, ids=internet_gateway_id, filter=filter) - return {'internetGatewaySet': formatted_igws} - - -def _format_internet_gateway(igw): - ec2_igw = {'internetGatewayId': igw['id'], - 'attachmentSet': []} - if igw.get('vpc_id'): - # NOTE(ft): AWS actually returns 'available' state rather than - # documented 'attached' one - attachment_state = 'available' - attachment = {'vpcId': igw['vpc_id'], - 'state': attachment_state} - ec2_igw['attachmentSet'].append(attachment) - return ec2_igw - - -def _attach_internet_gateway_item(context, igw, vpc_id): - igw['vpc_id'] = vpc_id - 
db_api.update_item(context, igw) - - -def _detach_internet_gateway_item(context, igw): - igw['vpc_id'] = None - db_api.update_item(context, igw) diff --git a/ec2api/api/key_pair.py b/ec2api/api/key_pair.py deleted file mode 100644 index d95d1559..00000000 --- a/ec2api/api/key_pair.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 - -from cryptography.hazmat import backends -from cryptography.hazmat.primitives.asymmetric import rsa -from cryptography.hazmat.primitives import serialization as crypt_serialization -from novaclient import exceptions as nova_exception -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api.api import common -from ec2api import clients -from ec2api import exception -from ec2api.i18n import _ - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -"""Keypair-object related API implementation -""" - - -Validator = common.Validator - - -class KeyPairDescriber(common.UniversalDescriber): - - KIND = 'kp' - FILTER_MAP = {'fingerprint': 'keyFingerprint', - 'key-name': 'keyName'} - - def format(self, _item, key_pair): - return _format_key_pair(key_pair) - - def get_db_items(self): - return [] - - def get_os_items(self): - # Original EC2 in nova filters out vpn keys for admin user. - # We're not filtering out the vpn keys for now. 
- # In order to implement this we'd have to configure vpn_key_suffix - # in our config which we consider an overkill. - # suffix = CONF.vpn_key_suffix - # if context.is_admin or not key_pair['name'].endswith(suffix): - nova = clients.nova(self.context) - return nova.keypairs.list() - - def auto_update_db(self, item, os_item): - pass - - def get_id(self, os_item): - return '' - - def get_name(self, key_pair): - return key_pair.name - - -def describe_key_pairs(context, key_name=None, filter=None): - formatted_key_pairs = KeyPairDescriber().describe(context, names=key_name, - filter=filter) - return {'keySet': formatted_key_pairs} - - -def _validate_name(name): - if len(name) > 255: - raise exception.InvalidParameterValue( - value=name, - parameter='KeyName', - reason='lenght is exceeds maximum of 255') - - -# We may wish to make the algorithm configurable. This would require API -# changes. -def _generate_key_pair(): - key = rsa.generate_private_key( - backend=backends.default_backend(), - public_exponent=65537, - key_size=2048 - ) - private_key = key.private_bytes( - crypt_serialization.Encoding.PEM, - crypt_serialization.PrivateFormat.TraditionalOpenSSL, - crypt_serialization.NoEncryption(), - ).decode() - public_key = key.public_key().public_bytes( - crypt_serialization.Encoding.OpenSSH, - crypt_serialization.PublicFormat.OpenSSH, - ).decode() - return private_key, public_key - - -def create_key_pair(context, key_name): - _validate_name(key_name) - nova = clients.nova(context) - private_key, public_key = _generate_key_pair() - try: - key_pair = nova.keypairs.create(key_name, public_key) - except nova_exception.OverLimit: - raise exception.ResourceLimitExceeded(resource='keypairs') - except nova_exception.Conflict: - raise exception.InvalidKeyPairDuplicate(key_name=key_name) - formatted_key_pair = _format_key_pair(key_pair) - formatted_key_pair['keyMaterial'] = private_key - return formatted_key_pair - - -def import_key_pair(context, key_name, public_key_material): 
- _validate_name(key_name) - if not public_key_material: - raise exception.MissingParameter( - _('The request must contain the parameter PublicKeyMaterial')) - nova = clients.nova(context) - public_key = base64.b64decode(public_key_material).decode("utf-8") - try: - key_pair = nova.keypairs.create(key_name, public_key) - except nova_exception.OverLimit: - raise exception.ResourceLimitExceeded(resource='keypairs') - except nova_exception.Conflict: - raise exception.InvalidKeyPairDuplicate(key_name=key_name) - return _format_key_pair(key_pair) - - -def delete_key_pair(context, key_name): - nova = clients.nova(context) - try: - nova.keypairs.delete(key_name) - except nova_exception.NotFound: - # aws returns true even if the key doesn't exist - pass - return True - - -def _format_key_pair(key_pair): - return {'keyName': key_pair.name, - 'keyFingerprint': key_pair.fingerprint - } diff --git a/ec2api/api/network_interface.py b/ec2api/api/network_interface.py deleted file mode 100644 index 53223c06..00000000 --- a/ec2api/api/network_interface.py +++ /dev/null @@ -1,586 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
import collections

import netaddr
from neutronclient.common import exceptions as neutron_exception
from oslo_config import cfg
from oslo_log import log as logging

from ec2api.api import address as address_api
from ec2api.api import common
from ec2api.api import dhcp_options
from ec2api.api import ec2utils
from ec2api.api import security_group as security_group_api
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


"""Network interface related API implementation
"""


Validator = common.Validator


def create_network_interface(context, subnet_id,
                             private_ip_address=None,
                             private_ip_addresses=None,
                             secondary_private_ip_address_count=None,
                             description=None,
                             security_group_id=None,
                             client_token=None):
    """Create a neutron-port-backed network interface in a subnet.

    Handles idempotency via client_token, primary/secondary private IP
    selection, default security group lookup and DHCP options
    inheritance from the VPC.
    """
    # Idempotent create: a repeated client token returns the interface
    # created by the first call.
    if client_token:
        existing = describe_network_interfaces(
            context,
            filter=[{'name': 'client-token', 'value': [client_token]}])
        if existing['networkInterfaceSet']:
            if len(existing['networkInterfaceSet']) > 1:
                LOG.error('describe_network_interfaces returns %s '
                          'network_interfaces, but 1 is expected.',
                          len(existing['networkInterfaceSet']))
                LOG.error('Requested client token: %s', client_token)
                LOG.error('Result: %s', existing)
            return existing['networkInterfaceSet'][0]

    subnet = ec2utils.get_db_item(context, subnet_id)
    if subnet is None:
        raise exception.InvalidSubnetIDNotFound(id=subnet_id)
    neutron = clients.neutron(context)
    os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
    # NOTE(Alex): Combine and check ip addresses. Neutron will accept
    # ip_address as a parameter for specified address and subnet_id for
    # address to auto-allocate.
    # TODO(Alex): Implement better diagnostics.
    subnet_cidr = netaddr.IPNetwork(os_subnet['cidr'])
    if not private_ip_addresses:
        private_ip_addresses = []
    if private_ip_address is not None:
        private_ip_addresses.insert(0,
                                    {'private_ip_address': private_ip_address,
                                     'primary': True})
    primary_ip = None
    fixed_ips = []
    for ip_spec in private_ip_addresses:
        addr = netaddr.IPAddress(ip_spec['private_ip_address'])
        if addr not in subnet_cidr:
            raise exception.InvalidParameterValue(
                value=str(addr),
                parameter='PrivateIpAddresses',
                reason='IP address is out of the subnet range')
        if not ip_spec.get('primary', False):
            fixed_ips.append({'ip_address': str(addr)})
        elif primary_ip is not None:
            raise exception.InvalidParameterValue(
                value=str(addr),
                parameter='PrivateIpAddresses',
                reason='More than one primary ip is supplied')
        else:
            # The primary address goes first in the port's fixed ips.
            primary_ip = str(addr)
            fixed_ips.insert(0, {'ip_address': primary_ip})
    if not fixed_ips and not secondary_private_ip_address_count:
        # Nothing was requested explicitly: auto-allocate one address.
        secondary_private_ip_address_count = 1
    if secondary_private_ip_address_count is None:
        secondary_private_ip_address_count = 0
    for _i in range(secondary_private_ip_address_count):
        fixed_ips.append({'subnet_id': os_subnet['id']})

    vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
    vpc_id = vpc['id']
    dhcp_options_id = vpc.get('dhcp_options_id', None)
    if not security_group_id:
        # Fall back to the VPC's default security group.
        default_groups = security_group_api.describe_security_groups(
            context,
            filter=[{'name': 'vpc-id', 'value': [vpc_id]},
                    {'name': 'group-name', 'value': ['default']}]
        )['securityGroupInfo']
        security_group_id = [default_group['groupId']
                             for default_group in default_groups]
    security_groups = db_api.get_items_by_ids(context, security_group_id)
    if any(security_group['vpc_id'] != vpc['id']
           for security_group in security_groups):
        msg = _('You have specified two resources that belong to '
                'different networks.')
        raise exception.InvalidGroupNotFound(msg)
    os_groups = [security_group['os_id']
                 for security_group in security_groups]

    with common.OnCrashCleaner() as cleaner:
        port_request = {'port': {'network_id': os_subnet['network_id'],
                                 'security_groups': os_groups,
                                 'fixed_ips': fixed_ips}}
        try:
            os_port = neutron.create_port(port_request)['port']
        except (neutron_exception.IpAddressGenerationFailureClient,
                neutron_exception.OverQuotaClient):
            raise exception.InsufficientFreeAddressesInSubnet()
        except (neutron_exception.IpAddressInUseClient,
                neutron_exception.BadRequest) as ex:
            # NOTE(ft): AWS returns InvalidIPAddress.InUse for a primary IP
            # address, but InvalidParameterValue for secondary one.
            # AWS returns PrivateIpAddressLimitExceeded, but Neutron does
            # general InvalidInput (converted to BadRequest) in the same case.
            msg = _('Specified network interface parameters are invalid. '
                    'Reason: %(reason)s') % {'reason': ex.message}
            raise exception.InvalidParameterValue(msg)
        cleaner.addCleanup(neutron.delete_port, os_port['id'])

        if primary_ip is None:
            primary_ip = os_port['fixed_ips'][0]['ip_address']
        network_interface = db_api.add_item(
            context, 'eni',
            {'os_id': os_port['id'],
             'vpc_id': subnet['vpc_id'],
             'subnet_id': subnet['id'],
             'description': description,
             'private_ip_address': primary_ip})
        cleaner.addCleanup(db_api.delete_item,
                           context, network_interface['id'])

        neutron.update_port(os_port['id'],
                            {'port': {'name': network_interface['id']}})
        if dhcp_options_id:
            dhcp_options._add_dhcp_opts_to_port(
                context,
                db_api.get_item_by_id(context, dhcp_options_id),
                network_interface,
                os_port)
    security_groups = security_group_api._format_security_groups_ids_names(
        context)
    return {'networkInterface':
            _format_network_interface(context,
                                      network_interface,
                                      os_port,
                                      security_groups=security_groups)}
def delete_network_interface(context, network_interface_id):
    """Delete a detached network interface and its neutron port.

    Any elastic IP associations are removed first; an attached
    interface cannot be deleted.
    """
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    if 'instance_id' in network_interface:
        msg = _("Network interface '%(eni_id)s' is currently in use.")
        raise exception.InvalidParameterValue(
            msg % {'eni_id': network_interface_id})

    # Drop any elastic IP associations pointing at this interface.
    for address in db_api.get_items(context, 'eipalloc'):
        if address.get('network_interface_id') == network_interface['id']:
            address_api._disassociate_address_item(context, address)

    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, network_interface['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'eni',
                           network_interface)
        try:
            neutron.delete_port(network_interface['os_id'])
        except neutron_exception.PortNotFoundClient:
            pass
    return True


class NetworkInterfaceDescriber(common.TaggableItemsDescriber):
    """Describer pairing 'eni' DB items with their neutron ports."""

    KIND = 'eni'
    FILTER_MAP = {'addresses.private-ip-address': ['privateIpAddressesSet',
                                                  'privateIpAddress'],
                  'addresses.primary': ['privateIpAddressesSet', 'primary'],
                  'addresses.association.public-ip': ['privateIpAddressesSet',
                                                      ('association',
                                                       'publicIp')],
                  'addresses.association.owner-id': ['privateIpAddressesSet',
                                                     ('association',
                                                      'ipOwnerId')],
                  'association.association-id': ('association',
                                                 'associationId'),
                  'association.allocation-id': ('association',
                                                'allocationId'),
                  'association.ip-owner-id': ('association', 'ipOwnerId'),
                  'association.public-ip': ('association', 'publicIp'),
                  'attachment.attachment-id': ('attachment', 'attachmentId'),
                  'attachment.instance-id': ('attachment', 'instanceId'),
                  'attachment.instance-owner-id': ('attachment',
                                                   'instanceOwnerId'),
                  'attachment.device-index': ('attachment', 'deviceIndex'),
                  'attachment.status': ('attachment', 'status'),
                  'attachment.attach.time': ('attachment', 'attachTime'),
                  'attachment.delete-on-termination': ('attachment',
                                                       'deleteOnTermination'),
                  'client-token': 'clientToken',
                  'description': 'description',
                  'group-id': ['groupSet', 'groupId'],
                  'group-name': ['groupSet', 'groupName'],
                  'mac-address': 'macAddress',
                  'network-interface-id': 'networkInterfaceId',
                  'owner-id': 'ownerId',
                  'private-ip-address': 'privateIpAddress',
                  'requester-managed': 'requesterManaged',
                  'source-dest-check': 'sourceDestCheck',
                  'status': 'status',
                  'vpc-id': 'vpcId',
                  'subnet-id': 'subnetId'}

    def format(self, network_interface, os_port):
        if not network_interface:
            return None
        return _format_network_interface(
            self.context, network_interface, os_port,
            self.ec2_addresses[network_interface['id']],
            self.security_groups)

    def get_os_items(self):
        # Cache elastic IP associations and security group names once
        # per describe operation before listing the ports.
        addresses = address_api.describe_addresses(self.context)
        self.ec2_addresses = collections.defaultdict(list)
        for address in addresses['addressesSet']:
            if 'networkInterfaceId' in address:
                self.ec2_addresses[
                    address['networkInterfaceId']].append(address)
        self.security_groups = (
            security_group_api._format_security_groups_ids_names(self.context))
        neutron = clients.neutron(self.context)
        return neutron.list_ports(tenant_id=self.context.project_id)['ports']

    def get_name(self, os_item):
        # Neutron ports have no EC2-style name.
        return ''


def describe_network_interfaces(context, network_interface_id=None,
                                filter=None):
    """List network interfaces, optionally restricted by ids or filter."""
    describer = NetworkInterfaceDescriber()
    formatted = describer.describe(context, ids=network_interface_id,
                                   filter=filter)
    return {'networkInterfaceSet': formatted}
def assign_private_ip_addresses(context, network_interface_id,
                                private_ip_address=None,
                                secondary_private_ip_address_count=None,
                                allow_reassignment=False):
    """Add secondary private IPs to a network interface.

    Either an explicit list of addresses or a count of auto-allocated
    addresses may be supplied.
    """
    # TODO(Alex): allow_reassignment is not supported at the moment
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    subnet = db_api.get_item_by_id(context, network_interface['subnet_id'])
    neutron = clients.neutron(context)
    os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
    os_port = neutron.show_port(network_interface['os_id'])['port']
    subnet_cidr = netaddr.IPNetwork(os_subnet['cidr'])
    fixed_ips = os_port['fixed_ips'] or []
    if private_ip_address is not None:
        for requested in private_ip_address:
            if netaddr.IPAddress(requested) not in subnet_cidr:
                raise exception.InvalidParameterValue(
                    value=str(requested),
                    parameter='PrivateIpAddress',
                    reason='IP address is out of the subnet range')
            fixed_ips.append({'ip_address': str(requested)})
    elif secondary_private_ip_address_count > 0:
        fixed_ips.extend({'subnet_id': os_subnet['id']}
                         for _i in range(secondary_private_ip_address_count))
    try:
        neutron.update_port(os_port['id'],
                            {'port': {'fixed_ips': fixed_ips}})
    except neutron_exception.IpAddressGenerationFailureClient:
        raise exception.InsufficientFreeAddressesInSubnet()
    except neutron_exception.IpAddressInUseClient:
        msg = _('Some of %(addresses)s is assigned, but move is not '
                'allowed.') % {'addresses': private_ip_address}
        raise exception.InvalidParameterValue(msg)
    except neutron_exception.BadRequest as ex:
        # NOTE(ft):AWS returns PrivateIpAddressLimitExceeded, but Neutron does
        # general InvalidInput (converted to BadRequest) in the same case.
        msg = _('Specified network interface parameters are invalid. '
                'Reason: %(reason)s') % {'reason': ex.message}
        raise exception.InvalidParameterValue(msg)
    return True


def unassign_private_ip_addresses(context, network_interface_id,
                                  private_ip_address):
    """Remove secondary private IPs from a network interface.

    The primary address can never be removed.
    """
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    if network_interface['private_ip_address'] in private_ip_address:
        raise exception.InvalidParameterValue(
            value=str(network_interface['private_ip_address']),
            parameter='PrivateIpAddresses',
            reason='Primary IP address cannot be unassigned')
    neutron = clients.neutron(context)
    os_port = neutron.show_port(network_interface['os_id'])['port']
    fixed_ips = os_port['fixed_ips'] or []
    remaining = [ip for ip in fixed_ips
                 if ip['ip_address'] not in private_ip_address]
    # Every requested address must actually be assigned right now.
    if len(remaining) + len(private_ip_address) != len(fixed_ips):
        msg = _('Some of the specified addresses are not assigned to '
                'interface %(id)s') % {'id': network_interface_id}
        raise exception.InvalidParameterValue(msg)
    neutron.update_port(os_port['id'],
                        {'port': {'fixed_ips': remaining}})
    return True
def describe_network_interface_attribute(context, network_interface_id,
                                         attribute=None):
    """Return a single attribute of a network interface.

    Supported attributes: description, sourceDestCheck, groupSet,
    attachment.
    """
    if attribute is None:
        raise exception.InvalidParameterCombination(
            _('No attributes specified.'))
    network_interface = ec2utils.get_db_item(context, network_interface_id)

    def _describe_formatted():
        # groupSet/attachment require the fully formatted representation.
        return describe_network_interfaces(
            context,
            network_interface_id=[network_interface_id]
        )['networkInterfaceSet'][0]

    def _fill_description(result):
        result['description'] = {
            'value': network_interface.get('description', '')}

    def _fill_source_dest_check(result):
        result['sourceDestCheck'] = {
            'value': network_interface.get('source_dest_check', True)}

    def _fill_group_set(result):
        result['groupSet'] = _describe_formatted()['groupSet']

    def _fill_attachment(result):
        ec2_network_interface = _describe_formatted()
        if 'attachment' in ec2_network_interface:
            result['attachment'] = ec2_network_interface['attachment']

    attribute_formatter = {
        'description': _fill_description,
        'sourceDestCheck': _fill_source_dest_check,
        'groupSet': _fill_group_set,
        'attachment': _fill_attachment,
    }

    fn = attribute_formatter.get(attribute)
    if fn is None:
        raise exception.InvalidParameterValue(value=attribute,
                                              parameter='attribute',
                                              reason='Unknown attribute.')

    result = {'networkInterfaceId': network_interface['id']}
    fn(result)
    return result


def modify_network_interface_attribute(context, network_interface_id,
                                       description=None,
                                       source_dest_check=None,
                                       security_group_id=None,
                                       attachment=None):
    """Modify exactly one attribute of a network interface."""
    specified = [param is not None
                 for param in (description, source_dest_check,
                               security_group_id, attachment)]
    if sum(specified) != 1:
        raise exception.InvalidParameterCombination(
            'Multiple attributes specified')
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    if description is not None:
        network_interface['description'] = description
        db_api.update_item(context, network_interface)
    neutron = clients.neutron(context)
    if security_group_id is not None:
        os_groups = [sg['os_id']
                     for sg in ec2utils.get_db_items(context, 'sg',
                                                     security_group_id)]
        neutron.update_port(network_interface['os_id'],
                            {'port': {'security_groups': os_groups}})
    if source_dest_check is not None:
        # Source/dest check is emulated via allowed address pairs:
        # disabling it lets any address pass through the port.
        allowed = [] if source_dest_check else [{'ip_address': '0.0.0.0/0'}]
        neutron.update_port(network_interface['os_id'],
                            {'port': {'allowed_address_pairs': allowed}})
        network_interface['source_dest_check'] = source_dest_check
        db_api.update_item(context, network_interface)
    if attachment:
        attachment_id = attachment.get('attachment_id')
        delete_on_termination = attachment.get('delete_on_termination')
        if attachment_id is None or delete_on_termination is None:
            raise exception.MissingParameter(
                _('The request must contain the parameter attachment '
                  'deleteOnTermination'))
        attachment_id_own = ec2utils.change_ec2_id_kind(
            network_interface['id'], 'eni-attach')
        if ('instance_id' not in network_interface
                or attachment_id_own != attachment_id):
            raise exception.InvalidAttachmentIDNotFound(id=attachment_id)
        network_interface['delete_on_termination'] = delete_on_termination
        db_api.update_item(context, network_interface)
    return True


def reset_network_interface_attribute(context, network_interface_id,
                                      attribute):
    # TODO(Alex) This is only a stub because it's not supported by
    # Openstack. True will be returned for now in any case.
    # NOTE(Alex) There is a bug in the AWS doc about this method -
    # "sourceDestCheck" should be used instead of "SourceDestCheck".
    # Also aws cli doesn't work with it because it doesn't comply with
    # the API.
    if attribute == 'sourceDestCheck':
        return modify_network_interface_attribute(context,
                                                  network_interface_id,
                                                  source_dest_check=True)
    return True
" - "device index '%(index)s'.") % {'id': instance_id, - 'index': device_index}) - neutron = clients.neutron(context) - os_port = neutron.show_port(network_interface['os_id'])['port'] - nova = clients.nova(context) - with common.OnCrashCleaner() as cleaner: - # TODO(Alex) nova inserts compute:%availability_zone into device_owner - # 'device_owner': 'compute:None'}}) - _attach_network_interface_item(context, network_interface, - instance_id, device_index) - cleaner.addCleanup(_detach_network_interface_item, context, - network_interface) - nova.servers.interface_attach(os_instance_id, os_port['id'], - None, None) - return {'attachmentId': ec2utils.change_ec2_id_kind( - network_interface['id'], 'eni-attach')} - - -def detach_network_interface(context, attachment_id, force=None): - network_interface = db_api.get_item_by_id( - context, ec2utils.change_ec2_id_kind(attachment_id, 'eni')) - if not network_interface or 'instance_id' not in network_interface: - raise exception.InvalidAttachmentIDNotFound(id=attachment_id) - if network_interface['device_index'] == 0: - raise exception.OperationNotPermitted( - _('The network interface at device index 0 cannot be detached.')) - neutron = clients.neutron(context) - os_port = neutron.show_port(network_interface['os_id'])['port'] - with common.OnCrashCleaner() as cleaner: - instance_id = network_interface['instance_id'] - device_index = network_interface['device_index'] - attach_time = network_interface['attach_time'] - delete_on_termination = network_interface['delete_on_termination'] - _detach_network_interface_item(context, network_interface) - cleaner.addCleanup(_attach_network_interface_item, - context, network_interface, instance_id, - device_index, attach_time, delete_on_termination) - neutron.update_port(os_port['id'], - {'port': {'device_id': '', - 'device_owner': ''}}) - return True - - -def _format_network_interface(context, network_interface, os_port, - associated_ec2_addresses=[], security_groups={}): - 
ec2_network_interface = {} - ec2_network_interface['networkInterfaceId'] = network_interface['id'] - ec2_network_interface['subnetId'] = network_interface['subnet_id'] - ec2_network_interface['vpcId'] = network_interface['vpc_id'] - ec2_network_interface['description'] = network_interface['description'] - ec2_network_interface['sourceDestCheck'] = ( - network_interface.get('source_dest_check', True)) - ec2_network_interface['requesterManaged'] = ( - os_port.get('device_owner', '').startswith('network:')) - ec2_network_interface['ownerId'] = context.project_id - security_group_set = [] - for sg_id in os_port['security_groups']: - if security_groups.get(sg_id): - security_group_set.append(security_groups[sg_id]) - ec2_network_interface['groupSet'] = security_group_set - if 'instance_id' in network_interface: - ec2_network_interface['status'] = 'in-use' - ec2_network_interface['attachment'] = { - 'attachmentId': ec2utils.change_ec2_id_kind( - network_interface['id'], 'eni-attach'), - 'instanceId': network_interface['instance_id'], - 'deviceIndex': network_interface['device_index'], - 'status': 'attached', - 'deleteOnTermination': network_interface['delete_on_termination'], - 'attachTime': network_interface['attach_time'], - 'instanceOwnerId': context.project_id - } - else: - ec2_network_interface['status'] = 'available' - ec2_network_interface['macAddress'] = os_port['mac_address'] - if os_port['fixed_ips']: - ipsSet = [] - for ip in os_port['fixed_ips']: - primary = ( - network_interface.get('private_ip_address', '') == - ip['ip_address']) - item = {'privateIpAddress': ip['ip_address'], - 'primary': primary} - ec2_address = next( - (addr for addr in associated_ec2_addresses - if addr['privateIpAddress'] == ip['ip_address']), - None) - if ec2_address: - item['association'] = { - 'associationId': ec2utils.change_ec2_id_kind( - ec2_address['allocationId'], 'eipassoc'), - 'allocationId': ec2_address['allocationId'], - 'ipOwnerId': context.project_id, - 'publicDnsName': 
None, - 'publicIp': ec2_address['publicIp'], - } - if primary: - ipsSet.insert(0, item) - else: - ipsSet.append(item) - ec2_network_interface['privateIpAddressesSet'] = ipsSet - primary_ip = ipsSet[0] - ec2_network_interface['privateIpAddress'] = ( - primary_ip['privateIpAddress']) - if 'association' in primary_ip: - ec2_network_interface['association'] = primary_ip['association'] - # NOTE(ft): AWS returns empty tag set for a network interface - # if no tag exists - ec2_network_interface['tagSet'] = [] - return ec2_network_interface - - -def _attach_network_interface_item(context, network_interface, instance_id, - device_index, attach_time=None, - delete_on_termination=False): - if not attach_time: - attach_time = ec2utils.isotime(None, True) - network_interface.update({ - 'instance_id': instance_id, - 'device_index': device_index, - 'attach_time': attach_time, - 'delete_on_termination': delete_on_termination}) - db_api.update_item(context, network_interface) - - -def _detach_network_interface_item(context, network_interface): - network_interface.pop('instance_id', None) - network_interface.pop('device_index', None) - network_interface.pop('attach_time', None) - network_interface.pop('delete_on_termination', None) - db_api.update_item(context, network_interface) diff --git a/ec2api/api/opts.py b/ec2api/api/opts.py deleted file mode 100644 index 2674cd6e..00000000 --- a/ec2api/api/opts.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
import itertools

import ec2api.api
import ec2api.api.auth
import ec2api.api.availability_zone
import ec2api.api.common
import ec2api.api.dhcp_options
import ec2api.api.ec2utils
import ec2api.api.image
import ec2api.api.instance


def list_opts():
    """Aggregate all ec2api.api oslo.config options for listing.

    :returns: list of (group name, options iterator) pairs consumed by
        oslo-config-generator
    """
    default_opts = itertools.chain(
        ec2api.api.ec2_opts,
        ec2api.api.auth.auth_opts,
        ec2api.api.availability_zone.availability_zone_opts,
        ec2api.api.common.ec2_opts,
        ec2api.api.dhcp_options.ec2_opts,
        ec2api.api.ec2utils.ec2_opts,
        ec2api.api.image.s3_opts,
        ec2api.api.image.rpcapi_opts,
        ec2api.api.instance.ec2_opts,
    )
    return [('DEFAULT', default_opts)]
import collections
import copy

import netaddr
from novaclient import exceptions as nova_exception

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.api import vpn_connection as vpn_connection_api
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


HOST_TARGET = 'host'
VPN_TARGET = 'vpn'


"""Route tables related API implementation
"""


class Validator(common.Validator):

    def igw_or_vgw_id(self, id):
        # A route gateway may be either an internet or a VPN gateway.
        self.ec2_id(id, ['igw', 'vgw'])


def create_route_table(context, vpc_id):
    """Create a non-main route table for a VPC."""
    vpc = ec2utils.get_db_item(context, vpc_id)
    route_table = _create_route_table(context, vpc)
    return {'routeTable': _format_route_table(context, route_table,
                                              is_main=False)}


def create_route(context, route_table_id, destination_cidr_block,
                 gateway_id=None, instance_id=None,
                 network_interface_id=None,
                 vpc_peering_connection_id=None):
    """Add a route to a route table (does not replace an existing one)."""
    return _set_route(context, route_table_id, destination_cidr_block,
                      gateway_id, instance_id, network_interface_id,
                      vpc_peering_connection_id, False)


def replace_route(context, route_table_id, destination_cidr_block,
                  gateway_id=None, instance_id=None,
                  network_interface_id=None,
                  vpc_peering_connection_id=None):
    """Replace an existing route in a route table."""
    return _set_route(context, route_table_id, destination_cidr_block,
                      gateway_id, instance_id, network_interface_id,
                      vpc_peering_connection_id, True)


def delete_route(context, route_table_id, destination_cidr_block):
    """Delete a non-local route and propagate the change to subnets."""
    route_table = ec2utils.get_db_item(context, route_table_id)
    route_index = next(
        (idx for idx, route in enumerate(route_table['routes'])
         if route['destination_cidr_block'] == destination_cidr_block),
        None)
    if route_index is None:
        raise exception.InvalidRouteNotFound(
            route_table_id=route_table_id,
            destination_cidr_block=destination_cidr_block)
    route = route_table['routes'][route_index]
    if route.get('gateway_id', 0) is None:
        # A gateway_id explicitly stored as None marks the local route.
        msg = _('cannot remove local route %(destination_cidr_block)s '
                'in route table %(route_table_id)s')
        raise exception.InvalidParameterValue(
            msg % {'route_table_id': route_table_id,
                   'destination_cidr_block': destination_cidr_block})
    update_target = _get_route_target(route)
    if update_target == VPN_TARGET:
        # Skip the subnet update if the VPN gateway is gone or detached.
        vpn_gateway = db_api.get_item_by_id(context, route['gateway_id'])
        if (not vpn_gateway or
                vpn_gateway['vpc_id'] != route_table['vpc_id']):
            update_target = None
    rollback_route_table_state = copy.deepcopy(route_table)
    del route_table['routes'][route_index]
    with common.OnCrashCleaner() as cleaner:
        db_api.update_item(context, route_table)
        cleaner.addCleanup(db_api.update_item, context,
                           rollback_route_table_state)

        if update_target:
            _update_routes_in_associated_subnets(
                context, cleaner, route_table, update_target=update_target)

    return True


def enable_vgw_route_propagation(context, route_table_id, gateway_id):
    """Start propagating a VPN gateway's routes into a route table."""
    route_table = ec2utils.get_db_item(context, route_table_id)
    # NOTE(ft): AWS returns GatewayNotAttached for all invalid cases of
    # gateway_id value
    vpn_gateway = ec2utils.get_db_item(context, gateway_id)
    if vpn_gateway['vpc_id'] != route_table['vpc_id']:
        raise exception.GatewayNotAttached(gw_id=vpn_gateway['id'],
                                           vpc_id=route_table['vpc_id'])
    if vpn_gateway['id'] in route_table.setdefault('propagating_gateways',
                                                   []):
        # Idempotent: already propagating.
        return True
    with common.OnCrashCleaner() as cleaner:
        _append_propagation_to_route_table_item(context, route_table,
                                                vpn_gateway['id'])
        cleaner.addCleanup(_remove_propagation_from_route_table_item,
                           context, route_table, vpn_gateway['id'])

        _update_routes_in_associated_subnets(context, cleaner, route_table,
                                             update_target=VPN_TARGET)
    return True
common.OnCrashCleaner() as cleaner: - _remove_propagation_from_route_table_item(context, route_table, - gateway_id) - cleaner.addCleanup(_append_propagation_to_route_table_item, - context, route_table, gateway_id) - - if vpn_gateway and vpn_gateway['vpc_id'] == route_table['vpc_id']: - _update_routes_in_associated_subnets(context, cleaner, route_table, - update_target=VPN_TARGET) - return True - - -def associate_route_table(context, route_table_id, subnet_id): - route_table = ec2utils.get_db_item(context, route_table_id) - subnet = ec2utils.get_db_item(context, subnet_id) - if route_table['vpc_id'] != subnet['vpc_id']: - msg = _('Route table %(rtb_id)s and subnet %(subnet_id)s belong to ' - 'different networks') - msg = msg % {'rtb_id': route_table_id, - 'subnet_id': subnet_id} - raise exception.InvalidParameterValue(msg) - if 'route_table_id' in subnet: - msg = _('The specified association for route table %(rtb_id)s ' - 'conflicts with an existing association') - msg = msg % {'rtb_id': route_table_id} - raise exception.ResourceAlreadyAssociated(msg) - - with common.OnCrashCleaner() as cleaner: - _associate_subnet_item(context, subnet, route_table['id']) - cleaner.addCleanup(_disassociate_subnet_item, context, subnet) - - _update_subnet_routes(context, cleaner, subnet, route_table) - - return {'associationId': ec2utils.change_ec2_id_kind(subnet['id'], - 'rtbassoc')} - - -def replace_route_table_association(context, association_id, route_table_id): - route_table = ec2utils.get_db_item(context, route_table_id) - if route_table['vpc_id'] == ec2utils.change_ec2_id_kind(association_id, - 'vpc'): - vpc = db_api.get_item_by_id( - context, ec2utils.change_ec2_id_kind(association_id, 'vpc')) - if vpc is None: - raise exception.InvalidAssociationIDNotFound(id=association_id) - - rollback_route_table_id = vpc['route_table_id'] - with common.OnCrashCleaner() as cleaner: - _associate_vpc_item(context, vpc, route_table['id']) - cleaner.addCleanup(_associate_vpc_item, context, 
vpc, - rollback_route_table_id) - - _update_routes_in_associated_subnets( - context, cleaner, route_table, default_associations_only=True) - else: - subnet = db_api.get_item_by_id( - context, ec2utils.change_ec2_id_kind(association_id, 'subnet')) - if subnet is None or 'route_table_id' not in subnet: - raise exception.InvalidAssociationIDNotFound(id=association_id) - if subnet['vpc_id'] != route_table['vpc_id']: - msg = _('Route table association %(rtbassoc_id)s and route table ' - '%(rtb_id)s belong to different networks') - msg = msg % {'rtbassoc_id': association_id, - 'rtb_id': route_table_id} - raise exception.InvalidParameterValue(msg) - - rollback_route_table_id = subnet['route_table_id'] - with common.OnCrashCleaner() as cleaner: - _associate_subnet_item(context, subnet, route_table['id']) - cleaner.addCleanup(_associate_subnet_item, context, subnet, - rollback_route_table_id) - - _update_subnet_routes(context, cleaner, subnet, route_table) - - return {'newAssociationId': association_id} - - -def disassociate_route_table(context, association_id): - subnet = db_api.get_item_by_id( - context, ec2utils.change_ec2_id_kind(association_id, 'subnet')) - if not subnet: - vpc = db_api.get_item_by_id( - context, ec2utils.change_ec2_id_kind(association_id, 'vpc')) - if vpc is None: - raise exception.InvalidAssociationIDNotFound(id=association_id) - msg = _('Cannot disassociate the main route table association ' - '%(rtbassoc_id)s') % {'rtbassoc_id': association_id} - raise exception.InvalidParameterValue(msg) - if 'route_table_id' not in subnet: - raise exception.InvalidAssociationIDNotFound(id=association_id) - - rollback_route_table_id = subnet['route_table_id'] - vpc = db_api.get_item_by_id(context, subnet['vpc_id']) - main_route_table = db_api.get_item_by_id(context, vpc['route_table_id']) - with common.OnCrashCleaner() as cleaner: - _disassociate_subnet_item(context, subnet) - cleaner.addCleanup(_associate_subnet_item, context, subnet, - rollback_route_table_id) - 
- _update_subnet_routes(context, cleaner, subnet, main_route_table) - - return True - - -def delete_route_table(context, route_table_id): - route_table = ec2utils.get_db_item(context, route_table_id) - vpc = db_api.get_item_by_id(context, route_table['vpc_id']) - _delete_route_table(context, route_table['id'], vpc) - return True - - -class RouteTableDescriber(common.TaggableItemsDescriber, - common.NonOpenstackItemsDescriber): - - KIND = 'rtb' - FILTER_MAP = {'association.route-table-association-id': ( - ['associationSet', 'routeTableAssociationId']), - 'association.route-table-id': ['associationSet', - 'routeTableId'], - 'association.subnet-id': ['associationSet', 'subnetId'], - 'association.main': ['associationSet', 'main'], - 'route-table-id': 'routeTableId', - 'route.destination-cidr-block': ['routeSet', - 'destinationCidrBlock'], - 'route.gateway-id': ['routeSet', 'gatewayId'], - 'route.instance-id': ['routeSet', 'instanceId'], - 'route.origin': ['routeSet', 'origin'], - 'route.state': ['routeSet', 'state'], - 'vpc-id': 'vpcId'} - - def format(self, route_table): - return _format_route_table( - self.context, route_table, - associated_subnet_ids=self.associations[route_table['id']], - is_main=(self.vpcs[route_table['vpc_id']]['route_table_id'] == - route_table['id']), - gateways=self.gateways, - network_interfaces=self.network_interfaces, - vpn_connections_by_gateway_id=self.vpn_connections_by_gateway_id) - - def get_db_items(self): - associations = collections.defaultdict(list) - for subnet in db_api.get_items(self.context, 'subnet'): - if 'route_table_id' in subnet: - associations[subnet['route_table_id']].append(subnet['id']) - self.associations = associations - vpcs = db_api.get_items(self.context, 'vpc') - self.vpcs = {vpc['id']: vpc for vpc in vpcs} - gateways = (db_api.get_items(self.context, 'igw') + - db_api.get_items(self.context, 'vgw')) - self.gateways = {gw['id']: gw for gw in gateways} - # TODO(ft): scan route tables to get only used instances and 
- # network interfaces to reduce DB and Nova throughput - network_interfaces = db_api.get_items(self.context, 'eni') - self.network_interfaces = {eni['id']: eni - for eni in network_interfaces} - vpn_connections = db_api.get_items(self.context, 'vpn') - vpns_by_gateway_id = {} - for vpn in vpn_connections: - vpns = vpns_by_gateway_id.setdefault(vpn['vpn_gateway_id'], []) - vpns.append(vpn) - self.vpn_connections_by_gateway_id = vpns_by_gateway_id - return super(RouteTableDescriber, self).get_db_items() - - -def describe_route_tables(context, route_table_id=None, filter=None): - ec2utils.check_and_create_default_vpc(context) - formatted_route_tables = RouteTableDescriber().describe( - context, ids=route_table_id, filter=filter) - return {'routeTableSet': formatted_route_tables} - - -def _create_route_table(context, vpc): - route_table = {'vpc_id': vpc['id'], - 'routes': [{'destination_cidr_block': vpc['cidr_block'], - 'gateway_id': None}]} - route_table = db_api.add_item(context, 'rtb', route_table) - return route_table - - -def _delete_route_table(context, route_table_id, vpc=None, cleaner=None): - def get_associated_subnets(): - return [s for s in db_api.get_items(context, 'subnet') - if s.get('route_table_id') == route_table_id] - - if (vpc and route_table_id == vpc['route_table_id'] or - len(get_associated_subnets()) > 0): - msg = _("The routeTable '%(rtb_id)s' has dependencies and cannot " - "be deleted.") % {'rtb_id': route_table_id} - raise exception.DependencyViolation(msg) - if cleaner: - route_table = db_api.get_item_by_id(context, route_table_id) - db_api.delete_item(context, route_table_id) - if cleaner and route_table: - cleaner.addCleanup(db_api.restore_item, context, 'rtb', route_table) - - -def _set_route(context, route_table_id, destination_cidr_block, - gateway_id, instance_id, network_interface_id, - vpc_peering_connection_id, do_replace): - route_table = ec2utils.get_db_item(context, route_table_id) - vpc = db_api.get_item_by_id(context, 
route_table['vpc_id']) - vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block']) - route_ipnet = netaddr.IPNetwork(destination_cidr_block) - if route_ipnet in vpc_ipnet: - msg = _('Cannot create a more specific route for ' - '%(destination_cidr_block)s than local route ' - '%(vpc_cidr_block)s in route table %(rtb_id)s') - msg = msg % {'rtb_id': route_table_id, - 'destination_cidr_block': destination_cidr_block, - 'vpc_cidr_block': vpc['cidr_block']} - raise exception.InvalidParameterValue(msg) - - obj_param_count = len([p for p in (gateway_id, network_interface_id, - instance_id, vpc_peering_connection_id) - if p is not None]) - if obj_param_count != 1: - msg = _('The request must contain exactly one of gatewayId, ' - 'networkInterfaceId, vpcPeeringConnectionId or instanceId') - if obj_param_count == 0: - raise exception.MissingParameter(msg) - else: - raise exception.InvalidParameterCombination(msg) - - rollabck_route_table_state = copy.deepcopy(route_table) - if do_replace: - route_index, old_route = next( - ((i, r) for i, r in enumerate(route_table['routes']) - if r['destination_cidr_block'] == destination_cidr_block), - (None, None)) - if route_index is None: - msg = _("There is no route defined for " - "'%(destination_cidr_block)s' in the route table. 
" - "Use CreateRoute instead.") - msg = msg % {'destination_cidr_block': destination_cidr_block} - raise exception.InvalidParameterValue(msg) - else: - del route_table['routes'][route_index] - - if gateway_id: - gateway = ec2utils.get_db_item(context, gateway_id) - if gateway.get('vpc_id') != route_table['vpc_id']: - if ec2utils.get_ec2_id_kind(gateway_id) == 'vgw': - raise exception.InvalidGatewayIDNotFound(id=gateway['id']) - else: # igw - raise exception.InvalidParameterValue( - _('Route table %(rtb_id)s and network gateway %(igw_id)s ' - 'belong to different networks') % - {'rtb_id': route_table_id, - 'igw_id': gateway_id}) - route = {'gateway_id': gateway['id']} - elif network_interface_id: - network_interface = ec2utils.get_db_item(context, network_interface_id) - if network_interface['vpc_id'] != route_table['vpc_id']: - msg = _('Route table %(rtb_id)s and interface %(eni_id)s ' - 'belong to different networks') - msg = msg % {'rtb_id': route_table_id, - 'eni_id': network_interface_id} - raise exception.InvalidParameterValue(msg) - route = {'network_interface_id': network_interface['id']} - elif instance_id: - # TODO(ft): implement search in DB layer - network_interfaces = [eni for eni in db_api.get_items(context, 'eni') - if eni.get('instance_id') == instance_id] - if len(network_interfaces) == 0: - msg = _("Invalid value '%(i_id)s' for instance ID. 
" - "Instance is not in a VPC.") - msg = msg % {'i_id': instance_id} - raise exception.InvalidParameterValue(msg) - elif len(network_interfaces) > 1: - raise exception.InvalidInstanceId(instance_id=instance_id) - network_interface = network_interfaces[0] - if network_interface['vpc_id'] != route_table['vpc_id']: - msg = _('Route table %(rtb_id)s and interface %(eni_id)s ' - 'belong to different networks') - msg = msg % {'rtb_id': route_table_id, - 'eni_id': network_interface['id']} - raise exception.InvalidParameterValue(msg) - route = {'network_interface_id': network_interface['id']} - else: - raise exception.InvalidRequest('Parameter VpcPeeringConnectionId is ' - 'not supported by this implementation') - route['destination_cidr_block'] = destination_cidr_block - update_target = _get_route_target(route) - - if do_replace: - idempotent_call = False - old_target = _get_route_target(old_route) - if old_target != update_target: - update_target = None - else: - old_route = next((r for r in route_table['routes'] - if r['destination_cidr_block'] == - destination_cidr_block), None) - idempotent_call = old_route == route - if old_route and not idempotent_call: - raise exception.RouteAlreadyExists( - destination_cidr_block=destination_cidr_block) - - if not idempotent_call: - route_table['routes'].append(route) - - with common.OnCrashCleaner() as cleaner: - db_api.update_item(context, route_table) - cleaner.addCleanup(db_api.update_item, context, - rollabck_route_table_state) - _update_routes_in_associated_subnets(context, cleaner, route_table, - update_target=update_target) - - return True - - -def _format_route_table(context, route_table, is_main=False, - associated_subnet_ids=[], - gateways={}, - network_interfaces={}, - vpn_connections_by_gateway_id={}): - vpc_id = route_table['vpc_id'] - ec2_route_table = { - 'routeTableId': route_table['id'], - 'vpcId': vpc_id, - 'routeSet': [], - 'propagatingVgwSet': [ - {'gatewayId': vgw_id} - for vgw_id in 
route_table.get('propagating_gateways', [])], - # NOTE(ft): AWS returns empty tag set for a route table - # if no tag exists - 'tagSet': [], - } - # TODO(ft): refactor to get Nova instances outside of this function - nova = clients.nova(context) - for route in route_table['routes']: - origin = ('CreateRouteTable' - if route.get('gateway_id', 0) is None else - 'CreateRoute') - ec2_route = {'destinationCidrBlock': route['destination_cidr_block'], - 'origin': origin} - if 'gateway_id' in route: - gateway_id = route['gateway_id'] - if gateway_id is None: - state = 'active' - ec2_gateway_id = 'local' - else: - gateway = gateways.get(gateway_id) - state = ('active' - if gateway and gateway.get('vpc_id') == vpc_id else - 'blackhole') - ec2_gateway_id = gateway_id - ec2_route.update({'gatewayId': ec2_gateway_id, - 'state': state}) - else: - network_interface_id = route['network_interface_id'] - network_interface = network_interfaces.get(network_interface_id) - instance_id = (network_interface.get('instance_id') - if network_interface else - None) - state = 'blackhole' - if instance_id: - instance = db_api.get_item_by_id(context, instance_id) - if instance: - try: - os_instance = nova.servers.get(instance['os_id']) - if os_instance and os_instance.status == 'ACTIVE': - state = 'active' - except nova_exception.NotFound: - pass - ec2_route.update({'instanceId': instance_id, - 'instanceOwnerId': context.project_id}) - ec2_route.update({'networkInterfaceId': network_interface_id, - 'state': state}) - ec2_route_table['routeSet'].append(ec2_route) - - for vgw_id in route_table.get('propagating_gateways', []): - vgw = gateways.get(vgw_id) - if vgw and vgw_id in vpn_connections_by_gateway_id: - cidrs = set() - vpn_connections = vpn_connections_by_gateway_id[vgw_id] - for vpn_connection in vpn_connections: - cidrs.update(vpn_connection['cidrs']) - state = 'active' if vgw['vpc_id'] == vpc_id else 'blackhole' - for cidr in cidrs: - ec2_route = {'gatewayId': vgw_id, - 
'destinationCidrBlock': cidr, - 'state': state, - 'origin': 'EnableVgwRoutePropagation'} - ec2_route_table['routeSet'].append(ec2_route) - - associations = [] - if is_main: - associations.append({ - 'routeTableAssociationId': ec2utils.change_ec2_id_kind(vpc_id, - 'rtbassoc'), - 'routeTableId': route_table['id'], - 'main': True}) - for subnet_id in associated_subnet_ids: - associations.append({ - 'routeTableAssociationId': ec2utils.change_ec2_id_kind(subnet_id, - 'rtbassoc'), - 'routeTableId': route_table['id'], - 'subnetId': subnet_id, - 'main': False}) - if associations: - ec2_route_table['associationSet'] = associations - - return ec2_route_table - - -def _update_routes_in_associated_subnets(context, cleaner, route_table, - default_associations_only=None, - update_target=None): - if default_associations_only: - appropriate_rtb_ids = (None,) - else: - vpc = db_api.get_item_by_id(context, route_table['vpc_id']) - if vpc['route_table_id'] == route_table['id']: - appropriate_rtb_ids = (route_table['id'], None) - else: - appropriate_rtb_ids = (route_table['id'],) - neutron = clients.neutron(context) - subnets = [subnet for subnet in db_api.get_items(context, 'subnet') - if (subnet['vpc_id'] == route_table['vpc_id'] and - subnet.get('route_table_id') in appropriate_rtb_ids)] - # NOTE(ft): we need to update host routes for both host and vpn target - # because vpn-related routes are present in host routes as well - _update_host_routes(context, neutron, cleaner, route_table, subnets) - if not update_target or update_target == VPN_TARGET: - vpn_connection_api._update_vpn_routes(context, neutron, cleaner, - route_table, subnets) - - -def _update_subnet_routes(context, cleaner, subnet, route_table): - neutron = clients.neutron(context) - _update_host_routes(context, neutron, cleaner, route_table, [subnet]) - vpn_connection_api._update_vpn_routes(context, neutron, cleaner, - route_table, [subnet]) - - -def _update_host_routes(context, neutron, cleaner, route_table, subnets): 
- destinations = _get_active_route_destinations(context, route_table) - for subnet in subnets: - # TODO(ft): do list subnet w/ filters instead of show one by one - os_subnet = neutron.show_subnet(subnet['os_id'])['subnet'] - host_routes, gateway_ip = _get_subnet_host_routes_and_gateway_ip( - context, route_table, os_subnet['cidr'], destinations) - neutron.update_subnet(subnet['os_id'], - {'subnet': {'host_routes': host_routes, - 'gateway_ip': gateway_ip}}) - cleaner.addCleanup( - neutron.update_subnet, subnet['os_id'], - {'subnet': {'host_routes': os_subnet['host_routes'], - 'gateway_ip': os_subnet['gateway_ip']}}) - - -def _get_active_route_destinations(context, route_table): - vpn_connections = {vpn['vpn_gateway_id']: vpn - for vpn in db_api.get_items(context, 'vpn')} - dst_ids = [route[id_key] - for route in route_table['routes'] - for id_key in ('gateway_id', 'network_interface_id') - if route.get(id_key) is not None] - dst_ids.extend(route_table.get('propagating_gateways', [])) - destinations = {item['id']: item - for item in db_api.get_items_by_ids(context, dst_ids) - if (item['vpc_id'] == route_table['vpc_id'] and - (ec2utils.get_ec2_id_kind(item['id']) != 'vgw' or - item['id'] in vpn_connections))} - for vpn in vpn_connections.values(): - if vpn['vpn_gateway_id'] in destinations: - destinations[vpn['vpn_gateway_id']]['vpn_connection'] = vpn - return destinations - - -def _get_subnet_host_routes_and_gateway_ip(context, route_table, cidr_block, - destinations=None): - if not destinations: - destinations = _get_active_route_destinations(context, route_table) - gateway_ip = str(netaddr.IPAddress( - netaddr.IPNetwork(cidr_block).first + 1)) - - def get_nexthop(route): - if 'gateway_id' in route: - gateway_id = route['gateway_id'] - if gateway_id and gateway_id not in destinations: - return '127.0.0.1' - return gateway_ip - network_interface = destinations.get(route['network_interface_id']) - if not network_interface: - return '127.0.0.1' - return 
network_interface['private_ip_address'] - - host_routes = [] - subnet_gateway_is_used = False - for route in route_table['routes']: - nexthop = get_nexthop(route) - cidr = route['destination_cidr_block'] - if cidr == '0.0.0.0/0': - if nexthop == '127.0.0.1': - continue - elif nexthop == gateway_ip: - subnet_gateway_is_used = True - host_routes.append({'destination': cidr, - 'nexthop': nexthop}) - host_routes.extend( - {'destination': cidr, - 'nexthop': gateway_ip} - for vgw_id in route_table.get('propagating_gateways', []) - for cidr in (destinations.get(vgw_id, {}).get('vpn_connection', {}). - get('cidrs', []))) - - if not subnet_gateway_is_used: - # NOTE(andrey-mp): add route to metadata server - host_routes.append( - {'destination': '169.254.169.254/32', - 'nexthop': gateway_ip}) - # NOTE(ft): gateway_ip is set to None to allow correct handling - # of 0.0.0.0/0 route by Neutron. - gateway_ip = None - return host_routes, gateway_ip - - -def _get_route_target(route): - if ec2utils.get_ec2_id_kind(route.get('gateway_id') or '') == 'vgw': - return VPN_TARGET - else: - return HOST_TARGET - - -def _associate_subnet_item(context, subnet, route_table_id): - subnet['route_table_id'] = route_table_id - db_api.update_item(context, subnet) - - -def _disassociate_subnet_item(context, subnet): - subnet.pop('route_table_id') - db_api.update_item(context, subnet) - - -def _associate_vpc_item(context, vpc, route_table_id): - vpc['route_table_id'] = route_table_id - db_api.update_item(context, vpc) - - -def _append_propagation_to_route_table_item(context, route_table, gateway_id): - vgws = route_table.setdefault('propagating_gateways', []) - vgws.append(gateway_id) - db_api.update_item(context, route_table) - - -def _remove_propagation_from_route_table_item(context, route_table, - gateway_id): - vgws = route_table['propagating_gateways'] - vgws.remove(gateway_id) - if not vgws: - del route_table['propagating_gateways'] - db_api.update_item(context, route_table) diff --git 
a/ec2api/api/security_group.py b/ec2api/api/security_group.py deleted file mode 100644 index d439be1c..00000000 --- a/ec2api/api/security_group.py +++ /dev/null @@ -1,589 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import copy - -try: - from neutronclient.common import exceptions as neutron_exception -except ImportError: - pass # clients will log absense of neutronclient in this case -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api.api import validator -from ec2api import clients -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -"""Security Groups related API implementation -""" - -Validator = common.Validator - - -SECURITY_GROUP_MAP = {'domain-name-servers': 'dns-servers', - 'domain-name': 'domain-name', - 'ntp-servers': 'ntp-server', - 'netbios-name-servers': 'netbios-ns', - 'netbios-node-type': 'netbios-nodetype'} - -DEFAULT_GROUP_NAME = 'default' - - -def get_security_group_engine(): - return SecurityGroupEngineNeutron() - - -def create_security_group(context, group_name, group_description, - vpc_id=None): - if group_name == DEFAULT_GROUP_NAME: - if vpc_id: - raise exception.InvalidParameterValue( - _('Cannot use reserved security group name: %s') - % DEFAULT_GROUP_NAME) - else: - raise 
exception.InvalidGroupReserved(group_name=group_name) - filter = [{'name': 'group-name', - 'value': [group_name]}] - if not vpc_id and CONF.disable_ec2_classic: - vpc_id = ec2utils.get_default_vpc(context)['id'] - if vpc_id and group_name != vpc_id: - filter.append({'name': 'vpc-id', - 'value': [vpc_id]}) - security_groups = describe_security_groups( - context, filter=filter)['securityGroupInfo'] - if not vpc_id: - # TODO(andrey-mp): remove it when fitering by None will be implemented - security_groups = [sg for sg in security_groups - if sg.get('vpcId') is None] - if security_groups: - raise exception.InvalidGroupDuplicate(name=group_name) - return _create_security_group(context, group_name, group_description, - vpc_id) - - -def _create_security_group(context, group_name, group_description, - vpc_id=None, default=False): - neutron = clients.neutron(context) - with common.OnCrashCleaner() as cleaner: - try: - secgroup_body = ( - {'security_group': {'name': group_name, - 'description': group_description}}) - os_security_group = neutron.create_security_group( - secgroup_body)['security_group'] - except neutron_exception.OverQuotaClient: - raise exception.ResourceLimitExceeded(resource='security groups') - cleaner.addCleanup(neutron.delete_security_group, - os_security_group['id']) - if vpc_id: - # NOTE(Alex) Check if such vpc exists - ec2utils.get_db_item(context, vpc_id) - item = {'vpc_id': vpc_id, 'os_id': os_security_group['id']} - if not default: - security_group = db_api.add_item(context, 'sg', item) - else: - item['id'] = ec2utils.change_ec2_id_kind(vpc_id, 'sg') - # NOTE(andrey-mp): try to add item with specific id - # and catch exception if it exists - security_group = db_api.restore_item(context, 'sg', item) - return {'return': 'true', - 'groupId': security_group['id']} - - -def _create_default_security_group(context, vpc): - # NOTE(Alex): OpenStack doesn't allow creation of another group - # named 'default' hence vpc-id is used. 
- try: - sg_id = _create_security_group(context, vpc['id'], - 'Default VPC security group', vpc['id'], - default=True)['groupId'] - except (exception.EC2DBDuplicateEntry, exception.InvalidVpcIDNotFound): - # NOTE(andrey-mp): when this thread tries to recreate default group - # but another thread tries to delete vpc we should pass vpc not found - LOG.exception('Failed to create default security group.') - return None - return sg_id - - -def delete_security_group(context, group_name=None, group_id=None, - delete_default=False): - if group_name is None and group_id is None: - raise exception.MissingParameter(param='group id or name') - security_group_engine.delete_group(context, group_name, group_id, - delete_default) - return True - - -class SecurityGroupDescriber(common.TaggableItemsDescriber): - - KIND = 'sg' - FILTER_MAP = {'description': 'groupDescription', - 'group-id': 'groupId', - 'group-name': 'groupName', - 'ip-permission.cidr': ['ipPermissions', - ['ipRanges', 'cidrIp']], - 'ip-permission.from-port': ['ipPermissions', 'fromPort'], - 'ip-permission.group-id': ['ipPermissions', - ['groups', 'groupId']], - 'ip-permission.group-name': ['ipPermissions', - ['groups', 'groupName']], - 'ip-permission.protocol': ['ipPermissions', 'ipProtocol'], - 'ip-permission.to-port': ['ipPermissions', 'toPort'], - 'ip-permission.user-id': ['ipPermissions', - ['groups', 'userId']], - 'owner-id': 'ownerId', - 'vpc-id': 'vpcId', - } - - def __init__(self, default_vpc_id): - super(SecurityGroupDescriber, self).__init__() - self.all_db_items = None - self.default_vpc_id = default_vpc_id - - def format(self, item=None, os_item=None): - return _format_security_group(item, os_item, - self.all_db_items, self.os_items) - - def get_os_items(self): - if self.all_db_items is None: - self.all_db_items = db_api.get_items(self.context, 'sg') - os_groups = security_group_engine.get_os_groups(self.context) - if self.check_and_repair_default_groups(os_groups, self.all_db_items): - 
self.all_db_items = db_api.get_items(self.context, 'sg') - self.items = self.get_db_items() - os_groups = security_group_engine.get_os_groups(self.context) - for os_group in os_groups: - os_group['name'] = _translate_group_name(self.context, - os_group, - self.all_db_items) - return os_groups - - def check_and_repair_default_groups(self, os_groups, db_groups): - vpcs = ec2utils.get_db_items(self.context, 'vpc', None) - os_groups_dict = {g['name']: g['id'] for g in os_groups} - db_groups_dict = {g['os_id']: g['vpc_id'] for g in db_groups} - had_to_repair = False - for vpc in vpcs: - os_group = os_groups_dict.get(vpc['id']) - if os_group: - db_group = db_groups_dict.get(os_group) - if db_group and db_group == vpc['id']: - continue - result = _create_default_security_group(self.context, vpc) - if result: - had_to_repair = True - return had_to_repair - - def is_selected_item(self, context, os_item_name, item): - if item and item['id'] in self.ids: - return True - if os_item_name in self.names: - if not CONF.disable_ec2_classic: - return (not item or not item['vpc_id']) - else: - return (self.default_vpc_id and item and - item['vpc_id'] == self.default_vpc_id) - return False - - -def describe_security_groups(context, group_name=None, group_id=None, - filter=None): - default_vpc_id = None - default_vpc = ec2utils.check_and_create_default_vpc(context) - if default_vpc: - default_vpc_id = default_vpc['id'] - formatted_security_groups = SecurityGroupDescriber( - default_vpc_id).describe(context, group_id, group_name, filter) - return {'securityGroupInfo': formatted_security_groups} - - -# TODO(Alex) cidr/ports/protocol/source_group should be possible -# to pass in root set of parameters, not in ip_permissions as now only -# supported, for authorize and revoke functions. -# The new parameters appeared only in the very recent version of AWS doc. -# API version 2014-06-15 didn't claim support of it. 
- -def authorize_security_group_ingress(context, group_id=None, - group_name=None, ip_permissions=None): - if group_name and not group_id and CONF.disable_ec2_classic: - sg = describe_security_groups( - context, - group_name=[group_name])['securityGroupInfo'][0] - group_id = sg['groupId'] - group_name = None - return _authorize_security_group(context, group_id, group_name, - ip_permissions, 'ingress') - - -def authorize_security_group_egress(context, group_id, ip_permissions=None): - security_group = ec2utils.get_db_item(context, group_id) - if not security_group.get('vpc_id'): - raise exception.InvalidParameterValue(message=_('Only Amazon VPC ' - 'security groups may be used with this operation.')) - return _authorize_security_group(context, group_id, None, - ip_permissions, 'egress') - - -def _authorize_security_group(context, group_id, group_name, - ip_permissions, direction): - rules_bodies = _build_rules(context, group_id, group_name, - ip_permissions, direction) - for rule_body in rules_bodies: - security_group_engine.authorize_security_group(context, rule_body) - return True - - -def _validate_parameters(protocol, from_port, to_port): - if (not isinstance(protocol, int) and - protocol not in ['tcp', 'udp', 'icmp']): - raise exception.InvalidParameterValue( - _('Invalid value for IP protocol. 
Unknown protocol.')) - if (not isinstance(from_port, int) or - not isinstance(to_port, int)): - raise exception.InvalidParameterValue( - _('Integer values should be specified for ports')) - if protocol in ['tcp', 'udp', 6, 17]: - if from_port == -1 or to_port == -1: - raise exception.InvalidParameterValue( - _('Must specify both from and to ports with TCP/UDP.')) - if from_port > to_port: - raise exception.InvalidParameterValue( - _('Invalid TCP/UDP port range.')) - if from_port < 0 or from_port > 65535: - raise exception.InvalidParameterValue( - _('TCP/UDP from port is out of range.')) - if to_port < 0 or to_port > 65535: - raise exception.InvalidParameterValue( - _('TCP/UDP to port is out of range.')) - elif protocol in ['icmp', 1]: - if from_port < -1 or from_port > 255: - raise exception.InvalidParameterValue( - _('ICMP type is out of range.')) - if to_port < -1 or to_port > 255: - raise exception.InvalidParameterValue( - _('ICMP code is out of range.')) - - -def _build_rules(context, group_id, group_name, ip_permissions, direction): - if group_name is None and group_id is None: - raise exception.MissingParameter(param='group id or name') - if ip_permissions is None: - raise exception.MissingParameter(param='source group or cidr') - os_security_group_id = security_group_engine.get_group_os_id(context, - group_id, - group_name) - os_security_group_rule_bodies = [] - if ip_permissions is None: - ip_permissions = [] - for rule in ip_permissions: - os_security_group_rule_body = ( - {'security_group_id': os_security_group_id, - 'direction': direction, - 'ethertype': 'IPv4'}) - protocol = rule.get('ip_protocol', -1) - from_port = rule.get('from_port', -1) - to_port = rule.get('to_port', -1) - _validate_parameters(protocol, from_port, to_port) - if protocol != -1: - os_security_group_rule_body['protocol'] = rule['ip_protocol'] - if from_port != -1: - os_security_group_rule_body['port_range_min'] = rule['from_port'] - if to_port != -1: - 
os_security_group_rule_body['port_range_max'] = rule['to_port'] - # NOTE(Dmitry_Eremeev): Neutron behaviour changed. - # If rule with full port range is created (1 - 65535), then Neutron - # creates rule without ports specified. - # If a rule with full port range must be deleted, then Neutron cannot - # find a rule with this range in order to delete it, but it can find - # a rule which has not ports in its properties. - if ((from_port == 1) and (to_port in [255, 65535])): - for item in ['port_range_min', 'port_range_max']: - del os_security_group_rule_body[item] - # TODO(Alex) AWS protocol claims support of multiple groups and cidrs, - # however, neutron doesn't support it at the moment. - # It's possible in the future to convert list values incoming from - # REST API into several neutron rules and squeeze them back into one - # for describing. - # For now only 1 value is supported for either. - if rule.get('groups'): - os_security_group_rule_body['remote_group_id'] = ( - security_group_engine.get_group_os_id( - context, - rule['groups'][0].get('group_id'), - rule['groups'][0].get('group_name'))) - elif rule.get('ip_ranges'): - os_security_group_rule_body['remote_ip_prefix'] = ( - rule['ip_ranges'][0]['cidr_ip']) - validator.validate_cidr_with_ipv6( - os_security_group_rule_body['remote_ip_prefix'], 'cidr_ip') - else: - raise exception.MissingParameter(param='source group or cidr') - os_security_group_rule_bodies.append(os_security_group_rule_body) - return os_security_group_rule_bodies - - -def revoke_security_group_ingress(context, group_id=None, - group_name=None, ip_permissions=None): - return _revoke_security_group(context, group_id, group_name, - ip_permissions, 'ingress') - - -def revoke_security_group_egress(context, group_id, ip_permissions=None): - security_group = ec2utils.get_db_item(context, group_id) - if not security_group.get('vpc_id'): - raise exception.InvalidParameterValue(message=_('Only Amazon VPC ' - 'security groups may be used with this 
operation.')) - return _revoke_security_group(context, group_id, None, - ip_permissions, 'egress') - - -def _are_identical_rules(rule1, rule2): - - def significant_values(rule): - dict = {} - for key, value in rule.items(): - if (value is not None and value != -1 and - value != '0.0.0.0/0' and - key not in ['id', 'tenant_id', 'security_group_id', 'tags', - 'description', 'revision', 'revision_number', - 'created_at', 'updated_at', 'project_id']): - dict[key] = str(value) - return dict - - r1 = significant_values(rule1) - r2 = significant_values(rule2) - return r1 == r2 - - -def _revoke_security_group(context, group_id, group_name, ip_permissions, - direction): - rules_bodies = _build_rules(context, group_id, group_name, - ip_permissions, direction) - if not rules_bodies: - return True - os_rules = security_group_engine.get_os_group_rules( - context, rules_bodies[0]['security_group_id']) - - os_rules_to_delete = [] - for rule_body in rules_bodies: - for os_rule in os_rules: - if _are_identical_rules(rule_body, os_rule): - os_rules_to_delete.append(os_rule['id']) - - if len(os_rules_to_delete) != len(rules_bodies): - security_group = ec2utils.get_db_item(context, group_id) - if security_group.get('vpc_id'): - raise exception.InvalidPermissionNotFound() - return True - for os_rule_id in os_rules_to_delete: - security_group_engine.delete_os_group_rule(context, os_rule_id) - return True - - -def _translate_group_name(context, os_group, db_groups): - # NOTE(Alex): This function translates VPC default group names - # from vpc id 'vpc-xxxxxxxx' format to 'default'. It's supposed - # to be called right after getting security groups from OpenStack - # in order to avoid problems with incoming 'default' name value - # in all of the subsequent handling (filtering, using in parameters...) 
- if os_group['name'].startswith('vpc-') and db_groups: - db_group = next((g for g in db_groups - if g['os_id'] == os_group['id']), None) - if db_group and db_group.get('vpc_id'): - return DEFAULT_GROUP_NAME - return os_group['name'] - - -def _format_security_groups_ids_names(context): - neutron = clients.neutron(context) - os_security_groups = neutron.list_security_groups( - tenant_id=context.project_id)['security_groups'] - security_groups = db_api.get_items(context, 'sg') - ec2_security_groups = {} - for os_security_group in os_security_groups: - security_group = next((g for g in security_groups - if g['os_id'] == os_security_group['id']), None) - if security_group is None: - continue - ec2_security_groups[os_security_group['id']] = ( - {'groupId': security_group['id'], - 'groupName': _translate_group_name(context, - os_security_group, - security_groups)}) - return ec2_security_groups - - -def _format_security_group(security_group, os_security_group, - security_groups, os_security_groups): - ec2_security_group = {} - ec2_security_group['groupId'] = security_group['id'] - if security_group.get('vpc_id'): - ec2_security_group['vpcId'] = security_group['vpc_id'] - ec2_security_group['ownerId'] = os_security_group['tenant_id'] - ec2_security_group['groupName'] = os_security_group['name'] - ec2_security_group['groupDescription'] = os_security_group['description'] - ingress_permissions = [] - egress_permissions = [] - for os_rule in os_security_group.get('security_group_rules', []): - # NOTE(Alex) We're skipping IPv6 rules because AWS doesn't support - # them. - if os_rule.get('ethertype', 'IPv4') == 'IPv6': - continue - # NOTE(Dmitry_Eremeev): Neutron behaviour changed. - # If rule with full port range (except icmp protocol) is created - # (1 - 65535), then Neutron creates rule without ports specified. - # Ports passed for rule creation don't match ports in created rule. 
- # That's why default values were changed to match full port - # range (1 - 65535) - if os_rule.get('protocol') in ["icmp", 1]: - min_port = max_port = -1 - else: - min_port = 1 - max_port = 65535 - ec2_rule = {'ipProtocol': -1 if os_rule['protocol'] is None - else os_rule['protocol'], - 'fromPort': min_port if os_rule['port_range_min'] is None - else os_rule['port_range_min'], - 'toPort': max_port if os_rule['port_range_max'] is None - else os_rule['port_range_max']} - remote_group_id = os_rule['remote_group_id'] - if remote_group_id is not None: - ec2_remote_group = {} - db_remote_group = next((g for g in security_groups - if g['os_id'] == remote_group_id), None) - if db_remote_group is not None: - ec2_remote_group['groupId'] = db_remote_group['id'] - else: - # TODO(Alex) Log absence of remote_group - pass - os_remote_group = next((g for g in os_security_groups - if g['id'] == remote_group_id), None) - if os_remote_group is not None: - ec2_remote_group['groupName'] = os_remote_group['name'] - ec2_remote_group['userId'] = os_remote_group['tenant_id'] - else: - # TODO(Alex) Log absence of remote_group - pass - ec2_rule['groups'] = [ec2_remote_group] - elif os_rule['remote_ip_prefix'] is not None: - ec2_rule['ipRanges'] = [{'cidrIp': os_rule['remote_ip_prefix']}] - if os_rule.get('direction') == 'egress': - egress_permissions.append(ec2_rule) - else: - if security_group is None and os_rule['protocol'] is None: - for protocol, min_port, max_port in (('icmp', -1, -1), - ('tcp', 1, 65535), - ('udp', 1, 65535)): - ec2_rule['ipProtocol'] = protocol - ec2_rule['fromPort'] = min_port - ec2_rule['toPort'] = max_port - ingress_permissions.append(copy.deepcopy(ec2_rule)) - else: - ingress_permissions.append(ec2_rule) - - ec2_security_group['ipPermissions'] = ingress_permissions - ec2_security_group['ipPermissionsEgress'] = egress_permissions - return ec2_security_group - - -class SecurityGroupEngineNeutron(object): - - def delete_group(self, context, group_name=None, 
group_id=None, - delete_default=False): - neutron = clients.neutron(context) - if group_name: - sg = describe_security_groups( - context, - group_name=[group_name])['securityGroupInfo'][0] - group_id = sg['groupId'] - group_name = None - - security_group = ec2utils.get_db_item(context, group_id) - try: - if not delete_default: - os_security_group = neutron.show_security_group( - security_group['os_id']) - if (os_security_group and - os_security_group['security_group']['name'] == - security_group['vpc_id']): - raise exception.CannotDelete() - neutron.delete_security_group(security_group['os_id']) - except neutron_exception.Conflict as ex: - # TODO(Alex): Instance ID is unknown here, report exception message - # in its place - looks readable. - raise exception.DependencyViolation( - obj1_id=group_id, - obj2_id=ex.message) - except neutron_exception.NeutronClientException as ex: - # TODO(Alex): do log error - # TODO(Alex): adjust caught exception classes to catch: - # the port doesn't exist - pass - db_api.delete_item(context, group_id) - - def get_os_groups(self, context): - neutron = clients.neutron(context) - return neutron.list_security_groups( - tenant_id=context.project_id)['security_groups'] - - def authorize_security_group(self, context, rule_body): - neutron = clients.neutron(context) - try: - os_security_group_rule = neutron.create_security_group_rule( - {'security_group_rule': rule_body})['security_group_rule'] - except neutron_exception.OverQuotaClient: - raise exception.RulesPerSecurityGroupLimitExceeded() - except neutron_exception.Conflict as ex: - raise exception.InvalidPermissionDuplicate() - - def get_os_group_rules(self, context, os_id): - neutron = clients.neutron(context) - os_security_group = ( - neutron.show_security_group(os_id)['security_group']) - return os_security_group.get('security_group_rules') - - def delete_os_group_rule(self, context, os_id): - neutron = clients.neutron(context) - neutron.delete_security_group_rule(os_id) - - def 
get_group_os_id(self, context, group_id, group_name): - if group_name and not group_id: - os_group = self.get_os_group_by_name(context, group_name) - return str(os_group['id']) - return ec2utils.get_db_item(context, group_id, 'sg')['os_id'] - - def get_os_group_by_name(self, context, group_name, - os_security_groups=None): - if os_security_groups is None: - neutron = clients.neutron(context) - os_security_groups = ( - neutron.list_security_groups()['security_groups']) - os_group = next((g for g in os_security_groups - if g['name'] == group_name), None) - if os_group is None: - raise exception.InvalidGroupNotFound(id=group_name) - return os_group - - -security_group_engine = get_security_group_engine() diff --git a/ec2api/api/snapshot.py b/ec2api/api/snapshot.py deleted file mode 100644 index 7ab23162..00000000 --- a/ec2api/api/snapshot.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
from cinderclient import exceptions as cinder_exception

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


"""Snapshot related API implementation
"""


Validator = common.Validator


def create_snapshot(context, volume_id, description=None):
    """Create a snapshot of the given EC2 volume.

    :param context: request context with project credentials
    :param volume_id: EC2 id ('vol-...') of the volume to snapshot
    :param description: optional snapshot description
    :returns: formatted snapshot dict (see _format_snapshot)
    :raises exception.IncorrectState: if the backing cinder volume is not
        in a status where snapshotting is allowed
    """
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    # NOTE(ft): Easy fix to allow snapshot creation in statuses other than
    # AVAILABLE without cinder modifications. Potential race condition
    # though. Seems arguably non-fatal.
    if os_volume.status not in ['available', 'in-use',
                                'attaching', 'detaching']:
        msg = (_("'%s' is not in a state where snapshots are allowed.") %
               volume_id)
        raise exception.IncorrectState(reason=msg)
    with common.OnCrashCleaner() as cleaner:
        # Second argument forces snapshotting even attached ('in-use')
        # volumes.
        os_snapshot = cinder.volume_snapshots.create(os_volume.id, True)
        cleaner.addCleanup(os_snapshot.delete)
        snapshot = db_api.add_item(context, 'snap', {'os_id': os_snapshot.id})
        cleaner.addCleanup(db_api.delete_item, context, snapshot['id'])
        os_snapshot.update(display_name=snapshot['id'],
                           display_description=description)
        # NOTE(andrey-mp): to re-read description in version dependent format
        os_snapshot.get()

    return _format_snapshot(context, snapshot, os_snapshot,
                            volume_id=volume_id)


def delete_snapshot(context, snapshot_id):
    """Delete the cinder snapshot backing the given EC2 snapshot.

    A missing cinder snapshot is ignored (already gone).
    """
    snapshot = ec2utils.get_db_item(context, snapshot_id)
    cinder = clients.cinder(context)
    try:
        cinder.volume_snapshots.delete(snapshot['os_id'])
    except cinder_exception.NotFound:
        pass
    # NOTE(andrey-mp) Don't delete item from DB until it disappears from Cloud
    # It will be deleted by describer in the future
    return True


class SnapshotDescriber(common.TaggableItemsDescriber):
    """Describer pairing ec2api DB snapshot items with cinder snapshots."""

    KIND = 'snap'
    SORT_KEY = 'snapshotId'
    FILTER_MAP = {'description': 'description',
                  'owner-id': 'ownerId',
                  'progress': 'progress',
                  'snapshot-id': 'snapshotId',
                  'start-time': 'startTime',
                  'status': 'status',
                  'volume-id': 'volumeId',
                  'volume-size': 'volumeSize'}

    def format(self, snapshot, os_snapshot):
        return _format_snapshot(self.context, snapshot, os_snapshot,
                                self.volumes)

    def get_db_items(self):
        # Pre-index volumes by OpenStack id so _format_snapshot can resolve
        # volumeId without an extra DB round trip per snapshot.
        self.volumes = {vol['os_id']: vol
                        for vol in db_api.get_items(self.context, 'vol')}
        return super(SnapshotDescriber, self).get_db_items()

    def get_os_items(self):
        return clients.cinder(self.context).volume_snapshots.list()

    def get_name(self, os_item):
        return ''


def describe_snapshots(context, snapshot_id=None, owner=None,
                       restorable_by=None, filter=None,
                       max_results=None, next_token=None):
    """Describe snapshots, optionally filtered and paginated.

    ``owner`` and ``restorable_by`` are accepted for EC2 API compatibility
    but are not used by this implementation.

    :raises exception.InvalidParameterCombination: if both snapshot_id and
        max_results are supplied
    """
    if snapshot_id and max_results:
        msg = _('The parameter snapshotSet cannot be used with the parameter '
                'maxResults')
        raise exception.InvalidParameterCombination(msg)

    snapshot_describer = SnapshotDescriber()
    formatted_snapshots = snapshot_describer.describe(
        context, ids=snapshot_id, filter=filter,
        max_results=max_results, next_token=next_token)
    result = {'snapshotSet': formatted_snapshots}
    if snapshot_describer.next_token:
        result['nextToken'] = snapshot_describer.next_token
    return result


def _format_snapshot(context, snapshot, os_snapshot, volumes=None,
                     volume_id=None):
    """Build the EC2 snapshot response dict, or None for deleted snapshots.

    :param volumes: optional {os_volume_id: db_volume} cache used to resolve
        volumeId; defaults to an empty per-call dict
    """
    # NOTE(mikal): this is just a set of strings in cinder. If they
    # implement an enum, then we should move this code to use it. The
    # valid ec2 statuses are "pending", "completed", and "error".
    status_map = {'new': 'pending',
                  'creating': 'pending',
                  'available': 'completed',
                  'active': 'completed',
                  'deleting': 'pending',
                  'deleted': None,
                  'error': 'error'}

    # Fixed: 'volumes' previously used a mutable default argument ({}).
    # The dict is passed to ec2utils.get_db_item_by_os_id; a shared default
    # risks leaking cached entries between unrelated calls.
    # NOTE(review): whether get_db_item_by_os_id mutates this dict isn't
    # visible here — the None default is safe either way.
    if volumes is None:
        volumes = {}

    mapped_status = status_map.get(os_snapshot.status, os_snapshot.status)
    if not mapped_status:
        return None

    if not volume_id and os_snapshot.volume_id:
        volume = ec2utils.get_db_item_by_os_id(
            context, 'vol', os_snapshot.volume_id, volumes)
        volume_id = volume['id']

    # NOTE(andrey-mp): ownerId and progress are empty in just created snapshot
    ownerId = os_snapshot.project_id
    if not ownerId:
        ownerId = context.project_id
    progress = os_snapshot.progress
    if not progress:
        progress = '0%'
    description = (getattr(os_snapshot, 'description', None) or
                   getattr(os_snapshot, 'display_description', None))
    return {'snapshotId': snapshot['id'],
            'volumeId': volume_id,
            'status': mapped_status,
            'startTime': os_snapshot.created_at,
            'progress': progress,
            'ownerId': ownerId,
            'volumeSize': os_snapshot.size,
            'description': description}


# ==== ec2api/api/subnet.py (next deleted file in the patch) ====
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netaddr
from neutronclient.common import exceptions as neutron_exception
from oslo_config import cfg
from oslo_log import log as logging

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.api import network_interface as network_interface_api
from ec2api.api import route_table as route_table_api
from ec2api.api import vpn_gateway as vpn_gateway_api
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


"""Subnet related API implementation
"""


Validator = common.Validator


def create_subnet(context, vpc_id, cidr_block,
                  availability_zone=None):
    """Create a subnet inside the given VPC.

    Creates a neutron network/subnet pair, attaches the subnet to the VPC
    router, registers the subnet in the ec2api DB and starts VPN in it if
    needed.  Every step registers an undo action with OnCrashCleaner so a
    failure rolls back the partial state.

    :param context: request context
    :param vpc_id: EC2 id of the enclosing VPC
    :param cidr_block: CIDR of the new subnet; must lie within the VPC CIDR
    :param availability_zone: accepted for EC2 API compatibility; not used
    :returns: {'subnet': <formatted subnet dict>}
    :raises exception.InvalidSubnetRange: if cidr_block is outside the VPC
    :raises exception.SubnetLimitExceeded: on neutron quota exhaustion
    :raises exception.InvalidSubnetConflict: if neutron rejects the router
        attachment (e.g. an overlapping subnet is already attached)
    """
    vpc = ec2utils.get_db_item(context, vpc_id)
    vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
    subnet_ipnet = netaddr.IPNetwork(cidr_block)
    if subnet_ipnet not in vpc_ipnet:
        raise exception.InvalidSubnetRange(cidr_block=cidr_block)

    # Host routes and the gateway IP are derived from the VPC's main
    # route table.
    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    (host_routes,
     gateway_ip) = route_table_api._get_subnet_host_routes_and_gateway_ip(
        context, main_route_table, cidr_block)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        # NOTE(andrey-mp): set fake name to filter networks in instance api
        os_network_body = {'network': {'name': 'subnet-0'}}
        try:
            os_network = neutron.create_network(os_network_body)['network']
            cleaner.addCleanup(neutron.delete_network, os_network['id'])
            # NOTE(Alex): AWS takes 4 first addresses (.1 - .4) but for
            # OpenStack we decided not to support this as compatibility.
            os_subnet_body = {'subnet': {'network_id': os_network['id'],
                                         'ip_version': '4',
                                         'cidr': cidr_block,
                                         'host_routes': host_routes}}
            os_subnet = neutron.create_subnet(os_subnet_body)['subnet']
            cleaner.addCleanup(neutron.delete_subnet, os_subnet['id'])
        except neutron_exception.OverQuotaClient:
            raise exception.SubnetLimitExceeded()
        try:
            neutron.add_interface_router(vpc['os_id'],
                                         {'subnet_id': os_subnet['id']})
        except neutron_exception.BadRequest:
            raise exception.InvalidSubnetConflict(cidr_block=cidr_block)
        cleaner.addCleanup(neutron.remove_interface_router,
                           vpc['os_id'], {'subnet_id': os_subnet['id']})
        subnet = db_api.add_item(context, 'subnet',
                                 {'os_id': os_subnet['id'],
                                  'vpc_id': vpc['id']})
        cleaner.addCleanup(db_api.delete_item, context, subnet['id'])
        vpn_gateway_api._start_vpn_in_subnet(context, neutron, cleaner,
                                             subnet, vpc, main_route_table)
        # Replace the fake 'subnet-0' name with the real EC2 id now that it
        # is known.
        neutron.update_network(os_network['id'],
                               {'network': {'name': subnet['id']}})
        # NOTE(ft): In some cases we need gateway_ip to be None (see
        # _get_subnet_host_routes_and_gateway_ip). It's not set during subnet
        # creation to allow automatic configuration of the default port by
        # which subnet is attached to the router.
        neutron.update_subnet(os_subnet['id'],
                              {'subnet': {'name': subnet['id'],
                                          'gateway_ip': gateway_ip}})
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    return {'subnet': _format_subnet(context, subnet, os_subnet,
                                     os_network, os_ports)}


def delete_subnet(context, subnet_id):
    """Delete a subnet that has no network interfaces left in it.

    Detaches the subnet from the VPC router, stops VPN in it and removes
    the backing neutron network; each step is undoable via OnCrashCleaner.

    :raises exception.DependencyViolation: if any network interface still
        lives in the subnet
    """
    subnet = ec2utils.get_db_item(context, subnet_id)
    vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
    network_interfaces = network_interface_api.describe_network_interfaces(
        context,
        filter=[{'name': 'subnet-id',
                 'value': [subnet_id]}])['networkInterfaceSet']
    if network_interfaces:
        msg = _("The subnet '%(subnet_id)s' has dependencies and "
                "cannot be deleted.") % {'subnet_id': subnet_id}
        # NOTE(review): DependencyViolation is raised here with a positional
        # message, while other call sites pass obj1_id/obj2_id kwargs —
        # confirm the exception class accepts a plain message.
        raise exception.DependencyViolation(msg)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, subnet['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'subnet', subnet)
        vpn_gateway_api._stop_vpn_in_subnet(context, neutron, cleaner, subnet)
        try:
            neutron.remove_interface_router(vpc['os_id'],
                                            {'subnet_id': subnet['os_id']})
        except neutron_exception.NotFound:
            # Already detached — nothing to undo for this step.
            pass
        cleaner.addCleanup(neutron.add_interface_router,
                           vpc['os_id'],
                           {'subnet_id': subnet['os_id']})
        try:
            os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
        except neutron_exception.NotFound:
            pass
        else:
            try:
                neutron.delete_network(os_subnet['network_id'])
            except neutron_exception.NetworkInUseClient as ex:
                # Best effort: log and keep going; the DB item is already
                # deleted above.
                LOG.warning('Failed to delete network %(os_id)s during '
                            'deleting Subnet %(id)s. Reason: %(reason)s',
                            {'id': subnet['id'],
                             'os_id': os_subnet['network_id'],
                             'reason': ex.message})

    return True


class SubnetDescriber(common.TaggableItemsDescriber):
    """Describer pairing ec2api DB subnet items with neutron subnets."""

    KIND = 'subnet'
    FILTER_MAP = {'available-ip-address-count': 'availableIpAddressCount',
                  'cidr': 'cidrBlock',
                  'cidrBlock': 'cidrBlock',
                  'cidr-block': 'cidrBlock',
                  'subnet-id': 'subnetId',
                  'state': 'state',
                  'vpc-id': 'vpcId'}

    def format(self, subnet, os_subnet):
        if not subnet:
            return None
        os_network = next((n for n in self.os_networks
                           if n['id'] == os_subnet['network_id']),
                          None)
        if not os_network:
            # The backing network is gone — drop the stale DB item.
            self.delete_obsolete_item(subnet)
            return None
        return _format_subnet(self.context, subnet, os_subnet, os_network,
                              self.os_ports)

    def get_name(self, os_item):
        return ''

    def get_os_items(self):
        # Networks and ports are fetched once and cached for format().
        neutron = clients.neutron(self.context)
        self.os_networks = neutron.list_networks(
            tenant_id=self.context.project_id)['networks']
        self.os_ports = neutron.list_ports(
            tenant_id=self.context.project_id)['ports']
        return neutron.list_subnets(
            tenant_id=self.context.project_id)['subnets']


def describe_subnets(context, subnet_id=None, filter=None):
    """Describe subnets, creating the default VPC first if needed."""
    ec2utils.check_and_create_default_vpc(context)
    formatted_subnets = SubnetDescriber().describe(context, ids=subnet_id,
                                                   filter=filter)
    return {'subnetSet': formatted_subnets}


def _format_subnet(context, subnet, os_subnet, os_network, os_ports):
    """Build the EC2 subnet response dict.

    availableIpAddressCount is the CIDR host count minus network/broadcast
    addresses, minus every port with a fixed IP in the subnet, minus one
    reserved slot unless a service (DHCP/distributed) port already occupies
    it.
    """
    status_map = {'ACTIVE': 'available',
                  'BUILD': 'pending',
                  'DOWN': 'available',
                  'ERROR': 'available'}
    cidr_range = int(os_subnet['cidr'].split('/')[1])
    # NOTE(Alex) First and last IP addresses are system ones.
    ip_count = pow(2, 32 - cidr_range) - 2
    # TODO(Alex): Probably performance-killer. Will have to optimize.
    service_ports = ['network:dhcp', 'network:distributed']
    service_port_accounted = False
    for port in os_ports:
        for fixed_ip in port.get('fixed_ips', []):
            if fixed_ip['subnet_id'] == os_subnet['id']:
                ip_count -= 1
                if port['device_owner'] in service_ports:
                    service_port_accounted = True
    if not service_port_accounted:
        ip_count -= 1
    return {
        'subnetId': subnet['id'],
        'state': status_map.get(os_network['status'], 'available'),
        'vpcId': subnet['vpc_id'],
        'cidrBlock': os_subnet['cidr'],
        'defaultForAz': 'false',
        'mapPublicIpOnLaunch': 'false',
        'availableIpAddressCount': ip_count
    }


# ==== ec2api/api/tag.py (next deleted file in the patch) ====
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


"""Tag related API implementation
"""


Validator = common.Validator


# Maps an EC2 id prefix (e.g. 'vol' in 'vol-...') to the resource type
# string reported by DescribeTags.
RESOURCE_TYPES = {
    'dopt': 'dhcp-options',
    'ami': 'image',
    'aki': 'image',
    'ari': 'image',
    'cgw': 'customer-gateway',
    'i': 'instance',
    'igw': 'internet-gateway',
    'eni': 'network-interface',
    'rtb': 'route-table',
    'snap': 'snapshot',
    'subnet': 'subnet',
    'sg': 'security-group',
    'vgw': 'vpn-gateway',
    'vol': 'volume',
    'vpc': 'vpc',
    'vpn': 'vpn-connection',
}


def _tag_pair_rejection_reason(tag_pair):
    """Return why the given key/value pair is invalid, or None if valid."""
    if not tag_pair.get('key'):
        return _('Not empty key must be present')
    if len(tag_pair['key']) > 127:
        return _('Tag key exceeds the maximum length of 127 characters')
    if tag_pair['key'].startswith('aws:'):
        return _("Tag keys starting with 'aws:' are reserved for "
                 "internal use")
    if 'value' not in tag_pair:
        return _('Value must be present')
    if len(tag_pair['value']) > 255:
        return _('Tag value exceeds the maximum length of 255 '
                 'characters')
    return None


def create_tags(context, resource_id, tag):
    """Attach every given tag to every given resource.

    :param resource_id: list of EC2 resource ids to tag
    :param tag: list of {'key': ..., 'value': ...} dicts
    :returns: True on success
    :raises exception.InvalidParameterValue: for a malformed tag pair
    :raises exception.InvalidID: for an id with an unknown prefix
    """
    for tag_pair in tag:
        reason = _tag_pair_rejection_reason(tag_pair)
        if reason:
            raise exception.InvalidParameterValue(
                parameter='Tag', value=str(tag_pair), reason=reason)

    for item_id in resource_id:
        kind = ec2utils.get_ec2_id_kind(item_id)
        if kind not in RESOURCE_TYPES:
            raise exception.InvalidID(id=item_id)
        # NOTE(ft): check items exist (excluding images because AWS allows to
        # create a tag with any image id)
        if kind not in ('ami', 'ari', 'aki'):
            ec2utils.get_db_item(context, item_id)

    db_api.add_tags(context, [{'item_id': item_id,
                               'key': tag_pair['key'],
                               'value': tag_pair['value']}
                              for item_id in resource_id
                              for tag_pair in tag])
    return True


def delete_tags(context, resource_id, tag=None):
    """Remove the given tags (or all tags) from the given resources."""
    db_api.delete_tags(context, resource_id, tag)
    return True


class TagDescriber(common.NonOpenstackItemsDescriber):
    """Describer over tags stored purely in the ec2api DB."""

    SORT_KEY = 'key'
    FILTER_MAP = {'key': 'key',
                  'tag-key': 'key',
                  'resource-id': 'resourceId',
                  'resource-type': 'resourceType',
                  'value': 'value',
                  'tag-value': 'value'}

    def get_db_items(self):
        return db_api.get_tags(self.context)

    def format(self, item):
        return _format_tag(item)


def describe_tags(context, filter=None, max_results=None, next_token=None):
    """List visible tags with optional filtering and pagination."""
    describer = TagDescriber()
    formatted = describer.describe(
        context, filter=filter, max_results=max_results,
        next_token=next_token)
    response = {'tagSet': formatted}
    if describer.next_token:
        response['nextToken'] = describer.next_token
    return response


def _format_tag(tag):
    """Convert a DB tag record into an EC2 DescribeTags response item."""
    item_id = tag['item_id']
    kind = ec2utils.get_ec2_id_kind(item_id)
    return {
        'resourceType': RESOURCE_TYPES.get(kind, kind),
        'resourceId': item_id,
        'key': tag['key'],
        'value': tag['value'],
    }


# ==== ec2api/api/validator.py (next deleted file in the patch) ====
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

import netaddr
from oslo_log import log as logging

from ec2api import exception
from ec2api.i18n import _


LOG = logging.getLogger(__name__)


def validate_str(val, parameter_name, max_length=None):
    """Validate that val is a str, optionally at most max_length chars.

    :returns: True if valid
    :raises exception.ValidationError: otherwise
    """
    if (isinstance(val, str) and
            (max_length is None or max_length and len(val) <= max_length)):
        return True
    # Fixed: the failure message was hard-coded to claim a 255 character
    # limit even when a different max_length (or a non-string value) was
    # the actual problem.
    if isinstance(val, str) and max_length is not None:
        reason = (_("%(param)s should not be greater "
                    "than %(length)s characters.") %
                  {'param': parameter_name, 'length': max_length})
    else:
        reason = (_("Invalid value for parameter %s, "
                    "a string is expected.") % parameter_name)
    raise exception.ValidationError(reason=reason)


def validate_bool(val, parameter_name):
    """Validate that val is a bool; raise ValidationError otherwise."""
    if isinstance(val, bool):
        return True
    raise exception.ValidationError(
        reason=_("Expected a boolean value for parameter %s") % parameter_name)


def validate_int(val, parameter_name):
    """Validate that val is an int; raise ValidationError otherwise."""
    if isinstance(val, int):
        return True
    raise exception.ValidationError(
        reason=(_("Expected an integer value for parameter %s") %
                parameter_name))


def validate_list(items, parameter_name):
    """Validate that items is a list; raise InvalidParameterValue otherwise."""
    if not isinstance(items, list):
        raise exception.InvalidParameterValue(
            value=items,
            parameter=parameter_name,
            reason='Expected a list here')


def _is_valid_cidr(address):
    """Check if address is valid

    The provided address can be a IPv6 or a IPv4
    CIDR address.
    """
    try:
        # Validate the correct CIDR Address
        netaddr.IPNetwork(address)
    except netaddr.core.AddrFormatError:
        return False
    except UnboundLocalError:
        # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in
        # https://github.com/drkjam/netaddr/issues/2)
        return False

    # Prior validation partially verify /xx part
    # Verify it here
    ip_segment = address.split('/')

    if (len(ip_segment) <= 1 or
            ip_segment[1] == ''):
        return False

    return True


def validate_cidr_with_ipv6(cidr, parameter_name, **kwargs):
    """Validate an IPv4 or IPv6 CIDR block string."""
    invalid_format_exception = exception.InvalidParameterValue(
        value=cidr,
        parameter=parameter_name,
        reason='This is not a valid CIDR block.')
    if not _is_valid_cidr(cidr):
        raise invalid_format_exception
    return True


# Coarse shape check for dotted-quad IPv4 CIDR; octet/prefix ranges are
# verified numerically in validate_cidr below.
_cidr_re = re.compile(r"^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$")


def validate_cidr(cidr, parameter_name):
    """Validate an IPv4 CIDR block (dotted quad + prefix length <= 32)."""
    invalid_format_exception = exception.InvalidParameterValue(
        value=cidr,
        parameter=parameter_name,
        reason='This is not a valid CIDR block.')
    if not _cidr_re.match(cidr):
        raise invalid_format_exception
    address, size = cidr.split("/")
    octets = address.split(".")
    if any(int(octet) > 255 for octet in octets):
        raise invalid_format_exception
    size = int(size)
    if size > 32:
        raise invalid_format_exception
    return True


def _validate_cidr_block(cidr):
    """Return True if cidr is valid and its prefix length is in 16..28."""
    validate_cidr(cidr, 'cidrBlock')
    size = int(cidr.split("/")[-1])
    return size >= 16 and size <= 28


def validate_vpc_cidr(cidr):
    """Validate a VPC CIDR block (prefix length 16..28)."""
    if not _validate_cidr_block(cidr):
        raise exception.InvalidVpcRange(cidr_block=cidr)


def validate_subnet_cidr(cidr):
    """Validate a subnet CIDR block (prefix length 16..28)."""
    if not _validate_cidr_block(cidr):
        raise exception.InvalidSubnetRange(cidr_block=cidr)


# NOTE(Alex) Unfortunately Amazon returns various kinds of error for invalid
# IDs (...ID.Malformed, ...Id.Malformed, ...ID.NotFound, InvalidParameterValue)
# So we decided here to commonize invalid IDs to InvalidParameterValue error.
def validate_ec2_id(val, parameter_name, prefices):
    """Validate an EC2-style id of the form <prefix>-<hex>.

    :param prefices: allowed prefix list, or a falsy value to accept any
        prefix (only the <hex> part is then checked)
    :returns: True if valid
    :raises exception.InvalidParameterValue: otherwise
    """
    try:
        prefix, value = val.rsplit('-', 1)
        int(value, 16)
        if not prefices or prefix in prefices:
            return True
    except Exception:
        # Any parse failure (no dash, non-hex suffix, non-string) falls
        # through to the error below.
        pass

    if not prefices:
        reason = _('Invalid EC2 id was specified.')
    else:
        reason = _('Expected: %(prefix)s-...') % {'prefix': prefices[0]}
    raise exception.InvalidParameterValue(
        value=val, parameter=parameter_name, reason=reason)


def validate_ec2_association_id(id, parameter_name, action):
    """Validate an association id ('eipassoc-...' or 'rtbassoc-...').

    DisassociateAddress expects an address association id; every other
    action expects a route table association id.
    """
    # Fixed: this previously called validate_ec2_id(['eipassoc'])(id, ...),
    # which always raised TypeError (validate_ec2_id takes three arguments
    # and does not return a callable) instead of validating.
    if action == 'DisassociateAddress':
        return validate_ec2_id(id, parameter_name, ['eipassoc'])
    else:
        return validate_ec2_id(id, parameter_name, ['rtbassoc'])


def validate_ipv4(address, parameter_name):
    """Verify that address represents a valid IPv4 address."""
    try:
        if netaddr.valid_ipv4(address):
            return True
    except Exception:
        pass
    raise exception.InvalidParameterValue(
        value=address, parameter=parameter_name,
        reason=_('Not a valid IP address'))


def validate_enum(value, allowed_values, parameter_name, allow_empty=False):
    """Validate that value is one of allowed_values (or None if allowed)."""
    if value is None and allow_empty or value in allowed_values:
        return True
    raise exception.InvalidParameterValue(
        value=value, parameter=parameter_name,
        reason=_('Invalid parameter value specified'))


def validate_filter(filters):
    """Validate describe-call filters: each needs a name and a value list."""
    for query_filter in filters:
        if (not query_filter.get('name') or not query_filter.get('value') or
                not isinstance(query_filter['value'], list)):
            raise exception.InvalidFilter()
    return True


def validate_key_value_dict_list(dict_list, parameter_name):
    """Validate a list of {'key': ..., 'value': ...} dictionaries."""
    for pair in dict_list:
        if not pair.get('key') or pair.get('value') is None:
            raise exception.InvalidParameterValue(
                value=pair, parameter=parameter_name,
                reason=_('Expected list of key value dictionaries'))
    return True


def validate_security_group_str(value, parameter_name, vpc_id=None):
    """Validate a security group name/description string.

    :returns: True if valid
    :raises exception.ValidationError: otherwise
    """
    # NOTE(Alex) Amazon accepts any ASCII for EC2 classic;
    # for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*
    if vpc_id:
        allowed = r'^[a-zA-Z0-9\._\-:/\(\)#,@\[\]\+=&;\{\}!\$\*\ ]+$'
    else:
        allowed = r'^[\x20-\x7E]+$'
    msg = ''
    try:
        val = value.strip()
    except AttributeError:
        # Fixed: 'val' was left unbound on this path, so the checks below
        # raised UnboundLocalError instead of reporting the real problem.
        val = None
        msg = (_("Security group %s is not a string or unicode") %
               parameter_name)
    if not msg:
        if not val:
            msg = _("Security group %s cannot be empty.") % parameter_name
        elif not re.match(allowed, val):
            # Fixed: the message previously interpolated the literal word
            # 'allowed' instead of the permitted-character pattern.
            msg = (_("Specified value for parameter Group%(property)s is "
                     "invalid. Content limited to '%(allowed)s'.") %
                   {'allowed': allowed,
                    'property': parameter_name})
        elif len(val) > 255:
            msg = _("Security group %s should not be greater "
                    "than 255 characters.") % parameter_name
    if msg:
        raise exception.ValidationError(reason=msg)
    return True


def validate_vpn_connection_type(value):
    """Validate the VPN connection type; only 'ipsec.1' is supported."""
    if value != 'ipsec.1':
        # Fixed: the original passed value=type (the builtin) by mistake,
        # reporting "<class 'type'>" instead of the rejected value.
        raise exception.InvalidParameterValue(
            value=value, parameter='type',
            reason=_('Invalid VPN connection type.'))
    return True


# ==== ec2api/api/volume.py (next deleted file in the patch) ====
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- -from cinderclient import exceptions as cinder_exception -from novaclient import exceptions as nova_exception -from oslo_log import log as logging - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api import clients -from ec2api import context as ec2_context -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - - -LOG = logging.getLogger(__name__) - - -"""Volume related API implementation -""" - - -Validator = common.Validator - - -def create_volume(context, availability_zone=None, size=None, - snapshot_id=None, volume_type=None, iops=None, - encrypted=None, kms_key_id=None, client_token=None): - - if client_token: - result = describe_volumes(context, - filter=[{'name': 'client-token', - 'value': [client_token]}]) - if result['volumeSet']: - if len(result['volumeSet']) > 1: - LOG.error('describe_volumes returns %s ' - 'volumes, but 1 is expected.', - len(result['volumeSet'])) - LOG.error('Requested client token: %s', client_token) - LOG.error('Result: %s', result) - return result['volumeSet'][0] - - if snapshot_id is not None: - snapshot = ec2utils.get_db_item(context, snapshot_id) - os_snapshot_id = snapshot['os_id'] - else: - os_snapshot_id = None - - cinder = clients.cinder(context) - with common.OnCrashCleaner() as cleaner: - os_volume = cinder.volumes.create( - size, snapshot_id=os_snapshot_id, volume_type=volume_type, - availability_zone=availability_zone) - cleaner.addCleanup(os_volume.delete) - - volume = db_api.add_item(context, 'vol', {'os_id': os_volume.id}) - cleaner.addCleanup(db_api.delete_item, context, volume['id']) - os_volume.update(display_name=volume['id']) - - return _format_volume(context, volume, os_volume, snapshot_id=snapshot_id) - - -def attach_volume(context, volume_id, instance_id, device): - volume = ec2utils.get_db_item(context, volume_id) - instance = ec2utils.get_db_item(context, instance_id) - - nova = clients.nova(context) - try: - 
nova.volumes.create_server_volume(instance['os_id'], volume['os_id'], - device) - except (nova_exception.Conflict, nova_exception.BadRequest): - # TODO(andrey-mp): raise correct errors for different cases - LOG.exception('Attach has failed.') - raise exception.UnsupportedOperation() - cinder = clients.cinder(context) - os_volume = cinder.volumes.get(volume['os_id']) - attachment = _format_attachment(context, volume, os_volume, - instance_id=instance_id) - # NOTE(andrey-mp): nova sets deleteOnTermination=False for attached volume - attachment['deleteOnTermination'] = False - return attachment - - -def detach_volume(context, volume_id, instance_id=None, device=None, - force=None): - volume = ec2utils.get_db_item(context, volume_id) - - cinder = clients.cinder(context) - os_volume = cinder.volumes.get(volume['os_id']) - os_instance_id = next(iter(os_volume.attachments), {}).get('server_id') - if not os_instance_id: - # TODO(ft): Change the message with the real AWS message - reason = _('Volume %(vol_id)s is not attached to anything') - raise exception.IncorrectState(reason=reason % {'vol_id': volume_id}) - - nova = clients.nova(context) - nova.volumes.delete_server_volume(os_instance_id, os_volume.id) - os_volume.get() - instance_id = next((i['id'] for i in db_api.get_items(context, 'i') - if i['os_id'] == os_instance_id), None) - return _format_attachment(context, volume, os_volume, - instance_id=instance_id) - - -def delete_volume(context, volume_id): - volume = ec2utils.get_db_item(context, volume_id) - cinder = clients.cinder(context) - try: - cinder.volumes.delete(volume['os_id']) - except cinder_exception.BadRequest: - # TODO(andrey-mp): raise correct errors for different cases - raise exception.UnsupportedOperation() - except cinder_exception.NotFound: - pass - # NOTE(andrey-mp) Don't delete item from DB until it disappears from Cloud - # It will be deleted by describer in the future - return True - - -class VolumeDescriber(common.TaggableItemsDescriber): - - 
KIND = 'vol' - SORT_KEY = 'volumeId' - FILTER_MAP = { - 'availability-zone': 'availabilityZone', - 'client-token': 'clientToken', - 'create-time': 'createTime', - 'encrypted': 'encrypted', - 'size': 'size', - 'snapshot-id': 'snapshotId', - 'status': 'status', - 'volume-id': 'volumeId', - 'volume-type': 'volumeType', - 'attachment.delete-on-termination': - ['attachmentSet', 'deleteOnTermination'], - 'attachment.device': ['attachmentSet', 'device'], - 'attachment.instance-id': ['attachmentSet', 'instanceId'], - 'attachment.status': ['attachmentSet', 'status']} - - def format(self, volume, os_volume): - return _format_volume(self.context, volume, os_volume, - self.instances, self.os_instances, - self.snapshots) - - def get_db_items(self): - self.instances = {i['os_id']: i - for i in db_api.get_items(self.context, 'i')} - self.snapshots = {s['os_id']: s - for s in db_api.get_items(self.context, 'snap')} - return super(VolumeDescriber, self).get_db_items() - - def get_os_items(self): - nova = clients.nova(ec2_context.get_os_admin_context()) - os_instances = nova.servers.list( - search_opts={'all_tenants': True, - 'project_id': self.context.project_id}) - self.os_instances = {i.id: i for i in os_instances} - return clients.cinder(self.context).volumes.list() - - def get_name(self, os_item): - return '' - - -def describe_volumes(context, volume_id=None, filter=None, - max_results=None, next_token=None): - if volume_id and max_results: - msg = _('The parameter volumeSet cannot be used with the parameter ' - 'maxResults') - raise exception.InvalidParameterCombination(msg) - - volume_describer = VolumeDescriber() - formatted_volumes = volume_describer.describe( - context, ids=volume_id, filter=filter, - max_results=max_results, next_token=next_token) - result = {'volumeSet': formatted_volumes} - if volume_describer.next_token: - result['nextToken'] = volume_describer.next_token - return result - - -def _format_volume(context, volume, os_volume, instances={}, os_instances={}, 
- snapshots={}, snapshot_id=None): - valid_ec2_api_volume_status_map = { - 'reserved': 'in-use', - 'attaching': 'in-use', - 'detaching': 'in-use'} - - ec2_volume = { - 'volumeId': volume['id'], - 'status': valid_ec2_api_volume_status_map.get(os_volume.status, - os_volume.status), - 'size': os_volume.size, - 'availabilityZone': os_volume.availability_zone, - 'createTime': os_volume.created_at, - 'volumeType': os_volume.volume_type, - 'encrypted': os_volume.encrypted, - } - if ec2_volume['status'] == 'in-use': - ec2_volume['attachmentSet'] = ( - [_format_attachment(context, volume, os_volume, instances, - os_instances)]) - else: - ec2_volume['attachmentSet'] = {} - if snapshot_id is None and os_volume.snapshot_id: - snapshot = ec2utils.get_db_item_by_os_id( - context, 'snap', os_volume.snapshot_id, snapshots) - snapshot_id = snapshot['id'] - ec2_volume['snapshotId'] = snapshot_id - - return ec2_volume - - -def _format_attachment(context, volume, os_volume, instances={}, - os_instances={}, instance_id=None): - os_attachment = next(iter(os_volume.attachments), {}) - os_instance_id = os_attachment.get('server_id') - if not instance_id and os_instance_id: - instance = ec2utils.get_db_item_by_os_id( - context, 'i', os_instance_id, instances) - instance_id = instance['id'] - status = os_volume.status - if status == 'reserved': - status = 'attaching' - ec2_attachment = { - 'device': os_attachment.get('device'), - 'instanceId': instance_id, - 'status': (status - if status in ('attaching', 'detaching') else - 'attached' if os_attachment else 'detached'), - 'volumeId': volume['id']} - if os_instance_id in os_instances: - os_instance = os_instances[os_instance_id] - volumes_attached = getattr(os_instance, - 'os-extended-volumes:volumes_attached', []) - volume_attached = next((va for va in volumes_attached - if va['id'] == volume['os_id']), None) - if volume_attached and 'delete_on_termination' in volume_attached: - ec2_attachment['deleteOnTermination'] = ( - 
volume_attached['delete_on_termination']) - return ec2_attachment diff --git a/ec2api/api/vpc.py b/ec2api/api/vpc.py deleted file mode 100644 index 97ec5c23..00000000 --- a/ec2api/api/vpc.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from neutronclient.common import exceptions as neutron_exception -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api.api import internet_gateway as internet_gateway_api -from ec2api.api import route_table as route_table_api -from ec2api.api import security_group as security_group_api -from ec2api.api import subnet as subnet_api -from ec2api.api import vpn_gateway as vpn_gateway_api -from ec2api import clients -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -synchronized = lockutils.synchronized_with_prefix('ec2api-') - - -"""VPC-object related API implementation -""" - - -Validator = common.Validator - -DEFAULT_VPC_CIDR_BLOCK = '172.31.0.0/16' -DEFAULT_SUBNET_CIDR_BLOCK = '172.31.0.0/20' - - -def create_vpc(context, cidr_block, instance_tenancy='default'): - vpc = _create_vpc(context, cidr_block) - return {'vpc': _format_vpc(vpc)} - - -def delete_vpc(context, vpc_id): - vpc = ec2utils.get_db_item(context, vpc_id) - 
subnets = subnet_api.describe_subnets( - context, - filter=[{'name': 'vpc-id', 'value': [vpc_id]}])['subnetSet'] - internet_gateways = internet_gateway_api.describe_internet_gateways( - context, - filter=[{'name': 'attachment.vpc-id', - 'value': [vpc['id']]}])['internetGatewaySet'] - route_tables = route_table_api.describe_route_tables( - context, - filter=[{'name': 'vpc-id', 'value': [vpc['id']]}])['routeTableSet'] - security_groups = security_group_api.describe_security_groups( - context, - filter=[{'name': 'vpc-id', - 'value': [vpc['id']]}])['securityGroupInfo'] - vpn_gateways = vpn_gateway_api.describe_vpn_gateways( - context, - filter=[{'name': 'attachment.vpc-id', - 'value': [vpc['id']]}])['vpnGatewaySet'] - if (subnets or internet_gateways or len(route_tables) > 1 or - len(security_groups) > 1 or vpn_gateways): - msg = _("The vpc '%(vpc_id)s' has dependencies and " - "cannot be deleted.") - msg = msg % {'vpc_id': vpc['id']} - raise exception.DependencyViolation(msg) - - neutron = clients.neutron(context) - with common.OnCrashCleaner() as cleaner: - db_api.delete_item(context, vpc['id']) - cleaner.addCleanup(db_api.restore_item, context, 'vpc', vpc) - route_table_api._delete_route_table(context, vpc['route_table_id'], - cleaner=cleaner) - if len(security_groups) > 0: - security_group_api.delete_security_group( - context, group_id=security_groups[0]['groupId'], - delete_default=True) - try: - neutron.delete_router(vpc['os_id']) - except neutron_exception.Conflict as ex: - LOG.warning('Failed to delete router %(os_id)s during deleting ' - 'VPC %(id)s. 
Reason: %(reason)s', - {'id': vpc['id'], - 'os_id': vpc['os_id'], - 'reason': ex.message}) - except neutron_exception.NotFound: - pass - - return True - - -class VpcDescriber(common.TaggableItemsDescriber, - common.NonOpenstackItemsDescriber): - - KIND = 'vpc' - FILTER_MAP = {'cidr': 'cidrBlock', - 'dhcp-options-id': 'dhcpOptionsId', - 'is-default': 'isDefault', - 'state': 'state', - 'vpc-id': 'vpcId'} - - def format(self, item=None, os_item=None): - return _format_vpc(item) - - -def describe_vpcs(context, vpc_id=None, filter=None): - _check_and_create_default_vpc(context) - formatted_vpcs = VpcDescriber().describe( - context, ids=vpc_id, filter=filter) - return {'vpcSet': formatted_vpcs} - - -def _create_vpc(context, cidr_block, is_default=False): - neutron = clients.neutron(context) - with common.OnCrashCleaner() as cleaner: - os_router_body = {'router': {}} - try: - os_router = neutron.create_router(os_router_body)['router'] - except neutron_exception.OverQuotaClient: - raise exception.VpcLimitExceeded() - cleaner.addCleanup(neutron.delete_router, os_router['id']) - vpc = db_api.add_item(context, 'vpc', - {'os_id': os_router['id'], - 'cidr_block': cidr_block, - 'is_default': is_default}) - cleaner.addCleanup(db_api.delete_item, context, vpc['id']) - route_table = route_table_api._create_route_table(context, vpc) - cleaner.addCleanup(route_table_api._delete_route_table, - context, route_table['id']) - vpc['route_table_id'] = route_table['id'] - db_api.update_item(context, vpc) - neutron.update_router(os_router['id'], {'router': {'name': vpc['id']}}) - sg_id = security_group_api._create_default_security_group(context, vpc) - cleaner.addCleanup(security_group_api.delete_security_group, context, - group_id=sg_id, delete_default=True) - if is_default: - igw_id = internet_gateway_api.create_internet_gateway( - context)['internetGateway']['internetGatewayId'] - cleaner.addCleanup(internet_gateway_api.delete_internet_gateway, - context, igw_id) - 
internet_gateway_api.attach_internet_gateway(context, igw_id, - vpc['id']) - cleaner.addCleanup(internet_gateway_api.detach_internet_gateway, - context, igw_id, vpc['id']) - subnet = subnet_api.create_subnet( - context, vpc['id'], - DEFAULT_SUBNET_CIDR_BLOCK)['subnet'] - cleaner.addCleanup(subnet_api.delete_subnet, context, - subnet['subnetId']) - route_table_api.create_route(context, route_table['id'], - '0.0.0.0/0', gateway_id=igw_id) - return vpc - - -def _check_and_create_default_vpc(context): - if not CONF.disable_ec2_classic or context.is_os_admin: - return - - lock_name = 'default-vpc-lock-{}-'.format(context.project_id) - - @synchronized(lock_name, external=True) - def _check(): - for vpc in db_api.get_items(context, 'vpc'): - if vpc.get('is_default'): - return vpc - try: - default_vpc = _create_vpc(context, DEFAULT_VPC_CIDR_BLOCK, - is_default=True) - return default_vpc - except Exception: - LOG.exception('Failed to create default vpc') - return None - - return _check() - - -ec2utils.set_check_and_create_default_vpc(_check_and_create_default_vpc) - - -def _format_vpc(vpc): - return {'vpcId': vpc['id'], - 'state': "available", - 'cidrBlock': vpc['cidr_block'], - 'isDefault': vpc.get('is_default', False), - 'dhcpOptionsId': vpc.get('dhcp_options_id', 'default'), - } diff --git a/ec2api/api/vpn_connection.py b/ec2api/api/vpn_connection.py deleted file mode 100644 index 64ff0ae3..00000000 --- a/ec2api/api/vpn_connection.py +++ /dev/null @@ -1,500 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import random -import string - -from lxml import etree -import netaddr -from neutronclient.common import exceptions as neutron_exception -from oslo_log import log as logging - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api import clients -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - - -LOG = logging.getLogger(__name__) - - -"""VPN connections related API implementation -""" - - -Validator = common.Validator - - -SHARED_KEY_CHARS = string.ascii_letters + '_.' + string.digits -AWS_MSS = 1387 -MTU_MSS_DELTA = 40 # 20 byte IP and 20 byte TCP headers - - -def create_vpn_connection(context, customer_gateway_id, vpn_gateway_id, - type, options=None): - if not options or options.get('static_routes_only') is not True: - raise exception.Unsupported('BGP dynamic routing is unsupported') - customer_gateway = ec2utils.get_db_item(context, customer_gateway_id) - vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id) - vpn_connection = next( - (vpn for vpn in db_api.get_items(context, 'vpn') - if vpn['customer_gateway_id'] == customer_gateway_id), - None) - if vpn_connection: - if vpn_connection['vpn_gateway_id'] == vpn_gateway_id: - ec2_vpn_connections = describe_vpn_connections( - context, vpn_connection_id=[vpn_connection['id']]) - return { - 'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]} - else: - raise exception.InvalidCustomerGatewayDuplicateIpAddress() - neutron = clients.neutron(context) - with common.OnCrashCleaner() as cleaner: - os_ikepolicy = {'ike_version': 'v1', - 'auth_algorithm': 'sha1', - 'encryption_algorithm': 'aes-128', - 'pfs': 'group2', - 'phase1_negotiation_mode': 'main', - 'lifetime': {'units': 'seconds', - 'value': 28800}} - os_ikepolicy = neutron.create_ikepolicy( - {'ikepolicy': os_ikepolicy})['ikepolicy'] - 
cleaner.addCleanup(neutron.delete_ikepolicy, os_ikepolicy['id']) - - os_ipsecpolicy = {'transform_protocol': 'esp', - 'auth_algorithm': 'sha1', - 'encryption_algorithm': 'aes-128', - 'pfs': 'group2', - 'encapsulation_mode': 'tunnel', - 'lifetime': {'units': 'seconds', - 'value': 3600}} - os_ipsecpolicy = neutron.create_ipsecpolicy( - {'ipsecpolicy': os_ipsecpolicy})['ipsecpolicy'] - cleaner.addCleanup(neutron.delete_ipsecpolicy, os_ipsecpolicy['id']) - - psk = ''.join(random.choice(SHARED_KEY_CHARS) for _x in range(32)) - vpn_connection = db_api.add_item( - context, 'vpn', - {'customer_gateway_id': customer_gateway['id'], - 'vpn_gateway_id': vpn_gateway['id'], - 'pre_shared_key': psk, - 'os_ikepolicy_id': os_ikepolicy['id'], - 'os_ipsecpolicy_id': os_ipsecpolicy['id'], - 'cidrs': [], - 'os_ipsec_site_connections': {}}) - cleaner.addCleanup(db_api.delete_item, context, vpn_connection['id']) - - neutron.update_ikepolicy( - os_ikepolicy['id'], {'ikepolicy': {'name': vpn_connection['id']}}) - neutron.update_ipsecpolicy( - os_ipsecpolicy['id'], - {'ipsecpolicy': {'name': vpn_connection['id']}}) - - _reset_vpn_connections(context, neutron, cleaner, - vpn_gateway, vpn_connections=[vpn_connection]) - - ec2_vpn_connections = describe_vpn_connections( - context, vpn_connection_id=[vpn_connection['id']]) - return { - 'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]} - - -def create_vpn_connection_route(context, vpn_connection_id, - destination_cidr_block): - vpn_connection = ec2utils.get_db_item(context, vpn_connection_id) - if destination_cidr_block in vpn_connection['cidrs']: - return True - neutron = clients.neutron(context) - vpn_gateway = db_api.get_item_by_id(context, - vpn_connection['vpn_gateway_id']) - with common.OnCrashCleaner() as cleaner: - _add_cidr_to_vpn_connection_item(context, vpn_connection, - destination_cidr_block) - cleaner.addCleanup(_remove_cidr_from_vpn_connection_item, - context, vpn_connection, destination_cidr_block) - - 
_reset_vpn_connections(context, neutron, cleaner, - vpn_gateway, vpn_connections=[vpn_connection]) - - return True - - -def delete_vpn_connection_route(context, vpn_connection_id, - destination_cidr_block): - vpn_connection = ec2utils.get_db_item(context, vpn_connection_id) - if destination_cidr_block not in vpn_connection['cidrs']: - raise exception.InvalidRouteNotFound( - _('The specified route %(destination_cidr_block)s does not exist') - % {'destination_cidr_block': destination_cidr_block}) - neutron = clients.neutron(context) - vpn_gateway = db_api.get_item_by_id(context, - vpn_connection['vpn_gateway_id']) - with common.OnCrashCleaner() as cleaner: - _remove_cidr_from_vpn_connection_item(context, vpn_connection, - destination_cidr_block) - cleaner.addCleanup(_add_cidr_to_vpn_connection_item, - context, vpn_connection, destination_cidr_block) - - _reset_vpn_connections(context, neutron, cleaner, - vpn_gateway, vpn_connections=[vpn_connection]) - - return True - - -def delete_vpn_connection(context, vpn_connection_id): - vpn_connection = ec2utils.get_db_item(context, vpn_connection_id) - with common.OnCrashCleaner() as cleaner: - db_api.delete_item(context, vpn_connection['id']) - cleaner.addCleanup(db_api.restore_item, context, 'vpn', vpn_connection) - neutron = clients.neutron(context) - _stop_vpn_connection(neutron, vpn_connection) - try: - neutron.delete_ipsecpolicy(vpn_connection['os_ipsecpolicy_id']) - except neutron_exception.Conflict as ex: - LOG.warning('Failed to delete ipsecoplicy %(os_id)s during ' - 'deleting VPN connection %(id)s. Reason: %(reason)s', - {'id': vpn_connection['id'], - 'os_id': vpn_connection['os_ipsecpolicy_id'], - 'reason': ex.message}) - except neutron_exception.NotFound: - pass - try: - neutron.delete_ikepolicy(vpn_connection['os_ikepolicy_id']) - except neutron_exception.Conflict as ex: - LOG.warning( - 'Failed to delete ikepolicy %(os_id)s during deleting ' - 'VPN connection %(id)s. 
Reason: %(reason)s', - {'id': vpn_connection['id'], - 'os_id': vpn_connection['os_ikepolicy_id'], - 'reason': ex.message}) - except neutron_exception.NotFound: - pass - return True - - -def describe_vpn_connections(context, vpn_connection_id=None, filter=None): - formatted_vpn_connections = VpnConnectionDescriber().describe( - context, ids=vpn_connection_id, filter=filter) - return {'vpnConnectionSet': formatted_vpn_connections} - - -class VpnConnectionDescriber(common.TaggableItemsDescriber, - common.NonOpenstackItemsDescriber): - - KIND = 'vpn' - FILTER_MAP = {'customer-gateway-configuration': ( - 'customerGatewayConfiguration'), - 'customer-gateway-id': 'customerGatewayId', - 'state': 'state', - 'option.static-routes-only': ('options', 'staticRoutesOnly'), - 'route.destination-cidr-block': ['routes', - 'destinationCidrBlock'], - 'type': 'type', - 'vpn-connection-id': 'vpnConnectionId', - 'vpn-gateway-id': 'vpnGatewayId'} - - def get_db_items(self): - self.customer_gateways = { - cgw['id']: cgw - for cgw in db_api.get_items(self.context, 'cgw')} - neutron = clients.neutron(self.context) - self.os_ikepolicies = { - ike['id']: ike - for ike in neutron.list_ikepolicies( - tenant_id=self.context.project_id)['ikepolicies']} - self.os_ipsecpolicies = { - ipsec['id']: ipsec - for ipsec in neutron.list_ipsecpolicies( - tenant_id=self.context.project_id)['ipsecpolicies']} - self.os_ipsec_site_connections = { - conn['id']: conn - for conn in neutron.list_ipsec_site_connections( - tenant_id=self.context.project_id)['ipsec_site_connections']} - self.external_ips = _get_vpn_gateways_external_ips( - self.context, neutron) - return super(VpnConnectionDescriber, self).get_db_items() - - def format(self, vpn_connection): - return _format_vpn_connection( - vpn_connection, self.customer_gateways, self.os_ikepolicies, - self.os_ipsecpolicies, self.os_ipsec_site_connections, - self.external_ips) - - -def _format_vpn_connection(vpn_connection, customer_gateways, os_ikepolicies, - 
os_ipsecpolicies, os_ipsec_site_connections, - external_ips): - config_dict = _format_customer_config( - vpn_connection, customer_gateways, os_ikepolicies, os_ipsecpolicies, - os_ipsec_site_connections, external_ips) - config = ec2utils.dict_to_xml(config_dict, 'vpn_connection') - config.attrib['id'] = vpn_connection['id'] - config_str = etree.tostring(config, xml_declaration=True, encoding='UTF-8', - pretty_print=True) - return {'vpnConnectionId': vpn_connection['id'], - 'vpnGatewayId': vpn_connection['vpn_gateway_id'], - 'customerGatewayId': vpn_connection['customer_gateway_id'], - 'state': 'available', - 'type': 'ipsec.1', - 'routes': [{'destinationCidrBlock': cidr, - 'state': 'available'} - for cidr in vpn_connection['cidrs']], - 'vgwTelemetry': [], - 'options': {'staticRoutesOnly': True}, - 'customerGatewayConfiguration': config_str} - - -def _format_customer_config(vpn_connection, customer_gateways, os_ikepolicies, - os_ipsecpolicies, os_ipsec_site_connections, - external_ips): - customer_gateway = customer_gateways[vpn_connection['customer_gateway_id']] - os_connections_ids = vpn_connection['os_ipsec_site_connections'].values() - if os_connections_ids: - os_ipsec_site_connection = next( - (os_ipsec_site_connections[conn_id] - for conn_id in os_connections_ids - if os_ipsec_site_connections.get(conn_id)), - None) - else: - os_ipsec_site_connection = None - - # TODO(ft): figure out and add to the output tunnel internal addresses - config_dict = { - 'customer_gateway_id': vpn_connection['customer_gateway_id'], - 'vpn_gateway_id': vpn_connection['vpn_gateway_id'], - 'vpn_connection_type': 'ipsec.1', - 'vpn_connection_attributes': 'NoBGPVPNConnection', - 'ipsec_tunnel': { - 'customer_gateway': { - 'tunnel_outside_address': { - 'ip_address': ( - os_ipsec_site_connection['peer_address'] - if os_ipsec_site_connection else - customer_gateway['ip_address'])}}, - 'vpn_gateway': { - 'tunnel_outside_address': { - 'ip_address': external_ips.get( - 
vpn_connection['vpn_gateway_id'])}}}, - } - os_ikepolicy = os_ikepolicies.get(vpn_connection['os_ikepolicy_id']) - if os_ikepolicy: - config_dict['ipsec_tunnel']['ike'] = { - 'authentication_protocol': os_ikepolicy['auth_algorithm'], - 'encryption_protocol': os_ikepolicy['encryption_algorithm'], - 'lifetime': os_ikepolicy['lifetime']['value'], - 'perfect_forward_secrecy': os_ikepolicy['pfs'], - 'mode': os_ikepolicy['phase1_negotiation_mode'], - 'pre_shared_key': ( - os_ipsec_site_connection['psk'] - if os_ipsec_site_connection else - vpn_connection['pre_shared_key']), - } - os_ipsecpolicy = os_ipsecpolicies.get(vpn_connection['os_ipsecpolicy_id']) - if os_ipsecpolicy: - config_dict['ipsec_tunnel']['ipsec'] = { - 'protocol': os_ipsecpolicy['transform_protocol'], - 'authentication_protocol': os_ipsecpolicy['auth_algorithm'], - 'encryption_protocol': os_ipsecpolicy['encryption_algorithm'], - 'lifetime': os_ipsecpolicy['lifetime']['value'], - 'perfect_forward_secrecy': os_ipsecpolicy['pfs'], - 'mode': os_ipsecpolicy['encapsulation_mode'], - 'tcp_mss_adjustment': ( - os_ipsec_site_connection['mtu'] - MTU_MSS_DELTA - if os_ipsec_site_connection else - AWS_MSS), - } - return config_dict - - -def _stop_vpn_connection(neutron, vpn_connection): - connection_ids = vpn_connection['os_ipsec_site_connections'] - for os_connection_id in connection_ids.values(): - try: - neutron.delete_ipsec_site_connection(os_connection_id) - except neutron_exception.NotFound: - pass - - -def _stop_gateway_vpn_connections(context, neutron, cleaner, vpn_gateway): - def undo_vpn_connection(context, vpn_connection, connections_ids): - vpn_connection['os_ipsec_site_connections'] = connections_ids - db_api.update_item(context, vpn_connection) - - for vpn_connection in db_api.get_items(context, 'vpn'): - if vpn_connection['vpn_gateway_id'] == vpn_gateway['id']: - _stop_vpn_connection(neutron, vpn_connection) - - connection_ids = vpn_connection['os_ipsec_site_connections'] - 
vpn_connection['os_ipsec_site_connections'] = {} - db_api.update_item(context, vpn_connection) - cleaner.addCleanup(undo_vpn_connection, context, vpn_connection, - connection_ids) - - -def _update_vpn_routes(context, neutron, cleaner, route_table, subnets): - vpn_gateway = ec2utils.get_attached_gateway( - context, route_table['vpc_id'], 'vgw') - if not vpn_gateway: - return - _reset_vpn_connections(context, neutron, cleaner, vpn_gateway, - route_tables=[route_table], subnets=subnets) - - -def _reset_vpn_connections(context, neutron, cleaner, vpn_gateway, - subnets=None, route_tables=None, - vpn_connections=None): - if not vpn_gateway['vpc_id']: - return - # TODO(ft): implement search filters in DB api - vpn_connections = (vpn_connections or - [vpn for vpn in db_api.get_items(context, 'vpn') - if vpn['vpn_gateway_id'] == vpn_gateway['id']]) - if not vpn_connections: - return - subnets = (subnets or - [subnet for subnet in db_api.get_items(context, 'subnet') - if subnet['vpc_id'] == vpn_gateway['vpc_id']]) - if not subnets: - return - vpc = db_api.get_item_by_id(context, vpn_gateway['vpc_id']) - customer_gateways = {cgw['id']: cgw - for cgw in db_api.get_items(context, 'cgw')} - route_tables = route_tables or db_api.get_items(context, 'rtb') - route_tables = {rtb['id']: rtb - for rtb in route_tables - if rtb['vpc_id'] == vpc['id']} - route_tables_cidrs = {} - for subnet in subnets: - route_table_id = subnet.get('route_table_id', vpc['route_table_id']) - if route_table_id not in route_tables_cidrs: - route_tables_cidrs[route_table_id] = ( - _get_route_table_vpn_cidrs(route_tables[route_table_id], - vpn_gateway, vpn_connections)) - cidrs = route_tables_cidrs[route_table_id] - for vpn_conn in vpn_connections: - if vpn_conn['id'] in cidrs: - _set_subnet_vpn( - context, neutron, cleaner, subnet, vpn_conn, - customer_gateways[vpn_conn['customer_gateway_id']], - cidrs[vpn_conn['id']]) - else: - _delete_subnet_vpn(context, neutron, cleaner, subnet, vpn_conn) - - -def 
_set_subnet_vpn(context, neutron, cleaner, subnet, vpn_connection, - customer_gateway, cidrs): - subnets_connections = vpn_connection['os_ipsec_site_connections'] - os_connection_id = subnets_connections.get(subnet['id']) - if os_connection_id: - # TODO(ft): restore original peer_cidrs on crash - neutron.update_ipsec_site_connection( - os_connection_id, - {'ipsec_site_connection': {'peer_cidrs': cidrs}}) - else: - os_connection = { - 'vpnservice_id': subnet['os_vpnservice_id'], - 'ikepolicy_id': vpn_connection['os_ikepolicy_id'], - 'ipsecpolicy_id': vpn_connection['os_ipsecpolicy_id'], - 'peer_address': customer_gateway['ip_address'], - 'peer_cidrs': cidrs, - 'psk': vpn_connection['pre_shared_key'], - 'name': '%s/%s' % (vpn_connection['id'], subnet['id']), - 'peer_id': customer_gateway['ip_address'], - 'mtu': AWS_MSS + MTU_MSS_DELTA, - 'initiator': 'response-only', - } - os_connection = (neutron.create_ipsec_site_connection( - {'ipsec_site_connection': os_connection}) - ['ipsec_site_connection']) - cleaner.addCleanup(neutron.delete_ipsec_site_connection, - os_connection['id']) - - _add_subnet_connection_to_vpn_connection_item( - context, vpn_connection, subnet['id'], os_connection['id']) - cleaner.addCleanup(_remove_subnet_connection_from_vpn_connection_item, - context, vpn_connection, subnet['id']) - - -def _delete_subnet_vpn(context, neutron, cleaner, subnet, vpn_connection): - subnets_connections = vpn_connection['os_ipsec_site_connections'] - os_connection_id = subnets_connections.get(subnet['id']) - if not os_connection_id: - return - - _remove_subnet_connection_from_vpn_connection_item( - context, vpn_connection, subnet['id']) - cleaner.addCleanup(_add_subnet_connection_to_vpn_connection_item, - context, vpn_connection, subnet['id'], os_connection_id) - try: - neutron.delete_ipsec_site_connection(os_connection_id) - except neutron_exception.NotFound: - pass - - -def _get_route_table_vpn_cidrs(route_table, vpn_gateway, vpn_connections): - static_cidrs = 
[route['destination_cidr_block'] - for route in route_table['routes'] - if route.get('gateway_id') == vpn_gateway['id']] - is_propagation_enabled = ( - vpn_gateway['id'] in route_table.get('propagating_gateways', [])) - vpn_cidrs = {} - for vpn in vpn_connections: - if is_propagation_enabled: - cidrs = list(set(static_cidrs + vpn['cidrs'])) - else: - cidrs = static_cidrs - if cidrs: - vpn_cidrs[vpn['id']] = cidrs - return vpn_cidrs - - -def _get_vpn_gateways_external_ips(context, neutron): - vpcs = {vpc['id']: vpc - for vpc in db_api.get_items(context, 'vpc')} - external_ips = {} - routers = neutron.list_routers( - tenant_id=context.project_id)['routers'] - for router in routers: - info = router['external_gateway_info'] - if info: - for ip in info['external_fixed_ips']: - if netaddr.valid_ipv4(ip['ip_address']): - external_ips[router['id']] = ip['ip_address'] - return {vgw['id']: external_ips.get(vpcs[vgw['vpc_id']]['os_id']) - for vgw in db_api.get_items(context, 'vgw') - if vgw['vpc_id']} - - -def _add_cidr_to_vpn_connection_item(context, vpn_connection, cidr): - vpn_connection['cidrs'].append(cidr) - db_api.update_item(context, vpn_connection) - - -def _remove_cidr_from_vpn_connection_item(context, vpn_connection, cidr): - vpn_connection['cidrs'].remove(cidr) - db_api.update_item(context, vpn_connection) - - -def _add_subnet_connection_to_vpn_connection_item(context, vpn_connection, - subnet_id, os_connection_id): - vpn_connection['os_ipsec_site_connections'][subnet_id] = os_connection_id - db_api.update_item(context, vpn_connection) - - -def _remove_subnet_connection_from_vpn_connection_item(context, vpn_connection, - subnet_id): - del vpn_connection['os_ipsec_site_connections'][subnet_id] - db_api.update_item(context, vpn_connection) diff --git a/ec2api/api/vpn_gateway.py b/ec2api/api/vpn_gateway.py deleted file mode 100644 index c77fce06..00000000 --- a/ec2api/api/vpn_gateway.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from neutronclient.common import exceptions as neutron_exception -from oslo_log import log as logging - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api.api import vpn_connection as vpn_connection_api -from ec2api import clients -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.i18n import _ - - -LOG = logging.getLogger(__name__) - - -"""VPN gateways related API implementation -""" - - -Validator = common.Validator - - -def create_vpn_gateway(context, type, availability_zone=None): - vpn_gateway = db_api.add_item(context, 'vgw', {}) - return {'vpnGateway': _format_vpn_gateway(vpn_gateway)} - - -def attach_vpn_gateway(context, vpc_id, vpn_gateway_id): - vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id) - vpc = ec2utils.get_db_item(context, vpc_id) - if vpn_gateway['vpc_id'] and vpn_gateway['vpc_id'] != vpc['id']: - raise exception.VpnGatewayAttachmentLimitExceeded() - attached_vgw = ec2utils.get_attached_gateway(context, vpc['id'], 'vgw') - if attached_vgw and attached_vgw['id'] != vpn_gateway['id']: - raise exception.InvalidVpcState(vpc_id=vpc['id'], - vgw_id=attached_vgw['id']) - - subnets = [subnet for subnet in db_api.get_items(context, 'subnet') - if subnet['vpc_id'] == vpc['id']] - if not vpn_gateway['vpc_id']: - external_network_id = None - if not ec2utils.get_attached_gateway(context, vpc['id'], 'igw'): - external_network_id = 
ec2utils.get_os_public_network(context)['id'] - neutron = clients.neutron(context) - - with common.OnCrashCleaner() as cleaner: - _attach_vpn_gateway_item(context, vpn_gateway, vpc['id']) - cleaner.addCleanup(_detach_vpn_gateway_item, context, vpn_gateway) - - if external_network_id: - neutron.add_gateway_router(vpc['os_id'], - {'network_id': external_network_id}) - cleaner.addCleanup(neutron.remove_gateway_router, vpc['os_id']) - - for subnet in subnets: - _create_subnet_vpnservice(context, neutron, cleaner, - subnet, vpc) - vpn_connection_api._reset_vpn_connections( - context, neutron, cleaner, vpn_gateway, subnets=subnets) - - return {'attachment': _format_attachment(vpn_gateway)} - - -def detach_vpn_gateway(context, vpc_id, vpn_gateway_id): - vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id) - if vpn_gateway['vpc_id'] != vpc_id: - raise exception.InvalidVpnGatewayAttachmentNotFound( - vgw_id=vpn_gateway_id, vpc_id=vpc_id) - - vpc = db_api.get_item_by_id(context, vpc_id) - neutron = clients.neutron(context) - remove_os_gateway_router = ( - ec2utils.get_attached_gateway(context, vpc_id, 'igw') is None) - subnets = [subnet for subnet in db_api.get_items(context, 'subnet') - if subnet['vpc_id'] == vpc['id']] - with common.OnCrashCleaner() as cleaner: - _detach_vpn_gateway_item(context, vpn_gateway) - cleaner.addCleanup(_attach_vpn_gateway_item, context, vpn_gateway, - vpc_id) - vpn_connection_api._stop_gateway_vpn_connections( - context, neutron, cleaner, vpn_gateway) - for subnet in subnets: - _delete_subnet_vpnservice(context, neutron, cleaner, subnet) - - if remove_os_gateway_router: - try: - neutron.remove_gateway_router(vpc['os_id']) - except neutron_exception.NotFound: - pass - - return True - - -def delete_vpn_gateway(context, vpn_gateway_id): - vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id) - vpn_connections = db_api.get_items(context, 'vpn') - if vpn_gateway['vpc_id'] or any(vpn['vpn_gateway_id'] == vpn_gateway['id'] - for vpn in 
vpn_connections): - raise exception.IncorrectState(reason=_('The VPN gateway is in use.')) - db_api.delete_item(context, vpn_gateway['id']) - return True - - -def describe_vpn_gateways(context, vpn_gateway_id=None, filter=None): - formatted_vgws = VpnGatewayDescriber().describe( - context, ids=vpn_gateway_id, filter=filter) - return {'vpnGatewaySet': formatted_vgws} - - -class VpnGatewayDescriber(common.TaggableItemsDescriber, - common.NonOpenstackItemsDescriber): - - KIND = 'vgw' - FILTER_MAP = {'attachment.state': ['attachments', 'state'], - 'attachment.vpc-id': ['attachments', 'vpcId'], - 'state': 'state', - 'type': 'type', - 'vpn-gateway-id': 'vpnGatewayId'} - - def format(self, vpn_gateway): - return _format_vpn_gateway(vpn_gateway) - - -def _format_vpn_gateway(vpn_gateway): - ec2_vgw = {'vpnGatewayId': vpn_gateway['id'], - 'state': 'available', - 'type': 'ipsec.1', - 'attachments': []} - if vpn_gateway['vpc_id']: - ec2_vgw['attachments'].append(_format_attachment(vpn_gateway)) - return ec2_vgw - - -def _format_attachment(vpn_gateway): - return {'state': 'attached', - 'vpcId': vpn_gateway['vpc_id']} - - -def _start_vpn_in_subnet(context, neutron, cleaner, subnet, vpc, route_table): - vpn_gateway = ec2utils.get_attached_gateway(context, vpc['id'], 'vgw') - if not vpn_gateway: - return - _create_subnet_vpnservice(context, neutron, cleaner, subnet, vpc) - vpn_connection_api._reset_vpn_connections(context, neutron, cleaner, - vpn_gateway, subnets=[subnet], - route_tables=[route_table]) - - -def _stop_vpn_in_subnet(context, neutron, cleaner, subnet): - os_vpnservice_id = subnet.get('os_vpnservice_id') - if not os_vpnservice_id: - return - for vpn in db_api.get_items(context, 'vpn'): - vpn_connection_api._delete_subnet_vpn(context, neutron, cleaner, - subnet, vpn) - _safe_delete_vpnservice(neutron, os_vpnservice_id, subnet['id']) - - -def _create_subnet_vpnservice(context, neutron, cleaner, subnet, vpc): - os_vpnservice = {'subnet_id': subnet['os_id'], - 
'router_id': vpc['os_id'], - 'name': subnet['id']} - os_vpnservice = neutron.create_vpnservice( - {'vpnservice': os_vpnservice})['vpnservice'] - cleaner.addCleanup(neutron.delete_vpnservice, os_vpnservice['id']) - - _set_vpnservice_in_subnet_item(context, subnet, os_vpnservice['id']) - cleaner.addCleanup(_clear_vpnservice_in_subnet_item, - context, subnet) - - -def _delete_subnet_vpnservice(context, neutron, cleaner, subnet): - os_vpnservice_id = subnet['os_vpnservice_id'] - _clear_vpnservice_in_subnet_item(context, subnet) - cleaner.addCleanup(_set_vpnservice_in_subnet_item, - context, subnet, os_vpnservice_id) - _safe_delete_vpnservice(neutron, os_vpnservice_id, subnet['id']) - - -def _safe_delete_vpnservice(neutron, os_vpnservice_id, subnet_id): - try: - neutron.delete_vpnservice(os_vpnservice_id) - except neutron_exception.NotFound: - pass - except neutron_exception.Conflict as ex: - LOG.warning( - 'Failed to delete vpnservice %(os_id)s for subnet %(id)s. ' - 'Reason: %(reason)s', - {'id': subnet_id, - 'os_id': os_vpnservice_id, - 'reason': ex.message}) - - -def _attach_vpn_gateway_item(context, vpn_gateway, vpc_id): - vpn_gateway['vpc_id'] = vpc_id - db_api.update_item(context, vpn_gateway) - - -def _detach_vpn_gateway_item(context, vpn_gateway): - vpn_gateway['vpc_id'] = None - db_api.update_item(context, vpn_gateway) - - -def _set_vpnservice_in_subnet_item(context, subnet, os_vpnservice_id): - subnet['os_vpnservice_id'] = os_vpnservice_id - db_api.update_item(context, subnet) - - -def _clear_vpnservice_in_subnet_item(context, subnet): - del subnet['os_vpnservice_id'] - db_api.update_item(context, subnet) diff --git a/ec2api/clients.py b/ec2api/clients.py deleted file mode 100644 index 2ca188d4..00000000 --- a/ec2api/clients.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from cinderclient import client as cinderclient -from glanceclient import client as glanceclient -from keystoneauth1 import loading as ks_loading -from keystoneclient import client as keystoneclient -from neutronclient.v2_0 import client as neutronclient -from novaclient import api_versions as nova_api_versions -from novaclient import client as novaclient -from oslo_config import cfg -from oslo_log import log as logging - -logger = logging.getLogger(__name__) - -ec2_opts = [ - cfg.StrOpt('nova_service_type', - default='compute', - help='Service type of Compute API, registered in Keystone ' - 'catalog. Should be v2.1 with microversion support. 
' - 'If it is obsolete v2, a lot of useful EC2 compliant ' - 'instance properties will be unavailable.'), - cfg.StrOpt('cinder_service_type', - default='volumev3', - help='Service type of Volume API, registered in Keystone ' - 'catalog.'), -] - -CONF = cfg.CONF -CONF.register_opts(ec2_opts) - -GROUP_AUTHTOKEN = 'keystone_authtoken' -ks_loading.register_session_conf_options(CONF, GROUP_AUTHTOKEN) -ks_loading.register_auth_conf_options(CONF, GROUP_AUTHTOKEN) - - -# Nova API version with microversions support -REQUIRED_NOVA_API_VERSION = '2.1' -REQUIRED_NOVA_API_VERSION_ID = 'v%s' % REQUIRED_NOVA_API_VERSION -LEGACY_NOVA_API_VERSION = '2' -# Nova API's 2.3 microversion provides additional EC2 compliant instance -# properties -# Nova API's 2.10 microversion provides admin access to users keypairs, -# which allows metadata service to expose openssh part of an instance key -# Nova API's 2.32 microversion allows 'tag' field of bdm v2, which may be -# contained in image bdms, defined by users or autocreated with instance -# snapshot -REQUIRED_NOVA_API_MICROVERSION = '2.32' -_nova_api_version = None - - -def nova(context): - global _nova_api_version - if not _nova_api_version: - _nova_api_version = _get_nova_api_version(context) - clnt = novaclient.Client(_nova_api_version, - session=context.session, - service_type=CONF.nova_service_type) - # NOTE(ft): workaround for LP #1494116 bug - if not hasattr(clnt.client, 'last_request_id'): - setattr(clnt.client, 'last_request_id', None) - return clnt - - -def neutron(context): - return neutronclient.Client(session=context.session, - service_type='network') - - -def glance(context): - return glanceclient.Client(version='2', service_type='image', - session=context.session) - - -def cinder(context): - url = context.session.get_endpoint(service_type=CONF.cinder_service_type) - # TODO(jamielennox): This should be using proper version discovery from - # the cinder service rather than just inspecting the URL for certain string - # values. 
- version = cinderclient.get_volume_api_from_url(url) - return cinderclient.Client(version, session=context.session, - service_type=CONF.cinder_service_type) - - -def keystone(context): - url = context.session.get_endpoint(service_type='identity') - return keystoneclient.Client(auth_url=url, - session=context.session) - - -def _get_nova_api_version(context): - client = novaclient.Client(REQUIRED_NOVA_API_VERSION, - session=context.session, - service_type=CONF.nova_service_type) - - required = nova_api_versions.APIVersion(REQUIRED_NOVA_API_MICROVERSION) - current = client.versions.get_current() - if not current: - logger.warning( - 'Could not check Nova API version because no version ' - 'was found in Nova version list for url %(url)s of service ' - 'type "%(service_type)s". ' - 'Use v%(required_api_version)s Nova API.', - {'url': client.client.get_endpoint(), - 'service_type': CONF.nova_service_type, - 'required_api_version': REQUIRED_NOVA_API_MICROVERSION}) - return REQUIRED_NOVA_API_MICROVERSION - if current.id != REQUIRED_NOVA_API_VERSION_ID: - logger.warning( - 'Specified "%s" Nova service type does not support v2.1 API. ' - 'A lot of useful EC2 compliant instance properties ' - 'will be unavailable.', CONF.nova_service_type) - return LEGACY_NOVA_API_VERSION - if (nova_api_versions.APIVersion(current.version) < required): - logger.warning( - 'Nova support v%(nova_api_version)s, ' - 'but v%(required_api_version)s is required. 
' - 'A lot of useful EC2 compliant instance properties ' - 'will be unavailable.', - {'nova_api_version': current.version, - 'required_api_version': REQUIRED_NOVA_API_MICROVERSION}) - return current.version - logger.info('Provided Nova API version is v%(nova_api_version)s, ' - 'used one is v%(required_api_version)s', - {'nova_api_version': current.version, - 'required_api_version': ( - REQUIRED_NOVA_API_MICROVERSION)}) - return REQUIRED_NOVA_API_MICROVERSION - - -_admin_session = None - - -def get_os_admin_session(): - """Create a context to interact with OpenStack as an administrator.""" - # NOTE(ft): this is a singletone because keystone's session looks thread - # safe for both regular and token renewal requests - global _admin_session - if not _admin_session: - auth_plugin = ks_loading.load_auth_from_conf_options( - CONF, GROUP_AUTHTOKEN) - _admin_session = ks_loading.load_session_from_conf_options( - CONF, GROUP_AUTHTOKEN, auth=auth_plugin) - - return _admin_session - - -def update_request_params_with_ssl(params): - verify = (CONF[GROUP_AUTHTOKEN].cafile or - not CONF[GROUP_AUTHTOKEN].insecure) - if verify is not True: - params['verify'] = verify diff --git a/ec2api/cmd/__init__.py b/ec2api/cmd/__init__.py deleted file mode 100644 index 87fb851d..00000000 --- a/ec2api/cmd/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import eventlet - -eventlet.monkey_patch(os=False) diff --git a/ec2api/cmd/api.py b/ec2api/cmd/api.py deleted file mode 100644 index 4ad067fb..00000000 --- a/ec2api/cmd/api.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -EC2api API Server -""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api import config -from ec2api import service - -CONF = cfg.CONF - - -def main(): - config.parse_args(sys.argv) - logging.setup(CONF, 'ec2api') - - server = service.WSGIService('ec2api', max_url_len=16384) - service.serve(server, workers=server.workers) - service.wait() - - -if __name__ == '__main__': - main() diff --git a/ec2api/cmd/api_metadata.py b/ec2api/cmd/api_metadata.py deleted file mode 100644 index e373c923..00000000 --- a/ec2api/cmd/api_metadata.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" -EC2api API Metadata Server -""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api import config -from ec2api import service - -CONF = cfg.CONF - - -def main(): - config.parse_args(sys.argv) - logging.setup(CONF, "ec2api") - - server = service.WSGIService('metadata') - service.serve(server, workers=server.workers) - service.wait() - - -if __name__ == '__main__': - main() diff --git a/ec2api/cmd/api_s3.py b/ec2api/cmd/api_s3.py deleted file mode 100644 index aacd703a..00000000 --- a/ec2api/cmd/api_s3.py +++ /dev/null @@ -1,39 +0,0 @@ - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Daemon for ec2api objectstore. 
Supports S3 API.""" - -import sys - -from oslo_log import log as logging - -from ec2api import config -from ec2api.s3 import s3server -from ec2api import service - - -def main(): - config.parse_args(sys.argv) - logging.setup(config.CONF, "ec2api") - - server = s3server.get_wsgi_server() - service.serve(server) - service.wait() - - -if __name__ == '__main__': - main() diff --git a/ec2api/cmd/manage.py b/ec2api/cmd/manage.py deleted file mode 100644 index e9d67dcd..00000000 --- a/ec2api/cmd/manage.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" - CLI interface for EC2 API management. -""" - -import sys - -from oslo_config import cfg -from oslo_log import log - -from ec2api import config -from ec2api.db import migration - - -CONF = cfg.CONF - - -def do_db_version(): - """Print database's current migration level.""" - print(migration.db_version()) - - -def do_db_sync(): - """Place a database under migration control and upgrade, - - creating if necessary. 
- """ - migration.db_sync(CONF.command.version) - - -def add_command_parsers(subparsers): - parser = subparsers.add_parser('db_version') - parser.set_defaults(func=do_db_version) - - parser = subparsers.add_parser('db_sync') - parser.set_defaults(func=do_db_sync) - parser.add_argument('version', nargs='?') - parser.add_argument('current_version', nargs='?') - - -command_opt = cfg.SubCommandOpt('command', - title='Commands', - help='Available commands', - handler=add_command_parsers) - - -def main(): - CONF.register_cli_opt(command_opt) - config.parse_args(sys.argv) - log.setup(CONF, "ec2api") - - try: - CONF.command.func() - except Exception as e: - sys.exit("ERROR: %s" % e) diff --git a/ec2api/config.py b/ec2api/config.py deleted file mode 100644 index d9779083..00000000 --- a/ec2api/config.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg -from oslo_db import options -from oslo_log import log - -from ec2api import paths -from ec2api import version - - -CONF = cfg.CONF - -_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ec2api.sqlite') - -_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', - 'sqlalchemy=WARN', 'suds=INFO', - 'iso8601=WARN', - 'requests.packages.urllib3.connectionpool=WARN', - 'urllib3.connectionpool=WARN', 'websocket=WARN', - 'keystonemiddleware=WARN', 'routes.middleware=WARN', - 'stevedore=WARN', 'keystoneclient.auth=WARN'] - - -def parse_args(argv, default_config_files=None): - log.set_defaults(default_log_levels=_DEFAULT_LOG_LEVELS) - log.register_options(CONF) - options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION) - - cfg.CONF(argv[1:], - project='ec2api', - version=version.version_info.version_string(), - default_config_files=default_config_files) diff --git a/ec2api/context.py b/ec2api/context.py deleted file mode 100644 index c91d1b6d..00000000 --- a/ec2api/context.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""RequestContext: context for requests that persist through all of ec2.""" - -from oslo_config import cfg -from oslo_context import context -from oslo_log import log as logging -from oslo_utils import timeutils - -from ec2api import clients -from ec2api import exception - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class RequestContext(context.RequestContext): - """Security context and request information. - - Represents the user taking a given action within the system. - - """ - - def __init__(self, user_id, project_id, request_id=None, - is_admin=None, remote_address=None, - auth_token=None, user_name=None, project_name=None, - overwrite=True, service_catalog=None, api_version=None, - is_os_admin=None, **kwargs): - """Parameters - - :param overwrite: Set to False to ensure that the greenthread local - copy of the index is not overwritten. - - - :param kwargs: Extra arguments that might be present, but we ignore - because they possibly came in from older rpc messages. - """ - user = kwargs.pop('user', None) - tenant = kwargs.pop('tenant', None) - super(RequestContext, self).__init__( - auth_token=auth_token, - user=user_id or user, - project_id=project_id or tenant, - is_admin=is_admin, - request_id=request_id, - resource_uuid=kwargs.pop('resource_uuid', None), - overwrite=overwrite) - # oslo_context's RequestContext.to_dict() generates this field, we can - # safely ignore this as we don't use it. 
- kwargs.pop('user_identity', None) - self.session = kwargs.pop('session', None) - if kwargs: - LOG.warning('Arguments dropped when creating context: %s', - str(kwargs)) - - self.user_id = user_id - self.project_id = project_id - self.remote_address = remote_address - timestamp = timeutils.utcnow() - if isinstance(timestamp, str): - timestamp = timeutils.parse_strtime(timestamp) - self.timestamp = timestamp - - self.service_catalog = service_catalog - if self.service_catalog is None: - # if list is empty or none - self.service_catalog = [] - - self.user_name = user_name - self.project_name = project_name - self.is_admin = is_admin - # TODO(ft): call policy.check_is_admin if is_admin is None - self.is_os_admin = is_os_admin - self.api_version = api_version - - def to_dict(self): - values = super(RequestContext, self).to_dict() - # FIXME(dims): defensive hasattr() checks need to be - # removed once we figure out why we are seeing stack - # traces - values.update({ - 'user_id': getattr(self, 'user_id', None), - 'project_id': getattr(self, 'project_id', None), - 'is_admin': getattr(self, 'is_admin', None), - 'remote_address': getattr(self, 'remote_address', None), - 'timestamp': self.timestamp.strftime( - timeutils.PERFECT_TIME_FORMAT) if hasattr( - self, 'timestamp') else None, - 'request_id': getattr(self, 'request_id', None), - 'quota_class': getattr(self, 'quota_class', None), - 'user_name': getattr(self, 'user_name', None), - 'service_catalog': getattr(self, 'service_catalog', None), - 'project_name': getattr(self, 'project_name', None), - 'is_os_admin': getattr(self, 'is_os_admin', None), - 'api_version': getattr(self, 'api_version', None), - }) - return values - - @classmethod - def from_dict(cls, values): - return cls(**values) - - -def is_user_context(context): - """Indicates if the request context is a normal user.""" - if not context: - return False - if context.is_os_admin: - return False - if not context.user_id or not context.project_id: - return False - 
return True - - -def require_context(ctxt): - """Raise exception.AuthFailure() - - if context is not a user or an admin context. - """ - if not ctxt.is_os_admin and not is_user_context(ctxt): - raise exception.AuthFailure() - - -def get_os_admin_context(): - """Create a context to interact with OpenStack as an administrator.""" - admin_session = clients.get_os_admin_session() - return RequestContext( - None, None, - session=admin_session, - is_os_admin=True, - overwrite=False) diff --git a/ec2api/db/__init__.py b/ec2api/db/__init__.py deleted file mode 100644 index ae15e1b6..00000000 --- a/ec2api/db/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -DB abstraction for EC2api -""" - -from ec2api.db.api import * # noqa: F401 diff --git a/ec2api/db/api.py b/ec2api/db/api.py deleted file mode 100644 index 3dd7f369..00000000 --- a/ec2api/db/api.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Defines interface for DB access. - -Functions in this module are imported into the ec2api.db namespace. Call these -functions from ec2api.db namespace, not the ec2api.db.api namespace. - -**Related Flags** - -:dbackend: string to lookup in the list of LazyPluggable backends. - `sqlalchemy` is the only supported backend right now. - -:connection: string specifying the sqlalchemy connection to use, like: - `sqlite:///var/lib/ec2api/ec2api.sqlite`. - -""" - -from eventlet import tpool -from oslo_config import cfg -from oslo_db import api as db_api -from oslo_log import log as logging - - -tpool_opts = [ - cfg.BoolOpt('use_tpool', - default=False, - deprecated_name='dbapi_use_tpool', - deprecated_group='DEFAULT', - help='Enable the experimental use of thread pooling for ' - 'all DB API calls'), -] - -CONF = cfg.CONF -CONF.register_opts(tpool_opts, 'database') - -_BACKEND_MAPPING = {'sqlalchemy': 'ec2api.db.sqlalchemy.api'} - - -class EC2DBAPI(object): - """ec2's DB API wrapper class. - - This wraps the oslo DB API with an option to be able to use eventlet's - thread pooling. Since the CONF variable may not be loaded at the time - this class is instantiated, we must look at it on the first DB API call. 
- """ - - def __init__(self): - self.__db_api = None - - @property - def _db_api(self): - if not self.__db_api: - ec2_db_api = db_api.DBAPI(CONF.database.backend, - backend_mapping=_BACKEND_MAPPING) - if CONF.database.use_tpool: - self.__db_api = tpool.Proxy(ec2_db_api) - else: - self.__db_api = ec2_db_api - return self.__db_api - - def __getattr__(self, key): - return getattr(self._db_api, key) - - -IMPL = EC2DBAPI() - -LOG = logging.getLogger(__name__) - - -def add_item(context, kind, data): - return IMPL.add_item(context, kind, data) - - -def add_item_id(context, kind, os_id, project_id=None): - return IMPL.add_item_id(context, kind, os_id, project_id) - - -def update_item(context, item): - IMPL.update_item(context, item) - - -def delete_item(context, item_id): - IMPL.delete_item(context, item_id) - - -def restore_item(context, kind, data): - return IMPL.restore_item(context, kind, data) - - -def get_items(context, kind): - return IMPL.get_items(context, kind) - - -def get_item_by_id(context, item_id): - return IMPL.get_item_by_id(context, item_id) - - -def get_items_by_ids(context, item_ids): - return IMPL.get_items_by_ids(context, item_ids) - - -def get_public_items(context, kind, item_ids=None): - return IMPL.get_public_items(context, kind, item_ids) - - -def get_items_ids(context, kind, item_ids=None, item_os_ids=None): - return IMPL.get_items_ids(context, kind, item_ids=item_ids, - item_os_ids=item_os_ids) - - -def add_tags(context, tags): - return IMPL.add_tags(context, tags) - - -def delete_tags(context, item_ids, tag_pairs=None): - return IMPL.delete_tags(context, item_ids, tag_pairs) - - -def get_tags(context, kinds=None, item_ids=None): - return IMPL.get_tags(context, kinds, item_ids) diff --git a/ec2api/db/migration.py b/ec2api/db/migration.py deleted file mode 100644 index 8b6427a1..00000000 --- a/ec2api/db/migration.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Database setup and migration commands.""" - -from oslo_config import cfg - -from ec2api import exception -from ec2api.i18n import _ - -CONF = cfg.CONF - - -class LazyPluggable(object): - """A pluggable backend loaded lazily based on some value.""" - - def __init__(self, pivot, config_group=None, **backends): - self.__backends = backends - self.__pivot = pivot - self.__backend = None - self.__config_group = config_group - - def __get_backend(self): - if not self.__backend: - if self.__config_group is None: - backend_name = CONF[self.__pivot] - else: - backend_name = CONF[self.__config_group][self.__pivot] - if backend_name not in self.__backends: - msg = _('Invalid backend: %s') % backend_name - raise exception.EC2Exception(msg) - - backend = self.__backends[backend_name] - if isinstance(backend, tuple): - name = backend[0] - fromlist = backend[1] - else: - name = backend - fromlist = backend - - self.__backend = __import__(name, None, None, fromlist) - return self.__backend - - def __getattr__(self, key): - backend = self.__get_backend() - return getattr(backend, key) - - -IMPL = LazyPluggable('backend', - config_group='database', - sqlalchemy='ec2api.db.sqlalchemy.migration') - - -def db_sync(version=None): - """Migrate the database to `version` or the most recent version.""" - return IMPL.db_sync(version=version) - - -def db_version(): - """Display the current database version.""" - return IMPL.db_version() - - -def db_initial_version(): - """The starting version for the 
database.""" - return IMPL.db_initial_version() diff --git a/ec2api/db/sqlalchemy/__init__.py b/ec2api/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ec2api/db/sqlalchemy/api.py b/ec2api/db/sqlalchemy/api.py deleted file mode 100644 index dbdf8ba5..00000000 --- a/ec2api/db/sqlalchemy/api.py +++ /dev/null @@ -1,337 +0,0 @@ -# Copyright 2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of SQLAlchemy backend.""" - -import copy -import functools -import random -import sys - -from oslo_config import cfg -from oslo_db import exception as db_exception -from oslo_db.sqlalchemy import session as db_session -from oslo_serialization import jsonutils -from sqlalchemy import and_ -from sqlalchemy import or_ -from sqlalchemy.sql import bindparam - -import ec2api.context -from ec2api.db.sqlalchemy import models -from ec2api import exception - -CONF = cfg.CONF - - -_MASTER_FACADE = None - - -def _create_facade_lazily(): - global _MASTER_FACADE - - if _MASTER_FACADE is None: - # FIXME(priteau): Remove autocommit=True (and ideally use of - # LegacyEngineFacade) asap since it's not compatible with SQLAlchemy - # 2.0. 
- _MASTER_FACADE = db_session.EngineFacade.from_config(CONF, - autocommit=True) - return _MASTER_FACADE - - -def get_engine(): - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(**kwargs): - facade = _create_facade_lazily() - return facade.get_session(**kwargs) - - -def get_backend(): - """The backend is this module itself.""" - return sys.modules[__name__] - - -def require_context(f): - """Decorator to require *any* user or admin context. - - The first argument to the wrapped function must be the context. - """ - - @functools.wraps(f) - def wrapper(*args, **kwargs): - ec2api.context.require_context(args[0]) - return f(*args, **kwargs) - return wrapper - - -def model_query(context, model, *args, **kwargs): - """Query helper that accounts for context's `read_deleted` field. - - :param context: context to query under - :param session: if present, the session to use - """ - session = kwargs.get('session') or get_session() - - return session.query(model, *args) - - -def _new_id(kind): - obj_id = "%(kind)s-%(id)08x" % {"kind": kind, - "id": random.randint(1, 0xffffffff)} - return obj_id - - -@require_context -def add_item(context, kind, data): - item_ref = models.Item() - item_ref.update({ - "project_id": context.project_id, - "id": _new_id(kind), - }) - item_ref.update(_pack_item_data(data)) - try: - item_ref.save() - except db_exception.DBDuplicateEntry as ex: - if (models.ITEMS_OS_ID_INDEX_NAME not in ex.columns and - 'os_id' not in ex.columns): - raise - item_ref = (model_query(context, models.Item). - filter_by(os_id=data["os_id"]). - filter(or_(models.Item.project_id == context.project_id, - models.Item.project_id.is_(None))). - filter(models.Item.id.like('%s-%%' % kind)). 
- one()) - item_data = _unpack_item_data(item_ref) - item_data.update(data) - item_ref.update(_pack_item_data(item_data)) - item_ref.project_id = context.project_id - item_ref.save() - return _unpack_item_data(item_ref) - - -@require_context -def add_item_id(context, kind, os_id, project_id=None): - item_ref = models.Item() - item_ref.update({ - "id": _new_id(kind), - "os_id": os_id, - }) - if project_id: - item_ref.project_id = project_id - try: - item_ref.save() - except db_exception.DBDuplicateEntry as ex: - if (models.ITEMS_OS_ID_INDEX_NAME not in ex.columns and - ex.columns != ['os_id']): - raise - item_ref = (model_query(context, models.Item). - filter_by(os_id=os_id). - one()) - return item_ref.id - - -@require_context -def update_item(context, item): - item_ref = (model_query(context, models.Item). - filter_by(project_id=context.project_id, - id=item['id']). - one()) - if item_ref.os_id and item_ref.os_id != item['os_id']: - raise exception.EC2DBInvalidOsIdUpdate(item_id=item['id'], - old_os_id=item_ref.os_id, - new_os_id=item['os_id']) - item_ref.update(_pack_item_data(item)) - item_ref.save() - return _unpack_item_data(item_ref) - - -@require_context -def delete_item(context, item_id): - session = get_session() - deleted_count = (model_query(context, models.Item, session=session). - filter_by(project_id=context.project_id, - id=item_id). - delete(synchronize_session=False)) - if not deleted_count: - return - try: - (model_query(context, models.Tag, session=session). - filter_by(project_id=context.project_id, - item_id=item_id). 
- delete(synchronize_session=False)) - except Exception: - # NOTE(ft): ignore all exceptions because DB integrity is insignificant - # for tags - pass - - -@require_context -def restore_item(context, kind, data): - try: - item_ref = models.Item() - item_ref.update({ - "project_id": context.project_id, - }) - item_ref.id = data['id'] - item_ref.update(_pack_item_data(data)) - item_ref.save() - return _unpack_item_data(item_ref) - except db_exception.DBDuplicateEntry: - raise exception.EC2DBDuplicateEntry(id=data['id']) - - -@require_context -def get_items(context, kind): - return [_unpack_item_data(item) - for item in (model_query(context, models.Item). - filter_by(project_id=context.project_id). - filter(models.Item.id.like('%s-%%' % kind)). - all())] - - -@require_context -def get_item_by_id(context, item_id): - return (_unpack_item_data(model_query(context, models.Item). - filter_by(project_id=context.project_id, - id=item_id). - first())) - - -@require_context -def get_items_by_ids(context, item_ids): - if not item_ids: - return [] - return [_unpack_item_data(item) - for item in (model_query(context, models.Item). - filter_by(project_id=context.project_id). - filter(models.Item.id.in_(item_ids)). - all())] - - -@require_context -def get_public_items(context, kind, item_ids=None): - query = (model_query(context, models.Item). - filter(models.Item.id.like('%s-%%' % kind)). - filter(models.Item.data.like('%"is_public": True%'))) - if item_ids: - query = query.filter(models.Item.id.in_(item_ids)) - return [_unpack_item_data(item) - for item in query.all()] - - -@require_context -def get_items_ids(context, kind, item_ids=None, item_os_ids=None): - query = (model_query(context, models.Item). 
- filter(models.Item.id.like('%s-%%' % kind))) - if item_ids: - query = query.filter(models.Item.id.in_(item_ids)) - if item_os_ids: - query = query.filter(models.Item.os_id.in_(item_os_ids)) - return [(item['id'], item['os_id']) - for item in query.all()] - - -@require_context -def add_tags(context, tags): - session = get_session() - get_query = (model_query(context, models.Tag, session=session). - filter_by(project_id=context.project_id, - # NOTE(ft): item_id param name is reserved for - # sqlalchemy internal use - item_id=bindparam('tag_item_id'), - key=bindparam('tag_key'))) - with session.begin(): - for tag in tags: - tag_ref = models.Tag(project_id=context.project_id, - item_id=tag['item_id'], - key=tag['key'], - value=tag['value']) - try: - with session.begin(nested=True): - tag_ref.save(session) - except db_exception.DBDuplicateEntry as ex: - if ('PRIMARY' not in ex.columns and - ex.columns != ['project_id', 'item_id', 'key']): - raise - (get_query.params(tag_item_id=tag['item_id'], - tag_key=tag['key']). - update({'value': tag['value']})) - - -@require_context -def delete_tags(context, item_ids, tag_pairs=None): - if not item_ids: - return - - query = (model_query(context, models.Tag). - filter_by(project_id=context.project_id). - filter(models.Tag.item_id.in_(item_ids))) - - if tag_pairs: - tag_fltr = None - for tag_pair in tag_pairs: - pair_fltr = None - for col in ('key', 'value'): - if col in tag_pair: - expr = getattr(models.Tag, col) == tag_pair[col] - pair_fltr = (expr if pair_fltr is None else - and_(pair_fltr, expr)) - if pair_fltr is not None: - tag_fltr = (pair_fltr if tag_fltr is None else - or_(tag_fltr, pair_fltr)) - if tag_fltr is not None: - query = query.filter(tag_fltr) - - query.delete(synchronize_session=False) - - -@require_context -def get_tags(context, kinds=None, item_ids=None): - query = (model_query(context, models.Tag). 
- filter_by(project_id=context.project_id)) - if kinds: - fltr = None - for kind in kinds: - expr = models.Tag.item_id.like('%s-%%' % kind) - fltr = expr if fltr is None else or_(fltr, expr) - query = query.filter(fltr) - if item_ids: - query = query.filter(models.Tag.item_id.in_(item_ids)) - return [dict(item_id=tag.item_id, - key=tag.key, - value=tag.value) - for tag in query.all()] - - -def _pack_item_data(item_data): - data = copy.deepcopy(item_data) - data.pop("id", None) - return { - "os_id": data.pop("os_id", None), - "vpc_id": data.pop("vpc_id", None), - "data": jsonutils.dumps(data), - } - - -def _unpack_item_data(item_ref): - if item_ref is None: - return None - data = item_ref.data - data = jsonutils.loads(data) if data is not None else {} - data["id"] = item_ref.id - data["os_id"] = item_ref.os_id - data["vpc_id"] = item_ref.vpc_id - return data diff --git a/ec2api/db/sqlalchemy/migrate_repo/README b/ec2api/db/sqlalchemy/migrate_repo/README deleted file mode 100644 index 605845e2..00000000 --- a/ec2api/db/sqlalchemy/migrate_repo/README +++ /dev/null @@ -1,4 +0,0 @@ -This is a database migration repository. - -More information at -https://github.com/openstack/sqlalchemy-migrate diff --git a/ec2api/db/sqlalchemy/migrate_repo/__init__.py b/ec2api/db/sqlalchemy/migrate_repo/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ec2api/db/sqlalchemy/migrate_repo/manage.py b/ec2api/db/sqlalchemy/migrate_repo/manage.py deleted file mode 100644 index fdca2559..00000000 --- a/ec2api/db/sqlalchemy/migrate_repo/manage.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate.versioning.shell import main - - -if __name__ == '__main__': - main(debug='False', repository='.') diff --git a/ec2api/db/sqlalchemy/migrate_repo/migrate.cfg b/ec2api/db/sqlalchemy/migrate_repo/migrate.cfg deleted file mode 100644 index edc88614..00000000 --- a/ec2api/db/sqlalchemy/migrate_repo/migrate.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=ec2api - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. 
-# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] diff --git a/ec2api/db/sqlalchemy/migrate_repo/versions/001_juno.py b/ec2api/db/sqlalchemy/migrate_repo/versions/001_juno.py deleted file mode 100644 index bac3be5e..00000000 --- a/ec2api/db/sqlalchemy/migrate_repo/versions/001_juno.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column, MetaData -from sqlalchemy import PrimaryKeyConstraint, String, Table, Text -from sqlalchemy import UniqueConstraint - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - items = Table('items', meta, - Column("id", String(length=30)), - Column("project_id", String(length=64)), - Column("vpc_id", String(length=12)), - Column("os_id", String(length=36)), - Column("data", Text()), - PrimaryKeyConstraint('id'), - UniqueConstraint('os_id', name='items_os_id_idx'), - mysql_engine="InnoDB", - mysql_charset="utf8" - ) - items.create() - - tags = Table('tags', meta, - Column("project_id", String(length=64)), - Column("item_id", String(length=30)), - Column("key", String(length=127)), - Column("value", String(length=255)), - PrimaryKeyConstraint('project_id', 'item_id', 'key'), - mysql_engine="InnoDB", - mysql_charset="utf8" - ) - tags.create() - - if migrate_engine.name == "mysql": - # In Folsom we explicitly converted migrate_version to UTF8. 
- sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;" - migrate_engine.execute(sql) - # Set default DB charset to UTF8. - sql = (" ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" % - migrate_engine.url.database) - migrate_engine.execute(sql) - - -def downgrade(migrate_engine): - raise NotImplementedError("Downgrade from Juno is unsupported.") diff --git a/ec2api/db/sqlalchemy/migrate_repo/versions/__init__.py b/ec2api/db/sqlalchemy/migrate_repo/versions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ec2api/db/sqlalchemy/migration.py b/ec2api/db/sqlalchemy/migration.py deleted file mode 100644 index 9c859958..00000000 --- a/ec2api/db/sqlalchemy/migration.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import os - -from migrate import exceptions as versioning_exceptions -from migrate.versioning import api as versioning_api -from migrate.versioning.repository import Repository -import sqlalchemy - -from ec2api.db.sqlalchemy import api as db_session -from ec2api import exception -from ec2api.i18n import _ - -INIT_VERSION = 0 -_REPOSITORY = None - -get_engine = db_session.get_engine - - -def db_sync(version=None): - if version is not None: - try: - version = int(version) - except ValueError: - raise exception.EC2Exception(_("version should be an integer")) - - current_version = db_version() - repository = _find_migrate_repo() - if version is None or version > current_version: - return versioning_api.upgrade(get_engine(), repository, version) - else: - return versioning_api.downgrade(get_engine(), repository, - version) - - -def db_version(): - repository = _find_migrate_repo() - try: - return versioning_api.db_version(get_engine(), repository) - except versioning_exceptions.DatabaseNotControlledError: - meta = sqlalchemy.MetaData() - engine = get_engine() - meta.reflect(bind=engine) - tables = meta.tables - if len(tables) == 0: - db_version_control(INIT_VERSION) - return versioning_api.db_version(get_engine(), repository) - else: - # Some pre-Essex DB's may not be version controlled. - # Require them to upgrade using Essex first. 
- raise exception.EC2Exception( - _("Upgrade DB using Essex release first.")) - - -def db_initial_version(): - return INIT_VERSION - - -def db_version_control(version=None): - repository = _find_migrate_repo() - versioning_api.version_control(get_engine(), repository, version) - return version - - -def _find_migrate_repo(): - """Get the path for the migrate repository.""" - global _REPOSITORY - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - 'migrate_repo') - assert os.path.exists(path) - if _REPOSITORY is None: - _REPOSITORY = Repository(path) - return _REPOSITORY diff --git a/ec2api/db/sqlalchemy/models.py b/ec2api/db/sqlalchemy/models.py deleted file mode 100644 index d9d6f41d..00000000 --- a/ec2api/db/sqlalchemy/models.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2013 Cloudscaling Group, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -SQLAlchemy models for ec2api data. 
-""" - -from oslo_db.sqlalchemy import models -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import Column, PrimaryKeyConstraint, String, Text -from sqlalchemy import UniqueConstraint - -BASE = declarative_base() - -ITEMS_OS_ID_INDEX_NAME = 'items_os_id_idx' - - -class EC2Base(models.ModelBase): - metadata = None - - def save(self, session=None): - from ec2api.db.sqlalchemy import api - - if session is None: - session = api.get_session() - - super(EC2Base, self).save(session=session) - - -class Item(BASE, EC2Base): - __tablename__ = 'items' - __table_args__ = ( - PrimaryKeyConstraint('id'), - UniqueConstraint('os_id', name=ITEMS_OS_ID_INDEX_NAME), - ) - id = Column(String(length=30)) - project_id = Column(String(length=64)) - vpc_id = Column(String(length=12)) - os_id = Column(String(length=36)) - data = Column(Text()) - - -class Tag(BASE, EC2Base): - __tablename__ = 'tags' - __table_args__ = ( - PrimaryKeyConstraint('project_id', 'item_id', 'key'), - ) - project_id = Column(String(length=64)) - item_id = Column(String(length=30)) - key = Column(String(length=127)) - value = Column(String(length=255)) diff --git a/ec2api/exception.py b/ec2api/exception.py deleted file mode 100644 index a00b5c1a..00000000 --- a/ec2api/exception.py +++ /dev/null @@ -1,498 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""ec2api base exception handling. 
- -Includes decorator for re-raising ec2api-type exceptions. - -SHOULD include dedicated exception logging. - -""" - -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api.i18n import _ - -LOG = logging.getLogger(__name__) - -exc_log_opts = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help='Make exception message format errors fatal'), -] - -CONF = cfg.CONF -CONF.register_opts(exc_log_opts) - - -class EC2APIException(Exception): - """Base EC2 API Exception - - To correctly use this class, inherit from it and define - a 'msg_fmt' property. That msg_fmt will get printf'd - with the keyword arguments provided to the constructor. - """ - msg_fmt = _('An unknown exception occurred.') - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if not message: - try: - message = self.msg_fmt % kwargs - except Exception as e: - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception('Exception in string format operation for ' - '%s exception', self.__class__.__name__) - for name, value in kwargs.items(): - LOG.error('%s: %s' % (name, value)) - - if CONF.fatal_exception_format_errors: - raise e - else: - # at least get the core message out if something happened - message = self.msg_fmt - elif not isinstance(message, str): - LOG.error("Message '%(msg)s' for %(ex)s exception is not " - "a string", - {'msg': message, 'ex': self.__class__.__name__}) - if CONF.fatal_exception_format_errors: - raise TypeError(_('Invalid exception message format')) - else: - message = self.msg_fmt - - super(EC2APIException, self).__init__(message) - - def format_message(self): - # NOTE(mrodden): use the first argument to the python Exception object - # which should be our full EC2APIException message, (see __init__) - return self.args[0] - - -# Internal ec2api exceptions - -class EC2APIConfigNotFound(EC2APIException): - msg_fmt = _("Could not find config at %(path)s") - - -class 
EC2APIPasteAppNotFound(EC2APIException): - msg_fmt = _("Could not load paste app '%(name)s' from %(path)s") - - -class EC2KeystoneDiscoverFailure(EC2APIException): - msg_fmt = _("Could not discover keystone versions.") - - -class EC2DBInvalidOsIdUpdate(EC2APIException): - msg_fmt = _('Invalid update of os_id of %(item_id)s item ' - 'from %(old_os_id)s to %(new_os_id)s') - - -class EC2DBDuplicateEntry(EC2APIException): - msg_fmt = _('Entry %(id)s already exists in DB.') - - -# Internal ec2api metadata exceptions - -class EC2MetadataException(EC2APIException): - pass - - -class EC2MetadataNotFound(EC2MetadataException): - pass - - -class EC2MetadataInvalidAddress(EC2MetadataException): - pass - - -# Intermediate exception classes to organize AWS exception hierarchy - -class EC2Exception(EC2APIException): - """Base EC2 compliant exception - - To correctly use this class, inherit from it and define - a 'ec2_code' property if a new class name doesn't coincide with - AWS Error Code. - """ - code = 400 - - -class EC2InvalidException(EC2Exception): - pass - - -class EC2IncorrectStateException(EC2Exception): - pass - - -class EC2DuplicateException(EC2InvalidException): - pass - - -class EC2InUseException(EC2InvalidException): - pass - - -class EC2NotFoundException(EC2InvalidException): - pass - - -class EC2OverlimitException(EC2Exception): - pass - - -# AWS compliant exceptions - -class Unsupported(EC2Exception): - msg_fmt = _("The specified request is unsupported. 
%(reason)s") - - -class UnsupportedOperation(EC2Exception): - msg_fmt = _('The specified request includes an unsupported operation.') - - -class OperationNotPermitted(EC2Exception): - msg_fmt = _('The specified operation is not allowed.') - - -class InvalidRequest(EC2InvalidException): - msg_fmt = _('The request received was invalid.') - - -class InvalidAttribute(EC2InvalidException): - msg_fmt = _("Attribute not supported: %(attr)s") - - -class InvalidID(EC2InvalidException): - msg_fmt = _("The ID '%(id)s' is not valid") - - -class InvalidInput(EC2InvalidException): - msg_fmt = _("Invalid input received: %(reason)s") - - -class AuthFailure(EC2InvalidException): - msg_fmt = _('Not authorized.') - - -class ValidationError(EC2InvalidException): - msg_fmt = _("The input fails to satisfy the constraints " - "specified by an AWS service: '%(reason)s'") - - -class MissingInput(EC2InvalidException): - pass - - -class MissingParameter(EC2InvalidException): - msg_fmt = _("The required parameter '%(param)s' is missing") - - -class InvalidParameter(EC2InvalidException): - msg_fmt = _("The property '%(name)s' is not valid") - - -class InvalidParameterValue(EC2InvalidException): - msg_fmt = _("Value (%(value)s) for parameter %(parameter)s is invalid. 
" - "%(reason)s") - - -class InvalidFilter(EC2InvalidException): - msg_fmt = _('The filter is invalid.') - - -class InvalidParameterCombination(EC2InvalidException): - msg_fmt = _('The combination of parameters in incorrect') - - -class InvalidVpcRange(EC2InvalidException): - ec2_code = 'InvalidVpc.Range' - msg_fmt = _("The CIDR '%(cidr_block)s' is invalid.") - - -class InvalidVpcState(EC2InvalidException): - msg_fmt = _('VPC %(vpc_id)s is currently attached to ' - 'the Virtual Private Gateway %(vgw_id)s') - - -class InvalidSubnetRange(EC2InvalidException): - ec2_code = 'InvalidSubnet.Range' - msg_fmt = _("The CIDR '%(cidr_block)s' is invalid.") - - -class InvalidSubnetConflict(EC2InvalidException): - ec2_code = 'InvalidSubnet.Conflict' - msg_fmt = _("The CIDR '%(cidr_block)s' conflicts with another subnet") - - -class InvalidInstanceId(EC2InvalidException): - ec2_code = 'InvalidInstanceID' - msg_fmt = _("There are multiple interfaces attached to instance " - "'%(instance_id)s'. Please specify an interface ID for " - "the operation instead.") - - -class InvalidSnapshotIDMalformed(EC2InvalidException): - ec2_code = 'InvalidSnapshotID.Malformed' - # TODO(ft): Change the message with the real AWS message - msg_fmg = _('The snapshot %(id)s ID is not valid') - - -class InvalidBlockDeviceMapping(EC2InvalidException): - pass - - -class IncorrectState(EC2IncorrectStateException): - msg_fmt = _("The resource is in incorrect state for the request - reason: " - "'%(reason)s'") - - -class DependencyViolation(EC2IncorrectStateException): - msg_fmt = _('Object %(obj1_id)s has dependent resource %(obj2_id)s') - - -class CannotDelete(EC2IncorrectStateException): - msg_fmt = _('Cannot delete the default VPC security group') - - -class ResourceAlreadyAssociated(EC2IncorrectStateException): - ec2_code = 'Resource.AlreadyAssociated' - - -class GatewayNotAttached(EC2IncorrectStateException): - ec2_code = 'Gateway.NotAttached' - msg_fmt = _("resource %(gw_id)s is not attached to network 
%(vpc_id)s") - - -class IncorrectInstanceState(EC2IncorrectStateException): - msg_fmt = _("The instance '%(instance_id)s' is not in a state from which " - "the requested operation can be performed.") - - -class InvalidAMIIDUnavailable(EC2IncorrectStateException): - ec2_code = 'InvalidAMIID.Unavailable' - # TODO(ft): Change the message with the real AWS message - msg_fmt = _("Image %(image_id)s is not active.") - - -class InvalidNetworkInterfaceInUse(EC2InUseException): - ec2_code = 'InvalidNetworkInterface.InUse' - msg_fmt = _('Interface: %(interface_ids)s in use.') - - -class InvalidIPAddressInUse(EC2InUseException): - ec2_code = 'InvalidIPAddress.InUse' - msg_fmt = _('Address %(ip_address)s is in use.') - - -class InvalidKeyPairDuplicate(EC2DuplicateException): - ec2_code = 'InvalidKeyPair.Duplicate' - msg_fmt = _("Key pair '%(key_name)s' already exists.") - - -class InvalidPermissionDuplicate(EC2DuplicateException): - ec2_code = 'InvalidPermission.Duplicate' - msg_fmt = _('The specified rule already exists for that security group.') - - -class InvalidGroupDuplicate(EC2DuplicateException): - ec2_code = 'InvalidGroup.Duplicate' - msg_fmt = _("Security group '%(name)s' already exists.") - - -class RouteAlreadyExists(EC2DuplicateException): - msg_fmt = _('The route identified by %(destination_cidr_block)s ' - 'already exists.') - - -class InvalidCustomerGatewayDuplicateIpAddress(EC2DuplicateException): - ec2_code = 'InvalidCustomerGateway.DuplicateIpAddress' - msg_fmt = _('Conflict among chosen gateway IP addresses.') - - -class InvalidVpcIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidVpcID.NotFound' - msg_fmt = _("The vpc ID '%(id)s' does not exist") - - -class InvalidInternetGatewayIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidInternetGatewayID.NotFound' - msg_fmt = _("The internetGateway ID '%(id)s' does not exist") - - -class InvalidSubnetIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidSubnetID.NotFound' - msg_fmt = _("The subnet ID 
'%(id)s' does not exist") - - -class InvalidNetworkInterfaceIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidNetworkInterfaceID.NotFound' - msg_fmt = _("Network interface %(id)s could not " - "be found.") - - -class InvalidAttachmentIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidAttachmentID.NotFound' - msg_fmt = _("Attachment %(id)s could not " - "be found.") - - -class InvalidInstanceIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidInstanceID.NotFound' - msg_fmt = _("The instance ID '%(id)s' does not exist") - - -class InvalidDhcpOptionsIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidDhcpOptionsID.NotFound' - msg_fmt = _("The dhcp options ID '%(id)s' does not exist") - - -class InvalidAddressNotFound(EC2NotFoundException): - ec2_code = 'InvalidAddress.NotFound' - msg_fmt = _('The specified elastic IP address %(ip)s cannot be found.') - - -class InvalidAllocationIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidAllocationID.NotFound' - msg_fmt = _("The allocation ID '%(id)s' does not exist") - - -class InvalidAssociationIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidAssociationID.NotFound' - msg_fmt = _("The association ID '%(id)s' does not exist") - - -class InvalidSecurityGroupIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidSecurityGroupID.NotFound' - msg_fmt = _("The securityGroup ID '%(id)s' does not exist") - - -class InvalidGroupNotFound(EC2NotFoundException): - ec2_code = 'InvalidGroup.NotFound' - msg_fmt = _("The security group ID '%(id)s' does not exist") - - -class InvalidPermissionNotFound(EC2NotFoundException): - ec2_code = 'InvalidPermission.NotFound' - msg_fmg = _('The specified permission does not exist') - - -class InvalidRouteTableIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidRouteTableID.NotFound' - msg_fmt = _("The routeTable ID '%(id)s' does not exist") - - -class InvalidRouteNotFound(EC2NotFoundException): - ec2_code = 'InvalidRoute.NotFound' - msg_fmt = _('No route with 
destination-cidr-block ' - '%(destination_cidr_block)s in route table %(route_table_id)s') - - -class InvalidAMIIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidAMIID.NotFound' - msg_fmt = _("The image id '[%(id)s]' does not exist") - - -class InvalidVolumeNotFound(EC2NotFoundException): - ec2_code = 'InvalidVolume.NotFound' - msg_fmt = _("The volume '%(id)s' does not exist.") - - -class InvalidSnapshotNotFound(EC2NotFoundException): - ec2_code = 'InvalidSnapshot.NotFound' - msg_fmt = _("Snapshot %(id)s could not be found.") - - -class InvalidKeypairNotFound(EC2NotFoundException): - ec2_code = 'InvalidKeyPair.NotFound' - msg_fmt = _("Keypair %(id)s is not found") - - -class InvalidAvailabilityZoneNotFound(EC2NotFoundException): - ec2_code = 'InvalidAvailabilityZone.NotFound' - msg_fmt = _("Availability zone %(id)s not found") - - -class InvalidGatewayIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidGatewayID.NotFound' - msg_fmt = _("The gateway ID '%(id)s' does not exist") - - -class InvalidVpnGatewayIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidVpnGatewayID.NotFound' - msg_fmt = _("The vpnGateway ID '%(id)s' does not exist") - - -class InvalidCustomerGatewayIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidCustomerGatewayID.NotFound' - msg_fmt = _("The customerGateway ID '%(id)s' does not exist") - - -class InvalidVpnConnectionIDNotFound(EC2NotFoundException): - ec2_code = 'InvalidVpnConnectionID.NotFound' - msg_fmt = _("The vpnConnection ID '%(id)s' does not exist") - - -class InvalidVpnGatewayAttachmentNotFound(EC2NotFoundException): - ec2_code = 'InvalidVpnGatewayAttachment.NotFound' - msg_fmt = _("The attachment with vpn gateway ID '%(vgw_id)s' " - "and vpc ID '%(vpc_id)s' does not exist") - - -class ResourceLimitExceeded(EC2OverlimitException): - msg_fmt = _('You have reached the limit of %(resource)s') - - -class VpcLimitExceeded(EC2OverlimitException): - msg_fmt = _('The maximum number of VPCs has been reached.') - - -class 
SubnetLimitExceeded(EC2OverlimitException): - msg_fmt = _('You have reached the limit on the number of subnets that you ' - 'can create') - - -class InsufficientFreeAddressesInSubnet(EC2OverlimitException): - msg_fmt = _('The specified subnet does not have enough free addresses to ' - 'satisfy the request.') - - -class AddressLimitExceeded(EC2OverlimitException): - msg_fmt = _('The maximum number of addresses has been reached.') - - -class SecurityGroupLimitExceeded(EC2OverlimitException): - msg_fmt = _('You have reached the limit of security groups') - - -class RulesPerSecurityGroupLimitExceeded(EC2OverlimitException): - msg_fmt = _("You've reached the limit on the number of rules that " - "you can add to a security group.") - - -class VpnGatewayAttachmentLimitExceeded(EC2OverlimitException): - msg_fmt = _('The maximum number of virtual private gateway attachments ' - 'has been reached.') - - -class InvalidGroupReserved(EC2InvalidException): - ec2_code = 'InvalidGroup.Reserved' - msg_fmt = _("The security group '%(group_name)' is reserved.") - - -class VPCIdNotSpecified(EC2InvalidException): - msg_fmt = _("No default VPC for this user.") diff --git a/ec2api/hacking/__init__.py b/ec2api/hacking/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ec2api/hacking/checks.py b/ec2api/hacking/checks.py deleted file mode 100644 index f0dbd681..00000000 --- a/ec2api/hacking/checks.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -from hacking import core - - -_all_log_levels = {'critical', 'error', 'exception', 'info', - 'warning', 'debug'} - -# Since _Lx() have been removed, we just need to check _() -_all_hints = {'_'} - -_log_translation_hint = re.compile( - r".*LOG\.(%(levels)s)\(\s*(%(hints)s)\(" % { - 'levels': '|'.join(_all_log_levels), - 'hints': '|'.join(_all_hints), - }) - - -@core.flake8ext -def no_translate_logs(logical_line, filename): - """N537 - Don't translate logs. - - Check for 'LOG.*(_(' - - Translators don't provide translations for log messages, and operators - asked not to translate them. - - * This check assumes that 'LOG' is a logger. - - :param logical_line: The logical line to check. - :param filename: The file name where the logical line exists. - :returns: None if the logical line passes the check, otherwise a tuple - is yielded that contains the offending index in logical line and a - message describe the check validation failure. - """ - if _log_translation_hint.match(logical_line): - yield (0, "N537: Log messages should not be translated!") diff --git a/ec2api/i18n.py b/ec2api/i18n.py deleted file mode 100644 index bcc68cbf..00000000 --- a/ec2api/i18n.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. 
- -See http://docs.openstack.org/developer/oslo.i18n/usage.html . - -""" - -import oslo_i18n - -DOMAIN = 'ec2-api' - -_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) - -# The primary translation function using the well-known name "_" -_ = _translators.primary - - -def translate(value, user_locale): - return oslo_i18n.translate(value, user_locale) - - -def get_available_languages(): - return oslo_i18n.get_available_languages(DOMAIN) diff --git a/ec2api/metadata/__init__.py b/ec2api/metadata/__init__.py deleted file mode 100644 index 78f37ebf..00000000 --- a/ec2api/metadata/__init__.py +++ /dev/null @@ -1,284 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import hashlib -import hmac -import posixpath - -import httplib2 -from oslo_cache import core as cache_core -from oslo_config import cfg -from oslo_log import log as logging -import urllib.parse as urlparse -import webob - -from ec2api import context as ec2_context -from ec2api import exception -from ec2api.i18n import _ -from ec2api.metadata import api -from ec2api import utils -from ec2api import wsgi - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CONF.import_opt('use_forwarded_for', 'ec2api.api.auth') - -metadata_opts = [ - cfg.StrOpt('nova_metadata_ip', - default='127.0.0.1', - help=_("IP address used by Nova metadata server.")), - cfg.IntOpt('nova_metadata_port', - default=8775, - help=_("TCP Port used by Nova metadata server.")), - cfg.StrOpt('nova_metadata_protocol', - default='http', - choices=['http', 'https'], - help=_("Protocol to access nova metadata, http or https")), - cfg.BoolOpt('nova_metadata_insecure', - default=False, - help=_("Allow to perform insecure SSL (https) requests to " - "nova metadata")), - cfg.StrOpt('auth_ca_cert', - help=_("Certificate Authority public key (CA cert) " - "file for ssl")), - cfg.StrOpt('nova_client_cert', - default='', - help=_("Client certificate for nova metadata api server.")), - cfg.StrOpt('nova_client_priv_key', - default='', - help=_("Private key of client certificate.")), - cfg.StrOpt('metadata_proxy_shared_secret', - default='', - help=_('Shared secret to sign instance-id request'), - secret=True), - cfg.IntOpt("cache_expiration", - default=15, - min=0, - help=_('This option is the time (in seconds) to cache metadata. ' - 'Increasing this setting should improve response times of the ' - 'metadata API when under heavy load. 
Higher values may ' - 'increase memory usage, and result in longer times for host ' - 'metadata changes to take effect.')) -] - -CONF.register_opts(metadata_opts, group='metadata') -cache_core.configure(CONF) - - -class MetadataRequestHandler(wsgi.Application): - """Serve metadata.""" - - def __init__(self): - if not CONF.cache.enabled: - LOG.warning("Metadata doesn't use cache. " - "Configure cache options to use cache.") - self.cache_region = cache_core.create_region() - cache_core.configure_cache_region(CONF, self.cache_region) - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - LOG.debug('Request: %s', req) - - path = req.path_info - if path == '' or path[0] != '/': - path = '/' + path - path = posixpath.normpath(path) - path_tokens = path.split('/')[1:] - if path_tokens[0] == 'ec2': - path_tokens = path_tokens[1:] - - if path_tokens == ['']: - resp = api.get_version_list() - return self._add_response_data(req.response, resp) - - try: - requester = self._get_requester(req) - if path_tokens[0] == 'openstack': - return self._proxy_request(req, requester) - - resp = self._get_metadata(path_tokens, requester) - return self._add_response_data(req.response, resp) - - except exception.EC2MetadataNotFound: - return webob.exc.HTTPNotFound() - except Exception: - LOG.exception("Unexpected error.") - msg = _('An unknown error has occurred. 
' - 'Please try your request again.') - return webob.exc.HTTPInternalServerError( - explanation=str(msg)) - - def _proxy_request(self, req, requester): - headers = self._build_proxy_request_headers(requester) - nova_ip_port = '%s:%s' % (CONF.metadata.nova_metadata_ip, - CONF.metadata.nova_metadata_port) - url = urlparse.urlunsplit(( - CONF.metadata.nova_metadata_protocol, - nova_ip_port, - req.path_info, - req.query_string, - '')) - - h = httplib2.Http( - ca_certs=CONF.metadata.auth_ca_cert, - disable_ssl_certificate_validation=( - CONF.metadata.nova_metadata_insecure) - ) - if (CONF.metadata.nova_client_cert and - CONF.metadata.nova_client_priv_key): - h.add_certificate(CONF.metadata.nova_client_priv_key, - CONF.metadata.nova_client_cert, - nova_ip_port) - resp, content = h.request(url, method=req.method, headers=headers, - body=req.body) - - if resp.status == 200: - LOG.debug(str(resp)) - req.response.content_type = resp['content-type'] - req.response.body = content - return req.response - elif resp.status == 403: - LOG.warning( - 'The remote metadata server responded with Forbidden. This ' - 'response usually occurs when shared secrets do not match.' - ) - return webob.exc.HTTPForbidden() - elif resp.status == 400: - return webob.exc.HTTPBadRequest() - elif resp.status == 404: - return webob.exc.HTTPNotFound() - elif resp.status == 409: - return webob.exc.HTTPConflict() - elif resp.status == 500: - msg = _( - 'Remote metadata server experienced an internal server error.' 
- ) - LOG.warning(msg) - return webob.exc.HTTPInternalServerError( - explanation=str(msg)) - else: - raise Exception(_('Unexpected response code: %s') % resp.status) - - def _build_proxy_request_headers(self, requester): - signature = self._sign_instance_id(requester['os_instance_id']) - return { - 'X-Forwarded-For': requester['private_ip'], - 'X-Instance-ID': requester['os_instance_id'], - 'X-Tenant-ID': requester['project_id'], - 'X-Instance-ID-Signature': signature, - } - - def _sign_instance_id(self, instance_id): - return hmac.new( - CONF.metadata.metadata_proxy_shared_secret.encode("utf-8"), - instance_id.encode(), - hashlib.sha256).hexdigest() - - def _get_requester(self, req): - if req.headers.get('X-Metadata-Provider'): - provider_id, remote_ip = self._unpack_nsx_request(req) - context = ec2_context.get_os_admin_context() - os_instance_id, project_id = ( - api.get_os_instance_and_project_id_by_provider_id( - context, provider_id, remote_ip)) - else: - os_instance_id, project_id, remote_ip = ( - self._unpack_neutron_request(req)) - return {'os_instance_id': os_instance_id, - 'project_id': project_id, - 'private_ip': remote_ip} - - def _unpack_neutron_request(self, req): - os_instance_id = req.headers.get('X-Instance-ID') - project_id = req.headers.get('X-Tenant-ID') - signature = req.headers.get('X-Instance-ID-Signature') - remote_ip = req.headers.get('X-Forwarded-For') - - if not remote_ip: - raise exception.EC2MetadataInvalidAddress() - - if os_instance_id is None: - msg = _('X-Instance-ID header is missing from request.') - elif project_id is None: - msg = _('X-Tenant-ID header is missing from request.') - elif not isinstance(os_instance_id, str): - msg = _('Multiple X-Instance-ID headers found within request.') - elif not isinstance(project_id, str): - msg = _('Multiple X-Tenant-ID headers found within request.') - else: - msg = None - - if msg: - raise webob.exc.HTTPBadRequest(explanation=msg) - - self._validate_signature(signature, os_instance_id, 
remote_ip) - return os_instance_id, project_id, remote_ip - - def _unpack_nsx_request(self, req): - remote_address = req.headers.get('X-Forwarded-For') - if remote_address is None: - msg = _('X-Forwarded-For is missing from request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - provider_id = req.headers.get('X-Metadata-Provider') - if provider_id is None: - msg = _('X-Metadata-Provider is missing from request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - remote_ip = remote_address.split(',')[0] - - if CONF.metadata.metadata_proxy_shared_secret: - signature = req.headers.get('X-Metadata-Provider-Signature') - self._validate_signature(signature, provider_id, remote_ip) - - return provider_id, remote_ip - - def _validate_signature(self, signature, requester_id, requester_ip): - expected_signature = hmac.new( - CONF.metadata.metadata_proxy_shared_secret.encode("utf-8"), - requester_id.encode(), - hashlib.sha256).hexdigest() - - if not (signature and - utils.constant_time_compare(expected_signature, signature)): - LOG.warning('X-Instance-ID-Signature: %(signature)s does ' - 'not match the expected value: ' - '%(expected_signature)s for id: ' - '%(requester_id)s. Request From: ' - '%(requester_ip)s', - {'signature': signature, - 'expected_signature': expected_signature, - 'requester_id': requester_id, - 'requester_ip': requester_ip}) - - msg = _('Invalid proxy request signature.') - raise webob.exc.HTTPForbidden(explanation=msg) - - def _get_metadata(self, path_tokens, requester): - context = ec2_context.get_os_admin_context() - # NOTE(ft): substitute project_id for context to instance's one. - # It's needed for correct describe and auto update DB operations. 
- # It doesn't affect operations via OpenStack's clients because - # these clients use auth_token field only - context.project_id = requester['project_id'] - return api.get_metadata_item(context, path_tokens, - requester['os_instance_id'], - requester['private_ip'], - self.cache_region) - - def _add_response_data(self, response, data): - if isinstance(data, str): - response.text = data - else: - response.body = data - response.content_type = 'text/plain' - return response diff --git a/ec2api/metadata/api.py b/ec2api/metadata/api.py deleted file mode 100644 index 15c53cb8..00000000 --- a/ec2api/metadata/api.py +++ /dev/null @@ -1,310 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -import itertools - -from novaclient import exceptions as nova_exception -from oslo_cache import core as cache_core -from oslo_config import cfg -from oslo_log import log as logging - -from ec2api.api import clients -from ec2api.api import ec2utils -from ec2api.api import instance as instance_api -from ec2api import exception - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -VERSIONS = [ - '1.0', - '2007-01-19', - '2007-03-01', - '2007-08-29', - '2007-10-10', - '2007-12-15', - '2008-02-01', - '2008-09-01', - '2009-04-04', -] - -VERSION_DATA = { - '1.0': ['ami-id', - 'ami-launch-index', - 'ami-manifest-path', - 'hostname', - 'instance-id', - 'local-ipv4', - 'public-keys', - 'reservation-id', - 'security-groups'], - '2007-01-19': ['local-hostname', - 'public-hostname', - 'public-ipv4'], - '2007-03-01': ['product-codes'], - '2007-08-29': ['instance-type'], - '2007-10-10': ['ancestor-ami-ids', - 'ramdisk-id'], - '2007-12-15': ['block-device-mapping'], - '2008-02-01': ['kernel-id', - 'placement'], - '2008-09-01': ['instance-action'], - '2009-04-04': [], -} - - -def get_version_list(): - return _format_metadata_item(VERSIONS + ["latest"]) - - -def get_os_instance_and_project_id_by_provider_id(context, provider_id, - fixed_ip): - neutron = clients.neutron(context) - os_subnets = neutron.list_subnets(advanced_service_providers=[provider_id], - fields=['network_id']) - if not os_subnets: - raise exception.EC2MetadataNotFound() - os_networks = [subnet['network_id'] - for subnet in os_subnets['subnets']] - try: - os_port = neutron.list_ports( - fixed_ips='ip_address=' + fixed_ip, - network_id=os_networks, - fields=['device_id', 'tenant_id'])['ports'][0] - except IndexError: - raise exception.EC2MetadataNotFound() - os_instance_id = os_port['device_id'] - project_id = os_port['tenant_id'] - return os_instance_id, project_id - - -def get_metadata_item(context, path_tokens, os_instance_id, remote_ip, - cache_region): - version = path_tokens[0] - if 
version == "latest": - version = VERSIONS[-1] - elif version not in VERSIONS: - raise exception.EC2MetadataNotFound() - - cache_key = 'ec2api-metadata-%s' % os_instance_id - cache = cache_region.get( - cache_key, expiration_time=CONF.metadata.cache_expiration) - if cache and cache != cache_core.NO_VALUE: - _check_instance_owner(context, os_instance_id, cache['owner_id']) - LOG.debug("Using cached metadata for instance %s", os_instance_id) - else: - ec2_instance, ec2_reservation = ( - _get_ec2_instance_and_reservation(context, os_instance_id)) - - _check_instance_owner(context, os_instance_id, - ec2_reservation['ownerId']) - - metadata = _build_metadata(context, ec2_instance, ec2_reservation, - os_instance_id, remote_ip) - LOG.debug('get_metadata_item: result %s', str(metadata)) - cache = {'metadata': metadata, - 'owner_id': ec2_reservation['ownerId']} - - cache_region.set(cache_key, cache) - - metadata = cache['metadata'] - metadata = _cut_down_to_version(metadata, version) - metadata_item = _find_path_in_tree(metadata, path_tokens[1:]) - return _format_metadata_item(metadata_item) - - -def _get_ec2_instance_and_reservation(context, os_instance_id): - instance_id = ec2utils.os_id_to_ec2_id(context, 'i', os_instance_id) - LOG.debug('_get_ec2_instance_and_reservation(%s)', os_instance_id) - try: - ec2_reservations = instance_api.describe_instances( - context, [instance_id]) - LOG.debug('_get_ec2_instance_and_reservation: result by id %s', - str(ec2_reservations)) - except exception.InvalidInstanceIDNotFound: - ec2_reservations = instance_api.describe_instances( - context, filter=[{'name': 'instance-id', - 'value': [instance_id]}]) - LOG.debug('_get_ec2_instance_and_reservation: result by name %s', - str(ec2_reservations)) - if (len(ec2_reservations['reservationSet']) != 1 or - len(ec2_reservations['reservationSet'][0]['instancesSet']) != 1): - LOG.error('Failed to get metadata for instance id: %s', - os_instance_id) - raise exception.EC2MetadataNotFound() - - 
ec2_reservation = ec2_reservations['reservationSet'][0] - ec2_instance = ec2_reservation['instancesSet'][0] - - return ec2_instance, ec2_reservation - - -def _check_instance_owner(context, os_instance_id, owner_id): - # NOTE(ft): check for case of Neutron metadata proxy. - # It sends project_id as X-Tenant-ID HTTP header. - # We make sure it's correct - if context.project_id != owner_id: - LOG.warning('Tenant_id %(tenant_id)s does not match tenant_id ' - 'of instance %(instance_id)s.', - {'tenant_id': context.project_id, - 'instance_id': os_instance_id}) - raise exception.EC2MetadataNotFound() - - -def _build_metadata(context, ec2_instance, ec2_reservation, - os_instance_id, remote_ip): - metadata = { - 'ami-id': ec2_instance['imageId'], - 'ami-launch-index': ec2_instance['amiLaunchIndex'], - # NOTE (ft): the fake value as it is in Nova EC2 metadata - 'ami-manifest-path': 'FIXME', - # NOTE (ft): empty value as it is in Nova EC2 metadata - 'ancestor-ami-ids': [], - 'block-device-mapping': _build_block_device_mappings(context, - ec2_instance, - os_instance_id), - # NOTE(ft): Nova EC2 metadata returns instance's hostname with - # dhcp_domain suffix if it's set in config. - # But i don't see any reason to return a hostname differs from EC2 - # describe output one. 
If we need to consider dhcp_domain suffix - # then we should do it in the describe operation - 'hostname': ec2_instance['privateDnsName'], - # NOTE (ft): the fake value as it is in Nova EC2 metadata - 'instance-action': 'none', - 'instance-id': ec2_instance['instanceId'], - 'instance-type': ec2_instance['instanceType'], - 'local-hostname': ec2_instance['privateDnsName'], - 'local-ipv4': ec2_instance['privateIpAddress'] or remote_ip, - 'placement': { - 'availability-zone': ec2_instance['placement']['availabilityZone'] - }, - # NOTE (ft): empty value as it is in Nova EC2 metadata - 'product-codes': [], - 'public-hostname': ec2_instance['dnsName'], - 'public-ipv4': ec2_instance.get('ipAddress', ''), - 'reservation-id': ec2_reservation['reservationId'], - 'security-groups': [sg['groupName'] - for sg in ec2_reservation.get('groupSet', [])], - } - if 'kernelId' in ec2_instance: - metadata['kernel-id'] = ec2_instance['kernelId'] - if 'ramdiskId' in ec2_instance: - metadata['ramdisk-id'] = ec2_instance['ramdiskId'] - # public keys are strangely rendered in ec2 metadata service - # meta-data/public-keys/ returns '0=keyname' (with no trailing /) - # and only if there is a public key given. 
- # '0=keyname' means there is a normally rendered dict at - # meta-data/public-keys/0 - # - # meta-data/public-keys/ : '0=%s' % keyname - # meta-data/public-keys/0/ : 'openssh-key' - # meta-data/public-keys/0/openssh-key : '%s' % publickey - if ec2_instance['keyName']: - metadata['public-keys'] = { - '0': {'_name': "0=" + ec2_instance['keyName']}} - nova = clients.nova(context) - os_instance = nova.servers.get(os_instance_id) - try: - keypair = nova.keypairs._get( - '/%s/%s?user_id=%s' % (nova.keypairs.keypair_prefix, - ec2_instance['keyName'], - os_instance.user_id), - 'keypair') - except nova_exception.NotFound: - pass - else: - metadata['public-keys']['0']['openssh-key'] = keypair.public_key - - full_metadata = {'meta-data': metadata} - - userdata = instance_api.describe_instance_attribute( - context, ec2_instance['instanceId'], 'userData') - if 'userData' in userdata: - userdata = userdata['userData']['value'] - userdata = base64.b64decode(userdata) - try: - userdata = userdata.decode("utf-8") - except UnicodeDecodeError: - pass - full_metadata['user-data'] = userdata - - return full_metadata - - -def _build_block_device_mappings(context, ec2_instance, os_instance_id): - mappings = {'root': ec2_instance.get('rootDeviceName', ''), - 'ami': ec2utils.block_device_strip_dev( - ec2_instance.get('rootDeviceName', ''))} - if 'blockDeviceMapping' in ec2_instance: - # NOTE(yamahata): I'm not sure how ebs device should be numbered. - # Right now sort by device name for deterministic - # result. 
- ebs_devices = [ebs['deviceName'] - for ebs in ec2_instance['blockDeviceMapping']] - ebs_devices.sort() - ebs_devices = {'ebs%d' % num: ebs - for num, ebs in enumerate(ebs_devices)} - mappings.update(ebs_devices) - - # TODO(ft): extend Nova API to get ephemerals and swap - return mappings - - -def _cut_down_to_version(metadata, version): - version_number = VERSIONS.index(version) + 1 - if version_number == len(VERSIONS): - return metadata - return {attr: metadata[attr] - for attr in itertools.chain( - *(VERSION_DATA[ver] for ver in VERSIONS[:version_number])) - if attr in metadata} - - -def _format_metadata_item(data): - if isinstance(data, dict): - output = '' - for key in sorted(data.keys()): - if key == '_name': - continue - if isinstance(data[key], dict): - if '_name' in data[key]: - output += str(data[key]['_name']) - else: - output += key + '/' - else: - output += key - - output += '\n' - return output[:-1] - elif isinstance(data, list): - return '\n'.join(data) - else: - return str(data) - - -def _find_path_in_tree(data, path_tokens): - # given a dict/list tree, and a path in that tree, return data found there. - for i in range(0, len(path_tokens)): - if isinstance(data, dict) or isinstance(data, list): - if path_tokens[i] in data: - data = data[path_tokens[i]] - else: - raise exception.EC2MetadataNotFound() - else: - if i != len(path_tokens) - 1: - raise exception.EC2MetadataNotFound() - data = data[path_tokens[i]] - return data diff --git a/ec2api/metadata/opts.py b/ec2api/metadata/opts.py deleted file mode 100644 index 9aca41d0..00000000 --- a/ec2api/metadata/opts.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import itertools - -import ec2api.metadata - - -def list_opts(): - return [ - ('metadata', - itertools.chain( - ec2api.metadata.metadata_opts, - )), - ] diff --git a/ec2api/opts.py b/ec2api/opts.py deleted file mode 100644 index ce9d6847..00000000 --- a/ec2api/opts.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import itertools -import operator - -from keystoneauth1 import loading as ks_loading -from oslo_config import cfg - -import ec2api.clients -import ec2api.db.api -import ec2api.exception -import ec2api.paths -import ec2api.service -import ec2api.utils -import ec2api.wsgi - - -CONF = cfg.CONF - - -def list_opts(): - return [ - ('DEFAULT', - itertools.chain( - ec2api.clients.ec2_opts, - ec2api.db.api.tpool_opts, - ec2api.exception.exc_log_opts, - ec2api.paths.path_opts, - ec2api.service.service_opts, - ec2api.wsgi.wsgi_opts, - )), - ] - - -GROUP_AUTHTOKEN = 'keystone_authtoken' - - -def list_auth_opts(): - opt_list = ks_loading.register_session_conf_options(CONF, GROUP_AUTHTOKEN) - opt_list.insert(0, ks_loading.get_auth_common_conf_options()[0]) - # NOTE(mhickey): There are a lot of auth plugins, we just generate - # the config options for a few common ones - plugins = ['password', 'v2password', 'v3password'] - for name in plugins: - for plugin_option in ks_loading.get_auth_plugin_conf_options(name): - if all(option.name != plugin_option.name for option in opt_list): - opt_list.append(plugin_option) - opt_list.sort(key=operator.attrgetter('name')) - return [(GROUP_AUTHTOKEN, opt_list)] diff --git a/ec2api/paths.py b/ec2api/paths.py deleted file mode 100644 index 0ef00af8..00000000 --- a/ec2api/paths.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -from oslo_config import cfg - -path_opts = [ - cfg.StrOpt('state_path', - default='/var/lib/ec2api', - help="Top-level directory for maintaining ec2api's state"), -] - -CONF = cfg.CONF -CONF.register_opts(path_opts) - - -def state_path_def(*args): - """Return an uninterpolated path relative to $state_path.""" - return os.path.join('$state_path', *args) diff --git a/ec2api/s3/__init__.py b/ec2api/s3/__init__.py deleted file mode 100644 index 1f0e83a3..00000000 --- a/ec2api/s3/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`ec2api.s3` -- S3-type object store -===================================================== - -.. automodule:: ec2api.s3 - :platform: Unix - :synopsis: Currently a trivial file-based system, getting extended w/ swift. -""" diff --git a/ec2api/s3/opts.py b/ec2api/s3/opts.py deleted file mode 100644 index 77e150c9..00000000 --- a/ec2api/s3/opts.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import itertools - -import ec2api.s3.s3server - - -def list_opts(): - return [ - ('DEFAULT', - itertools.chain( - ec2api.s3.s3server.s3_opts, - )), - ] diff --git a/ec2api/s3/s3server.py b/ec2api/s3/s3server.py deleted file mode 100644 index 71cfc4c8..00000000 --- a/ec2api/s3/s3server.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2010 OpenStack Foundation -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of an S3-like storage server based on local files. - -Useful to test features that will eventually run on S3, or if you want to -run something locally that was once running on S3. - -We don't support all the features of S3, but it does work with the -standard S3 client for the most basic semantics. 
To use the standard -S3 client with this module:: - - c = S3.AWSAuthConnection("", "", server="localhost", port=8888, - is_secure=False) - c.create_bucket("mybucket") - c.put("mybucket", "mykey", "a value") - print c.get("mybucket", "mykey").body - -""" - -import bisect -import datetime -import os.path - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import fileutils -import routes -from urllib import parse -import webob - -from ec2api import paths -from ec2api import utils -from ec2api import wsgi - - -s3_opts = [ - cfg.StrOpt('buckets_path', - default=paths.state_path_def('buckets'), - help='Path to S3 buckets'), - cfg.StrOpt('s3_listen', - default="0.0.0.0", - help='IP address for S3 API to listen'), - cfg.IntOpt('s3_listen_port', - default=3334, - help='Port for S3 API to listen'), -] - -CONF = cfg.CONF -CONF.register_opts(s3_opts) -LOG = logging.getLogger(__name__) - - -def get_wsgi_server(): - return wsgi.Server("S3 Objectstore", - S3Application(CONF.buckets_path), - port=CONF.s3_listen_port, - host=CONF.s3_listen) - - -class S3Application(wsgi.Router): - """Implementation of an S3-like storage server based on local files. - - If bucket depth is given, we break files up into multiple directories - to prevent hitting file system limits for number of files in each - directories. 1 means one level of directories, 2 means 2, etc. 
- - """ - - def __init__(self, root_directory, bucket_depth=0, mapper=None): - if mapper is None: - mapper = routes.Mapper() - - mapper.connect( - '/', - controller=lambda *a, **kw: RootHandler(self)(*a, **kw)) - mapper.connect( - '/{bucket}/{object_name}', - controller=lambda *a, **kw: ObjectHandler(self)(*a, **kw)) - mapper.connect( - '/{bucket_name}', - controller=lambda *a, **kw: BucketHandler(self)(*a, **kw), - requirements={'bucket_name': '[^/]+/?'}) - self.directory = os.path.abspath(root_directory) - fileutils.ensure_tree(self.directory) - self.bucket_depth = bucket_depth - super(S3Application, self).__init__(mapper) - - -class BaseRequestHandler(object): - """Base class emulating Tornado's web framework pattern in WSGI. - - This is a direct port of Tornado's implementation, so some key decisions - about how the code interacts have already been chosen. - - The two most common ways of designing web frameworks can be - classified as async object-oriented and sync functional. - - Tornado's is on the OO side because a response is built up in and using - the shared state of an object and one of the object's methods will - eventually trigger the "finishing" of the response asynchronously. - - Most WSGI stuff is in the functional side, we pass a request object to - every call down a chain and the eventual return value will be a response. - - Part of the function of the routing code in S3Application as well as the - code in BaseRequestHandler's __call__ method is to merge those two styles - together enough that the Tornado code can work without extensive - modifications. - - To do that it needs to give the Tornado-style code clean objects that it - can modify the state of for each request that is processed, so we use a - very simple factory lambda to create new state for each request, that's - the stuff in the router, and when we let the Tornado code modify that - object to handle the request, then we return the response it generated. 
- This wouldn't work the same if Tornado was being more async'y and doing - other callbacks throughout the process, but since Tornado is being - relatively simple here we can be satisfied that the response will be - complete by the end of the get/post method. - - """ - - def __init__(self, application): - self.application = application - - @webob.dec.wsgify - def __call__(self, request): - try: - method = request.method.lower() - f = getattr(self, method, self.invalid) - self.request = request - self.response = webob.Response() - params = request.environ['wsgiorg.routing_args'][1] - del params['controller'] - f(**params) - except Exception: - # TODO(andrey-mp): improve this block - LOG.exception('Unhandled error') - self.render_xml({"Error": { - "Code": "BadRequest", - "Message": "Unhandled error" - }}) - self.set_status(501) - - return self.response - - def get_argument(self, arg, default): - return self.request.params.get(arg, default) - - def set_header(self, header, value): - self.response.headers[header] = value - - def set_status(self, status_code): - self.response.status = status_code - - def set_404(self): - self.render_xml({"Error": { - "Code": "NoSuchKey", - "Message": "The resource you requested does not exist" - }}) - self.set_status(404) - - def finish(self, body=''): - if isinstance(body, bytes): - self.response.body = body - else: - self.response.body = body.encode("utf-8") - - def invalid(self, **kwargs): - pass - - def render_xml(self, value): - assert isinstance(value, dict) and len(value) == 1 - self.set_header("Content-Type", "application/xml; charset=UTF-8") - name = next(iter(value.keys())) - parts = [] - parts.append('<' + name + - ' xmlns="http://s3.amazonaws.com/doc/2006-03-01/">') - self._render_parts(next(iter(value.values())), parts) - parts.append('') - self.finish('\n' + - ''.join(parts)) - - def _render_parts(self, value, parts=None): - if not parts: - parts = [] - - if isinstance(value, str): - parts.append(utils.xhtml_escape(value)) 
- elif isinstance(value, int): - parts.append(str(value)) - elif isinstance(value, datetime.datetime): - parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z")) - elif isinstance(value, dict): - for name, subvalue in value.items(): - if not isinstance(subvalue, list): - subvalue = [subvalue] - for subsubvalue in subvalue: - parts.append('<' + name + '>') - self._render_parts(subsubvalue, parts) - parts.append('') - else: - raise Exception("Unknown S3 value type %r", value) - - def _object_path(self, bucket, object_name): - if self.application.bucket_depth < 1: - return os.path.abspath(os.path.join( - self.application.directory, bucket, object_name)) - name_hash = utils.get_hash_str(object_name) - path = os.path.abspath(os.path.join( - self.application.directory, bucket)) - for i in range(self.application.bucket_depth): - path = os.path.join(path, name_hash[:2 * (i + 1)]) - return os.path.join(path, object_name) - - -class RootHandler(BaseRequestHandler): - def get(self): - names = os.listdir(self.application.directory) - buckets = [] - for name in names: - path = os.path.join(self.application.directory, name) - info = os.stat(path) - buckets.append({ - "Name": name, - "CreationDate": datetime.datetime.utcfromtimestamp( - info.st_ctime), - }) - self.render_xml({"ListAllMyBucketsResult": { - "Buckets": {"Bucket": buckets}, - }}) - - -class BucketHandler(BaseRequestHandler): - def get(self, bucket_name): - prefix = self.get_argument("prefix", u"") - marker = self.get_argument("marker", u"") - max_keys = int(self.get_argument("max-keys", 50000)) - path = os.path.abspath(os.path.join(self.application.directory, - bucket_name)) - terse = int(self.get_argument("terse", 0)) - if (not path.startswith(self.application.directory) or - not os.path.isdir(path)): - self.set_404() - return - object_names = [] - for root, _dirs, files in os.walk(path): - for file_name in files: - object_names.append(os.path.join(root, file_name)) - skip = len(path) + 1 - for i in 
range(self.application.bucket_depth): - skip += 2 * (i + 1) + 1 - object_names = [n[skip:] for n in object_names] - object_names.sort() - contents = [] - - start_pos = 0 - if marker: - start_pos = bisect.bisect_right(object_names, marker, start_pos) - if prefix: - start_pos = bisect.bisect_left(object_names, prefix, start_pos) - - truncated = False - for object_name in object_names[start_pos:]: - if not object_name.startswith(prefix): - break - if len(contents) >= max_keys: - truncated = True - break - object_path = self._object_path(bucket_name, object_name) - c = {"Key": object_name} - if not terse: - info = os.stat(object_path) - c.update({ - "LastModified": datetime.datetime.utcfromtimestamp( - info.st_mtime), - "Size": info.st_size, - }) - contents.append(c) - marker = object_name - self.render_xml({"ListBucketResult": { - "Name": bucket_name, - "Prefix": prefix, - "Marker": marker, - "MaxKeys": max_keys, - "IsTruncated": truncated, - "Contents": contents, - }}) - - def put(self, bucket_name): - path = os.path.abspath(os.path.join( - self.application.directory, bucket_name)) - if (not path.startswith(self.application.directory) or - os.path.exists(path)): - self.set_status(403) - return - fileutils.ensure_tree(path) - self.finish() - - def delete(self, bucket_name): - path = os.path.abspath(os.path.join( - self.application.directory, bucket_name)) - if (not path.startswith(self.application.directory) or - not os.path.isdir(path)): - self.set_404() - return - if len(os.listdir(path)) > 0: - self.set_status(403) - return - os.rmdir(path) - self.set_status(204) - self.finish() - - def head(self, bucket_name): - path = os.path.abspath(os.path.join(self.application.directory, - bucket_name)) - if (not path.startswith(self.application.directory) or - not os.path.isdir(path)): - self.set_404() - return - self.set_status(200) - self.finish() - - -class ObjectHandler(BaseRequestHandler): - def get(self, bucket, object_name): - object_name = parse.unquote(object_name) - 
path = self._object_path(bucket, object_name) - if (not path.startswith(self.application.directory) or - not os.path.isfile(path)): - self.set_404() - return - info = os.stat(path) - self.set_header("Content-Type", "application/unknown") - self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp( - info.st_mtime)) - object_file = open(path, "rb") - try: - self.finish(object_file.read()) - finally: - object_file.close() - - def put(self, bucket, object_name): - object_name = parse.unquote(object_name) - bucket_dir = os.path.abspath(os.path.join( - self.application.directory, bucket)) - if (not bucket_dir.startswith(self.application.directory) or - not os.path.isdir(bucket_dir)): - self.set_404() - return - path = self._object_path(bucket, object_name) - if not path.startswith(bucket_dir) or os.path.isdir(path): - self.set_status(403) - return - directory = os.path.dirname(path) - fileutils.ensure_tree(directory) - object_file = open(path, "wb") - object_file.write(self.request.body) - object_file.close() - self.set_header('ETag', - '"%s"' % utils.get_hash_str(self.request.body)) - self.finish() - - def delete(self, bucket, object_name): - object_name = parse.unquote(object_name) - path = self._object_path(bucket, object_name) - if (not path.startswith(self.application.directory) or - not os.path.isfile(path)): - self.set_404() - return - os.unlink(path) - self.set_status(204) - self.finish() diff --git a/ec2api/service.py b/ec2api/service.py deleted file mode 100644 index dd59e265..00000000 --- a/ec2api/service.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generic Node base class for all workers that run on hosts.""" - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import service -from oslo_utils import importutils - -from ec2api import exception -from ec2api.i18n import _ -from ec2api import wsgi - -LOG = logging.getLogger(__name__) - -service_opts = [ - cfg.StrOpt('ec2api_listen', - default="0.0.0.0", - help='The IP address on which the EC2 API will listen.'), - cfg.IntOpt('ec2api_listen_port', - default=8788, - help='The port on which the EC2 API will listen.'), - cfg.BoolOpt('ec2api_use_ssl', - default=False, - help='Enable ssl connections or not for EC2 API'), - cfg.IntOpt('ec2api_workers', - help='Number of workers for EC2 API service. The default will ' - 'be equal to the number of CPUs available.'), - cfg.StrOpt('metadata_listen', - default="0.0.0.0", - help='The IP address on which the metadata API will listen.'), - cfg.IntOpt('metadata_listen_port', - default=8789, - help='The port on which the metadata API will listen.'), - cfg.BoolOpt('metadata_use_ssl', - default=False, - help='Enable ssl connections or not for EC2 API Metadata'), - cfg.IntOpt('metadata_workers', - help='Number of workers for metadata service. 
The default will ' - 'be the number of CPUs available.'), -] - -CONF = cfg.CONF -CONF.register_opts(service_opts) - - -class WSGIService(service.ServiceBase): - """Provides ability to launch API from a 'paste' configuration.""" - - def __init__(self, name, loader=None, max_url_len=None): - """Initialize, but do not start the WSGI server. - - :param name: The name of the WSGI server given to the loader. - :param loader: Loads the WSGI application using the given name. - :returns: None - - """ - self.name = name - self.manager = self._get_manager() - self.loader = loader or wsgi.Loader() - self.app = self.loader.load_app(name) - self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") - self.port = getattr(CONF, '%s_listen_port' % name, 0) - self.use_ssl = getattr(CONF, '%s_use_ssl' % name, False) - self.workers = (getattr(CONF, '%s_workers' % name, None) or - processutils.get_worker_count()) - if self.workers and self.workers < 1: - worker_name = '%s_workers' % name - msg = (_("%(worker_name)s value of %(workers)s is invalid, " - "must be greater than 0") % - {'worker_name': worker_name, - 'workers': str(self.workers)}) - raise exception.InvalidInput(msg) - self.server = wsgi.Server(name, - self.app, - host=self.host, - port=self.port, - use_ssl=self.use_ssl, - max_url_len=max_url_len) - # Pull back actual port used - self.port = self.server.port - - def reset(self): - """Reset server greenpool size to default. - - :returns: None - - """ - self.server.reset() - - def _get_manager(self): - """Initialize a Manager object appropriate for this service. - - Use the service name to look up a Manager subclass from the - configuration and initialize an instance. If no class name - is configured, just return None. - - :returns: a Manager instance, or None. 
- - """ - fl = '%s_manager' % self.name - if fl not in CONF: - return None - - manager_class_name = CONF.get(fl, None) - if not manager_class_name: - return None - - manager_class = importutils.import_class(manager_class_name) - return manager_class() - - def start(self): - """Start serving this service using loaded configuration. - - Also, retrieve updated port number in case '0' was passed in, which - indicates a random port should be used. - - :returns: None - - """ - if self.manager: - self.manager.init_host() - self.manager.pre_start_hook() - self.server.start() - if self.manager: - self.manager.post_start_hook() - - def stop(self): - """Stop serving this API. - - :returns: None - - """ - self.server.stop() - - def wait(self): - """Wait for the service to stop serving this API. - - :returns: None - - """ - self.server.wait() - - -# NOTE(vish): the global launcher is to maintain the existing -# functionality of calling service.serve + -# service.wait -_launcher = None - - -def serve(server, workers=None): - global _launcher - if _launcher: - raise RuntimeError(_('serve() can only be called once')) - - _launcher = service.launch(CONF, server, workers=workers) - - -def wait(): - _launcher.wait() diff --git a/ec2api/tests/__init__.py b/ec2api/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ec2api/tests/botocoreclient.py b/ec2api/tests/botocoreclient.py deleted file mode 100644 index 848be8c2..00000000 --- a/ec2api/tests/botocoreclient.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import botocore.session -from oslo_config import types - - -def _get_client(client_name, url, region, access, secret, ca_bundle): - connection_data = { - 'config_file': (None, 'AWS_CONFIG_FILE', None, None), - 'region': ('region', 'AWS_DEFAULT_REGION', region, None), - } - session = botocore.session.get_session(connection_data) - kwargs = { - 'region_name': region, - 'endpoint_url': url, - 'aws_access_key_id': access, - 'aws_secret_access_key': secret - } - if ca_bundle: - try: - kwargs['verify'] = types.Boolean()(ca_bundle) - except Exception: - kwargs['verify'] = ca_bundle - return session.create_client(client_name, **kwargs) - - -def get_ec2_client(url, region, access, secret, ca_bundle=None): - return _get_client('ec2', url, region, access, secret, ca_bundle) - - -def get_s3_client(url, region, access, secret, ca_bundle=None): - return _get_client('s3', url, region, access, secret, ca_bundle) diff --git a/ec2api/tests/unit/__init__.py b/ec2api/tests/unit/__init__.py deleted file mode 100644 index 68783956..00000000 --- a/ec2api/tests/unit/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -:mod:`ec2api.tests.unit.unit` -- EC2api Unittests -===================================================== - -.. automodule:: ec2api.tests.unit.unit - :platform: Unix -""" - -# See http://code.google.com/p/python-nose/issues/detail?id=373 -# The code below enables nosetests to work with i18n _() blocks -import builtins -setattr(builtins, '_', lambda x: x) - -# NOTE(ft): this is required by test_s3.S3APITestCase to switch execution -# between test and server threads -import eventlet # noqa: E402 -eventlet.monkey_patch(socket=True) diff --git a/ec2api/tests/unit/abs.tar.gz b/ec2api/tests/unit/abs.tar.gz deleted file mode 100644 index 4d39507340b2afa707112791ed3af4a3597667c9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 153 zcmb2|=3wYvb;FN=`RzqVu0sX_tq<4Oy5>!oIm_oEHb{ zF=GS%xD}UG=Xi6jldn8wxpk%G$tt}$t7g8OaD`1>EB9^iaUcEd*|O>HW7mE^yL<27 zelG9-%M0IqT`Za-X8x-`|8KU 0, - 'Filter by %s does not work' % name) - - resp = self.execute(operation, - {'Filter.1.Name': name, - 'Filter.1.Value.1': 'dummy filter value'}) - self.assertTrue(resp[resultset_key] is None or - len(resp[resultset_key]) == 0) - - def check_tag_support(self, operation, resultset_key, sample_item_id, - id_key, item_kinds=[]): - self.db_api.get_tags = tools.CopyingMock( - return_value=[{'item_id': sample_item_id, - 'key': 'fake_key', - 'value': 'fake_value'}]) - ec2_tags = [{'key': 'fake_key', - 'value': 'fake_value'}] - - resp = self.execute(operation, {}) - tag_found = False - if type(resultset_key) is list: - resp_items = 
itertools.chain(*(r[resultset_key[1]] - for r in resp[resultset_key[0]])) - else: - resp_items = resp[resultset_key] - resultset_key = [resultset_key] - for resp_item in resp_items: - if resp_item.get(id_key) == sample_item_id: - self.assertIn('tagSet', resp_item) - self.assertThat(resp_item['tagSet'], - matchers.ListMatches(ec2_tags)) - tag_found = True - else: - self.assertTrue('tagSet' not in resp_item or - resp_item['tagSet'] == []) - self.assertTrue(tag_found) - if not item_kinds: - item_kinds = (ec2utils.get_ec2_id_kind(sample_item_id),) - self.assertTrue(self.db_api.get_tags.call_count == 1 and - (self.db_api.get_tags.mock_calls[0] in - (mock.call(mock.ANY, item_kinds, set()), - mock.call(mock.ANY, item_kinds, None)))) - self.db_api.reset_mock() - - id_param = '%s%s.1' % (id_key[0].capitalize(), id_key[1:]) - resp = self.execute(operation, {id_param: sample_item_id}) - self.assertTrue( - self.db_api.get_tags.call_count == 1 and - (self.db_api.get_tags.mock_calls[0] in - (mock.call(mock.ANY, item_kinds, set([sample_item_id])), - mock.call(mock.ANY, item_kinds, [sample_item_id])))) - - self.check_filtering( - operation, resultset_key[0], - [('tag-key', 'fake_key'), - ('tag-value', 'fake_value'), - ('tag:fake_key', 'fake_value')]) - - def _execute(self, action, args): - ec2_request = ec2api.api.apirequest.APIRequest(action, 'fake_v1', args) - ec2_context = create_context() - environ = {'REQUEST_METHOD': 'FAKE', - 'ec2.request': ec2_request, - 'ec2api.context': ec2_context} - request = ec2api.wsgi.Request(environ) - response = request.send(ec2api.api.Executor()) - return (response.status_code, - self._check_and_transform_response(response, action)) - - def _check_and_transform_response(self, response, action): - body = tools.parse_xml(response.body) - if response.status_code == 200: - action_tag = '%sResponse' % action - self.assertIn(action_tag, body) - body = body.pop(action_tag) - self.assertIn('requestId', body) - body.pop('requestId') - else: - 
self.assertIn('Response', body) - body = body.pop('Response') - self.assertIn('RequestID', body) - body.pop('RequestID') - self.assertEqual(1, len(body)) - self.assertIn('Errors', body) - body = body.pop('Errors') - self.assertEqual(1, len(body)) - self.assertIn('Error', body) - self.assertEqual(2, len(body['Error'])) - return body - - def _format_error_message(self, status_code, response): - if status_code >= 400: - return '%s: %s' % (response['Error']['Code'], - response['Error']['Message']) - else: - return '' - - -class DbTestCase(BaseTestCase): - - def setUp(self): - super(DbTestCase, self).setUp() - self.configure(connection='sqlite://', group='database') - self.configure(sqlite_synchronous=False, group='database') - self._init_db_schema() - engine = db_backend.get_engine() - conn = engine.connect() - conn.connection.executescript(DB_SCHEMA) - self.addCleanup(engine.dispose) - - def _init_db_schema(self): - global DB_SCHEMA - if not DB_SCHEMA: - - engine = db_backend.get_engine() - conn = engine.connect() - migration.db_sync() - DB_SCHEMA = "".join(line for line in conn.connection.iterdump()) - engine.dispose() diff --git a/ec2api/tests/unit/fakes.py b/ec2api/tests/unit/fakes.py deleted file mode 100644 index 1f8a5972..00000000 --- a/ec2api/tests/unit/fakes.py +++ /dev/null @@ -1,2307 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -import copy -import random -import uuid - -from lxml import etree -from oslo_serialization import jsonutils - -from ec2api.api import ec2utils -from ec2api.tests.unit import tools - - -# Helper functions section - -# random identifier generators -def random_os_id(): - return str(uuid.uuid4()) - - -def random_ec2_id(kind): - return '%s-%08x' % (kind, random.randint(0, 0xffffffff)) - -# Plain constants section -# Constant name notation: -# [[]] -# where -# type - type of object the constant represents -# ID - for identifiers, CIDR for cidrs, etc -# subtype - type of object storage, is used for IDs only -# EC2 - object representation to end user -# OS - object is stored in OpenStack -# object_name - identifies the object - - -# common constants -ID_OS_USER = random_os_id() -ID_OS_PROJECT = random_os_id() -TIME_ATTACH_NETWORK_INTERFACE = ec2utils.isotime(None, True) -MAC_ADDRESS = 'fb:10:2e:b2:ba:b7' - -# vpc constants -ID_EC2_VPC_DEFAULT = random_ec2_id('vpc') -ID_EC2_VPC_1 = random_ec2_id('vpc') -ID_EC2_VPC_2 = random_ec2_id('vpc') -ID_OS_ROUTER_DEFAULT = random_os_id() -ID_OS_ROUTER_1 = random_os_id() -ID_OS_ROUTER_2 = random_os_id() - -CIDR_VPC_DEFAULT = '172.31.0.0/16' -CIDR_VPC_1 = '10.10.0.0/16' -CIDR_VPC_2 = '10.20.0.0/16' -ID_OS_PUBLIC_NETWORK = random_os_id() -NAME_OS_PUBLIC_NETWORK = 'public_external' -IP_ROUTER_1_EXTERNAL_V4 = '172.20.12.25' -IP_ROUTER_1_EXTERNAL_V6 = '::ffff:172.20.12.25' - - -# internet gateway constants -ID_EC2_IGW_DEFAULT = random_ec2_id('igw') -ID_EC2_IGW_1 = random_ec2_id('igw') -ID_EC2_IGW_2 = random_ec2_id('igw') - - -# subnet constants -ID_EC2_SUBNET_DEFAULT = random_ec2_id('subnet') -ID_EC2_SUBNET_1 = random_ec2_id('subnet') -ID_EC2_SUBNET_2 = random_ec2_id('subnet') -ID_OS_SUBNET_DEFAULT = random_os_id() -ID_OS_SUBNET_1 = random_os_id() -ID_OS_SUBNET_2 = random_os_id() -ID_OS_NETWORK_DEFAULT = random_os_id() -ID_OS_NETWORK_1 = random_os_id() -ID_OS_NETWORK_2 = random_os_id() - -CIDR_SUBNET_DEFAULT = 
'172.31.0.0/20' -CIDR_SUBNET_1 = '10.10.1.0/24' -IP_FIRST_SUBNET_1 = '10.10.1.4' -IP_LAST_SUBNET_1 = '10.10.1.254' -IP_GATEWAY_SUBNET_DEFAULT = '172.31.0.1' -IP_GATEWAY_SUBNET_1 = '10.10.1.1' -IP_GATEWAY_SUBNET_2 = '10.10.2.1' -CIDR_SUBNET_2 = '10.10.2.0/24' -IP_FIRST_SUBNET_2 = '10.10.2.4' -IP_LAST_SUBNET_2 = '10.10.2.254' - - -# network interface constants -ID_EC2_NETWORK_INTERFACE_DEFAULT = random_ec2_id('eni') -ID_EC2_NETWORK_INTERFACE_1 = random_ec2_id('eni') -ID_EC2_NETWORK_INTERFACE_2 = random_ec2_id('eni') -ID_EC2_NETWORK_INTERFACE_2_ATTACH = ( - ID_EC2_NETWORK_INTERFACE_2.replace('eni', 'eni-attach')) -ID_OS_PORT_DEFAULT = random_os_id() -ID_OS_PORT_1 = random_os_id() -ID_OS_PORT_2 = random_os_id() - -IP_NETWORK_INTERFACE_DEFAULT = '172.31.0.4' -IP_NETWORK_INTERFACE_1 = '10.10.1.4' -IP_NETWORK_INTERFACE_2 = '10.10.2.254' -IP_NETWORK_INTERFACE_2_EXT_1 = '10.10.2.4' -IP_NETWORK_INTERFACE_2_EXT_2 = '10.10.2.5' -IPS_NETWORK_INTERFACE_2 = (IP_NETWORK_INTERFACE_2, - IP_NETWORK_INTERFACE_2_EXT_1, - IP_NETWORK_INTERFACE_2_EXT_2) -DESCRIPTION_NETWORK_INTERFACE_DEFAULT = 'descriptionDefault' -DESCRIPTION_NETWORK_INTERFACE_1 = 'description1' -DESCRIPTION_NETWORK_INTERFACE_2 = 'description2' - - -# instance constants -ID_EC2_INSTANCE_DEFAULT = random_ec2_id('i') -ID_EC2_INSTANCE_1 = random_ec2_id('i') -ID_EC2_INSTANCE_2 = random_ec2_id('i') -ID_OS_INSTANCE_DEFAULT = random_os_id() -ID_OS_INSTANCE_1 = random_os_id() -ID_OS_INSTANCE_2 = random_os_id() -ID_EC2_RESERVATION_DEFAULT = random_ec2_id('r') -ID_EC2_RESERVATION_1 = random_ec2_id('r') -ID_EC2_RESERVATION_2 = random_ec2_id('r') - -ROOT_DEVICE_NAME_INSTANCE_1 = '/dev/vda' -ROOT_DEVICE_NAME_INSTANCE_2 = '/dev/sdb1' -IPV6_INSTANCE_2 = 'fe80:b33f::a8bb:ccff:fedd:eeff' -CLIENT_TOKEN_INSTANCE_2 = 'client-token-2' -USER_DATA_INSTANCE_2 = base64.b64encode(b'fake-user data').decode('ascii') - - -# DHCP options constants -ID_EC2_DHCP_OPTIONS_1 = random_ec2_id('dopt') -ID_EC2_DHCP_OPTIONS_2 = random_ec2_id('dopt') - - -# 
address constants -ID_EC2_ADDRESS_DEFAULT = random_ec2_id('eipalloc') -ID_EC2_ADDRESS_1 = random_ec2_id('eipalloc') -ID_EC2_ADDRESS_2 = random_ec2_id('eipalloc') -ID_EC2_ASSOCIATION_DEFAULT = ID_EC2_ADDRESS_DEFAULT.replace('eipalloc', - 'eipassoc') -ID_EC2_ASSOCIATION_1 = ID_EC2_ADDRESS_1.replace('eipalloc', 'eipassoc') -ID_EC2_ASSOCIATION_2 = ID_EC2_ADDRESS_2.replace('eipalloc', 'eipassoc') -ID_OS_FLOATING_IP_1 = random_os_id() -ID_OS_FLOATING_IP_2 = random_os_id() - -IP_ADDRESS_1 = '192.168.1.100' -IP_ADDRESS_2 = '192.168.1.200' -IP_ADDRESS_NOVA_1 = '192.168.2.100' - - -# security group constants -ID_EC2_SECURITY_GROUP_DEFAULT = random_ec2_id('sg') -ID_EC2_SECURITY_GROUP_1 = random_ec2_id('sg') -ID_EC2_SECURITY_GROUP_2 = random_ec2_id('sg') -ID_EC2_SECURITY_GROUP_3 = random_ec2_id('sg') -ID_EC2_SECURITY_GROUP_4 = random_ec2_id('sg') -ID_EC2_SECURITY_GROUP_5 = random_ec2_id('sg') -ID_EC2_SECURITY_GROUP_6 = random_ec2_id('sg') -ID_OS_SECURITY_GROUP_DEFAULT = random_os_id() -ID_OS_SECURITY_GROUP_1 = random_os_id() -ID_OS_SECURITY_GROUP_2 = random_os_id() -ID_OS_SECURITY_GROUP_3 = random_os_id() -ID_OS_SECURITY_GROUP_4 = random_os_id() -ID_OS_SECURITY_GROUP_5 = random_os_id() - -ID_NOVA_OS_SECURITY_GROUP_1 = 1 -ID_NOVA_OS_SECURITY_GROUP_2 = 2 - -NAME_DEFAULT_OS_SECURITY_GROUP = 'default' - - -# route table constants -ID_EC2_ROUTE_TABLE_DEFAULT = random_ec2_id('rtb') -ID_EC2_ROUTE_TABLE_1 = random_ec2_id('rtb') -ID_EC2_ROUTE_TABLE_2 = random_ec2_id('rtb') -ID_EC2_ROUTE_TABLE_3 = random_ec2_id('rtb') -ID_EC2_ROUTE_TABLE_ASSOCIATION_DEFAULT = ID_EC2_VPC_DEFAULT.replace('vpc', - 'rtbassoc') -ID_EC2_ROUTE_TABLE_ASSOCIATION_1 = ID_EC2_VPC_1.replace('vpc', 'rtbassoc') -ID_EC2_ROUTE_TABLE_ASSOCIATION_2 = ID_EC2_SUBNET_2.replace('subnet', - 'rtbassoc') -ID_EC2_ROUTE_TABLE_ASSOCIATION_3 = ID_EC2_SUBNET_2.replace('subnet', - 'rtbassoc') - -CIDR_EXTERNAL_NETWORK = '192.168.50.0/24' - - -# image constants -ID_EC2_IMAGE_1 = random_ec2_id('ami') -ID_EC2_IMAGE_2 = 
random_ec2_id('ami') -ID_EC2_IMAGE_AKI_1 = random_ec2_id('aki') -ID_EC2_IMAGE_ARI_1 = random_ec2_id('ari') -ID_OS_IMAGE_1 = random_os_id() -ID_OS_IMAGE_2 = random_os_id() -ID_OS_IMAGE_AKI_1 = random_os_id() -ID_OS_IMAGE_ARI_1 = random_os_id() - -ROOT_DEVICE_NAME_IMAGE_1 = '/dev/sda1' -ROOT_DEVICE_NAME_IMAGE_2 = '/dev/sdb1' -LOCATION_IMAGE_1 = 'fake_bucket/fake.img.manifest.xml' -LOCATION_IMAGE_2 = 'https://download.cirros-cloud.net/0.4.0/' \ - + 'cirros-0.4.0-aarch64-disk.img' - -# volumes constants -ID_EC2_VOLUME_1 = random_ec2_id('vol') -ID_EC2_VOLUME_2 = random_ec2_id('vol') -ID_EC2_VOLUME_3 = random_ec2_id('vol') -ID_OS_VOLUME_1 = random_os_id() -ID_OS_VOLUME_2 = random_os_id() -ID_OS_VOLUME_3 = random_os_id() - - -# snapshots constants -ID_EC2_SNAPSHOT_1 = random_ec2_id('snap') -ID_EC2_SNAPSHOT_2 = random_ec2_id('snap') -ID_OS_SNAPSHOT_1 = random_os_id() -ID_OS_SNAPSHOT_2 = random_os_id() - - -# availability zone constants -NAME_AVAILABILITY_ZONE = 'nova' - - -# key pair constans -NAME_KEY_PAIR = 'keyname' -PRIVATE_KEY_KEY_PAIR = ( - '-----BEGIN RSA PRIVATE KEY-----\n' - 'MIIEowIBAAKCAQEAgXvm1sZ9MDiAXvGraRFja0/WqyJ1gE6j/QPjreNryd34zBFcv2pQXLyvb' - 'gQG\nFxN4rMGNScgKgLSgHjE/TNywkT8N7aYOiRmGkzQciP5t+zf8ZdCyl+hqgoQig1uY8sV/' - 'fSxUWCB9\n8sF7Tpl0iGkWM6Wo0H/PvcwiS2+UPSzArj+b+Erb/JbBF4O8GgSmtLMeq60RuDM' - 'dJi5JYCP66HUw\njtYb/f9y1Q9nEGVcxY2v0RI1n0yOaZDKPInLKHeR/ole2QVwPZB69mBj11' - 'LErqb+jzCaSivnhy6g\nPzaSHdZaRmy1f+6ltFI1iKt+4y/iINOY0skYC1hc7IevE7j7dGQTD' - 'wIDAQABAoIBAEbD2Vfd6MM2\nzemVuHFWoHggjRjAX2k9EWCRBJifJuSPXI7imka+qqbUNCgz' - 'KMTpzlTT/wyouBy5Gp0Fmyu9nP30\ncP9FdsI04hiHLWUtcBwQ7+8RDNn6mmM0JcyWfdOIXnG' - 'hjYMQVuUaGvLM6SQ4EnsteUJh57451zBV\nDbYVRES2Fbq+j8tPQj1KuD0HhZBboNPOxo6E5n' - 'TxvMXnvuI+cb9D99lqATcb8c0zsLMl/5SKEBDc\nj72X4GPfE3Dc5/MO6L/89ms3TqF3lx8lh' - 'wFSMfFfA3Nf5xrX3gnorGe81odXBXFveqMCemvfJYxg\nS9KPkM8CMnwn6yPS3ftW5xH3nMkC' - 'gYEAvN4lQuOTy9RONCtfgZ6lhR00xfDiibOsE2jFXqXlXrZS\nunBx2WRwNuhAcYGbC4T71iC' - 
'BR+LJHECpFjEFX9cKjd8xZPdIzJmwMBylPnli8IxK9UMroxF/MDNy\nnJfdPIWagIrk9VRsQH' - 'UOQW8Ab5dYJuP6c03L5xwmnFfeFnlz10MCgYEAr4Iu182bC2ppwr5AYD8T\n/QKVPZTmizbtG' - 'H/7a2+WnfNCz2u0MOo2h1rF7/SOYR8nalTTsN1z4D8cRX7YQ0P4yBtNRNiN7WH3\n+smTWztI' - 'VYvJA2RsOeP0zfGLJiFSMWLOjlqpJ7KbkEuPcxshGd+/w8upxgJeV8Dwz0ZWbY302kUC\ngYE' - 'AhneTB+CHpaNuWm5W/S46ol9850DtySSG6vq5Kv3qJFii5eKQ7Do6Op145FdmT/lKY9WYtdmd' - '\nXeQbfpVAQlAUT5YM0NnOlv0FF/wNGkHKU4FPDPfZ5avbZjH688qb1S86JTK+eHy25d1xXNz' - 'u7oRO\nWsIN2nIVLmI4iy90C4RFGYkCgYBXpKPtwk/VkItF46nUJku+Agcy3GOQS5p0rJyJ1w' - 'yYzbykRf2S\nm7MlPpAvtqlPGLafI8MexEe0SO++SIyIcq4Oh4u7gITHcS/bfcPnQCBsD8UOu' - '5xMAGjkWuWI4gTg\ngp3xepaUK14B3anB6l9KQ3DIvrCGH/Kq0b+vUkmgpc4LHQKBgBtul9bN' - 'KLF+LJf4JHYNFSurE8Y/\nn8FZ3dZo3T0Q3Sap9bP3ZHemoQ6QXbmpu3H4Mf+2kcNg6YKFW3p' - 'hxW3cuAcZOMHPCrpr3mCdyhF0\nKM74ANEwg8MekBJTcWZUNFv9HZDvTuhp6HSrbMnNEQogkd' - '5PoubiusvAKpeb6NBGnLMq\n' - '-----END RSA PRIVATE KEY-----' -) -PUBLIC_KEY_KEY_PAIR = ( - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDIkYwwXm8UeQXx1c2eFrDIB6b' - '6ApI0KTKs1wezDfFdSIs93vAt4Jx1MyaR/PwqwLk2CDyFoGJBWBI9YcodLAjoRg' - 'Ovr6JigEv5V3yp+eEkeAJO0cPA21vN/KQ8Vxml68ZvvqbdqKZXc/rpFZ1OgCmHt' - 'udo96uQiRB0FM3mdE8YOTswcfkJxTvCe3axX50pYXXfIb0dn9CzC1hyQWYPXvlv' - 'qFNvr/Li7sSBycTBAh4Ar/uEigs/uOjhvzd7GpzY7qDqBVJFAmP7HiiOxoXPkKu' - 'W62Ftd') -FINGERPRINT_KEY_PAIR = ( - '2a:72:dd:aa:0d:a6:45:4d:27:4f:75:28:73:0d:a6:10:35:88:e1:ce') - - -# vpn gateway constants -ID_EC2_VPN_GATEWAY_1 = random_ec2_id('vgw') -ID_EC2_VPN_GATEWAY_2 = random_ec2_id('vgw') - -ID_OS_VPNSERVICE_1 = random_os_id() -ID_OS_VPNSERVICE_2 = random_os_id() - -# customer gateway constants -ID_EC2_CUSTOMER_GATEWAY_1 = random_ec2_id('cgw') -ID_EC2_CUSTOMER_GATEWAY_2 = random_ec2_id('cgw') - -IP_CUSTOMER_GATEWAY_ADDRESS_1 = '172.16.1.11' -IP_CUSTOMER_GATEWAY_ADDRESS_2 = '172.31.2.22' - - -# vpn connection constants -ID_EC2_VPN_CONNECTION_1 = random_ec2_id('vpn') -ID_EC2_VPN_CONNECTION_2 = random_ec2_id('vpn') - -ID_OS_IKEPOLICY_1 = random_os_id() -ID_OS_IKEPOLICY_2 = 
random_os_id() -ID_OS_IPSECPOLICY_1 = random_os_id() -ID_OS_IPSECPOLICY_2 = random_os_id() -ID_OS_IPSEC_SITE_CONNECTION_2 = random_os_id() - -PRE_SHARED_KEY_1 = 'Z54kLbANio5A1.XmkjwYvWuSfVx3_xuG' -PRE_SHARED_KEY_2 = 'FSbXpA.G9306W.BQ2n6W9JZJsyZcMN2G' -CIDR_VPN_1_STATIC = '192.168.101.0/24' -CIDR_VPN_1_PROPAGATED_1 = '192.168.110.0/24' -CIDR_VPN_2_PROPAGATED_1 = '192.168.210.0/24' -CIDR_VPN_2_PROPAGATED_2 = '192.168.220.0/24' - -CUSTOMER_GATEWAY_CONFIGURATION_1_DATA = ( - '' - '' - ' ' + (ID_EC2_CUSTOMER_GATEWAY_1 + - '') + - ' ' + (ID_EC2_VPN_GATEWAY_1 + - '') + - ' ipsec.1' - ' ' + ('NoBGPVPNConnection' - '') + - ' ' - ' ' - ' ' - ' ' + IP_CUSTOMER_GATEWAY_ADDRESS_1 + '' - ' ' - ' ' - ' ' - ' ' - ' ' + IP_ROUTER_1_EXTERNAL_V4 + '' - ' ' - ' ' - ' ' - ' sha1' - ' aes-128' - ' 28800' - ' group2' - ' main' - ' ' + PRE_SHARED_KEY_1 + '' - ' ' - ' ' - ' esp' - ' sha1' - ' aes-128' - ' 3600' - ' group2' - ' tunnel' - ' 1387' - ' ' - ' ' - '').encode("utf-8") -CUSTOMER_GATEWAY_CONFIGURATION_1 = etree.tostring( - etree.fromstring(CUSTOMER_GATEWAY_CONFIGURATION_1_DATA, - parser=etree.XMLParser(remove_blank_text=True)), - xml_declaration=True, encoding='UTF-8', pretty_print=True).decode("utf-8") -CUSTOMER_GATEWAY_CONFIGURATION_2_DATA = ( - '' - '' - ' ' + (ID_EC2_CUSTOMER_GATEWAY_2 + - '') + - ' ' + (ID_EC2_VPN_GATEWAY_2 + - '') + - ' ipsec.1' - ' ' + ('NoBGPVPNConnection' - '') + - ' ' - ' ' - ' ' - ' ' + IP_CUSTOMER_GATEWAY_ADDRESS_2 + '' - ' ' - ' ' - ' ' - ' ' - ' ' - ' ' - ' ' - ' ' - ' sha1' - ' aes-128' - ' 28800' - ' group2' - ' main' - ' ' + PRE_SHARED_KEY_2 + '' - ' ' - ' ' - ' esp' - ' sha1' - ' aes-128' - ' 3600' - ' group2' - ' tunnel' - ' 1387' - ' ' - ' ' - '').encode("utf-8") -CUSTOMER_GATEWAY_CONFIGURATION_2 = etree.tostring( - etree.fromstring(CUSTOMER_GATEWAY_CONFIGURATION_2_DATA, - parser=etree.XMLParser(remove_blank_text=True)), - xml_declaration=True, encoding='UTF-8', pretty_print=True).decode("utf-8") - - -# Object constants section -# Constant name 
notation: -# [] -# where -# subtype - type of object storage, is not used for DB objects -# DB - object is stored in ec2api DB -# EC2 - object representation to end user -# OS - object is stored in OpenStack -# NOVA - object is stored in Nova (for EC2 Classic mode only) -# object_name - identifies the object - -# vpc objects -# 2 vpcs in normal state -DB_VPC_DEFAULT = {'id': ID_EC2_VPC_DEFAULT, - 'os_id': ID_OS_ROUTER_DEFAULT, - 'vpc_id': None, - 'is_default': True, - 'cidr_block': CIDR_VPC_DEFAULT, - 'route_table_id': ID_EC2_ROUTE_TABLE_DEFAULT} -DB_VPC_1 = {'id': ID_EC2_VPC_1, - 'os_id': ID_OS_ROUTER_1, - 'vpc_id': None, - 'is_default': False, - 'cidr_block': CIDR_VPC_1, - 'route_table_id': ID_EC2_ROUTE_TABLE_1} -DB_VPC_2 = {'id': ID_EC2_VPC_2, - 'os_id': ID_OS_ROUTER_2, - 'vpc_id': None, - # is_default is false by default, omit it to check this - 'cidr_block': CIDR_VPC_2} - -EC2_VPC_DEFAULT = {'vpcId': ID_EC2_VPC_DEFAULT, - 'cidrBlock': CIDR_VPC_DEFAULT, - 'isDefault': True, - 'state': 'available', - 'dhcpOptionsId': 'default'} -EC2_VPC_1 = {'vpcId': ID_EC2_VPC_1, - 'cidrBlock': CIDR_VPC_1, - 'isDefault': False, - 'state': 'available', - 'dhcpOptionsId': 'default'} -EC2_VPC_2 = {'vpcId': ID_EC2_VPC_2, - 'cidrBlock': CIDR_VPC_2, - 'isDefault': False, - 'state': 'available', - 'dhcpOptionsId': 'default'} - -OS_ROUTER_DEFAULT = {'id': ID_OS_ROUTER_DEFAULT, - 'name': ID_EC2_VPC_DEFAULT, - 'external_gateway_info': None} -OS_ROUTER_1 = {'id': ID_OS_ROUTER_1, - 'name': ID_EC2_VPC_1, - 'external_gateway_info': { - 'external_fixed_ips': [ - {'ip_address': IP_ROUTER_1_EXTERNAL_V6}, - {'ip_address': IP_ROUTER_1_EXTERNAL_V4}]}} -OS_ROUTER_2 = {'id': ID_OS_ROUTER_2, - 'name': ID_EC2_VPC_2, - 'external_gateway_info': None} - - -# internet gateway objects -# 2 internate gateway, the first is attached to the first vpc -DB_IGW_DEFAULT = {'id': ID_EC2_IGW_DEFAULT, - 'os_id': None, - 'vpc_id': ID_EC2_VPC_DEFAULT} -DB_IGW_1 = {'id': ID_EC2_IGW_1, - 'os_id': None, - 'vpc_id': 
ID_EC2_VPC_1} -DB_IGW_2 = {'id': ID_EC2_IGW_2, - 'os_id': None, - 'vpc_id': None} - -EC2_IGW_DEFAULT = {'internetGatewayId': ID_EC2_IGW_DEFAULT, - 'attachmentSet': [{'vpcId': ID_EC2_VPC_DEFAULT, - 'state': 'available'}]} -EC2_IGW_1 = {'internetGatewayId': ID_EC2_IGW_1, - 'attachmentSet': [{'vpcId': ID_EC2_VPC_1, - 'state': 'available'}]} -EC2_IGW_2 = {'internetGatewayId': ID_EC2_IGW_2, - 'attachmentSet': []} - - -# subnet objects -# 2 subnets in the first vpc -DB_SUBNET_DEFAULT = {'id': ID_EC2_SUBNET_DEFAULT, - 'os_id': ID_OS_SUBNET_DEFAULT, - 'vpc_id': ID_EC2_VPC_DEFAULT} -DB_SUBNET_1 = {'id': ID_EC2_SUBNET_1, - 'os_id': ID_OS_SUBNET_1, - 'vpc_id': ID_EC2_VPC_1, - 'os_vpnservice_id': ID_OS_VPNSERVICE_1} -DB_SUBNET_2 = {'id': ID_EC2_SUBNET_2, - 'os_id': ID_OS_SUBNET_2, - 'vpc_id': ID_EC2_VPC_1, - 'route_table_id': ID_EC2_ROUTE_TABLE_3, - 'os_vpnservice_id': ID_OS_VPNSERVICE_2} - -EC2_SUBNET_DEFAULT = {'subnetId': ID_EC2_SUBNET_DEFAULT, - 'state': 'available', - 'vpcId': ID_EC2_VPC_DEFAULT, - 'cidrBlock': CIDR_SUBNET_DEFAULT, - 'defaultForAz': False, - 'availableIpAddressCount': 4093, - 'mapPublicIpOnLaunch': False} -EC2_SUBNET_1 = {'subnetId': ID_EC2_SUBNET_1, - 'state': 'available', - 'vpcId': ID_EC2_VPC_1, - 'cidrBlock': CIDR_SUBNET_1, - 'defaultForAz': False, - 'availableIpAddressCount': 253, - 'mapPublicIpOnLaunch': False} -EC2_SUBNET_2 = {'subnetId': ID_EC2_SUBNET_2, - 'state': 'available', - 'vpcId': ID_EC2_VPC_1, - 'cidrBlock': CIDR_SUBNET_2, - 'defaultForAz': False, - 'availableIpAddressCount': 253, - 'mapPublicIpOnLaunch': False} - -OS_SUBNET_DEFAULT = {'id': ID_OS_SUBNET_DEFAULT, - 'network_id': ID_OS_NETWORK_DEFAULT, - 'name': ID_EC2_SUBNET_DEFAULT, - 'ip_version': '4', - 'cidr': CIDR_SUBNET_DEFAULT, - 'host_routes': [{'nexthop': IP_GATEWAY_SUBNET_DEFAULT, - 'destination': CIDR_VPC_DEFAULT}, - {'nexthop': IP_GATEWAY_SUBNET_DEFAULT, - 'destination': '0.0.0.0/0'}], - 'gateway_ip': None} -OS_SUBNET_1 = {'id': ID_OS_SUBNET_1, - 'network_id': ID_OS_NETWORK_1, 
- 'name': ID_EC2_SUBNET_1, - 'ip_version': '4', - 'cidr': CIDR_SUBNET_1, - 'host_routes': [{'nexthop': IP_GATEWAY_SUBNET_1, - 'destination': '10.10.0.0/16'}, - {'nexthop': IP_GATEWAY_SUBNET_1, - 'destination': '169.254.169.254/32'}], - 'gateway_ip': IP_GATEWAY_SUBNET_1} -OS_SUBNET_2 = {'id': ID_OS_SUBNET_2, - 'network_id': ID_OS_NETWORK_2, - 'name': ID_EC2_SUBNET_2, - 'ip_version': '4', - 'cidr': CIDR_SUBNET_2, - 'host_routes': [], - 'gateway_ip': None} -OS_NETWORK_DEFAULT = {'id': ID_OS_NETWORK_DEFAULT, - 'name': ID_EC2_SUBNET_DEFAULT, - 'status': 'available'} -OS_NETWORK_1 = {'id': ID_OS_NETWORK_1, - 'name': ID_EC2_SUBNET_1, - 'status': 'available'} -OS_NETWORK_2 = {'id': ID_OS_NETWORK_2, - 'name': ID_EC2_SUBNET_2, - 'status': 'available'} - - -# network interface objects -# 2 ports in both subnets, the second is attached to the first instance -DB_NETWORK_INTERFACE_DEFAULT = {'id': ID_EC2_NETWORK_INTERFACE_DEFAULT, - 'os_id': ID_OS_PORT_DEFAULT, - 'vpc_id': ID_EC2_VPC_DEFAULT, - 'subnet_id': ID_EC2_SUBNET_DEFAULT, - 'description': ( - DESCRIPTION_NETWORK_INTERFACE_DEFAULT), - 'private_ip_address': ( - IP_NETWORK_INTERFACE_DEFAULT)} -DB_NETWORK_INTERFACE_1 = {'id': ID_EC2_NETWORK_INTERFACE_1, - 'os_id': ID_OS_PORT_1, - 'vpc_id': ID_EC2_VPC_1, - 'subnet_id': ID_EC2_SUBNET_1, - 'description': DESCRIPTION_NETWORK_INTERFACE_1, - 'private_ip_address': IP_NETWORK_INTERFACE_1} -DB_NETWORK_INTERFACE_2 = {'id': ID_EC2_NETWORK_INTERFACE_2, - 'os_id': ID_OS_PORT_2, - 'vpc_id': ID_EC2_VPC_1, - 'subnet_id': ID_EC2_SUBNET_2, - 'description': DESCRIPTION_NETWORK_INTERFACE_2, - 'private_ip_address': IP_NETWORK_INTERFACE_2, - 'instance_id': ID_EC2_INSTANCE_1, - 'device_index': 0, - 'delete_on_termination': False, - 'attach_time': TIME_ATTACH_NETWORK_INTERFACE} - -EC2_NETWORK_INTERFACE_DEFAULT = { - 'networkInterfaceId': ID_EC2_NETWORK_INTERFACE_DEFAULT, - 'status': 'available', - 'vpcId': ID_EC2_VPC_DEFAULT, - 'subnetId': ID_EC2_SUBNET_DEFAULT, - 'description': 
DESCRIPTION_NETWORK_INTERFACE_DEFAULT, - 'macAddress': MAC_ADDRESS, - 'privateIpAddress': IP_NETWORK_INTERFACE_DEFAULT, - 'privateIpAddressesSet': [ - {'privateIpAddress': IP_NETWORK_INTERFACE_DEFAULT, - 'primary': True}], - 'sourceDestCheck': True, - 'ownerId': ID_OS_PROJECT, - 'requesterManaged': False, - 'groupSet': [{'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'groupId': ID_EC2_SECURITY_GROUP_DEFAULT}], - 'tagSet': [], -} -EC2_NETWORK_INTERFACE_1 = { - 'networkInterfaceId': ID_EC2_NETWORK_INTERFACE_1, - 'status': 'available', - 'vpcId': ID_EC2_VPC_1, - 'subnetId': ID_EC2_SUBNET_1, - 'description': DESCRIPTION_NETWORK_INTERFACE_1, - 'macAddress': MAC_ADDRESS, - 'privateIpAddress': IP_NETWORK_INTERFACE_1, - 'privateIpAddressesSet': [{'privateIpAddress': IP_NETWORK_INTERFACE_1, - 'primary': True}], - 'sourceDestCheck': True, - 'ownerId': ID_OS_PROJECT, - 'requesterManaged': False, - 'groupSet': [{'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'groupId': ID_EC2_SECURITY_GROUP_1}], - 'tagSet': [], -} -EC2_NETWORK_INTERFACE_2 = { - 'networkInterfaceId': ID_EC2_NETWORK_INTERFACE_2, - 'status': 'in-use', - 'vpcId': ID_EC2_VPC_1, - 'subnetId': ID_EC2_SUBNET_2, - 'description': DESCRIPTION_NETWORK_INTERFACE_2, - 'macAddress': MAC_ADDRESS, - 'privateIpAddress': IP_NETWORK_INTERFACE_2, - 'association': { - 'associationId': ID_EC2_ASSOCIATION_2, - 'allocationId': ID_EC2_ADDRESS_2, - 'ipOwnerId': ID_OS_PROJECT, - 'publicDnsName': None, - 'publicIp': IP_ADDRESS_2, - }, - 'privateIpAddressesSet': [ - {'privateIpAddress': IP_NETWORK_INTERFACE_2, - 'primary': True, - 'association': { - 'associationId': ID_EC2_ASSOCIATION_2, - 'allocationId': ID_EC2_ADDRESS_2, - 'ipOwnerId': ID_OS_PROJECT, - 'publicDnsName': None, - 'publicIp': IP_ADDRESS_2, - }}, - {'privateIpAddress': IP_NETWORK_INTERFACE_2_EXT_1, - 'primary': False}, - {'privateIpAddress': IP_NETWORK_INTERFACE_2_EXT_2, - 'primary': False}, - ], - 'sourceDestCheck': True, - 'ownerId': ID_OS_PROJECT, - 'requesterManaged': 
False, - 'attachment': { - 'status': 'attached', - 'attachTime': TIME_ATTACH_NETWORK_INTERFACE, - 'deleteOnTermination': False, - 'attachmentId': ID_EC2_NETWORK_INTERFACE_2_ATTACH, - 'instanceId': ID_EC2_INSTANCE_1, - 'instanceOwnerId': ID_OS_PROJECT, - 'deviceIndex': 0, - }, - 'groupSet': [{'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'groupId': ID_EC2_SECURITY_GROUP_1}], - 'tagSet': [], -} - -OS_PORT_1 = {'id': ID_OS_PORT_1, - 'network_id': ID_OS_SUBNET_1, - 'name': ID_EC2_NETWORK_INTERFACE_1, - 'status': 'DOWN', - 'mac_address': MAC_ADDRESS, - 'fixed_ips': [{'ip_address': IP_NETWORK_INTERFACE_1, - 'subnet_id': ID_OS_SUBNET_1}], - 'device_id': None, - 'device_owner': "compute:{}".format(NAME_AVAILABILITY_ZONE), - 'security_groups': [ID_OS_SECURITY_GROUP_1]} -OS_PORT_2 = {'id': ID_OS_PORT_2, - 'network_id': ID_OS_SUBNET_2, - 'name': ID_EC2_NETWORK_INTERFACE_2, - 'status': 'ACTIVE', - 'mac_address': MAC_ADDRESS, - 'fixed_ips': [{'ip_address': IP_NETWORK_INTERFACE_2, - 'subnet_id': ID_OS_SUBNET_2}, - {'ip_address': IP_NETWORK_INTERFACE_2_EXT_1, - 'subnet_id': ID_OS_SUBNET_2}, - {'ip_address': IP_NETWORK_INTERFACE_2_EXT_2, - 'subnet_id': ID_OS_SUBNET_2}], - 'device_id': ID_OS_INSTANCE_1, - 'device_owner': "compute:{}".format(NAME_AVAILABILITY_ZONE), - 'security_groups': [ID_OS_SECURITY_GROUP_1], - 'tenant_id': ID_OS_PROJECT} - - -# instance objects -TIME_CREATE_INSTANCE_1 = ec2utils.isotime(None, True) -TIME_CREATE_INSTANCE_2 = ec2utils.isotime(None, True) - -DB_INSTANCE_DEFAULT = { - 'id': ID_EC2_INSTANCE_DEFAULT, - 'os_id': ID_OS_INSTANCE_DEFAULT, - 'vpc_id': ID_EC2_VPC_DEFAULT, - 'reservation_id': ID_EC2_RESERVATION_DEFAULT, - 'launch_index': 0, -} -DB_INSTANCE_1 = { - 'id': ID_EC2_INSTANCE_1, - 'os_id': ID_OS_INSTANCE_1, - 'vpc_id': ID_EC2_VPC_1, - 'reservation_id': ID_EC2_RESERVATION_1, - 'launch_index': 0, -} -DB_INSTANCE_2 = { - 'id': ID_EC2_INSTANCE_2, - 'os_id': ID_OS_INSTANCE_2, - 'vpc_id': None, - 'reservation_id': ID_EC2_RESERVATION_2, - 'launch_index': 
0, - 'client_token': CLIENT_TOKEN_INSTANCE_2, -} - -EC2_INSTANCE_DEFAULT = { - 'instanceId': ID_EC2_INSTANCE_DEFAULT, - 'privateIpAddress': IP_NETWORK_INTERFACE_2, - 'vpcId': ID_EC2_VPC_DEFAULT, - 'subnetId': ID_EC2_SUBNET_DEFAULT, - 'groupSet': [{'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'groupId': ID_EC2_SECURITY_GROUP_1}], - 'networkInterfaceSet': [ - {'networkInterfaceId': ID_EC2_NETWORK_INTERFACE_2, - 'status': 'in-use', - 'vpcId': ID_EC2_VPC_DEFAULT, - 'subnetId': ID_EC2_SUBNET_DEFAULT, - 'description': DESCRIPTION_NETWORK_INTERFACE_2, - 'macAddress': MAC_ADDRESS, - 'privateIpAddress': IP_NETWORK_INTERFACE_2, - 'association': { - 'ipOwnerId': ID_OS_PROJECT, - 'publicDnsName': None, - 'publicIp': IP_ADDRESS_2, - }, - 'privateIpAddressesSet': [ - {'privateIpAddress': IP_NETWORK_INTERFACE_2, - 'primary': True, - 'association': { - 'ipOwnerId': ID_OS_PROJECT, - 'publicDnsName': None, - 'publicIp': IP_ADDRESS_2}}, - {'privateIpAddress': IP_NETWORK_INTERFACE_2_EXT_1, - 'primary': False}, - {'privateIpAddress': IP_NETWORK_INTERFACE_2_EXT_2, - 'primary': False}, - ], - 'attachment': { - 'status': 'attached', - 'deviceIndex': 0, - 'attachTime': TIME_ATTACH_NETWORK_INTERFACE, - 'deleteOnTermination': False, - 'attachmentId': ID_EC2_NETWORK_INTERFACE_2_ATTACH, - }, - 'sourceDestCheck': True, - 'ownerId': ID_OS_PROJECT, - 'requesterManaged': False, - 'groupSet': [{'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'groupId': ID_EC2_SECURITY_GROUP_1}]}, - ], - 'amiLaunchIndex': 0, - 'placement': {'availabilityZone': None}, - 'dnsName': None, - 'instanceState': {'code': 0, 'name': 'pending'}, - 'imageId': ID_EC2_IMAGE_1, - 'kernelId': ID_EC2_IMAGE_AKI_1, - 'ramdiskId': ID_EC2_IMAGE_ARI_1, - 'productCodesSet': [], - 'privateDnsName': '%s-%s' % (ID_EC2_RESERVATION_DEFAULT, 0), - 'keyName': NAME_KEY_PAIR, - 'launchTime': TIME_CREATE_INSTANCE_1, - 'rootDeviceType': 'instance-store', - 'instanceType': 'fake_flavor', - 'ipAddress': IP_ADDRESS_2, - 'rootDeviceName': 
ROOT_DEVICE_NAME_INSTANCE_1, - 'sourceDestCheck': True, -} -EC2_INSTANCE_1 = { - 'instanceId': ID_EC2_INSTANCE_1, - 'privateIpAddress': IP_NETWORK_INTERFACE_2, - 'vpcId': ID_EC2_VPC_1, - 'subnetId': ID_EC2_SUBNET_2, - 'groupSet': [{'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'groupId': ID_EC2_SECURITY_GROUP_1}], - 'networkInterfaceSet': [ - {'networkInterfaceId': ID_EC2_NETWORK_INTERFACE_2, - 'status': 'in-use', - 'vpcId': ID_EC2_VPC_1, - 'subnetId': ID_EC2_SUBNET_2, - 'description': DESCRIPTION_NETWORK_INTERFACE_2, - 'macAddress': MAC_ADDRESS, - 'privateIpAddress': IP_NETWORK_INTERFACE_2, - 'association': { - 'ipOwnerId': ID_OS_PROJECT, - 'publicDnsName': None, - 'publicIp': IP_ADDRESS_2, - }, - 'privateIpAddressesSet': [ - {'privateIpAddress': IP_NETWORK_INTERFACE_2, - 'primary': True, - 'association': { - 'ipOwnerId': ID_OS_PROJECT, - 'publicDnsName': None, - 'publicIp': IP_ADDRESS_2}}, - {'privateIpAddress': IP_NETWORK_INTERFACE_2_EXT_1, - 'primary': False}, - {'privateIpAddress': IP_NETWORK_INTERFACE_2_EXT_2, - 'primary': False}, - ], - 'attachment': { - 'status': 'attached', - 'deviceIndex': 0, - 'attachTime': TIME_ATTACH_NETWORK_INTERFACE, - 'deleteOnTermination': False, - 'attachmentId': ID_EC2_NETWORK_INTERFACE_2_ATTACH, - }, - 'sourceDestCheck': True, - 'ownerId': ID_OS_PROJECT, - 'requesterManaged': False, - 'groupSet': [{'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'groupId': ID_EC2_SECURITY_GROUP_1}]}, - ], - 'amiLaunchIndex': 0, - 'placement': {'availabilityZone': None}, - 'dnsName': None, - 'instanceState': {'code': 0, 'name': 'pending'}, - 'imageId': ID_EC2_IMAGE_1, - 'kernelId': ID_EC2_IMAGE_AKI_1, - 'ramdiskId': ID_EC2_IMAGE_ARI_1, - 'productCodesSet': [], - 'privateDnsName': '%s-%s' % (ID_EC2_RESERVATION_1, 0), - 'keyName': NAME_KEY_PAIR, - 'launchTime': TIME_CREATE_INSTANCE_1, - 'rootDeviceType': 'instance-store', - 'instanceType': 'fake_flavor', - 'ipAddress': IP_ADDRESS_2, - 'rootDeviceName': ROOT_DEVICE_NAME_INSTANCE_1, - 
'sourceDestCheck': True, -} -EC2_INSTANCE_2 = { - 'instanceId': ID_EC2_INSTANCE_2, - 'privateIpAddress': None, - 'groupSet': [{'groupName': 'groupname3', - 'groupId': ID_EC2_SECURITY_GROUP_3}], - 'amiLaunchIndex': 0, - 'placement': {'availabilityZone': NAME_AVAILABILITY_ZONE}, - 'dnsName': IP_ADDRESS_NOVA_1, - 'dnsNameV6': IPV6_INSTANCE_2, - 'instanceState': {'code': 0, 'name': 'pending'}, - 'imageId': None, - 'productCodesSet': [], - 'privateDnsName': 'Server %s' % ID_OS_INSTANCE_2, - 'keyName': None, - 'launchTime': TIME_CREATE_INSTANCE_2, - 'rootDeviceType': 'ebs', - 'blockDeviceMapping': [ - {'deviceName': ROOT_DEVICE_NAME_INSTANCE_2, - 'ebs': {'status': 'attached', - 'deleteOnTermination': False, - 'volumeId': ID_EC2_VOLUME_2}}], - 'instanceType': 'fake_flavor', - 'ipAddress': IP_ADDRESS_NOVA_1, - 'rootDeviceName': ROOT_DEVICE_NAME_INSTANCE_2, - 'clientToken': CLIENT_TOKEN_INSTANCE_2, -} -EC2_RESERVATION_DEFAULT = { - 'reservationId': ID_EC2_RESERVATION_DEFAULT, - 'ownerId': ID_OS_PROJECT, - 'instancesSet': [EC2_INSTANCE_DEFAULT], - 'groupSet': [], -} -EC2_RESERVATION_1 = { - 'reservationId': ID_EC2_RESERVATION_1, - 'ownerId': ID_OS_PROJECT, - 'instancesSet': [EC2_INSTANCE_1], - 'groupSet': [], -} -EC2_RESERVATION_2 = { - 'reservationId': ID_EC2_RESERVATION_2, - 'ownerId': ID_OS_PROJECT, - 'groupSet': [{'groupName': 'groupname3', - 'groupId': ID_EC2_SECURITY_GROUP_3}], - 'instancesSet': [EC2_INSTANCE_2], -} -EC2_BDM_METADATA_INSTANCE_1 = {} -EC2_BDM_METADATA_INSTANCE_2 = { - 'ebs0': ROOT_DEVICE_NAME_INSTANCE_2, -} - - -# fake class for a instance received from Nova API with v2.3 microversion -# support -class OSInstance(object): - def __init__(self, instance_dict): - self.id = instance_dict['id'] - self.flavor = instance_dict.get('flavor') - self.image = instance_dict.get('image') - self.key_name = instance_dict.get('key_name') - self.created = instance_dict.get('created') - self.tenant_id = instance_dict.get('tenant_id', ID_OS_PROJECT) - self.user_id = 
ID_OS_USER - self.addresses = copy.deepcopy(instance_dict.get('addresses', {})) - self.security_groups = copy.deepcopy( - instance_dict.get('security_groups', [])) - setattr(self, 'OS-EXT-STS:vm_state', instance_dict.get('vm_state')) - setattr(self, 'OS-EXT-SRV-ATTR:host', instance_dict.get('host')) - setattr(self, 'OS-EXT-AZ:availability_zone', - instance_dict.get('availability_zone')) - setattr(self, 'os-extended-volumes:volumes_attached', - copy.deepcopy(instance_dict.get('volumes_attached', []))) - - def get(self): - pass - - def delete(self): - pass - - def start(self): - pass - - def stop(self): - pass - - def reboot(self): - pass - - def get_password(self): - return None - - def get_console_output(self): - return None - - -# fake class for a instance received with an admin account from Nova API -# with v2.3 microversion support -class OSInstance_full(OSInstance): - def __init__(self, instance_dict): - super(OSInstance_full, self).__init__(instance_dict) - setattr(self, 'OS-EXT-SRV-ATTR:root_device_name', - instance_dict.get('root_device_name')) - setattr(self, 'OS-EXT-SRV-ATTR:kernel_id', - instance_dict.get('kernel_id')) - setattr(self, 'OS-EXT-SRV-ATTR:ramdisk_id', - instance_dict.get('ramdisk_id')) - setattr(self, 'OS-EXT-SRV-ATTR:user_data', - instance_dict.get('user_data')) - setattr(self, 'OS-EXT-SRV-ATTR:hostname', - instance_dict.get('hostname')) - - -OS_INSTANCE_1 = { - 'id': ID_OS_INSTANCE_1, - 'flavor': {'id': 'fakeFlavorId'}, - 'image': {'id': ID_OS_IMAGE_1}, - 'addresses': { - ID_EC2_SUBNET_2: [{'addr': IP_NETWORK_INTERFACE_2, - 'version': 4, - 'OS-EXT-IPS:type': 'fixed'}, - {'addr': IP_NETWORK_INTERFACE_2_EXT_1, - 'version': 4, - 'OS-EXT-IPS:type': 'fixed'}, - {'addr': IP_NETWORK_INTERFACE_2_EXT_2, - 'version': 4, - 'OS-EXT-IPS:type': 'fixed'}, - {'addr': IP_ADDRESS_2, - 'version': 4, - 'OS-EXT-IPS:type': 'floating'}]}, - 'key_name': NAME_KEY_PAIR, - 'root_device_name': ROOT_DEVICE_NAME_INSTANCE_1, - 'kernel_id': ID_OS_IMAGE_AKI_1, - 
'ramdisk_id': ID_OS_IMAGE_ARI_1, - 'hostname': '%s-%s' % (ID_EC2_RESERVATION_1, 0), - 'created': TIME_CREATE_INSTANCE_1 -} -OS_INSTANCE_2 = { - 'id': ID_OS_INSTANCE_2, - 'flavor': {'id': 'fakeFlavorId'}, - 'security_groups': [{'name': 'groupname3'}], - 'availability_zone': NAME_AVAILABILITY_ZONE, - 'addresses': { - ID_EC2_SUBNET_1: [{'addr': IPV6_INSTANCE_2, - 'version': 6, - 'OS-EXT-IPS:type': 'fixed'}, - {'addr': IP_ADDRESS_NOVA_1, - 'version': 4, - 'OS-EXT-IPS:type': 'floating'}]}, - 'root_device_name': ROOT_DEVICE_NAME_INSTANCE_2, - 'volumes_attached': [{'id': ID_OS_VOLUME_2, - 'delete_on_termination': False}], - 'user_data': USER_DATA_INSTANCE_2, - 'hostname': 'Server %s' % ID_OS_INSTANCE_2, - 'created': TIME_CREATE_INSTANCE_2 -} - - -# DHCP options objects -DB_DHCP_OPTIONS_1 = {'id': ID_EC2_DHCP_OPTIONS_1, - 'dhcp_configuration': - {'domain-name': ['my.domain.com'], - 'domain-name-servers': ['8.8.8.8', '127.0.0.1']}} - -DB_DHCP_OPTIONS_2 = {'id': ID_EC2_DHCP_OPTIONS_2, - 'dhcp_configuration': - {'domain-name': ['my.domain.com'], - 'domain-name-servers': ['8.8.8.8', '127.0.0.1'], - 'netbios-name-servers': ['127.0.0.1'], - 'netbios-node-type': [1], - 'ntp-servers': ['127.0.0.1']}} - -EC2_DHCP_OPTIONS_1 = { - 'dhcpOptionsId': ID_EC2_DHCP_OPTIONS_1, - 'dhcpConfigurationSet': [ - {'valueSet': [{'value': 'my.domain.com'}], - 'key': 'domain-name'}, - {'valueSet': [{'value': '8.8.8.8'}, {'value': '127.0.0.1'}], - 'key': 'domain-name-servers'}]} - -EC2_DHCP_OPTIONS_2 = { - 'dhcpOptionsId': ID_EC2_DHCP_OPTIONS_2, - 'dhcpConfigurationSet': [ - {'valueSet': [{'value': 'my.domain.com'}], - 'key': 'domain-name'}, - {'valueSet': [{'value': '8.8.8.8'}, {'value': '127.0.0.1'}], - 'key': 'domain-name-servers'}, - {'valueSet': [{'value': 1}], - 'key': 'netbios-node-type'}, - {'valueSet': [{'value': '127.0.0.1'}], - 'key': 'ntp-servers'}, - {'valueSet': [{'value': '127.0.0.1'}], - 'key': 'netbios-name-servers'}] -} - -OS_DHCP_OPTIONS_1 = {'extra_dhcp_opts': [{'opt_name': 
'domain-name', - 'opt_value': 'my.domain.com'}, - {'opt_name': 'dns-server', - 'opt_value': '8.8.8.8,127.0.0.1'}]} - - -# address objects - -DB_ADDRESS_DEFAULT = { - 'id': ID_EC2_ADDRESS_DEFAULT, - 'os_id': ID_OS_FLOATING_IP_2, - 'vpc_id': None, - 'public_ip': IP_ADDRESS_2, - 'network_interface_id': ID_EC2_NETWORK_INTERFACE_DEFAULT, - 'private_ip_address': IP_NETWORK_INTERFACE_DEFAULT, -} -DB_ADDRESS_1 = { - 'id': ID_EC2_ADDRESS_1, - 'os_id': ID_OS_FLOATING_IP_1, - 'vpc_id': None, - 'public_ip': IP_ADDRESS_1, -} -DB_ADDRESS_2 = { - 'id': ID_EC2_ADDRESS_2, - 'os_id': ID_OS_FLOATING_IP_2, - 'vpc_id': None, - 'public_ip': IP_ADDRESS_2, - 'network_interface_id': ID_EC2_NETWORK_INTERFACE_2, - 'private_ip_address': IP_NETWORK_INTERFACE_2, -} - -EC2_ADDRESS_CLASSIC_1 = { - 'publicIp': IP_ADDRESS_1, - 'domain': 'standard' -} -EC2_ADDRESS_CLASSIC_2 = { - 'publicIp': IP_ADDRESS_2, - 'instanceId': ID_EC2_INSTANCE_1, - 'domain': 'standard', - 'privateIpAddress': IP_NETWORK_INTERFACE_2 -} -EC2_ADDRESS_1 = { - 'allocationId': ID_EC2_ADDRESS_1, - 'publicIp': IP_ADDRESS_1, - 'domain': 'vpc', -} -EC2_ADDRESS_2 = { - 'allocationId': ID_EC2_ADDRESS_2, - 'publicIp': IP_ADDRESS_2, - 'domain': 'vpc', - 'instanceId': ID_EC2_INSTANCE_1, - 'associationId': ID_EC2_ASSOCIATION_2, - 'networkInterfaceId': ID_EC2_NETWORK_INTERFACE_2, - 'privateIpAddress': IP_NETWORK_INTERFACE_2, - 'networkInterfaceOwnerId': ID_OS_PROJECT, -} -EC2_ADDRESS_DEFAULT = { - 'allocationId': ID_EC2_ADDRESS_DEFAULT, - 'publicIp': IP_ADDRESS_2, - 'domain': 'vpc', - 'instanceId': ID_EC2_INSTANCE_DEFAULT, - 'associationId': ID_EC2_ASSOCIATION_DEFAULT, - 'networkInterfaceId': ID_EC2_NETWORK_INTERFACE_DEFAULT, - 'privateIpAddress': IP_NETWORK_INTERFACE_DEFAULT, - 'networkInterfaceOwnerId': ID_OS_PROJECT, -} - -OS_FLOATING_IP_1 = { - 'id': ID_OS_FLOATING_IP_1, - 'floating_ip_address': IP_ADDRESS_1, - 'port_id': None, - 'fixed_ip_address': None, -} -OS_FLOATING_IP_2 = { - 'id': ID_OS_FLOATING_IP_2, - 'floating_ip_address': 
IP_ADDRESS_2, - 'port_id': ID_OS_PORT_2, - 'fixed_ip_address': IP_NETWORK_INTERFACE_2, -} - - -# security group objects - -DB_SECURITY_GROUP_DEFAULT = { - 'id': ID_EC2_SECURITY_GROUP_DEFAULT, - 'os_id': ID_OS_SECURITY_GROUP_DEFAULT, - 'vpc_id': ID_EC2_VPC_DEFAULT, -} -DB_SECURITY_GROUP_1 = { - 'id': ID_EC2_SECURITY_GROUP_1, - 'os_id': ID_OS_SECURITY_GROUP_1, - 'vpc_id': ID_EC2_VPC_1, -} -DB_SECURITY_GROUP_2 = { - 'id': ID_EC2_SECURITY_GROUP_2, - 'os_id': ID_OS_SECURITY_GROUP_2, - 'vpc_id': ID_EC2_VPC_1, -} -DB_SECURITY_GROUP_3 = { - 'id': ID_EC2_SECURITY_GROUP_3, - 'os_id': ID_OS_SECURITY_GROUP_3, - 'vpc_id': None, -} -DB_SECURITY_GROUP_4 = { - 'id': ID_EC2_SECURITY_GROUP_4, - 'os_id': ID_OS_SECURITY_GROUP_4, - 'vpc_id': None, -} -DB_SECURITY_GROUP_5 = { - 'id': ID_EC2_SECURITY_GROUP_5, - 'os_id': ID_OS_SECURITY_GROUP_5, - 'vpc_id': ID_EC2_VPC_DEFAULT, -} -DB_SECURITY_GROUP_6 = { - 'id': ID_EC2_SECURITY_GROUP_6, - 'os_id': None, - 'vpc_id': None, -} -OS_SECURITY_GROUP_RULE_1 = { - 'direction': 'ingress', - 'ethertype': 'IPv4', - 'id': random_os_id(), - 'port_range_min': 10, - 'port_range_max': 10, - 'protocol': 'tcp', - 'remote_group_id': None, - 'remote_ip_prefix': '192.168.1.0/24', - 'security_group_id': ID_OS_SECURITY_GROUP_2 -} -OS_SECURITY_GROUP_RULE_2 = { - 'direction': 'egress', - 'ethertype': 'IPv4', - 'id': random_os_id(), - 'port_range_min': 10, - 'port_range_max': None, - 'protocol': 100, - 'remote_group_id': ID_OS_SECURITY_GROUP_1, - 'remote_ip_prefix': None, - 'security_group_id': ID_OS_SECURITY_GROUP_2 -} -OS_SECURITY_GROUP_DEFAULT = { - 'id': ID_OS_SECURITY_GROUP_DEFAULT, - 'name': ID_EC2_VPC_DEFAULT, - 'security_group_rules': - [{'remote_group_id': None, - 'direction': 'egress', - 'remote_ip_prefix': None, - 'protocol': None, - 'port_range_max': None, - 'security_group_id': ID_OS_SECURITY_GROUP_DEFAULT, - 'port_range_min': None, - 'ethertype': 'IPv4', - 'id': random_os_id()}], - 'description': 'Group description', - 'tenant_id': ID_OS_PROJECT -} 
-OS_SECURITY_GROUP_1 = { - 'id': ID_OS_SECURITY_GROUP_1, - 'name': ID_EC2_VPC_1, - 'security_group_rules': - [{'remote_group_id': None, - 'direction': 'egress', - 'remote_ip_prefix': None, - 'protocol': None, - 'port_range_max': None, - 'security_group_id': ID_OS_SECURITY_GROUP_1, - 'port_range_min': None, - 'ethertype': 'IPv4', - 'id': random_os_id()}, - {'remote_group_id': None, - 'direction': 'egress', - 'remote_ip_prefix': None, - 'protocol': None, - 'port_range_max': None, - 'security_group_id': ID_OS_SECURITY_GROUP_1, - 'port_range_min': None, - 'ethertype': 'IPv6', - 'id': random_os_id()}], - 'description': 'Group description', - 'tenant_id': ID_OS_PROJECT -} -OS_SECURITY_GROUP_2 = { - 'id': ID_OS_SECURITY_GROUP_2, - 'name': 'groupname2', - 'security_group_rules': [ - OS_SECURITY_GROUP_RULE_1, - OS_SECURITY_GROUP_RULE_2 - ], - 'description': 'Group description', - 'tenant_id': ID_OS_PROJECT -} -OS_SECURITY_GROUP_3 = { - 'id': ID_OS_SECURITY_GROUP_3, - 'name': 'groupname3', - 'description': 'Group description', - 'tenant_id': ID_OS_PROJECT -} -OS_SECURITY_GROUP_4 = { - 'id': ID_OS_SECURITY_GROUP_4, - 'name': 'groupname2', - 'security_group_rules': [ - {'direction': 'ingress', - 'ethertype': 'IPv4', - 'id': random_os_id(), - 'port_range_min': 10, - 'port_range_max': 10, - 'protocol': 'tcp', - 'remote_group_id': None, - 'remote_ip_prefix': '192.168.1.0/24', - 'security_group_id': ID_OS_SECURITY_GROUP_4}, - {'direction': 'egress', - 'ethertype': 'IPv4', - 'id': random_os_id(), - 'port_range_min': 10, - 'port_range_max': None, - 'protocol': 100, - 'remote_group_id': ID_OS_SECURITY_GROUP_1, - 'remote_ip_prefix': None, - 'security_group_id': ID_OS_SECURITY_GROUP_4} - ], - 'description': 'Group description', - 'tenant_id': ID_OS_PROJECT -} -OS_SECURITY_GROUP_5 = { - 'id': ID_OS_SECURITY_GROUP_5, - 'name': 'groupname2', - 'security_group_rules': [ - {'remote_group_id': None, - 'direction': 'egress', - 'remote_ip_prefix': None, - 'protocol': None, - 'port_range_max': 
None, - 'security_group_id': ID_OS_SECURITY_GROUP_5, - 'port_range_min': None, - 'ethertype': 'IPv4', - 'id': random_os_id()} - ], - 'description': 'Group description', - 'tenant_id': ID_OS_PROJECT -} -EC2_SECURITY_GROUP_DEFAULT = { - 'vpcId': ID_EC2_VPC_DEFAULT, - 'groupDescription': 'Group description', - 'ipPermissions': None, - 'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'ipPermissionsEgress': - [{'toPort': 65535, - 'ipProtocol': -1, - 'fromPort': 1}], - 'ownerId': ID_OS_PROJECT, - 'groupId': ID_EC2_SECURITY_GROUP_DEFAULT -} -EC2_SECURITY_GROUP_1 = { - 'vpcId': ID_EC2_VPC_1, - 'groupDescription': 'Group description', - 'ipPermissions': None, - 'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'ipPermissionsEgress': - [{'toPort': 65535, - 'ipProtocol': -1, - 'fromPort': 1}], - 'ownerId': ID_OS_PROJECT, - 'groupId': ID_EC2_SECURITY_GROUP_1 -} -EC2_SECURITY_GROUP_2 = { - 'vpcId': ID_EC2_VPC_1, - 'groupDescription': 'Group description', - 'ipPermissions': - [{'toPort': 10, - 'ipProtocol': 'tcp', - 'fromPort': 10, - 'ipRanges': - [{'cidrIp': '192.168.1.0/24'}] - }], - 'groupName': 'groupname2', - 'ipPermissionsEgress': - [{'toPort': 65535, - 'ipProtocol': 100, - 'fromPort': 10, - 'groups': - [{'groupId': ID_EC2_SECURITY_GROUP_1, - 'groupName': NAME_DEFAULT_OS_SECURITY_GROUP, - 'userId': ID_OS_PROJECT}] - }], - 'ownerId': ID_OS_PROJECT, - 'groupId': ID_EC2_SECURITY_GROUP_2 -} -EC2_SECURITY_GROUP_3 = { - 'groupDescription': 'Group description', - 'ipPermissions': None, - 'ipPermissionsEgress': None, - 'groupName': 'groupname3', - 'ownerId': ID_OS_PROJECT, - 'groupId': ID_EC2_SECURITY_GROUP_3 -} -EC2_SECURITY_GROUP_4 = { - 'groupDescription': 'Group description', - 'ipPermissions': - [{'toPort': 10, - 'ipProtocol': 'tcp', - 'fromPort': 10, - 'ipRanges': - [{'cidrIp': '192.168.1.0/24'}] - }], - 'groupName': 'groupname2', - 'ipPermissionsEgress': - [{'toPort': 65535, - 'ipProtocol': 100, - 'fromPort': 10, - 'groups': - [{'groupId': ID_EC2_SECURITY_GROUP_1, - 'groupName': 
NAME_DEFAULT_OS_SECURITY_GROUP, - 'userId': ID_OS_PROJECT}] - }], - 'ownerId': ID_OS_PROJECT, - 'groupId': ID_EC2_SECURITY_GROUP_4 -} -EC2_SECURITY_GROUP_5 = { - 'vpcId': ID_EC2_VPC_DEFAULT, - 'groupDescription': 'Group description', - 'ipPermissions': None, - 'ipPermissionsEgress': - [{'toPort': 65535, - 'ipProtocol': -1, - 'fromPort': 1}], - 'groupName': 'groupname2', - 'ownerId': ID_OS_PROJECT, - 'groupId': ID_EC2_SECURITY_GROUP_5 -} - -NOVA_DB_SECURITY_GROUP_1 = { - 'id': ID_EC2_SECURITY_GROUP_1, - 'os_id': str(ID_NOVA_OS_SECURITY_GROUP_1), - 'vpc_id': None, -} -NOVA_DB_SECURITY_GROUP_2 = { - 'id': ID_EC2_SECURITY_GROUP_2, - 'os_id': str(ID_NOVA_OS_SECURITY_GROUP_2), - 'vpc_id': None, -} -NOVA_SECURITY_GROUP_RULE_1 = { - 'id': random_os_id(), - 'from_port': 10, - 'to_port': 10, - 'ip_protocol': 'tcp', - 'group': None, - 'ip_range': {'cidr': '192.168.1.0/24'}, - 'parent_group_id': ID_NOVA_OS_SECURITY_GROUP_2 -} -NOVA_SECURITY_GROUP_RULE_2 = { - 'id': random_os_id(), - 'from_port': None, - 'to_port': None, - 'ip_protocol': 'icmp', - 'group': {'name': 'groupname'}, - 'ip_range': None, - 'parent_group_id': ID_NOVA_OS_SECURITY_GROUP_2 -} -NOVA_SECURITY_GROUP_1 = { - 'id': ID_NOVA_OS_SECURITY_GROUP_1, - 'name': 'groupname', - 'security_group_rules': - [{'group': None, - 'ip_range': None, - 'ip_protocol': None, - 'to_port': None, - 'parent_group_id': ID_NOVA_OS_SECURITY_GROUP_1, - 'from_port': None, - 'id': random_os_id()}], - 'description': 'Group description', - 'tenant_id': ID_OS_PROJECT -} -NOVA_SECURITY_GROUP_2 = { - 'id': ID_NOVA_OS_SECURITY_GROUP_2, - 'name': 'groupname2', - 'security_group_rules': [ - NOVA_SECURITY_GROUP_RULE_1, - NOVA_SECURITY_GROUP_RULE_2 - ], - 'description': 'Group description', - 'tenant_id': ID_OS_PROJECT -} -EC2_NOVA_SECURITY_GROUP_1 = { - 'groupDescription': 'Group description', - 'groupName': 'groupname', - 'ipPermissionsEgress': None, - 'ipPermissions': - [{'fromPort': -1, 'ipProtocol': -1, 'toPort': -1}], - 'ownerId': ID_OS_PROJECT, 
- 'groupId': ID_EC2_SECURITY_GROUP_1 -} -EC2_NOVA_SECURITY_GROUP_2 = { - 'groupDescription': 'Group description', - 'groupName': 'groupname2', - 'ipPermissionsEgress': None, - 'ipPermissions': - [{'toPort': 10, - 'ipProtocol': 'tcp', - 'fromPort': 10, - 'ipRanges': - [{'cidrIp': '192.168.1.0/24'}] - }, - {'toPort': -1, - 'ipProtocol': 'icmp', - 'fromPort': -1, - 'groups': - [{'groupName': 'groupname', - 'groupId': ID_EC2_SECURITY_GROUP_1, - 'userId': ID_OS_PROJECT}] - }], - 'ownerId': ID_OS_PROJECT, - 'groupId': ID_EC2_SECURITY_GROUP_2 -} - - -# route table objects -DB_ROUTE_TABLE_DEFAULT = { - 'id': ID_EC2_ROUTE_TABLE_DEFAULT, - 'vpc_id': ID_EC2_VPC_DEFAULT, - 'routes': [{'destination_cidr_block': CIDR_VPC_DEFAULT, - 'gateway_id': None}, - {'destination_cidr_block': '0.0.0.0/0', - 'gateway_id': ID_EC2_IGW_DEFAULT}], -} - -DB_ROUTE_TABLE_1 = { - 'id': ID_EC2_ROUTE_TABLE_1, - 'vpc_id': ID_EC2_VPC_1, - 'routes': [{'destination_cidr_block': CIDR_VPC_1, - 'gateway_id': None}], -} -DB_ROUTE_TABLE_2 = { - 'id': ID_EC2_ROUTE_TABLE_2, - 'vpc_id': ID_EC2_VPC_1, - 'routes': [{'destination_cidr_block': CIDR_VPC_1, - 'gateway_id': None}, - {'destination_cidr_block': CIDR_EXTERNAL_NETWORK, - 'network_interface_id': ID_EC2_NETWORK_INTERFACE_2}, - {'destination_cidr_block': '0.0.0.0/0', - 'gateway_id': ID_EC2_IGW_1}], - 'propagating_gateways': [ID_EC2_VPN_GATEWAY_1], -} -DB_ROUTE_TABLE_3 = { - 'id': ID_EC2_ROUTE_TABLE_3, - 'vpc_id': ID_EC2_VPC_1, - 'routes': [{'destination_cidr_block': CIDR_VPC_1, - 'gateway_id': None}, - {'destination_cidr_block': CIDR_VPN_1_STATIC, - 'gateway_id': ID_EC2_VPN_GATEWAY_1}], -} -EC2_ROUTE_TABLE_DEFAULT = { - 'routeTableId': ID_EC2_ROUTE_TABLE_DEFAULT, - 'vpcId': ID_EC2_VPC_DEFAULT, - 'routeSet': [ - {'destinationCidrBlock': CIDR_VPC_DEFAULT, - 'gatewayId': 'local', - 'state': 'active', - 'origin': 'CreateRouteTable'}, - {'destinationCidrBlock': '0.0.0.0/0', - 'gatewayId': ID_EC2_IGW_DEFAULT, - 'state': 'active', - 'origin': 'CreateRoute'}], - 
'associationSet': [ - {'routeTableAssociationId': ID_EC2_ROUTE_TABLE_ASSOCIATION_DEFAULT, - 'routeTableId': ID_EC2_ROUTE_TABLE_DEFAULT, - 'main': True}], - 'propagatingVgwSet': [], - 'tagSet': [], -} -EC2_ROUTE_TABLE_1 = { - 'routeTableId': ID_EC2_ROUTE_TABLE_1, - 'vpcId': ID_EC2_VPC_1, - 'routeSet': [ - {'destinationCidrBlock': CIDR_VPC_1, - 'gatewayId': 'local', - 'state': 'active', - 'origin': 'CreateRouteTable'}], - 'associationSet': [ - {'routeTableAssociationId': ID_EC2_ROUTE_TABLE_ASSOCIATION_1, - 'routeTableId': ID_EC2_ROUTE_TABLE_1, - 'main': True}], - 'propagatingVgwSet': [], - 'tagSet': [], -} -EC2_ROUTE_TABLE_2 = { - 'routeTableId': ID_EC2_ROUTE_TABLE_2, - 'vpcId': ID_EC2_VPC_1, - 'routeSet': [ - {'destinationCidrBlock': CIDR_VPC_1, - 'gatewayId': 'local', - 'state': 'active', - 'origin': 'CreateRouteTable'}, - {'destinationCidrBlock': CIDR_EXTERNAL_NETWORK, - 'instanceId': ID_EC2_INSTANCE_1, - 'instanceOwnerId': ID_OS_PROJECT, - 'networkInterfaceId': ID_EC2_NETWORK_INTERFACE_2, - 'state': 'active', - 'origin': 'CreateRoute'}, - {'destinationCidrBlock': CIDR_VPN_1_PROPAGATED_1, - 'gatewayId': ID_EC2_VPN_GATEWAY_1, - 'state': 'active', - 'origin': 'EnableVgwRoutePropagation'}, - {'destinationCidrBlock': '0.0.0.0/0', - 'gatewayId': ID_EC2_IGW_1, - 'state': 'active', - 'origin': 'CreateRoute'}], - 'propagatingVgwSet': [{'gatewayId': ID_EC2_VPN_GATEWAY_1}], - 'tagSet': [], -} -EC2_ROUTE_TABLE_3 = { - 'routeTableId': ID_EC2_ROUTE_TABLE_3, - 'vpcId': ID_EC2_VPC_1, - 'routeSet': [ - {'destinationCidrBlock': CIDR_VPC_1, - 'gatewayId': 'local', - 'state': 'active', - 'origin': 'CreateRouteTable'}, - {'destinationCidrBlock': CIDR_VPN_1_STATIC, - 'gatewayId': ID_EC2_VPN_GATEWAY_1, - 'state': 'active', - 'origin': 'CreateRoute'}], - 'associationSet': [ - {'routeTableAssociationId': ID_EC2_ROUTE_TABLE_ASSOCIATION_3, - 'routeTableId': ID_EC2_ROUTE_TABLE_3, - 'subnetId': ID_EC2_SUBNET_2, - 'main': False}], - 'propagatingVgwSet': [], - 'tagSet': [], -} - - -# image 
objects -class OSImage(object): - - def __init__(self, image_dict, from_get=False): - - if from_get: - attrs = [k for k in image_dict.keys() - if image_dict[k] is not None] - else: - attrs = list(image_dict) - attrs.extend( - ['owner', 'created_at', 'visibility', 'status', - 'container_format', 'name']) - self._image_dict = {'id': image_dict['id']} - self._image_dict.update({k: image_dict.get(k) - for k in attrs}) - for complex_attr in ('mappings', 'block_device_mapping'): - if complex_attr in self._image_dict: - self._image_dict[complex_attr] = jsonutils.dumps( - self._image_dict[complex_attr]) - for k in self._image_dict: - setattr(self, k, self._image_dict[k]) - - def __eq__(self, other): - return type(self) == type(other) and self.__dict__ == other.__dict__ - - def __iter__(self): - for key in self._image_dict.items(): - yield key - - def __getitem__(self, key): - return self._image_dict.get(key) - - -TIME_CREATE_IMAGE = ec2utils.isotime(None, True) - -EC2_IMAGE_1 = { - 'imageId': ID_EC2_IMAGE_1, - 'imageOwnerId': ID_OS_PROJECT, - 'creationDate': TIME_CREATE_IMAGE, - 'isPublic': False, - 'imageState': 'available', - 'imageType': 'machine', - 'name': 'fake_name', - 'imageLocation': LOCATION_IMAGE_1, - 'kernelId': ID_EC2_IMAGE_AKI_1, - 'ramdiskId': ID_EC2_IMAGE_ARI_1, - 'architecture': None, - 'rootDeviceType': 'instance-store', - 'rootDeviceName': ROOT_DEVICE_NAME_IMAGE_1, - 'blockDeviceMapping': [ - {'deviceName': '/dev/sdb0', - 'virtualName': 'ephemeral0'}, - {'deviceName': '/dev/sdb1', - 'ebs': {'snapshotId': ID_EC2_SNAPSHOT_1, - 'volumeSize': 22, - 'deleteOnTermination': False}}, - {'deviceName': '/dev/sdb2', - 'ebs': {'snapshotId': ID_EC2_VOLUME_1, - 'deleteOnTermination': False}}, - {'deviceName': '/dev/sdb3', - 'virtualName': 'ephemeral5'}, - {'deviceName': '/dev/sdc0', - 'virtualName': 'swap'}, - {'deviceName': '/dev/sdc1', - 'ebs': {'snapshotId': ID_EC2_SNAPSHOT_2, - 'deleteOnTermination': False}}, - {'deviceName': '/dev/sdc2', - 'ebs': {'snapshotId': 
ID_EC2_VOLUME_2, - 'deleteOnTermination': False}}, - {'deviceName': '/dev/sdc3', - 'virtualName': 'ephemeral6'}], -} -EC2_IMAGE_2 = { - 'imageId': ID_EC2_IMAGE_2, - 'imageOwnerId': ID_OS_PROJECT, - 'creationDate': TIME_CREATE_IMAGE, - 'isPublic': True, - 'imageState': 'available', - 'imageType': 'machine', - 'name': None, - 'description': 'fake desc', - 'imageLocation': 'None (None)', - 'rootDeviceType': 'ebs', - 'rootDeviceName': ROOT_DEVICE_NAME_IMAGE_2, - 'architecture': 'x86_64', - 'blockDeviceMapping': [ - {'deviceName': '/dev/sdb1', - 'ebs': {'snapshotId': ID_EC2_SNAPSHOT_1, - 'deleteOnTermination': True}}], -} - - -DB_IMAGE_1 = { - 'id': ID_EC2_IMAGE_1, - 'os_id': ID_OS_IMAGE_1, - 'is_public': False, -} -DB_IMAGE_2 = { - 'id': ID_EC2_IMAGE_2, - 'os_id': ID_OS_IMAGE_2, - 'is_public': True, - 'description': 'fake desc' -} -DB_IMAGE_AKI_1 = { - 'id': ID_EC2_IMAGE_AKI_1, - 'os_id': ID_OS_IMAGE_AKI_1, - 'is_public': True, -} -DB_IMAGE_ARI_1 = { - 'id': ID_EC2_IMAGE_ARI_1, - 'os_id': ID_OS_IMAGE_ARI_1, - 'is_public': True, -} - -OS_IMAGE_1 = { - 'id': ID_OS_IMAGE_1, - 'owner': ID_OS_PROJECT, - 'created_at': TIME_CREATE_IMAGE, - 'visibility': 'private', - 'status': 'active', - 'container_format': 'ami', - 'name': 'fake_name', - 'kernel_id': ID_OS_IMAGE_AKI_1, - 'ramdisk_id': ID_OS_IMAGE_ARI_1, - 'type': 'machine', - 'image_state': 'available', - 'image_location': LOCATION_IMAGE_1, - 'mappings': [ - {'device': '/dev/sda1', 'virtual': 'root'}, - {'device': 'sdb0', 'virtual': 'ephemeral0'}, - {'device': 'sdb1', 'virtual': 'ephemeral1'}, - {'device': 'sdb2', 'virtual': 'ephemeral2'}, - {'device': 'sdb3', 'virtual': 'ephemeral3'}, - {'device': 'sdb4', 'virtual': 'ephemeral4'}, - {'device': 'sdc0', 'virtual': 'swap'}, - {'device': 'sdc1', 'virtual': 'swap'}, - {'device': 'sdc2', 'virtual': 'swap'}, - {'device': 'sdc3', 'virtual': 'swap'}, - {'device': 'sdc4', 'virtual': 'swap'}], - 'block_device_mapping': [ - {'device_name': '/dev/sdb1', - 'snapshot_id': 
ID_OS_SNAPSHOT_1, - 'volume_size': 22}, - {'device_name': '/dev/sdb2', - 'volume_id': ID_OS_VOLUME_1}, - {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'}, - {'device_name': '/dev/sdb4', 'no_device': True}, - {'device_name': '/dev/sdc1', - 'snapshot_id': ID_OS_SNAPSHOT_2}, - {'device_name': '/dev/sdc2', - 'volume_id': ID_OS_VOLUME_2}, - {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'}, - {'device_name': '/dev/sdc4', 'no_device': True}] -} -OS_IMAGE_2 = { - 'id': ID_OS_IMAGE_2, - 'owner': ID_OS_PROJECT, - 'created_at': TIME_CREATE_IMAGE, - 'visibility': 'public', - 'status': 'active', - 'container_format': None, - 'name': None, - 'type': 'machine', - 'root_device_name': '/dev/sdb1', - 'architecture': 'x86_64', - 'mappings': [{'device': '/dev/sda1', - 'virtual': 'root'}], - 'block_device_mapping': [ - {'device_name': '/dev/sdb1', - 'snapshot_id': ID_OS_SNAPSHOT_1, - 'delete_on_termination': True}], -} -OS_IMAGE_AKI_1 = { - 'id': ID_OS_IMAGE_AKI_1, -} -OS_IMAGE_ARI_1 = { - 'id': ID_OS_IMAGE_ARI_1, -} - - -# snapshot objects -class OSSnapshot(object): - - def __init__(self, snapshot): - self.id = snapshot['id'] - self.status = snapshot.get('status') - self.volume_id = snapshot.get('volume_id') - self.created_at = snapshot.get('created_at') - self.progress = snapshot.get('progress') - self.project_id = ID_OS_PROJECT - self.size = snapshot.get('size') - self.display_description = snapshot.get('description') - - def get(self): - pass - - def delete(self): - pass - - def update(self, *args, **kwargs): - pass - - -TIME_CREATE_SNAPSHOT_1 = ec2utils.isotime(None, True) -TIME_CREATE_SNAPSHOT_2 = ec2utils.isotime(None, True) - -EC2_SNAPSHOT_1 = { - 'description': None, - 'volumeId': ID_EC2_VOLUME_2, - 'status': 'completed', - 'volumeSize': 1, - 'progress': '100%', - 'startTime': TIME_CREATE_SNAPSHOT_1, - 'snapshotId': ID_EC2_SNAPSHOT_1, - 'ownerId': ID_OS_PROJECT -} -EC2_SNAPSHOT_2 = { - 'description': 'fake description', - 'volumeId': ID_EC2_VOLUME_2, - 
'status': 'completed', - 'volumeSize': 1, - 'progress': '100%', - 'startTime': TIME_CREATE_SNAPSHOT_2, - 'snapshotId': ID_EC2_SNAPSHOT_2, - 'ownerId': ID_OS_PROJECT -} - -DB_SNAPSHOT_1 = { - 'id': ID_EC2_SNAPSHOT_1, - 'os_id': ID_OS_SNAPSHOT_1, -} -DB_SNAPSHOT_2 = { - 'id': ID_EC2_SNAPSHOT_2, - 'os_id': ID_OS_SNAPSHOT_2, -} - -OS_SNAPSHOT_1 = { - 'id': ID_OS_SNAPSHOT_1, - 'status': 'available', - 'volume_id': ID_OS_VOLUME_2, - 'created_at': TIME_CREATE_SNAPSHOT_1, - 'progress': '100%', - 'size': 1, - 'description': None, -} -OS_SNAPSHOT_2 = { - 'id': ID_OS_SNAPSHOT_2, - 'status': 'available', - 'volume_id': ID_OS_VOLUME_2, - 'created_at': TIME_CREATE_SNAPSHOT_2, - 'progress': '100%', - 'size': 1, - 'description': 'fake description', -} - - -# volume objects -class OSVolume(object): - - def __init__(self, volume): - self.id = volume['id'] - self.status = volume['status'] - self.availability_zone = volume.get('availability_zone') - self.size = volume.get('size') - self.created_at = volume.get('created_at') - self.display_name = volume.get('display_name') - self.display_description = volume.get('display_description') - self.snapshot_id = volume.get('snapshot_id') - self.attachments = copy.deepcopy(volume.get('attachments')) - self.volume_type = None - self.encrypted = False - - def get(self): - pass - - def delete(self): - pass - - def update(self, *args, **kwargs): - pass - - -TIME_CREATE_VOLUME_1 = ec2utils.isotime(None, True) -TIME_CREATE_VOLUME_2 = ec2utils.isotime(None, True) -TIME_CREATE_VOLUME_3 = ec2utils.isotime(None, True) - -EC2_VOLUME_1 = { - 'volumeId': ID_EC2_VOLUME_1, - 'snapshotId': None, - 'availabilityZone': NAME_AVAILABILITY_ZONE, - 'createTime': TIME_CREATE_VOLUME_1, - 'size': 1, - 'status': 'available', - 'attachmentSet': [], - 'encrypted': False, - 'volumeType': None, -} -EC2_VOLUME_2 = { - 'volumeId': ID_EC2_VOLUME_2, - 'snapshotId': None, - 'availabilityZone': NAME_AVAILABILITY_ZONE, - 'createTime': TIME_CREATE_VOLUME_2, - 'size': 1, - 
'status': 'in-use', - 'attachmentSet': [{'status': 'attached', - 'instanceId': ID_EC2_INSTANCE_2, - 'volumeId': ID_EC2_VOLUME_2, - 'device': ROOT_DEVICE_NAME_INSTANCE_2, - 'deleteOnTermination': False}], - 'encrypted': False, - 'volumeType': None, -} -EC2_VOLUME_3 = { - 'volumeId': ID_EC2_VOLUME_3, - 'snapshotId': ID_EC2_SNAPSHOT_1, - 'availabilityZone': NAME_AVAILABILITY_ZONE, - 'createTime': TIME_CREATE_VOLUME_3, - 'size': 1, - 'status': 'available', - 'attachmentSet': [], - 'encrypted': False, - 'volumeType': None, -} - -DB_VOLUME_1 = { - 'id': ID_EC2_VOLUME_1, - 'os_id': ID_OS_VOLUME_1, -} -DB_VOLUME_2 = { - 'id': ID_EC2_VOLUME_2, - 'os_id': ID_OS_VOLUME_2, -} -DB_VOLUME_3 = { - 'id': ID_EC2_VOLUME_3, - 'os_id': ID_OS_VOLUME_3, -} - -OS_VOLUME_1 = { - 'id': ID_OS_VOLUME_1, - 'status': 'available', - 'availability_zone': NAME_AVAILABILITY_ZONE, - 'size': 1, - 'created_at': TIME_CREATE_VOLUME_1, - 'display_name': 'test-vol-name', - 'display_description': 'test-vol-desc', - 'snapshot_id': None, - 'attachments': [], -} -OS_VOLUME_2 = { - 'id': ID_OS_VOLUME_2, - 'status': 'in-use', - 'availability_zone': NAME_AVAILABILITY_ZONE, - 'size': 1, - 'created_at': TIME_CREATE_VOLUME_2, - 'display_name': 'test-vol-name', - 'display_description': 'test-vol-desc', - 'snapshot_id': None, - 'attachments': [{'device': ROOT_DEVICE_NAME_INSTANCE_2, - 'server_id': ID_OS_INSTANCE_2}], -} -OS_VOLUME_3 = { - 'id': ID_OS_VOLUME_3, - 'status': 'available', - 'availability_zone': NAME_AVAILABILITY_ZONE, - 'size': 1, - 'created_at': TIME_CREATE_VOLUME_3, - 'display_name': 'test-vol-name', - 'display_description': 'test-vol-desc', - 'snapshot_id': ID_OS_SNAPSHOT_1, - 'attachments': [], -} - - -# availability zone objects - -class NovaAvailabilityZone(object): - - def __init__(self, nova_availability_zone_dict): - self.zoneName = nova_availability_zone_dict['zoneName'] - self.zoneState = {'available': ( - nova_availability_zone_dict['zoneState'] == 'available')} - self.hosts = 
nova_availability_zone_dict['hosts'] - - -OS_AVAILABILITY_ZONE = {'zoneName': NAME_AVAILABILITY_ZONE, - 'zoneState': 'available', - 'hosts': {'host1': {'service1': { - 'active': 'True', - 'available': 'True', - 'updated_at': 'now'}, - 'service2': { - 'active': 'False', - 'available': 'False', - 'updated_at': 'now'}}, - 'host2': {'service1': { - 'active': 'True', - 'available': 'True', - 'updated_at': 'now'}} - }} -OS_AVAILABILITY_ZONE_INTERNAL = {'zoneName': 'internal', - 'zoneState': 'available', - 'hosts': {}} -EC2_AVAILABILITY_ZONE = {'zoneName': NAME_AVAILABILITY_ZONE, - 'zoneState': 'available'} - - -# keypair objects - -class NovaKeyPair(object): - - def __init__(self, nova_keypair_dict): - self.name = nova_keypair_dict['name'] - self.fingerprint = nova_keypair_dict['fingerprint'] - self.private_key = nova_keypair_dict['private_key'] - self.public_key = nova_keypair_dict['public_key'] - - -OS_KEY_PAIR = {'name': NAME_KEY_PAIR, - 'private_key': PRIVATE_KEY_KEY_PAIR, - 'public_key': PUBLIC_KEY_KEY_PAIR, - 'fingerprint': FINGERPRINT_KEY_PAIR} - -EC2_KEY_PAIR = {'keyName': NAME_KEY_PAIR, - 'keyFingerprint': FINGERPRINT_KEY_PAIR, - 'keyMaterial': PRIVATE_KEY_KEY_PAIR} - - -# vpn gateway objects -DB_VPN_GATEWAY_1 = { - 'id': ID_EC2_VPN_GATEWAY_1, - 'os_id': None, - 'vpc_id': ID_EC2_VPC_1, -} -DB_VPN_GATEWAY_2 = { - 'id': ID_EC2_VPN_GATEWAY_2, - 'os_id': None, - 'vpc_id': None, -} - -EC2_VPN_GATEWAY_1 = { - 'vpnGatewayId': ID_EC2_VPN_GATEWAY_1, - 'state': 'available', - 'type': 'ipsec.1', - 'attachments': [{'state': 'attached', - 'vpcId': ID_EC2_VPC_1}], -} -EC2_VPN_GATEWAY_2 = { - 'vpnGatewayId': ID_EC2_VPN_GATEWAY_2, - 'state': 'available', - 'type': 'ipsec.1', - 'attachments': None, -} - -OS_VPNSERVICE_1 = { - 'id': ID_OS_VPNSERVICE_1, - 'subnet_id': ID_OS_SUBNET_1, - 'router_id': ID_OS_ROUTER_1, - 'name': ID_EC2_SUBNET_1, -} -OS_VPNSERVICE_2 = { - 'id': ID_OS_VPNSERVICE_2, - 'subnet_id': ID_OS_SUBNET_2, - 'router_id': ID_OS_ROUTER_1, - 'name': ID_EC2_SUBNET_2, 
-} - -# customer gateway objects -DB_CUSTOMER_GATEWAY_1 = { - 'id': ID_EC2_CUSTOMER_GATEWAY_1, - 'ip_address': IP_CUSTOMER_GATEWAY_ADDRESS_1, - 'os_id': None, - 'vpc_id': None, -} -DB_CUSTOMER_GATEWAY_2 = { - 'id': ID_EC2_CUSTOMER_GATEWAY_2, - 'ip_address': IP_CUSTOMER_GATEWAY_ADDRESS_2, - 'os_id': None, - 'vpc_id': None, -} - -EC2_CUSTOMER_GATEWAY_1 = { - 'customerGatewayId': ID_EC2_CUSTOMER_GATEWAY_1, - 'ipAddress': IP_CUSTOMER_GATEWAY_ADDRESS_1, - 'state': 'available', - 'type': 'ipsec.1', - 'bgpAsn': 65000, -} -EC2_CUSTOMER_GATEWAY_2 = { - 'customerGatewayId': ID_EC2_CUSTOMER_GATEWAY_2, - 'ipAddress': IP_CUSTOMER_GATEWAY_ADDRESS_2, - 'state': 'available', - 'type': 'ipsec.1', - 'bgpAsn': 65000, -} - - -# VPN connection objects -DB_VPN_CONNECTION_1 = { - 'id': ID_EC2_VPN_CONNECTION_1, - 'vpc_id': None, - 'os_id': None, - 'customer_gateway_id': ID_EC2_CUSTOMER_GATEWAY_1, - 'vpn_gateway_id': ID_EC2_VPN_GATEWAY_1, - 'pre_shared_key': PRE_SHARED_KEY_1, - 'os_ikepolicy_id': ID_OS_IKEPOLICY_1, - 'os_ipsecpolicy_id': ID_OS_IPSECPOLICY_1, - 'cidrs': [CIDR_VPN_1_PROPAGATED_1], - 'os_ipsec_site_connections': { - ID_EC2_SUBNET_2: ID_OS_IPSEC_SITE_CONNECTION_2}, -} -DB_VPN_CONNECTION_2 = { - 'id': ID_EC2_VPN_CONNECTION_2, - 'vpc_id': None, - 'os_id': None, - 'customer_gateway_id': ID_EC2_CUSTOMER_GATEWAY_2, - 'vpn_gateway_id': ID_EC2_VPN_GATEWAY_2, - 'pre_shared_key': PRE_SHARED_KEY_2, - 'os_ikepolicy_id': ID_OS_IKEPOLICY_2, - 'os_ipsecpolicy_id': ID_OS_IPSECPOLICY_2, - 'cidrs': [CIDR_VPN_2_PROPAGATED_1, - CIDR_VPN_2_PROPAGATED_2], - 'os_ipsec_site_connections': {}, -} - -EC2_VPN_CONNECTION_1 = { - 'vpnConnectionId': ID_EC2_VPN_CONNECTION_1, - 'vpnGatewayId': ID_EC2_VPN_GATEWAY_1, - 'customerGatewayId': ID_EC2_CUSTOMER_GATEWAY_1, - 'state': 'available', - 'type': 'ipsec.1', - 'routes': [{'destinationCidrBlock': CIDR_VPN_1_PROPAGATED_1, - 'state': 'available'}], - 'vgwTelemetry': None, - 'options': {'staticRoutesOnly': True}, - 'customerGatewayConfiguration': 
CUSTOMER_GATEWAY_CONFIGURATION_1, -} -EC2_VPN_CONNECTION_2 = { - 'vpnConnectionId': ID_EC2_VPN_CONNECTION_2, - 'vpnGatewayId': ID_EC2_VPN_GATEWAY_2, - 'customerGatewayId': ID_EC2_CUSTOMER_GATEWAY_2, - 'state': 'available', - 'type': 'ipsec.1', - 'routes': [{'destinationCidrBlock': CIDR_VPN_2_PROPAGATED_1, - 'state': 'available'}, - {'destinationCidrBlock': CIDR_VPN_2_PROPAGATED_2, - 'state': 'available'}], - 'vgwTelemetry': None, - 'options': {'staticRoutesOnly': True}, - 'customerGatewayConfiguration': CUSTOMER_GATEWAY_CONFIGURATION_2, -} - -OS_IKEPOLICY_1 = { - 'id': ID_OS_IKEPOLICY_1, - 'ike_version': 'v1', - 'auth_algorithm': 'sha1', - 'encryption_algorithm': 'aes-128', - 'pfs': 'group2', - 'phase1_negotiation_mode': 'main', - 'lifetime': {'units': 'seconds', - 'value': 28800} -} -OS_IKEPOLICY_2 = { - 'id': ID_OS_IKEPOLICY_2, - 'ike_version': 'v1', - 'auth_algorithm': 'sha1', - 'encryption_algorithm': 'aes-128', - 'pfs': 'group2', - 'phase1_negotiation_mode': 'main', - 'lifetime': {'units': 'seconds', - 'value': 28800} -} -OS_IPSECPOLICY_1 = { - 'id': ID_OS_IPSECPOLICY_1, - 'transform_protocol': 'esp', - 'auth_algorithm': 'sha1', - 'encryption_algorithm': 'aes-128', - 'pfs': 'group2', - 'encapsulation_mode': 'tunnel', - 'lifetime': {'units': 'seconds', - 'value': 3600} -} -OS_IPSECPOLICY_2 = { - 'id': ID_OS_IPSECPOLICY_2, - 'transform_protocol': 'esp', - 'auth_algorithm': 'sha1', - 'encryption_algorithm': 'aes-128', - 'pfs': 'group2', - 'encapsulation_mode': 'tunnel', - 'lifetime': {'units': 'seconds', - 'value': 3600} -} - - -# Object generator functions section - -# internet gateway generator functions -def gen_db_igw(ec2_id, ec2_vpc_id=None): - return {'id': ec2_id, - 'os_id': None, - 'vpc_id': ec2_vpc_id} - - -# network interface generator functions -def gen_db_network_interface(ec2_id, os_id, vpc_ec2_id, subnet_ec2_id, - private_ip_address, description=None, - instance_id=None, device_index=None, - delete_on_termination=False): - eni = {'id': ec2_id, - 
'os_id': os_id, - 'vpc_id': vpc_ec2_id, - 'subnet_id': subnet_ec2_id, - 'description': description, - 'private_ip_address': private_ip_address} - if instance_id: - eni['instance_id'] = instance_id - eni['device_index'] = device_index - eni['delete_on_termination'] = delete_on_termination - eni['attach_time'] = TIME_ATTACH_NETWORK_INTERFACE - return eni - - -def gen_ec2_network_interface(ec2_network_interface_id, ec2_subnet, ips, - description=None, ec2_instance_id=None, - device_index=None, - delete_on_termination=False, - for_instance_output=False, - ec2_subnet_id=None, - ec2_vpc_id=None): - """Generate EC2 Network Interface dictionary. - - Set privateIpAddres from the first element of ips. - If ec2_subnet_id and ec2_vpc_id are used if passed instead of getting - appropriate values from ec2_subnet - """ - ec2_network_interface = { - 'networkInterfaceId': ec2_network_interface_id, - 'vpcId': ec2_vpc_id if ec2_vpc_id else ec2_subnet['vpcId'], - 'subnetId': ec2_subnet_id if ec2_subnet_id else ec2_subnet['subnetId'], - 'description': description, - 'macAddress': MAC_ADDRESS, - 'privateIpAddress': ips[0], - 'privateIpAddressesSet': [{'privateIpAddress': ip, - 'primary': ip == ips[0]} - for ip in ips], - 'sourceDestCheck': True, - 'ownerId': ID_OS_PROJECT, - 'requesterManaged': False, - 'groupSet': [], - 'tagSet': [], - } - if not ec2_instance_id: - ec2_network_interface['status'] = 'available' - else: - attachment_id = ec2_network_interface_id.replace('eni', 'eni-attach') - attachment = {'status': 'attached', - 'deviceIndex': device_index, - 'attachTime': TIME_ATTACH_NETWORK_INTERFACE, - 'deleteOnTermination': delete_on_termination, - 'attachmentId': attachment_id} - if not for_instance_output: - attachment.update({ - 'instanceId': ec2_instance_id, - 'instanceOwnerId': ID_OS_PROJECT}) - ec2_network_interface['status'] = 'in-use' - ec2_network_interface['attachment'] = attachment - return ec2_network_interface - - -def gen_os_port(os_id, ec2_network_interface, 
os_subnet_id, fixed_ips, - os_instance_id=None): - return {'id': os_id, - 'network_id': os_subnet_id, - 'name': ec2_network_interface['networkInterfaceId'], - 'status': 'ACTIVE' if os_instance_id else 'DOWN', - 'mac_address': MAC_ADDRESS, - 'fixed_ips': [{'ip_address': ip, 'subnet_id': os_subnet_id} - for ip in fixed_ips], - 'device_id': os_instance_id, - 'security_groups': []} - - -# instance generator functions -def gen_ec2_instance(ec2_instance_id, private_ip_address='', - ec2_network_interfaces=None, - floating_ip=None, image_id=None, kernel_id=None, - ramdisk_id=None, launch_index=0, reservation_id=None): - """Generate EC2 Instance dictionary. - - private_ip_address must be specified as IP value or None - Set vpcId from the first ec2_network_interfaces - If private_ip_address is not None, set subnetId from the first - ec2_network_interfaces - """ - ec2_instance = {'instanceId': ec2_instance_id, - 'privateIpAddress': private_ip_address, - 'amiLaunchIndex': launch_index, - 'placement': {'availabilityZone': None}, - 'dnsName': floating_ip, - 'instanceState': {'code': 0, 'name': 'pending'}, - 'imageId': image_id, - 'productCodesSet': [], - 'privateDnsName': '%s-%s' % (reservation_id, launch_index), - 'keyName': None, - 'launchTime': None, - 'rootDeviceType': 'instance-store', - 'instanceType': 'fake_flavor', - 'rootDeviceName': '/dev/vda'} - if floating_ip is not None: - ec2_instance['ipAddress'] = floating_ip - if ec2_network_interfaces: - ec2_instance['networkInterfaceSet'] = ( - [tools.patch_dict(ni, - {'attachment': tools.purge_dict( - ni['attachment'], - ['instanceId', 'instanceOwnerId'])}, - ['tagSet']) - for ni in ec2_network_interfaces]) - ec2_instance['vpcId'] = ec2_network_interfaces[0]['vpcId'] - primary_eni = next((eni for eni in ec2_network_interfaces - if eni['attachment']['deviceIndex'] == 0), None) - if primary_eni: - ec2_instance['subnetId'] = primary_eni['subnetId'] - ec2_instance['groupSet'] = primary_eni['groupSet'] - 
ec2_instance['sourceDestCheck'] = primary_eni['sourceDestCheck'] - if private_ip_address == '': - ec2_instance['privateIpAddress'] = ( - primary_eni['privateIpAddress']) - if kernel_id: - ec2_instance['kernelId'] = kernel_id - if ramdisk_id: - ec2_instance['ramdiskId'] = ramdisk_id - return ec2_instance - - -def gen_ec2_reservation(ec2_reservation_id, ec2_instances): - """Generate EC2 Reservation dictionary.""" - return {'reservationId': ec2_reservation_id, - 'ownerId': ID_OS_PROJECT, - 'instancesSet': [inst for inst in ec2_instances], - 'groupSet': []} diff --git a/ec2api/tests/unit/fakes_request_response.py b/ec2api/tests/unit/fakes_request_response.py deleted file mode 100644 index d822f65c..00000000 --- a/ec2api/tests/unit/fakes_request_response.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from ec2api.tests.unit import tools - -XML_RESULT_TEMPLATE = ''' -<%(action)sResponse - xmlns="http://ec2.amazonaws.com/doc/%(api_version)s/"> - %(request_id)s - %(data)s -''' -XML_ERROR_TEMPLATE = ''' - - - %(code)s%(message)s - - %(request_id)s -''' - -XML_FAKE_RESULT = ''' - - - true - false - 1234 - fake - - - fake - - - fake - - - - - - - - - fake - - - - - - - - - - fake - - - fake - - - - - - -''' -DICT_FAKE_RESULT_DATA = { - 'fakeInfo': { - 'fakeNone': None, - 'fakeTrue': True, - 'fakeFalse': False, - 'fakeInt': 1234, - 'fakeStr': 'fake', - 'fakeSet': [{'fakeData': 'fake'}, - {'fakeData': 'fake'}], - }, - 'fakeEmptySet': [], - 'fakeComplexSet': [ - {'fakeSubSet': [{'fakeData': 'fake'}, - {'fakeData': None}]}, - {'fakeSubSet': [{'fakeData': 'fake'}, - {'fakeData': 'fake'}]}, - ], -} -DICT_FAKE_RESULT = { - 'FakeActionResponse': tools.update_dict( - DICT_FAKE_RESULT_DATA, - {'requestId': None}) -} - -XML_SINGLE_RESULT = ''' - - req-8a80bb71-1e1d-49be-819f-fba429b0ddf1 - pending - - vol-00000001 - 1 - - 2014-06-04T19:55:55.448117 - - snap-00000001 - -''' -DICT_SINGLE_RESULT = { - 'CreateSnapshotResponse': { - 'status': 'pending', - 'description': None, - 'volumeId': 'vol-00000001', - 'volumeSize': 1, - 'progress': None, - 'startTime': '2014-06-04T19:55:55.448117', - 'ownerId': None, - 'snapshotId': 'snap-00000001', - 'requestId': 'req-8a80bb71-1e1d-49be-819f-fba429b0ddf1', - } -} - -XML_RESULT_SET = ''' - - req-1fc541a8-477d-4928-a90e-4448ea57ba51 - - - - 77dcabaee8ea4a8fbae697ddc09afdaf - true - aki-00000001 - available - - None (cirros-0.3.2-x86_64-uec-kernel) - instance-store - /dev/sda1 - kernel - cirros-0.3.2-x86_64-uec-kernel - - - - 77dcabaee8ea4a8fbae697ddc09afdaf - true - ari-00000002 - available - - None (cirros-0.3.2-x86_64-uec-ramdisk) - instance-store - /dev/sda1 - ramdisk - cirros-0.3.2-x86_64-uec-ramdisk - - - cirros-0.3.2-x86_64-uec - 77dcabaee8ea4a8fbae697ddc09afdaf - true - ami-00000003 - available - instance-store - - None 
(cirros-0.3.2-x86_64-uec) - aki-00000001 - ari-00000002 - /dev/sda1 - machine - - - - - 77dcabaee8ea4a8fbae697ddc09afdaf - true - ami-00000004 - available - - None (Fedora-x86_64-20-20131211.1-sda) - instance-store - /dev/sda1 - machine - Fedora-x86_64-20-20131211.1-sda - - - -''' -DICT_RESULT_SET = { - 'DescribeImagesResponse': { - 'imagesSet': [{ - 'description': None, - 'imageOwnerId': '77dcabaee8ea4a8fbae697ddc09afdaf', - 'isPublic': True, - 'imageId': 'aki-00000001', - 'imageState': 'available', - 'architecture': None, - 'imageLocation': 'None (cirros-0.3.2-x86_64-uec-kernel)', - 'rootDeviceType': 'instance-store', - 'rootDeviceName': '/dev/sda1', - 'imageType': 'kernel', - 'name': 'cirros-0.3.2-x86_64-uec-kernel', - }, - { - 'description': None, - 'imageOwnerId': '77dcabaee8ea4a8fbae697ddc09afdaf', - 'isPublic': True, - 'imageId': 'ari-00000002', - 'imageState': 'available', - 'architecture': None, - 'imageLocation': 'None (cirros-0.3.2-x86_64-uec-ramdisk)', - 'rootDeviceType': 'instance-store', - 'rootDeviceName': '/dev/sda1', - 'imageType': 'ramdisk', - 'name': 'cirros-0.3.2-x86_64-uec-ramdisk', - }, - { - 'name': 'cirros-0.3.2-x86_64-uec', - 'imageOwnerId': '77dcabaee8ea4a8fbae697ddc09afdaf', - 'isPublic': True, - 'imageId': 'ami-00000003', - 'imageState': 'available', - 'rootDeviceType': 'instance-store', - 'architecture': None, - 'imageLocation': 'None (cirros-0.3.2-x86_64-uec)', - 'kernelId': 'aki-00000001', - 'ramdiskId': 'ari-00000002', - 'rootDeviceName': '/dev/sda1', - 'imageType': 'machine', - 'description': None, - }, - { - 'description': None, - 'imageOwnerId': '77dcabaee8ea4a8fbae697ddc09afdaf', - 'isPublic': True, - 'imageId': 'ami-00000004', - 'imageState': 'available', - 'architecture': None, - 'imageLocation': 'None (Fedora-x86_64-20-20131211.1-sda)', - 'rootDeviceType': 'instance-store', - 'rootDeviceName': '/dev/sda1', - 'imageType': 'machine', - 'name': 'Fedora-x86_64-20-20131211.1-sda', - }], - 'requestId': 
'req-1fc541a8-477d-4928-a90e-4448ea57ba51', - } -} - -XML_EMPTY_RESULT_SET = ''' - - a25fa489-f97f-428a-9d30-9fcb1e9b9b65 - - -''' -DICT_EMPTY_RESULT_SET = { - 'DescribeVolumesResponse': { - 'requestId': 'a25fa489-f97f-428a-9d30-9fcb1e9b9b65', - 'volumeSet': [], - } -} - -XML_ERROR = ''' -InvalidInstanceID.NotFound -Instance i-00000001 could not be found. -req-89eb083f-3c44-46e7-bc37-2c050ed7a9ce -''' -DICT_ERROR = { - 'Response': { - 'RequestID': 'req-89eb083f-3c44-46e7-bc37-2c050ed7a9ce', - 'Errors': { - 'Error': { - 'Code': 'InvalidInstanceID.NotFound', - 'Message': 'Instance i-00000001 could not be found.', - } - } - } -} - -XML_SILENT_OPERATIN_RESULT = ''' - - req-8a80bb71-1e1d-49be-819f-fba429b0ddf1 - true - -''' - -DOTTED_FAKE_PARAMS = { - 'FakeStr': 'fake', - 'FakeInt': '1234', - 'FakeBool': 'False', - 'FakeDict.FakeKey': 'fake', - 'FakeList.1.FakeElemKey': 'fake', - 'FakeList.2.FakeElemKey': 'fake', - 'FakeComplexList.1.FakeElemKey.1.FakeSubElemKey': 'fake', - 'FakeComplexList.1.FakeElemKey.2.FakeSubElemKey': 'fake', - 'FakeComplexList.1.FakeElemKeyOther': 'fake', - 'FakeComplexList.2.FakeElemKey.1.FakeSubElemKey': 'fake', - 'FakeComplexList.2.FakeElemKey.2.FakeSubElemKey': 'fake', - 'FakeComplexList.2.FakeElemKeyOther': 'fake', -} -DICT_FAKE_PARAMS = { - 'fake_str': 'fake', - 'fake_int': 1234, - 'fake_bool': False, - 'fake_dict': {'fake_key': 'fake'}, - 'fake_list': [{'fake_elem_key': 'fake'}, - {'fake_elem_key': 'fake'}], - 'fake_complex_list': [ - {'fake_elem_key': [{'fake_sub_elem_key': 'fake'}, - {'fake_sub_elem_key': 'fake'}], - 'fake_elem_key_other': 'fake'}, - {'fake_elem_key': [{'fake_sub_elem_key': 'fake'}, - {'fake_sub_elem_key': 'fake'}], - 'fake_elem_key_other': 'fake'}], -} diff --git a/ec2api/tests/unit/matchers.py b/ec2api/tests/unit/matchers.py deleted file mode 100644 index 6944ab8e..00000000 --- a/ec2api/tests/unit/matchers.py +++ /dev/null @@ -1,564 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Matcher classes to be used inside of the testtools assertThat framework.""" - -import copy -import pprint - -from lxml import etree -from testtools import content - - -class DictKeysMismatch(object): - - def __init__(self, d1only, d2only): - self.d1only = d1only - self.d2only = d2only - - def describe(self): - return ('Keys in d1 and not d2: %(d1only)s.' - ' Keys in d2 and not d1: %(d2only)s' % - {'d1only': self.d1only, 'd2only': self.d2only}) - - def get_details(self): - return {} - - -class ValueMismatch(object): - - def __init__(self, v1, v2): - self.v1 = v1 - self.v2 = v2 - - def describe(self): - return ("Values do not match. v1: %(v1)s v2: %(v2)s" % - {'v1': self.v1, 'v2': self.v2}) - - def get_details(self): - return {} - - -class DictMismatch(object): - - def __init__(self, key, d1_value, d2_value): - self.key = key - self.d1_value = d1_value - self.d2_value = d2_value - - def describe(self): - return ("Dictionaries do not match at %(key)s." 
- " d1: %(d1_value)s d2: %(d2_value)s" % - {'key': self.key, 'd1_value': self.d1_value, - 'd2_value': self.d2_value}) - - def get_details(self): - return {} - - -class ValueMatches(object): - - def __init__(self, v1, approx_equal=False, tolerance=0.001, - orderless_lists=False): - self.v1 = v1 - self.approx_equal = approx_equal - self.tolerance = tolerance - self.orderless_lists = orderless_lists - - def __str__(self): - return 'ValueMatches(%s)' % (pprint.pformat(self.v1)) - - # Useful assertions - def match(self, v2): - """Assert two values are equivalent. - - NOTE: - - If you don't care (or don't know) a given value, you can specify - the string DONTCARE as the value. This will cause that item - to be skipped. - - """ - - try: - error = abs(float(self.v1) - float(v2)) - within_tolerance = error <= self.tolerance - except (ValueError, TypeError): - # If both values aren't convertible to float, just ignore - # ValueError if arg is a str, TypeError if it's something else - # (like None) - within_tolerance = False - - if hasattr(self.v1, 'keys') and hasattr(v2, 'keys'): - matcher = DictMatches(self.v1, self.approx_equal, self.tolerance, - self.orderless_lists) - did_match = matcher.match(v2) - if did_match is not None: - return did_match - elif isinstance(self.v1, list) and isinstance(v2, list): - matcher = ListMatches(self.v1, self.approx_equal, self.tolerance, - self.orderless_lists) - did_match = matcher.match(v2) - if did_match is not None: - return did_match - elif 'DONTCARE' in (self.v1, v2): - return - elif self.approx_equal and within_tolerance: - return - elif self.v1 != v2: - return ValueMismatch(self.v1, v2) - - -class DictMatches(object): - - def __init__(self, d1, approx_equal=False, tolerance=0.001, - orderless_lists=False): - self.d1 = d1 - self.approx_equal = approx_equal - self.tolerance = tolerance - self.orderless_lists = orderless_lists - - def __str__(self): - return 'DictMatches(%s)' % (pprint.pformat(self.d1)) - - # Useful assertions - def 
match(self, d2): - """Assert two dicts are equivalent. - - This is a 'deep' match in the sense that it handles nested - dictionaries appropriately. - - NOTE: - - If you don't care (or don't know) a given value, you can specify - the string DONTCARE as the value. This will cause that dict-item - to be skipped. - - """ - - d1keys = set(self.d1.keys()) - d2keys = set(d2.keys()) - if d1keys != d2keys: - d1only = d1keys - d2keys - d2only = d2keys - d1keys - return DictKeysMismatch(d1only, d2only) - - for key in d1keys: - d1value = self.d1[key] - d2value = d2[key] - matcher = ValueMatches(d1value, self.approx_equal, self.tolerance, - self.orderless_lists) - did_match = matcher.match(d2value) - if did_match is not None: - return did_match - - -class ListMismatch(object): - - def __init__(self, l1, l2): - self.l1 = l1 - self.l2 = l2 - - def describe(self): - return ('Lists mismatch: L1=%(l1)s != L2=%(l2)s' % - {'l1': self.l1, 'l2': self.l2}) - - def get_details(self): - return {} - - -class ListLengthMismatch(object): - - def __init__(self, l1, l2): - self.l1 = l1 - self.l2 = l2 - - def describe(self): - return ('Lists lengths mismatch: L1=%(l1)s != L2=%(l2)s' % - {'l1': self.l1, 'l2': self.l2}) - - def get_details(self): - return {} - - -class ListMatches(object): - - def __init__(self, l1, approx_equal=False, tolerance=0.001, - orderless_lists=False): - self.l1 = l1 - self.approx_equal = approx_equal - self.tolerance = tolerance - self.orderless_lists = orderless_lists - - def __str__(self): - return 'ListMatches(%s)' % (pprint.pformat(self.l1)) - - # Useful assertions - def match(self, l2): - """Assert a list of dicts are equivalent.""" - - l1count = len(self.l1) - l2count = len(l2) - if l1count != l2count: - return ListLengthMismatch(self.l1, l2) - l2 = copy.deepcopy(l2) - for v1 in self.l1: - for v2 in l2: - matcher = ValueMatches(v1, self.approx_equal, self.tolerance, - self.orderless_lists) - did_match = matcher.match(v2) - if did_match is None: - l2.remove(v2) - 
break - if not self.orderless_lists: - break - if did_match is not None: - return did_match - - -class SubDictMismatch(object): - - def __init__(self, - key=None, - sub_value=None, - super_value=None, - keys=False): - self.key = key - self.sub_value = sub_value - self.super_value = super_value - self.keys = keys - - def describe(self): - if self.keys: - return "Keys between dictionaries did not match" - else: - return("Dictionaries do not match at %s. d1: %s d2: %s" - % (self.key, - self.super_value, - self.sub_value)) - - def get_details(self): - return {} - - -class IsSubDictOf(object): - - def __init__(self, super_dict): - self.super_dict = super_dict - - def __str__(self): - return 'IsSubDictOf(%s)' % (self.super_dict) - - def match(self, sub_dict): - """Assert a sub_dict is subset of super_dict.""" - if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())): - return SubDictMismatch(keys=True) - for k, sub_value in sub_dict.items(): - super_value = self.super_dict[k] - if isinstance(sub_value, dict): - matcher = IsSubDictOf(super_value) - did_match = matcher.match(sub_value) - if did_match is not None: - return did_match - elif 'DONTCARE' in (sub_value, super_value): - continue - else: - if sub_value != super_value: - return SubDictMismatch(k, sub_value, super_value) - - -class FunctionCallMatcher(object): - - def __init__(self, expected_func_calls): - self.expected_func_calls = expected_func_calls - self.actual_func_calls = [] - - def call(self, *args, **kwargs): - func_call = {'args': args, 'kwargs': kwargs} - self.actual_func_calls.append(func_call) - - def match(self): - dict_list_matcher = ListMatches(self.expected_func_calls) - return dict_list_matcher.match(self.actual_func_calls) - - -class XMLMismatch(object): - - """Superclass for XML mismatch.""" - - def __init__(self, state): - self.path = str(state) - self.expected = state.expected - self.actual = state.actual - - def describe(self): - return "%(path)s: XML does not match" % {'path': 
self.path} - - def get_details(self): - return { - 'expected': content.text_content(self.expected), - 'actual': content.text_content(self.actual), - } - - -class XMLTagMismatch(XMLMismatch): - - """XML tags don't match.""" - - def __init__(self, state, idx, expected_tag, actual_tag): - super(XMLTagMismatch, self).__init__(state) - self.idx = idx - self.expected_tag = expected_tag - self.actual_tag = actual_tag - - def describe(self): - return ("%(path)s: XML tag mismatch at index %(idx)d: " - "expected tag <%(expected_tag)s>; " - "actual tag <%(actual_tag)s>" % - {'path': self.path, 'idx': self.idx, - 'expected_tag': self.expected_tag, - 'actual_tag': self.actual_tag}) - - -class XMLAttrKeysMismatch(XMLMismatch): - - """XML attribute keys don't match.""" - - def __init__(self, state, expected_only, actual_only): - super(XMLAttrKeysMismatch, self).__init__(state) - self.expected_only = ', '.join(sorted(expected_only)) - self.actual_only = ', '.join(sorted(actual_only)) - - def describe(self): - return ("%(path)s: XML attributes mismatch: " - "keys only in expected: %(expected_only)s; " - "keys only in actual: %(actual_only)s" % - {'path': self.path, 'expected_only': self.expected_only, - 'actual_only': self.actual_only}) - - -class XMLAttrValueMismatch(XMLMismatch): - - """XML attribute values don't match.""" - - def __init__(self, state, key, expected_value, actual_value): - super(XMLAttrValueMismatch, self).__init__(state) - self.key = key - self.expected_value = expected_value - self.actual_value = actual_value - - def describe(self): - return ("%(path)s: XML attribute value mismatch: " - "expected value of attribute %(key)s: %(expected_value)r; " - "actual value: %(actual_value)r" % - {'path': self.path, 'key': self.key, - 'expected_value': self.expected_value, - 'actual_value': self.actual_value}) - - -class XMLTextValueMismatch(XMLMismatch): - - """XML text values don't match.""" - - def __init__(self, state, expected_text, actual_text): - 
super(XMLTextValueMismatch, self).__init__(state) - self.expected_text = expected_text - self.actual_text = actual_text - - def describe(self): - return ("%(path)s: XML text value mismatch: " - "expected text value: %(expected_text)r; " - "actual value: %(actual_text)r" % - {'path': self.path, 'expected_text': self.expected_text, - 'actual_text': self.actual_text}) - - -class XMLUnexpectedChild(XMLMismatch): - - """Unexpected child present in XML.""" - - def __init__(self, state, tag, idx): - super(XMLUnexpectedChild, self).__init__(state) - self.tag = tag - self.idx = idx - - def describe(self): - return ("%(path)s: XML unexpected child element <%(tag)s> " - "present at index %(idx)d" % - {'path': self.path, 'tag': self.tag, 'idx': self.idx}) - - -class XMLExpectedChild(XMLMismatch): - - """Expected child not present in XML.""" - - def __init__(self, state, tag, idx): - super(XMLExpectedChild, self).__init__(state) - self.tag = tag - self.idx = idx - - def describe(self): - return ("%(path)s: XML expected child element <%(tag)s> " - "not present at index %(idx)d" % - {'path': self.path, 'tag': self.tag, 'idx': self.idx}) - - -class XMLMatchState(object): - - """Maintain some state for matching. - - Tracks the XML node path and saves the expected and actual full - XML text, for use by the XMLMismatch subclasses. - """ - - def __init__(self, expected, actual): - self.path = [] - self.expected = expected - self.actual = actual - - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_value, exc_tb): - self.path.pop() - return False - - def __str__(self): - return '/' + '/'.join(self.path) - - def node(self, tag, idx): - """Adds tag and index to the path; they will be popped off - - when the corresponding 'with' statement exits. - - :param tag: The element tag - :param idx: If not None, the integer index of the element - within its parent. Not included in the path - element if None. 
- """ - - if idx is not None: - self.path.append("%s[%d]" % (tag, idx)) - else: - self.path.append(tag) - return self - - -class XMLMatches(object): - - """Compare XML strings. More complete than string comparison.""" - - def __init__(self, expected, orderless_sequence=False): - self.expected_xml = expected - self.orderless_sequence = orderless_sequence - self.expected = etree.fromstring(expected) - if self.orderless_sequence: - self._sort_xml(self.expected) - - def __str__(self): - return 'XMLMatches(%r)' % self.expected_xml - - def _sort_xml(self, xml): - for parent in xml.xpath('//*[./*]'): - parent[:] = sorted(parent, key=lambda x: x.tag) - - def match(self, actual_xml): - actual = etree.fromstring(actual_xml) - if self.orderless_sequence: - self._sort_xml(actual) - - state = XMLMatchState(self.expected_xml, actual_xml) - result = self._compare_node(self.expected, actual, state, None) - - if result is False: - return XMLMismatch(state) - elif result is not True: - return result - - def _compare_node(self, expected, actual, state, idx): - """Recursively compares nodes within the XML tree.""" - - # Start by comparing the tags - if expected.tag != actual.tag: - return XMLTagMismatch(state, idx, expected.tag, actual.tag) - - with state.node(expected.tag, idx): - # Compare the attribute keys - expected_attrs = set(expected.attrib.keys()) - actual_attrs = set(actual.attrib.keys()) - if expected_attrs != actual_attrs: - expected_only = expected_attrs - actual_attrs - actual_only = actual_attrs - expected_attrs - return XMLAttrKeysMismatch(state, expected_only, actual_only) - - # Compare the attribute values - for key in expected_attrs: - expected_value = expected.attrib[key] - actual_value = actual.attrib[key] - - if 'DONTCARE' in (expected_value, actual_value): - continue - elif expected_value != actual_value: - return XMLAttrValueMismatch(state, key, expected_value, - actual_value) - - # Compare the contents of the node - if len(expected) == 0 and len(actual) == 0: 
- # No children, compare text values - if ('DONTCARE' not in (expected.text, actual.text) and - expected.text != actual.text): - return XMLTextValueMismatch(state, expected.text, - actual.text) - else: - expected_idx = 0 - actual_idx = 0 - while (expected_idx < len(expected) and - actual_idx < len(actual)): - # Ignore comments and processing instructions - # TODO(Vek): may interpret PIs in the future, to - # allow for, say, arbitrary ordering of some - # elements - if (expected[expected_idx].tag in - (etree.Comment, etree.ProcessingInstruction)): - expected_idx += 1 - continue - - # Compare the nodes - result = self._compare_node(expected[expected_idx], - actual[actual_idx], state, - actual_idx) - if result is not True: - return result - - # Step on to comparing the next nodes... - expected_idx += 1 - actual_idx += 1 - - # Make sure we consumed all nodes in actual - if actual_idx < len(actual): - return XMLUnexpectedChild(state, actual[actual_idx].tag, - actual_idx) - - # Make sure we consumed all nodes in expected - if expected_idx < len(expected): - for node in expected[expected_idx:]: - if (node.tag in - (etree.Comment, etree.ProcessingInstruction)): - continue - - return XMLExpectedChild(state, node.tag, actual_idx) - - # The nodes match - return True diff --git a/ec2api/tests/unit/rel.tar.gz b/ec2api/tests/unit/rel.tar.gz deleted file mode 100644 index b54f55aa790e66950b09ff69033d0e66d2c0c839..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 165 zcmb2|=3vlSeZ!A|`Rye~u0sX_tq<4Oy82y6$uhF_k1YvTGF)`;UYVlDMo|@oNB;$9 z9A)|RAk|~%ol~kim(BE-?9cN!Idj&ZC9faZEbabRX(u5VI@PRe+p*L8rY7X9nU!?( zXUMOdwVF?NRldLb&z*PuZ~HGh?@1RQT$Q-^|Asg5%ikx}?&Wy>Z_j_jrFXX*Ga!Q> PXJo%RS=%sZFfafBwk1wR diff --git a/ec2api/tests/unit/test_address.py b/ec2api/tests/unit/test_address.py deleted file mode 100644 index c3286d43..00000000 --- a/ec2api/tests/unit/test_address.py +++ /dev/null @@ -1,679 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from neutronclient.common import exceptions as neutron_exception -from unittest import mock - -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class AddressTestCase(base.ApiTestCase): - - def setUp(self): - super(AddressTestCase, self).setUp() - - def test_allocate_ec2_classic_address(self): - self.configure(external_network=fakes.NAME_OS_PUBLIC_NETWORK) - self.neutron.list_networks.return_value = ( - {'networks': [{'id': fakes.ID_OS_PUBLIC_NETWORK}]}) - self.neutron.create_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - - resp = self.execute('AllocateAddress', {}) - self.assertEqual(fakes.IP_ADDRESS_1, resp['publicIp']) - self.assertEqual('standard', resp['domain']) - self.assertNotIn('allocationId', resp) - self.assertEqual(0, self.db_api.add_item.call_count) - self.neutron.create_floatingip.assert_called_once_with( - {'floatingip': { - 'floating_network_id': - fakes.ID_OS_PUBLIC_NETWORK}}) - - def test_allocate_vpc_address(self): - self.configure(external_network=fakes.NAME_OS_PUBLIC_NETWORK) - self.neutron.list_networks.return_value = ( - {'networks': [{'id': fakes.ID_OS_PUBLIC_NETWORK}]}) - self.neutron.create_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - self.db_api.add_item.return_value = fakes.DB_ADDRESS_1 - - resp = self.execute('AllocateAddress', {'Domain': 
'vpc'}) - - self.assertEqual(fakes.IP_ADDRESS_1, resp['publicIp']) - self.assertEqual('vpc', resp['domain']) - self.assertEqual(fakes.ID_EC2_ADDRESS_1, - resp['allocationId']) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'eipalloc', - tools.purge_dict(fakes.DB_ADDRESS_1, - ('id', 'vpc_id'))) - self.neutron.create_floatingip.assert_called_once_with( - {'floatingip': { - 'floating_network_id': - fakes.ID_OS_PUBLIC_NETWORK}}) - self.neutron.list_networks.assert_called_once_with( - **{'router:external': True, - 'name': fakes.NAME_OS_PUBLIC_NETWORK}) - self.db_api.reset_mock() - self.neutron.create_floatingip.reset_mock() - self.neutron.list_networks.reset_mock() - - self.configure(disable_ec2_classic=True) - resp = self.execute('AllocateAddress', {}) - - self.assertEqual(fakes.IP_ADDRESS_1, resp['publicIp']) - self.assertEqual('vpc', resp['domain']) - self.assertEqual(fakes.ID_EC2_ADDRESS_1, - resp['allocationId']) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'eipalloc', - tools.purge_dict(fakes.DB_ADDRESS_1, - ('id', 'vpc_id'))) - self.neutron.create_floatingip.assert_called_once_with( - {'floatingip': { - 'floating_network_id': - fakes.ID_OS_PUBLIC_NETWORK}}) - self.neutron.list_networks.assert_called_once_with( - **{'router:external': True, - 'name': fakes.NAME_OS_PUBLIC_NETWORK}) - - def test_allocate_address_invalid_parameters(self): - self.assert_execution_error('InvalidParameterValue', 'AllocateAddress', - {'Domain': 'fake_domain'}) - self.assertEqual(0, self.db_api.add_item.call_count) - self.assertEqual(0, self.neutron.create_floatingip.call_count) - - def test_allocate_address_overlimit(self): - self.configure(external_network=fakes.NAME_OS_PUBLIC_NETWORK) - self.neutron.list_networks.return_value = ( - {'networks': [{'id': fakes.ID_OS_PUBLIC_NETWORK}]}) - self.neutron.create_floatingip.side_effect = ( - neutron_exception.OverQuotaClient()) - self.assert_execution_error('AddressLimitExceeded', 'AllocateAddress', - {'Domain': 
'vpc'}) - self.assert_execution_error('AddressLimitExceeded', 'AllocateAddress', - {}) - - @tools.screen_unexpected_exception_logs - def test_allocate_address_vpc_rollback(self): - self.configure(external_network=fakes.NAME_OS_PUBLIC_NETWORK) - self.neutron.list_networks.return_value = ( - {'networks': [{'id': fakes.ID_OS_PUBLIC_NETWORK}]}) - self.neutron.create_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - self.db_api.add_item.side_effect = Exception() - - self.assert_execution_error(self.ANY_EXECUTE_ERROR, 'AllocateAddress', - {'Domain': 'vpc'}) - - self.neutron.delete_floatingip.assert_called_once_with( - fakes.ID_OS_FLOATING_IP_1) - - # TODO(andrey-mp): api code has to be fixed - # There is no add-floating-ip and remove-floating-ip command in - # python-novaclient. Those command have been removed since 7.0.0 - # version (ocata) and ec2-api has version >9.1.0 since long. - @base.skip_not_implemented - def test_associate_address_ec2_classic(self): - self.set_mock_db_items(fakes.DB_INSTANCE_1) - self.neutron.list_floatingips.return_value = ( - {'floatingips': [fakes.OS_FLOATING_IP_1, - fakes.OS_FLOATING_IP_2]}) - self.nova.servers.add_floating_ip.return_value = True - - resp = self.execute('AssociateAddress', - {'PublicIp': fakes.IP_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}) - self.assertEqual(True, resp['return']) - - self.nova.servers.add_floating_ip.assert_called_once_with( - fakes.ID_OS_INSTANCE_1, - fakes.IP_ADDRESS_1) - - def test_associate_address_vpc(self): - - def do_check(params, fixed_ip): - resp = self.execute('AssociateAddress', params) - self.assertEqual(True, resp['return']) - self.assertEqual(fakes.ID_EC2_ASSOCIATION_1, resp['associationId']) - - self.neutron.update_floatingip.assert_called_once_with( - fakes.ID_OS_FLOATING_IP_1, - {'floatingip': {'port_id': fakes.ID_OS_PORT_2, - 'fixed_ip_address': fixed_ip}}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, - tools.update_dict( - 
fakes.DB_ADDRESS_1, - {'network_interface_id': - fakes.ID_EC2_NETWORK_INTERFACE_2, - 'private_ip_address': fixed_ip})) - - self.neutron.update_floatingip.reset_mock() - self.db_api.update_item.reset_mock() - - self.set_mock_db_items( - fakes.DB_ADDRESS_1, fakes.DB_IGW_1, fakes.DB_IGW_2, - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}, - fakes.IP_NETWORK_INTERFACE_2) - - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2}, - fakes.IP_NETWORK_INTERFACE_2) - - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'PrivateIpAddress': fakes.IP_NETWORK_INTERFACE_2_EXT_1}, - fakes.IP_NETWORK_INTERFACE_2_EXT_1) - - assigned_db_address_1 = tools.update_dict( - fakes.DB_ADDRESS_1, - {'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'private_ip_address': fakes.IP_NETWORK_INTERFACE_1}) - self.add_mock_db_items(assigned_db_address_1) - assigned_floating_ip_1 = tools.update_dict( - fakes.OS_FLOATING_IP_1, - {'fixed_port_id': fakes.ID_OS_PORT_1, - 'fixed_ip_address': fakes.IP_NETWORK_INTERFACE_1}) - self.neutron.show_floatingip.return_value = ( - {'floatingip': assigned_floating_ip_1}) - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1, - 'AllowReassociation': 'True'}, - fakes.IP_NETWORK_INTERFACE_2) - - self.configure(disable_ec2_classic=True) - self.set_mock_db_items( - fakes.DB_VPC_DEFAULT, fakes.DB_ADDRESS_1, fakes.DB_IGW_1, - fakes.DB_NETWORK_INTERFACE_2) - do_check({'PublicIp': fakes.IP_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}, - fakes.IP_NETWORK_INTERFACE_2) - - def test_associate_address_vpc_idempotent(self): - - def do_check(params): - resp = self.execute('AssociateAddress', params) - 
self.assertEqual(True, resp['return']) - self.assertEqual(fakes.ID_EC2_ASSOCIATION_2, resp['associationId']) - - self.set_mock_db_items(fakes.DB_ADDRESS_2, - fakes.DB_NETWORK_INTERFACE_1, - fakes.DB_NETWORK_INTERFACE_2) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_2}) - - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_2, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}) - - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_2, - 'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2}) - - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_2, - 'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'PrivateIpAddress': fakes.IP_NETWORK_INTERFACE_2}) - - def test_associate_address_invalid_main_parameters(self): - - def do_check(params, error): - self.assert_execution_error(error, 'AssociateAddress', params) - - do_check({}, - 'MissingParameter') - - do_check({'PublicIp': '0.0.0.0', - 'AllocationId': 'eipalloc-0'}, - 'InvalidParameterCombination') - - do_check({'PublicIp': '0.0.0.0'}, - 'MissingParameter') - - do_check({'AllocationId': 'eipalloc-0'}, - 'MissingParameter') - - def test_associate_address_invalid_ec2_classic_parameters(self): - # NOTE(ft): ec2 classic instance vs allocation_id parameter - self.set_mock_db_items(fakes.DB_INSTANCE_2) - self.assert_execution_error('InvalidParameterCombination', - 'AssociateAddress', - {'AllocationId': 'eipalloc-0', - 'InstanceId': fakes.ID_EC2_INSTANCE_2}) - - # NOTE(ft): ec2 classic instance vs not existing public IP - self.neutron.list_floatingips.return_value = {'floatingips': []} - self.assert_execution_error('AuthFailure', 'AssociateAddress', - {'PublicIp': fakes.IP_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_2}) - - # NOTE(ft): ec2 classic instance vs vpc public ip - self.add_mock_db_items(fakes.DB_ADDRESS_1, fakes.DB_ADDRESS_2) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - self.assert_execution_error('AuthFailure', 'AssociateAddress', - 
{'PublicIp': fakes.IP_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_2}) - - def test_associate_address_invalid_vpc_parameters(self): - - def do_check(params, error): - self.assert_execution_error(error, 'AssociateAddress', params) - - # NOTE(ft): not registered instance id vs vpc address - self.set_mock_db_items() - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}, - 'InvalidInstanceID.NotFound') - - # NOTE(ft): vpc instance vs public ip parmeter - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_2) - do_check({'PublicIp': '0.0.0.0', - 'InstanceId': fakes.ID_EC2_INSTANCE_1}, - 'InvalidParameterCombination') - - # NOTE(ft): vpc instance vs not registered vpc address - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}, - 'InvalidAllocationID.NotFound') - - # NOTE(ft): not registered network interface id vs vpc address - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1}, - 'InvalidNetworkInterfaceID.NotFound') - - # NOTE(ft): vpc instance vs broken vpc address - self.set_mock_db_items(fakes.DB_ADDRESS_1, - fakes.DB_NETWORK_INTERFACE_2) - self.neutron.show_floatingip.side_effect = neutron_exception.NotFound - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}, - 'InvalidAllocationID.NotFound') - self.neutron.show_floatingip.side_effect = None - - # NOTE(ft): already associated address vs network interface - self.set_mock_db_items(fakes.DB_ADDRESS_1, fakes.DB_ADDRESS_2, - fakes.DB_NETWORK_INTERFACE_1) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_2}) - self.assert_execution_error( - 'Resource.AlreadyAssociated', 'AssociateAddress', - {'AllocationId': fakes.ID_EC2_ADDRESS_2, - 'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1}) - - # NOTE(ft): already associated address vs vpc instance - self.set_mock_db_items( - fakes.DB_ADDRESS_2, - 
fakes.gen_db_network_interface( - fakes.ID_EC2_NETWORK_INTERFACE_1, - fakes.ID_OS_PORT_1, - fakes.ID_EC2_VPC_1, - fakes.ID_EC2_SUBNET_1, - fakes.IP_NETWORK_INTERFACE_1, - instance_id=fakes.ID_EC2_INSTANCE_1)) - self.assert_execution_error('Resource.AlreadyAssociated', - 'AssociateAddress', - {'AllocationId': fakes.ID_EC2_ADDRESS_2, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}) - - # NOTE(ft): multiple network interfaces in vpc instance - # w/o network interface selection - self.add_mock_db_items(fakes.DB_NETWORK_INTERFACE_2) - self.assert_execution_error('InvalidInstanceID', 'AssociateAddress', - {'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}) - - # NOTE(ft): internet gateway isn't attached to the vpc - self.set_mock_db_items(fakes.DB_ADDRESS_1, - fakes.DB_NETWORK_INTERFACE_2) - self.assert_execution_error('Gateway.NotAttached', 'AssociateAddress', - {'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}) - - # NOTE(tikitavi): associate to wrong public ip - self.configure(disable_ec2_classic=True) - self.set_mock_db_items( - fakes.DB_VPC_DEFAULT, fakes.DB_IGW_DEFAULT, fakes.DB_ADDRESS_1, - fakes.DB_INSTANCE_DEFAULT, tools.update_dict( - fakes.DB_NETWORK_INTERFACE_DEFAULT, - {'instance_id': fakes.ID_EC2_INSTANCE_DEFAULT})) - do_check({'PublicIp': '0.0.0.0', - 'InstanceId': fakes.ID_EC2_INSTANCE_DEFAULT}, - 'AuthFailure') - - @tools.screen_unexpected_exception_logs - def test_associate_address_vpc_rollback(self): - self.set_mock_db_items(fakes.DB_ADDRESS_1, fakes.DB_IGW_1, - fakes.DB_NETWORK_INTERFACE_1, - fakes.DB_NETWORK_INTERFACE_2) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - self.neutron.update_floatingip.side_effect = Exception() - - self.assert_execution_error(self.ANY_EXECUTE_ERROR, 'AssociateAddress', - {'AllocationId': fakes.ID_EC2_ADDRESS_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}) - - self.db_api.update_item.assert_any_call( - mock.ANY, 
fakes.DB_ADDRESS_1) - - # TODO(andrey-mp): api code has to be fixed - # There is no add-floating-ip and remove-floating-ip command in - # python-novaclient. Those command have been removed since 7.0.0 - # version (ocata) and ec2-api has version >9.1.0 since long. - @base.skip_not_implemented - def test_dissassociate_address_ec2_classic(self): - self.set_mock_db_items(fakes.DB_INSTANCE_1) - self.nova.servers.remove_floating_ip.return_value = True - self.neutron.list_floatingips.return_value = ( - {'floatingips': [fakes.OS_FLOATING_IP_1, - fakes.OS_FLOATING_IP_2]}) - self.neutron.list_ports.return_value = ( - {'ports': [fakes.OS_PORT_1, - fakes.OS_PORT_2]}) - - resp = self.execute('DisassociateAddress', - {'PublicIp': fakes.IP_ADDRESS_2}) - self.assertEqual(True, resp['return']) - self.nova.servers.remove_floating_ip.assert_called_once_with( - fakes.ID_OS_INSTANCE_1, - fakes.IP_ADDRESS_2) - - # NOTE(Alex) Disassociate unassociated address in EC2 classic - resp = self.execute('DisassociateAddress', - {'PublicIp': fakes.IP_ADDRESS_1}) - self.assertEqual(True, resp['return']) - self.assertEqual(1, self.nova.servers.remove_floating_ip.call_count) - - def test_dissassociate_address_vpc(self): - self.set_mock_db_items(fakes.DB_ADDRESS_2) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_2}) - - resp = self.execute('DisassociateAddress', - {'AssociationId': fakes.ID_EC2_ASSOCIATION_2}) - self.assertEqual(True, resp['return']) - - self.neutron.update_floatingip.assert_called_once_with( - fakes.ID_OS_FLOATING_IP_2, - {'floatingip': {'port_id': None}}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, - tools.purge_dict(fakes.DB_ADDRESS_2, ['network_interface_id', - 'private_ip_address'])) - self.neutron.update_floatingip.reset_mock() - self.db_api.update_item.reset_mock() - - self.configure(disable_ec2_classic=True) - - resp = self.execute('DisassociateAddress', - {'PublicIp': fakes.IP_ADDRESS_2}) - self.assertEqual(True, 
resp['return']) - - self.neutron.update_floatingip.assert_called_once_with( - fakes.ID_OS_FLOATING_IP_2, - {'floatingip': {'port_id': None}}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, - tools.purge_dict(fakes.DB_ADDRESS_2, ['network_interface_id', - 'private_ip_address'])) - - def test_dissassociate_address_vpc_idempotent(self): - self.set_mock_db_items(fakes.DB_ADDRESS_1) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - - resp = self.execute('DisassociateAddress', - {'AssociationId': fakes.ID_EC2_ASSOCIATION_1}) - self.assertEqual(True, resp['return']) - - self.assertEqual(0, self.neutron.update_floatingip.call_count) - self.assertEqual(0, self.db_api.update_item.call_count) - - def test_disassociate_address_invalid_parameters(self): - - def do_check(params, error): - self.assert_execution_error(error, 'DisassociateAddress', params) - - do_check({}, - 'MissingParameter') - - do_check({'PublicIp': '0.0.0.0', - 'AssociationId': 'eipassoc-0'}, - 'InvalidParameterCombination') - - # NOTE(ft): EC2 Classic public IP does not exists - self.set_mock_db_items() - self.neutron.list_floatingips.return_value = {'floatingips': []} - - self.assert_execution_error('AuthFailure', 'DisassociateAddress', - {'PublicIp': fakes.IP_ADDRESS_2}) - - # NOTE(ft): vpc address vs public ip parameter - self.set_mock_db_items(fakes.DB_ADDRESS_1) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - do_check({'PublicIp': fakes.IP_ADDRESS_1}, - 'InvalidParameterValue') - - # NOTE(ft): not registered address - self.set_mock_db_items() - do_check({'AssociationId': fakes.ID_EC2_ASSOCIATION_1}, - 'InvalidAssociationID.NotFound') - - # NOTE(ft): registered broken vpc address - self.set_mock_db_items(fakes.DB_ADDRESS_2) - self.neutron.show_floatingip.side_effect = neutron_exception.NotFound - do_check({'AssociationId': fakes.ID_EC2_ASSOCIATION_2}, - 'InvalidAssociationID.NotFound') - - # 
NOTE(tikitavi): disassociate to wrong public ip - self.configure(disable_ec2_classic=True) - self.set_mock_db_items() - self.assert_execution_error('AuthFailure', 'DisassociateAddress', - {'PublicIp': fakes.IP_ADDRESS_2}) - - # NOTE(tikitavi): disassociate to unassociated ip - self.set_mock_db_items(fakes.DB_ADDRESS_1) - self.assert_execution_error('InvalidParameterValue', - 'DisassociateAddress', - {'PublicIp': fakes.IP_ADDRESS_1}) - - @tools.screen_unexpected_exception_logs - def test_dissassociate_address_vpc_rollback(self): - self.set_mock_db_items(fakes.DB_ADDRESS_2) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_2}) - self.neutron.update_floatingip.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DisassociateAddress', - {'AssociationId': fakes.ID_EC2_ASSOCIATION_2}) - - self.db_api.update_item.assert_any_call( - mock.ANY, fakes.DB_ADDRESS_2) - - def test_release_address_ec2_classic(self): - self.set_mock_db_items() - self.neutron.delete_floatingip.return_value = True - self.neutron.list_floatingips.return_value = ( - {'floatingips': [fakes.OS_FLOATING_IP_1, - fakes.OS_FLOATING_IP_2]}) - - resp = self.execute('ReleaseAddress', - {'PublicIp': fakes.IP_ADDRESS_1}) - self.assertEqual(True, resp['return']) - - self.neutron.delete_floatingip.assert_called_once_with( - fakes.OS_FLOATING_IP_1['id']) - - def test_release_address_vpc(self): - self.set_mock_db_items(fakes.DB_ADDRESS_1) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - - resp = self.execute('ReleaseAddress', - {'AllocationId': fakes.ID_EC2_ADDRESS_1}) - self.assertEqual(True, resp['return']) - - self.neutron.delete_floatingip.assert_called_once_with( - fakes.ID_OS_FLOATING_IP_1) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, fakes.ID_EC2_ADDRESS_1) - - @mock.patch('ec2api.api.address.AddressEngineNeutron.disassociate_address') - def test_release_address_default_vpc(self, 
disassociate_address): - self.configure(disable_ec2_classic=True) - self.set_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_ADDRESS_DEFAULT, - fakes.DB_NETWORK_INTERFACE_DEFAULT) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_2}) - - resp = self.execute('ReleaseAddress', - {'AllocationId': fakes.ID_EC2_ADDRESS_DEFAULT}) - self.assertEqual(True, resp['return']) - - disassociate_address.assert_called_once_with( - mock.ANY, association_id=fakes.ID_EC2_ASSOCIATION_DEFAULT) - self.neutron.delete_floatingip.assert_called_once_with( - fakes.ID_OS_FLOATING_IP_2) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, fakes.ID_EC2_ADDRESS_DEFAULT) - - def test_release_address_invalid_parameters(self): - - def do_check(params, error): - self.assert_execution_error(error, 'ReleaseAddress', params) - - do_check({}, - 'MissingParameter') - - do_check({'PublicIp': '0.0.0.0', - 'AllocationId': 'eipalloc-0'}, - 'InvalidParameterCombination') - - # NOTE(ft): EC2 Classic public IP is not found - self.neutron.list_floatingips.return_value = {'floatingips': []} - do_check({'PublicIp': fakes.IP_ADDRESS_1}, - 'AuthFailure') - - # NOTE(ft): vpc address vs public ip parameter - self.set_mock_db_items(fakes.DB_ADDRESS_1) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - do_check({'PublicIp': fakes.IP_ADDRESS_1}, - 'InvalidParameterValue') - - # NOTE(ft): not registered address - self.set_mock_db_items() - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1}, - 'InvalidAllocationID.NotFound') - - # NOTE(ft): registered broken vpc address - self.set_mock_db_items(fakes.DB_ADDRESS_1) - self.neutron.show_floatingip.side_effect = neutron_exception.NotFound - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_1}, - 'InvalidAllocationID.NotFound') - self.neutron.show_floatingip.side_effect = None - - # NOTE(ft): address is in use - self.set_mock_db_items(fakes.DB_ADDRESS_2) - self.neutron.show_floatingip.return_value 
= ( - {'floatingip': fakes.OS_FLOATING_IP_2}) - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_2}, - 'InvalidIPAddress.InUse') - - # NOTE(tikitavi): address is in use in not default vpc - self.configure(disable_ec2_classic=True) - self.set_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_VPC_1, - fakes.DB_ADDRESS_2, - fakes.DB_NETWORK_INTERFACE_2) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_2}) - - do_check({'AllocationId': fakes.ID_EC2_ADDRESS_2}, - 'InvalidIPAddress.InUse') - - @tools.screen_unexpected_exception_logs - def test_release_address_vpc_rollback(self): - self.set_mock_db_items(fakes.DB_ADDRESS_1) - self.neutron.show_floatingip.return_value = ( - {'floatingip': fakes.OS_FLOATING_IP_1}) - self.neutron.delete_floatingip.side_effect = Exception() - - self.assert_execution_error(self.ANY_EXECUTE_ERROR, 'ReleaseAddress', - {'AllocationId': fakes.ID_EC2_ADDRESS_1}) - - self.db_api.restore_item.assert_called_once_with( - mock.ANY, 'eipalloc', fakes.DB_ADDRESS_1) - - def test_describe_addresses_vpc(self): - self.neutron.list_floatingips.return_value = ( - {'floatingips': [fakes.OS_FLOATING_IP_1, - fakes.OS_FLOATING_IP_2]}) - self.neutron.list_ports.return_value = ( - {'ports': [fakes.OS_PORT_1, - fakes.OS_PORT_2]}) - self.set_mock_db_items( - fakes.DB_ADDRESS_1, fakes.DB_ADDRESS_2, fakes.DB_INSTANCE_1, - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2) - - resp = self.execute('DescribeAddresses', {}) - self.assertThat(resp['addressesSet'], - matchers.ListMatches([fakes.EC2_ADDRESS_1, - fakes.EC2_ADDRESS_2])) - - self.db_api.get_items_by_ids = tools.CopyingMock( - return_value=[fakes.DB_ADDRESS_1]) - resp = self.execute('DescribeAddresses', - {'AllocationId.1': fakes.ID_EC2_ADDRESS_1}) - self.assertThat(resp['addressesSet'], - matchers.ListMatches([fakes.EC2_ADDRESS_1])) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_ADDRESS_1])) - - self.check_filtering( - 
'DescribeAddresses', 'addressesSet', - [('allocation-id', fakes.ID_EC2_ADDRESS_1), - ('association-id', fakes.ID_EC2_ASSOCIATION_2), - ('domain', 'vpc'), - ('instance-id', fakes.ID_EC2_INSTANCE_1), - ('network-interface-id', fakes.ID_EC2_NETWORK_INTERFACE_2), - ('network-interface-owner-id', fakes.ID_OS_PROJECT), - ('private-ip-address', fakes.IP_NETWORK_INTERFACE_2), - ('public-ip', fakes.IP_ADDRESS_2)]) - - def test_describe_addresses_ec2_classic(self): - self.set_mock_db_items(fakes.DB_INSTANCE_1) - self.neutron.list_floatingips.return_value = ( - {'floatingips': [fakes.OS_FLOATING_IP_1, - fakes.OS_FLOATING_IP_2]}) - self.neutron.list_ports.return_value = ( - {'ports': [fakes.OS_PORT_1, - fakes.OS_PORT_2]}) - resp = self.execute('DescribeAddresses', {}) - self.assertThat(resp['addressesSet'], - matchers.ListMatches([fakes.EC2_ADDRESS_CLASSIC_1, - fakes.EC2_ADDRESS_CLASSIC_2])) - resp = self.execute('DescribeAddresses', {'PublicIp.1': - fakes.IP_ADDRESS_2}) - self.assertThat(resp['addressesSet'], - matchers.ListMatches([fakes.EC2_ADDRESS_CLASSIC_2])) diff --git a/ec2api/tests/unit/test_api_init.py b/ec2api/tests/unit/test_api_init.py deleted file mode 100644 index 61e74a59..00000000 --- a/ec2api/tests/unit/test_api_init.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from botocore import exceptions as botocore_exceptions -from unittest import mock - -from cinderclient import exceptions as cinder_exception -from glanceclient.common import exceptions as glance_exception -from keystoneclient import exceptions as keystone_exception -from neutronclient.common import exceptions as neutron_exception -from novaclient import exceptions as nova_exception -from oslo_context import context - -from ec2api import api -from ec2api.api import apirequest -from ec2api import exception -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes_request_response as fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools -from ec2api import wsgi - - -class ApiInitTestCase(base.BaseTestCase): - - def setUp(self): - super(ApiInitTestCase, self).setUp() - self.controller = self.mock( - 'ec2api.api.cloud.VpcCloudController').return_value - self.fake_context = mock.NonCallableMock( - request_id=context.generate_request_id()) - - ec2_request = apirequest.APIRequest('FakeAction', 'fake_v1', - {'Param': 'fake_param'}) - self.environ = {'REQUEST_METHOD': 'FAKE', - 'ec2.request': ec2_request, - 'ec2api.context': self.fake_context} - self.request = wsgi.Request(self.environ) - self.application = api.Executor() - - def test_execute(self): - self.controller.fake_action.return_value = {'fakeTag': 'fake_data'} - - res = self.request.send(self.application) - - self.assertEqual(200, res.status_code) - self.assertEqual('text/xml', res.content_type) - expected_xml = fakes.XML_RESULT_TEMPLATE % { - 'action': 'FakeAction', - 'api_version': 'fake_v1', - 'request_id': self.fake_context.request_id, - 'data': 'fake_data'} - self.assertThat(res.body.decode("utf-8"), - matchers.XMLMatches(expected_xml)) - self.controller.fake_action.assert_called_once_with(self.fake_context, - param='fake_param') - - def test_execute_error(self): - @tools.screen_all_logs - def do_check(ex, status, code, message): - self.controller.reset_mock() - 
self.controller.fake_action.side_effect = ex - - res = self.request.send(self.application) - - self.assertEqual(status, res.status_code) - self.assertEqual('text/xml', res.content_type) - expected_xml = fakes.XML_ERROR_TEMPLATE % { - 'code': code, - 'message': message, - 'request_id': self.fake_context.request_id} - self.assertThat(res.body.decode("utf-8"), - matchers.XMLMatches(expected_xml)) - self.controller.fake_action.assert_called_once_with( - self.fake_context, param='fake_param') - - do_check(exception.EC2Exception('fake_msg'), - 400, 'EC2Exception', 'fake_msg') - do_check(KeyError('fake_msg'), - 500, 'KeyError', 'Unknown error occurred.') - do_check(exception.InvalidVpcIDNotFound('fake_msg'), - 400, 'InvalidVpcID.NotFound', 'fake_msg') - do_check(nova_exception.BadRequest(400, message='fake_msg'), - 400, 'BadRequest', 'fake_msg') - do_check(glance_exception.HTTPBadRequest(), - 400, 'HTTPBadRequest', 'HTTP HTTPBadRequest') - do_check(cinder_exception.BadRequest(400, message='fake_msg'), - 400, 'BadRequest', 'fake_msg') - do_check(neutron_exception.BadRequest(message='fake_msg'), - 400, 'BadRequest', 'fake_msg') - do_check(keystone_exception.BadRequest(message='fake_msg'), - 400, 'BadRequest', 'fake_msg (HTTP 400)') - do_check(botocore_exceptions.ClientError({'Error': - {'Code': '', 'Message': ''}, - 'Code': 'FakeCode', - 'Message': 'fake_msg'}, - 'register_image'), - 400, 'FakeCode', 'fake_msg') diff --git a/ec2api/tests/unit/test_apirequest.py b/ec2api/tests/unit/test_apirequest.py deleted file mode 100644 index 0d759d48..00000000 --- a/ec2api/tests/unit/test_apirequest.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from lxml import etree -from unittest import mock - -from oslo_context import context -from oslo_utils import timeutils - -from ec2api.api import apirequest -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes_request_response as fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class EC2RequesterTestCase(base.BaseTestCase): - - def setUp(self): - super(EC2RequesterTestCase, self).setUp() - self.controller = self.mock( - 'ec2api.api.cloud.VpcCloudController').return_value - self.fake_context = mock.NonCallableMock( - request_id=context.generate_request_id()) - - def test_invoke_returns_data(self): - self.controller.fake_action.return_value = fakes.DICT_FAKE_RESULT_DATA - - api_request = apirequest.APIRequest('FakeAction', 'fake_v1', - {'Param': 'fake'}) - result = api_request.invoke(self.fake_context) - - self._compare_aws_xml('FakeActionResponse', - 'http://ec2.amazonaws.com/doc/fake_v1/', - self.fake_context.request_id, - fakes.DICT_FAKE_RESULT_DATA, - result) - self.controller.fake_action.assert_called_once_with( - self.fake_context, param='fake') - - def test_invoke_returns_true(self): - self.controller.fake_action.return_value = True - - api_request = apirequest.APIRequest('FakeAction', 'fake_v1', - {'Param': 'fake'}) - result = api_request.invoke(self.fake_context) - - self._compare_aws_xml('FakeActionResponse', - 'http://ec2.amazonaws.com/doc/fake_v1/', - self.fake_context.request_id, - {'return': True}, - result) - self.controller.fake_action.assert_called_once_with( - self.fake_context, 
param='fake') - - def test_invoke_prepare_params(self): - api_request = apirequest.APIRequest('FakeAction', 'fake_v1', - fakes.DOTTED_FAKE_PARAMS) - api_request.invoke(self.fake_context) - - self.controller.fake_action.assert_called_once_with( - self.fake_context, **fakes.DICT_FAKE_PARAMS) - - def _compare_aws_xml(self, root_tag, xmlns, request_id, dict_data, - observed): - # NOTE(ft): we cann't use matchers.XMLMatches since it makes comparison - # based on the order of tags - xml = etree.fromstring(observed) - self.assertEqual(xmlns, xml.nsmap.get(None)) - observed_data = tools.parse_xml(observed) - expected = {root_tag: tools.update_dict( - dict_data, - {'requestId': request_id})} - self.assertThat(observed_data, matchers.DictMatches(expected)) - - def test_render_response_ascii(self): - req = apirequest.APIRequest("FakeAction", "FakeVersion", {}) - resp = { - 'string': 'foo', - 'int': 1, - } - data = req._render_response(resp, 'uuid').decode() - self.assertIn('ꀀabcd޴', data) - - # Tests for individual data element format functions - - def test_return_valid_isoformat(self): - """Ensure that the ec2 api returns datetime in xs:dateTime - - (which apparently isn't datetime.isoformat()) - NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297 - """ - conv = apirequest._database_to_isoformat - # sqlite database representation with microseconds - time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276", - "%Y-%m-%d %H:%M:%S.%f") - self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z') - # mysqlite database representation - time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18", - "%Y-%m-%d %H:%M:%S") - self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z') - - def test_xmlns_version_matches_request_version(self): - self.controller.fake_action.return_value = {} - - api_request = apirequest.APIRequest('FakeAction', '2010-10-30', {}) - result = api_request.invoke(self.fake_context) - - 
self._compare_aws_xml('FakeActionResponse', - 'http://ec2.amazonaws.com/doc/2010-10-30/', - self.fake_context.request_id, - {}, - result) diff --git a/ec2api/tests/unit/test_availability_zone.py b/ec2api/tests/unit/test_availability_zone.py deleted file mode 100644 index 5b17aaa8..00000000 --- a/ec2api/tests/unit/test_availability_zone.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers - - -class AvailabilityZoneCase(base.ApiTestCase): - - def setUp(self): - super(AvailabilityZoneCase, self).setUp() - - def test_describe_availability_zones(self): - self.nova.availability_zones.list.return_value = [ - fakes.NovaAvailabilityZone(fakes.OS_AVAILABILITY_ZONE), - fakes.NovaAvailabilityZone(fakes.OS_AVAILABILITY_ZONE_INTERNAL)] - resp = self.execute('DescribeAvailabilityZones', {}) - self.assertThat(resp['availabilityZoneInfo'], - matchers.ListMatches([fakes.EC2_AVAILABILITY_ZONE])) - self.nova.availability_zones.list.assert_called_once_with( - detailed=False) - - self.check_filtering( - 'DescribeAvailabilityZones', 'availabilityZoneInfo', - [('state', 'available'), - ('zone-name', fakes.NAME_AVAILABILITY_ZONE)]) - - def test_describe_availability_zones_verbose(self): - self.nova.availability_zones.list.return_value = [ - 
fakes.NovaAvailabilityZone(fakes.OS_AVAILABILITY_ZONE), - fakes.NovaAvailabilityZone(fakes.OS_AVAILABILITY_ZONE_INTERNAL)] - resp = self.execute('DescribeAvailabilityZones', - {'zoneName.1': 'verbose'}) - self.assertEqual(len(resp['availabilityZoneInfo']), 7) - self.nova.availability_zones.list.assert_called_once_with() - - def test_regions(self): - resp = self.execute('DescribeRegions', {}) - self.assertEqual(resp['regionInfo'][0]['regionName'], 'RegionOne') - self.assertTrue(resp['regionInfo'][0].get('regionEndpoint') - is not None) - - @mock.patch('ec2api.api.ec2utils.check_and_create_default_vpc') - def test_describe_account_attributes(self, check_and_create): - self.nova.quotas.get.return_value = mock.Mock(instances=77) - - resp = self.execute('DescribeAccountAttributes', {}) - self.assertThat(resp['accountAttributeSet'], - matchers.ListMatches( - [{'attributeName': 'supported-platforms', - 'attributeValueSet': [ - {'attributeValue': 'EC2'}, - {'attributeValue': 'VPC'}]}, - {'attributeName': 'default-vpc', - 'attributeValueSet': [ - {'attributeValue': 'none'}]}, - {'attributeName': 'max-instances', - 'attributeValueSet': [ - {'attributeValue': 77}]}], - orderless_lists=True)) - self.nova.quotas.get.assert_called_once_with( - fakes.ID_OS_PROJECT, fakes.ID_OS_USER) - - self.configure(disable_ec2_classic=True) - check_and_create.return_value = fakes.DB_VPC_DEFAULT - - resp = self.execute('DescribeAccountAttributes', {}) - self.assertThat(resp['accountAttributeSet'], - matchers.ListMatches( - [{'attributeName': 'supported-platforms', - 'attributeValueSet': [ - {'attributeValue': 'VPC'}]}, - {'attributeName': 'default-vpc', - 'attributeValueSet': [ - {'attributeValue': - fakes.ID_EC2_VPC_DEFAULT}]}, - {'attributeName': 'max-instances', - 'attributeValueSet': [ - {'attributeValue': 77}]}], - orderless_lists=True)) - check_and_create.assert_called_once_with(mock.ANY) diff --git a/ec2api/tests/unit/test_clients.py b/ec2api/tests/unit/test_clients.py deleted file mode 
100644 index 6ec34b71..00000000 --- a/ec2api/tests/unit/test_clients.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import importlib -from unittest import mock - -import fixtures - -from ec2api.api import clients -from ec2api.tests.unit import base - - -class ClientsTestCase(base.BaseTestCase): - - def setUp(self): - importlib.reload(clients) - super(ClientsTestCase, self).setUp() - - @mock.patch.object(clients, '_get_nova_api_version', return_value='2.3') - @mock.patch('novaclient.client.Client') - def test_nova(self, nova, get_api_version): - context = mock.NonCallableMock(session=mock.sentinel.session) - - # test normal flow with get_api_version call - res = clients.nova(context) - self.assertEqual(nova.return_value, res) - nova.assert_called_with('2.3', service_type='compute', - session=mock.sentinel.session) - get_api_version.assert_called_once_with(context) - - # test CONF.nova_service_type is used - self.configure(nova_service_type='compute_legacy') - clients.nova(context) - nova.assert_called_with('2.3', service_type='compute_legacy', - session=mock.sentinel.session) - - @mock.patch('novaclient.client.Client') - def test_get_api_version(self, nova): - context = mock.NonCallableMock(session=mock.sentinel.session) - v2 = mock.NonCallableMock() - v2.configure_mock(id='v2', - version='', - links=[{'href': 'http://host:port/path/v2/'}]) - v2_1 = mock.NonCallableMock() - 
v2_1.configure_mock(id='v2.1', - version='2.40', - links=[{'href': 'http://host:port/path/v2.1/'}]) - - # test normal flow - nova.return_value.versions.get_current.return_value = v2_1 - with fixtures.LoggerFixture( - format='[%(levelname)s] %(message)s') as logs: - res = clients._get_nova_api_version(context) - self.assertEqual(clients.REQUIRED_NOVA_API_MICROVERSION, res) - nova.assert_called_with('2.1', service_type='compute', - session=mock.sentinel.session) - nova.return_value.versions.get_current.assert_called_with() - self.assertTrue(logs.output.startswith('[INFO]')) - - # test Nova doesn't supprt required microversion - v2_1.version = '2.2' - with fixtures.LoggerFixture( - format='[%(levelname)s] %(message)s') as logs: - res = clients._get_nova_api_version(context) - self.assertEqual('2.2', res) - self.assertTrue(logs.output.startswith('[WARNING]')) - - # test service type is not v2.1 - nova.return_value.versions.get_current.return_value = v2 - self.configure(nova_service_type='compute_legacy') - with fixtures.LoggerFixture( - format='[%(levelname)s] %(message)s') as logs: - res = clients._get_nova_api_version(context) - self.assertEqual('2', res) - self.assertTrue(logs.output.startswith('[WARNING]')) - self.configure(nova_service_type='compute') - - # test service url is not found in version list - nova.return_value.versions.get_current.return_value = None - with fixtures.LoggerFixture( - format='[%(levelname)s] %(message)s') as logs: - res = clients._get_nova_api_version(context) - self.assertEqual(clients.REQUIRED_NOVA_API_MICROVERSION, res) - self.assertTrue(logs.output.startswith('[WARNING]')) - - @mock.patch('neutronclient.v2_0.client.Client') - def test_neutron(self, neutron): - context = mock.NonCallableMock(session=mock.sentinel.session) - res = clients.neutron(context) - self.assertEqual(neutron.return_value, res) - neutron.assert_called_with(service_type='network', - session=mock.sentinel.session) - - @mock.patch('glanceclient.client.Client') - def 
test_glance(self, glance): - context = mock.NonCallableMock(session=mock.sentinel.session) - res = clients.glance(context) - self.assertEqual(glance.return_value, res) - glance.assert_called_with(version='2', service_type='image', - session=mock.sentinel.session) - - @mock.patch('cinderclient.client.Client') - def test_cinder(self, cinder): - # test normal flow - context = mock.NonCallableMock(session=mock.sentinel.session) - res = clients.cinder(context) - self.assertEqual(cinder.return_value, res) - cinder.assert_called_with('3', service_type='volumev3', - session=mock.sentinel.session) - - @mock.patch('keystoneclient.client.Client') - def test_keystone(self, keystone): - context = mock.NonCallableMock(session=mock.sentinel.session) - res = clients.keystone(context) - self.assertEqual(keystone.return_value, res) - keystone.assert_called_with(auth_url='v3', - session=mock.sentinel.session) diff --git a/ec2api/tests/unit/test_common.py b/ec2api/tests/unit/test_common.py deleted file mode 100644 index e2b87ca0..00000000 --- a/ec2api/tests/unit/test_common.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from unittest import mock - -from oslotest import base as test_base - -from ec2api.api import common - - -class OnCrashCleanerTestCase(test_base.BaseTestCase): - class FakeException(Exception): - pass - - def test_clean(self): - obj = mock.MagicMock() - - def run(): - with common.OnCrashCleaner() as cleaner: - cleaner.addCleanup(obj.fake_clean_method, - 555, 'arg', {'k': 'v'}) - cleaner.addCleanup(obj.fake_clean_method, - 666, 'param', {'key': 'value'}) - raise self.FakeException() - - self.assertRaises(self.FakeException, run) - self.assertEqual([mock.call(666, 'param', {'key': 'value'}), - mock.call(555, 'arg', {'k': 'v'})], - obj.fake_clean_method.mock_calls) - - @mock.patch.object(common, 'LOG') - def test_approve_partially(self, log): - class FakeCrasherClass(object): - call_count = 0 - - def fake_crashed_clean_method(self, *args, **kwargs): - self.call_count += 1 - raise Exception() - - def __call__(self): - raise Exception() - - obj = mock.MagicMock() - cls = FakeCrasherClass() - - with common.OnCrashCleaner() as cleaner: - cleaner.addCleanup(obj.fake_clean_method_25), - cleaner.addCleanup(obj.fake_clean_method) - cleaner.addCleanup(cls.fake_crashed_clean_method) - cleaner.approveChanges() - cleaner.addCleanup(cls) - cleaner.addCleanup(fake_standalone_crashed_clean_method) - cleaner.addCleanup(cls.fake_crashed_clean_method, - 'args', 666, {'key': 'value'}, - s='args', i=666, d={'key': 'value'}) - cleaner.addCleanup(obj.fake_clean_method, 'params') - raise Exception() - - self.assertEqual(1, cls.call_count) - self.assertEqual(3, log.warning.call_count) - self.assertIn('ec2api.tests.unit.test_common.FakeCrasherClass.' - 'fake_crashed_clean_method', - log.warning.mock_calls[0][1][0]) - for arg in ["'args'", "666", "{'key': 'value'}", - "s='args'", "i=666", "d={'key': 'value'}"]: - self.assertIn(arg, log.warning.mock_calls[0][1][0]) - self.assertIn('ec2api.tests.unit.test_common.' 
- 'fake_standalone_crashed_clean_method', - log.warning.mock_calls[1][1][0]) - self.assertIn('ec2api.tests.unit.test_common.FakeCrasherClass', - log.warning.mock_calls[2][1][0]) - obj.fake_clean_method.assert_called_once_with('params') - self.assertFalse(obj.fake_clean_method_25.called) - - def test_normal_flow(self): - obj = mock.MagicMock() - - with common.OnCrashCleaner() as cleaner: - cleaner.addCleanup(obj.fake_clean_method), - cleaner.addCleanup(obj.fake_clean_method_25), - - self.assertFalse(obj.fake_clean_method.called) - self.assertFalse(obj.fake_clean_method_25.called) - - def test_filter(self): - obj = common.UniversalDescriber() - obj.FILTER_MAP = {'prop1': 'prop-1', 'prop2': 'prop-2'} - - res = obj.filtered_out( - {'prop-1': 'val-0', 'prop-2': 'val-123'}, - [{'name': 'prop1', 'value': ['val-0']}]) - self.assertFalse(res) - - res = obj.filtered_out( - {'prop-1': 'val-0', 'prop-2': 'val-123'}, - [{'name': 'prop1', 'value': ['val-0', '0-val']}]) - self.assertFalse(res) - - res = obj.filtered_out( - {'prop-1': 'val-0', 'prop-2': 'val-123'}, - [{'name': 'prop1', 'value': ['0-val', 'val-0']}]) - self.assertFalse(res) - - res = obj.filtered_out( - {'prop-1': 'val-0', 'prop-2': 'val-123'}, - [{'name': 'prop1', 'value': ['0-val']}]) - self.assertTrue(res) - - res = obj.filtered_out( - {'prop-1': 'val-0', 'prop-2': 'val-123'}, - [{'name': 'prop1', 'value': ['val-0']}, - {'name': 'prop2', 'value': ['val-123']}]) - self.assertFalse(res) - - res = obj.filtered_out( - {'prop-1': 'val-0', 'prop-2': 'val-123'}, - [{'name': 'prop1', 'value': ['val-0']}, - {'name': 'prop2', 'value': ['123-val']}]) - self.assertTrue(res) - - res = obj.filtered_out( - {'prop-1': 'val-0', 'prop-2': 'val-123'}, - [{'name': 'prop1', 'value': ['0-val']}, - {'name': 'prop2', 'value': ['val-123']}]) - self.assertTrue(res) - - -def fake_standalone_crashed_clean_method(): - raise Exception() diff --git a/ec2api/tests/unit/test_context.py b/ec2api/tests/unit/test_context.py deleted file mode 
100644 index 942eb67b..00000000 --- a/ec2api/tests/unit/test_context.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import importlib -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture as config_fixture -from oslo_context import context -from oslotest import base as test_base - -from ec2api import clients -from ec2api import context as ec2_context - - -GROUP_AUTHTOKEN = 'keystone_authtoken' - - -class ContextTestCase(test_base.BaseTestCase): - - @mock.patch('keystoneauth1.loading.load_auth_from_conf_options') - @mock.patch('keystoneauth1.loading.load_session_from_conf_options') - def test_get_os_admin_context(self, session, auth): - conf = config_fixture.Config() - clients._admin_session = None - conf.config(auth_type='fake', group=GROUP_AUTHTOKEN) - - importlib.reload(ec2_context) - # NOTE(ft): initialize a regular context to populate oslo_context's - # local storage to prevent admin context to populate it. 
- # Used to implicitly validate overwrite=False argument of the call - # RequestContext constructor from inside get_os_admin_context - if not context.get_current(): - ec2_context.RequestContext(None, None) - - ctx = ec2_context.get_os_admin_context() - conf = cfg.CONF - auth.assert_called_once_with(conf, GROUP_AUTHTOKEN) - auth_plugin = auth.return_value - session.assert_called_once_with(conf, GROUP_AUTHTOKEN, - auth=auth_plugin) - self.assertIsNone(ctx.user_id) - self.assertIsNone(ctx.project_id) - self.assertIsNone(ctx.auth_token) - self.assertEqual([], ctx.service_catalog) - self.assertTrue(ctx.is_os_admin) - self.assertIsNotNone(ctx.session) - self.assertIsNotNone(ctx.session.auth) - self.assertNotEqual(context.get_current(), ctx) - - session.reset_mock() - ec2_context.get_os_admin_context() - self.assertFalse(session.called) diff --git a/ec2api/tests/unit/test_customer_gateway.py b/ec2api/tests/unit/test_customer_gateway.py deleted file mode 100644 index f7c5e705..00000000 --- a/ec2api/tests/unit/test_customer_gateway.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from unittest import mock - -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class CustomerGatewayTestCase(base.ApiTestCase): - - def test_create_customer_gateway(self): - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_CUSTOMER_GATEWAY_2)) - - resp = self.execute('CreateCustomerGateway', - {'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_2, - 'Type': 'ipsec.1'}) - self.assertEqual({'customerGateway': fakes.EC2_CUSTOMER_GATEWAY_2}, - resp) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'cgw', - {'ip_address': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_2}) - - resp = self.execute('CreateCustomerGateway', - {'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_2, - 'Type': 'ipsec.1', - 'BgpAsn': '65000'}) - self.assertEqual({'customerGateway': fakes.EC2_CUSTOMER_GATEWAY_2}, - resp) - - def test_create_customer_gateway_idempotent(self): - self.set_mock_db_items(fakes.DB_CUSTOMER_GATEWAY_1) - - resp = self.execute('CreateCustomerGateway', - {'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_1, - 'Type': 'ipsec.1'}) - self.assertEqual({'customerGateway': fakes.EC2_CUSTOMER_GATEWAY_1}, - resp) - self.assertFalse(self.db_api.add_item.called) - - resp = self.execute('CreateCustomerGateway', - {'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_1, - 'Type': 'ipsec.1', - 'BgpAsn': '65000'}) - self.assertEqual({'customerGateway': fakes.EC2_CUSTOMER_GATEWAY_1}, - resp) - self.assertFalse(self.db_api.add_item.called) - - def test_create_customer_gateway_invalid_parameters(self): - self.assert_execution_error( - 'Unsupported', - 'CreateCustomerGateway', - {'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_1, - 'Type': 'ipsec.1', - 'BgpAsn': '456'}) - - def test_delete_customer_gateway(self): - self.set_mock_db_items(fakes.DB_CUSTOMER_GATEWAY_2) - - resp = self.execute( - 'DeleteCustomerGateway', - {'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_2}) 
- - self.assertEqual({'return': True}, resp) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, fakes.ID_EC2_CUSTOMER_GATEWAY_2) - - def test_delete_customer_gateway_invalid_parameters(self): - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidCustomerGatewayID.NotFound', - 'DeleteCustomerGateway', - {'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_2}) - self.assertFalse(self.db_api.delete_item.called) - - self.set_mock_db_items(fakes.DB_CUSTOMER_GATEWAY_1, - fakes.DB_VPN_CONNECTION_1) - self.assert_execution_error( - 'IncorrectState', - 'DeleteCustomerGateway', - {'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1}) - self.assertFalse(self.db_api.delete_item.called) - - def test_describe_customer_gateways(self): - self.set_mock_db_items(fakes.DB_CUSTOMER_GATEWAY_1, - fakes.DB_CUSTOMER_GATEWAY_2) - - resp = self.execute('DescribeCustomerGateways', {}) - self.assertThat(resp['customerGatewaySet'], - matchers.ListMatches([fakes.EC2_CUSTOMER_GATEWAY_1, - fakes.EC2_CUSTOMER_GATEWAY_2])) - - resp = self.execute( - 'DescribeCustomerGateways', - {'CustomerGatewayId.1': fakes.ID_EC2_CUSTOMER_GATEWAY_2}) - self.assertThat( - resp['customerGatewaySet'], - matchers.ListMatches([fakes.EC2_CUSTOMER_GATEWAY_2])) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_CUSTOMER_GATEWAY_2])) - - self.check_filtering( - 'DescribeCustomerGateways', 'customerGatewaySet', - [('bgp-asn', 65000), - ('customer-gateway-id', fakes.ID_EC2_CUSTOMER_GATEWAY_2), - ('ip-address', fakes.IP_CUSTOMER_GATEWAY_ADDRESS_2), - ('state', 'available'), - ('type', 'ipsec.1')]) - self.check_tag_support( - 'DescribeCustomerGateways', 'customerGatewaySet', - fakes.ID_EC2_CUSTOMER_GATEWAY_2, 'customerGatewayId') diff --git a/ec2api/tests/unit/test_db_api.py b/ec2api/tests/unit/test_db_api.py deleted file mode 100644 index 314a78fa..00000000 --- a/ec2api/tests/unit/test_db_api.py +++ /dev/null @@ -1,459 +0,0 @@ -# Copyright 2014 -# The Cloudscaling 
Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from sqlalchemy.orm import exc as orm_exception -from unittest import mock - -from ec2api.api import validator -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers - - -class DbApiTestCase(base.DbTestCase): - - def setUp(self): - super(DbApiTestCase, self).setUp() - self.context = mock.NonCallableMock( - project_id=fakes.random_os_id()) - self.other_context = mock.NonCallableMock( - project_id=fakes.random_os_id()) - - def test_add_item(self): - new_item = {'os_id': fakes.random_os_id(), - 'vpc_id': fakes.random_ec2_id('fake_vpc'), - 'str_attr': 'fake_str', - 'int_attr': 1234, - 'bool_attr': True, - 'dict_attr': {'key1': 'val1', - 'key2': 'val2'}, - 'list_attr': ['fake_str', 1234, True, {'key': 'val'}, []]} - item = db_api.add_item(self.context, 'fake', new_item) - self.assertIn('id', item) - self.assertIsNotNone(item['id']) - item_id = item.pop('id') - self.assertTrue(validator.validate_ec2_id(item_id, '', ['fake'])) - self.assertThat(item, matchers.DictMatches(new_item, - orderless_lists=True)) - - item = db_api.get_item_by_id(self.context, item_id) - new_item['id'] = item_id - self.assertThat(item, matchers.DictMatches(new_item, - orderless_lists=True)) - - def test_add_item_defaults(self): - def do_check(new_item): - item = db_api.add_item(self.context, 'fake', new_item) - item_id 
= item.pop('id') - if 'id' in new_item: - new_item_id = new_item.pop('id') - self.assertNotEqual(new_item_id, item_id) - new_item.setdefault('os_id', None) - new_item.setdefault('vpc_id', None) - self.assertThat(item, matchers.DictMatches(new_item, - orderless_lists=True)) - - do_check({}) - do_check({'os_id': fakes.random_os_id()}) - do_check({'vpc_id': fakes.random_ec2_id('fake_vpc')}) - do_check({'id': fakes.random_ec2_id('fake')}) - - def test_add_item_with_same_os_id(self): - # NOTE(ft): check normal update item on add - os_id = fakes.random_os_id() - item1 = db_api.add_item(self.context, 'fake', - {'os_id': os_id, - 'key': 'val1', - 'key1': 'val'}) - item_id = item1['id'] - item2 = db_api.add_item(self.context, 'fake', - {'os_id': os_id, - 'key': 'val2', - 'key2': 'val'}) - expected_item = {'id': item_id, - 'os_id': os_id, - 'vpc_id': None, - 'key': 'val2', - 'key1': 'val', - 'key2': 'val'} - self.assertThat(item2, matchers.DictMatches(expected_item)) - - def test_add_item_isolation(self): - os_id = fakes.random_os_id() - db_api.add_item(self.context, 'fake', {'os_id': os_id}) - self.assertRaises( - orm_exception.NoResultFound, - db_api.add_item, self.context, 'fake1', {'os_id': os_id}) - self.assertRaises( - orm_exception.NoResultFound, - db_api.add_item, self.other_context, 'fake', {'os_id': os_id}) - - def test_add_item_id(self): - os_id = fakes.random_os_id() - item_id = db_api.add_item_id(self.context, 'fake', os_id) - self.assertTrue(validator.validate_ec2_id(item_id, '', ['fake'])) - item = db_api.get_item_by_id(self.context, item_id) - self.assertIsNone(item) - item = db_api.add_item(self.context, 'fake', {'os_id': os_id}) - self.assertThat(item, matchers.DictMatches({'id': item_id, - 'os_id': os_id, - 'vpc_id': None})) - # NOTE(ft): add os_id when item exists - item_id = db_api.add_item_id(self.context, 'fake', os_id) - self.assertEqual(item_id, item['id']) - - # NOTE(ft): add os_id when id exists - os_id = fakes.random_os_id() - item_id1 = 
db_api.add_item_id(self.context, 'fake', os_id) - item_id2 = db_api.add_item_id(self.context, 'fake', os_id) - self.assertEqual(item_id1, item_id2) - - def test_restore_item(self): - os_id = fakes.random_os_id() - item = {'os_id': os_id, 'key': 'val1'} - new_item = db_api.add_item(self.context, 'fake', item) - item['id'] = new_item['id'] - self.assertRaises( - exception.EC2DBDuplicateEntry, - db_api.restore_item, self.context, 'fake', item) - - def test_update_item(self): - item = db_api.add_item(self.context, 'fake', {'key': 'val1', - 'key1': 'val'}) - item['key'] = 'val2' - item.pop('key1') - item['key2'] = 'val' - item_id = item['id'] - db_api.update_item(self.context, item) - item = db_api.get_item_by_id(self.context, item_id) - self.assertThat(item, matchers.DictMatches({'id': item_id, - 'os_id': None, - 'vpc_id': None, - 'key': 'val2', - 'key2': 'val'})) - - def test_update_item_invalid(self): - self.assertRaises(orm_exception.NoResultFound, - db_api.update_item, - self.context, - {'id': fakes.random_ec2_id('fake'), - 'key': 'val'}) - - def test_update_item_os_id(self): - item = db_api.add_item(self.context, 'fake', {}) - item['os_id'] = 'fake_os_id' - db_api.update_item(self.context, item) - item = db_api.get_item_by_id(self.context, item['id']) - self.assertThat({'os_id': 'fake_os_id'}, - matchers.IsSubDictOf(item)) - item['os_id'] = 'other_fake_os_id' - self.assertRaises(exception.EC2DBInvalidOsIdUpdate, - db_api.update_item, - self.context, item) - item['os_id'] = None - self.assertRaises(exception.EC2DBInvalidOsIdUpdate, - db_api.update_item, - self.context, item) - - def test_delete_item(self): - item = db_api.add_item(self.context, 'fake', {}) - db_api.delete_item(self.context, item['id']) - item = db_api.get_item_by_id(self.context, item['id']) - self.assertIsNone(item) - - # NOTE(ft): delete not existing item should pass quitely - db_api.delete_item(self.context, fakes.random_ec2_id('fake')) - - item = db_api.add_item(self.context, 'fake', {}) - 
db_api.delete_item(self.other_context, item['id']) - item = db_api.get_item_by_id(self.context, item['id']) - self.assertIsNotNone(item) - - def _setup_items(self): - db_api.add_item(self.context, 'fake', {}) - db_api.add_item(self.context, 'fake', {'is_public': True}) - db_api.add_item(self.context, 'fake1', {'os_id': fakes.random_os_id()}) - db_api.add_item(self.other_context, 'fake', {}) - db_api.add_item(self.other_context, 'fake', {'is_public': False}) - db_api.add_item(self.other_context, 'fake', {'is_public': True}) - db_api.add_item(self.other_context, 'fake1', - {'is_public': False, - 'os_id': fakes.random_os_id()}) - - def test_get_items(self): - self._setup_items() - - items = db_api.get_items(self.context, 'fake') - self.assertEqual(2, len(items)) - items = db_api.get_items(self.context, 'fake0') - self.assertEqual(0, len(items)) - - def test_get_item_by_id(self): - self._setup_items() - item_id = db_api.get_items(self.context, 'fake')[0]['id'] - other_item_id = db_api.get_items(self.other_context, 'fake')[0]['id'] - - item = db_api.get_item_by_id(self.context, item_id) - self.assertThat(item, matchers.DictMatches({'id': item_id, - 'os_id': None, - 'vpc_id': None})) - item = db_api.get_item_by_id(self.context, other_item_id) - self.assertIsNone(item) - item = db_api.get_item_by_id(self.context, fakes.random_ec2_id('fake')) - self.assertIsNone(item) - - def test_get_items_by_ids(self): - self._setup_items() - fake_kind_items = db_api.get_items(self.context, 'fake') - fake1_kind_items = db_api.get_items(self.context, 'fake1') - item_id = fake_kind_items[0]['id'] - other_item_id = db_api.get_items(self.other_context, 'fake')[0]['id'] - - items = db_api.get_items_by_ids(self.context, []) - self.assertEqual(0, len(items)) - items = db_api.get_items_by_ids(self.context, set([])) - self.assertEqual(0, len(items)) - items = db_api.get_items_by_ids(self.context, - [i['id'] for i in fake_kind_items]) - self.assertEqual(2, len(items)) - items = 
db_api.get_items_by_ids( - self.context, (fake_kind_items[0]['id'], - fake1_kind_items[0]['id'])) - self.assertEqual(2, len(items)) - items = db_api.get_items_by_ids(self.context, (item_id,)) - self.assertEqual(1, len(items)) - self.assertEqual(item_id, items[0]['id']) - items = db_api.get_items_by_ids(self.context, (other_item_id,)) - self.assertEqual(0, len(items)) - items = db_api.get_items_by_ids(self.context, - (item_id, other_item_id)) - self.assertEqual(1, len(items)) - items = db_api.get_items_by_ids(self.context, - (fakes.random_ec2_id('fake'),)) - self.assertEqual(0, len(items)) - items = db_api.get_items_by_ids(self.context, - (item_id, fakes.random_ec2_id('fake'))) - self.assertEqual(1, len(items)) - - def test_get_items_ids(self): - self._setup_items() - item = db_api.get_items(self.context, 'fake1')[0] - other_item = db_api.get_items(self.other_context, 'fake1')[0] - items_ids = db_api.get_items_ids(self.context, 'fake1', - item_os_ids=[item['os_id'], - other_item['os_id']]) - self.assertThat(items_ids, - matchers.ListMatches( - [(item['id'], item['os_id']), - (other_item['id'], other_item['os_id'])], - orderless_lists=True)) - items_ids = db_api.get_items_ids(self.context, 'fake', - item_os_ids=[item['os_id']]) - self.assertEqual(0, len(items_ids)) - - item_ids = db_api.get_items_ids(self.context, 'fake1', - item_ids=[item['id'], - other_item['id']]) - self.assertThat(item_ids, - matchers.ListMatches( - [(item['id'], item['os_id']), - (other_item['id'], other_item['os_id'])], - orderless_lists=True)) - items_ids = db_api.get_items_ids(self.context, 'fake', - item_ids=[item['id']]) - self.assertEqual(0, len(items_ids)) - - def test_get_public_items(self): - self._setup_items() - items = db_api.get_public_items(self.context, 'fake') - self.assertEqual(2, len(items)) - public_item_ids = [i['id'] for i in items] - - items = db_api.get_public_items(self.context, 'fake', public_item_ids) - self.assertEqual(2, len(items)) - items = 
db_api.get_public_items(self.context, 'fake', - [public_item_ids[0]]) - self.assertEqual(1, len(items)) - items = db_api.get_public_items(self.context, 'fake', - (public_item_ids[1],)) - self.assertEqual(1, len(items)) - items = db_api.get_public_items(self.context, 'fake1', - [public_item_ids[0]]) - self.assertEqual(0, len(items)) - items = db_api.get_public_items(self.context, 'fake', - [fakes.random_ec2_id('fake')]) - self.assertEqual(0, len(items)) - items = db_api.get_public_items(self.context, 'fake0', []) - self.assertEqual(0, len(items)) - - def test_add_tags(self): - item1_id = fakes.random_ec2_id('fake') - item2_id = fakes.random_ec2_id('fake') - item3_id = fakes.random_ec2_id('fake') - tag1_01 = {'item_id': item1_id, - 'key': 'key1', - 'value': None} - tag1_1 = {'item_id': item1_id, - 'key': 'key1', - 'value': 'val'} - tag1_2 = {'item_id': item1_id, - 'key': 'key2', - 'value': 'val'} - tag1_3 = {'item_id': item1_id, - 'key': 'key3', - 'value': 'val'} - tag2_1 = {'item_id': item2_id, - 'key': 'key1', - 'value': None} - tag2_2 = {'item_id': item2_id, - 'key': 'key2', - 'value': 'val'} - tag3_1 = {'item_id': item3_id, - 'key': 'key1', - 'value': 'val'} - tag3_3 = {'item_id': item3_id, - 'key': 'key3', - 'value': 'val'} - db_api.add_tags(self.context, [tag1_01, tag2_1, - tag1_2, tag2_2]) - db_api.add_tags(self.context, [tag1_1, tag3_1, - tag1_3, tag3_3]) - tags = db_api.get_tags(self.context) - self.assertThat(tags, - matchers.ListMatches([tag1_1, tag1_2, tag1_3, - tag2_1, tag2_2, - tag3_1, tag3_3], - orderless_lists=True)) - - def test_add_tags_isolation(self): - item_id = fakes.random_ec2_id('fake') - tag1 = {'item_id': item_id, - 'key': 'key1', - 'value': 'val1'} - tag2 = {'item_id': item_id, - 'key': 'key2', - 'value': 'val2'} - db_api.add_tags(self.context, [tag1, tag2]) - db_api.add_tags(self.other_context, [{'item_id': item_id, - 'key': 'key1', - 'value': 'val1_1'}, - {'item_id': item_id, - 'key': 'key3', - 'value': 'val3'}]) - tags = 
db_api.get_tags(self.context) - self.assertThat(tags, matchers.ListMatches([tag1, tag2], - orderless_lists=True)) - - def test_get_tags(self): - item1_id = fakes.random_ec2_id('fake') - item2_id = fakes.random_ec2_id('fake') - item3_id = fakes.random_ec2_id('fake1') - tag1 = {'item_id': item1_id, - 'key': 'key1', - 'value': 'val1'} - tag2 = {'item_id': item2_id, - 'key': 'key2', - 'value': 'val2'} - tag3 = {'item_id': item3_id, - 'key': 'key3', - 'value': 'val3'} - db_api.add_tags(self.context, [tag1, tag2, tag3]) - - self.assertThat(db_api.get_tags(self.context), - matchers.ListMatches([tag1, tag2, tag3], - orderless_lists=True)) - self.assertThat(db_api.get_tags(self.context, ('fake',)), - matchers.ListMatches([tag1, tag2], - orderless_lists=True)) - self.assertThat(db_api.get_tags(self.context, ('fake',), - [item1_id, item2_id]), - matchers.ListMatches([tag1, tag2], - orderless_lists=True)) - self.assertThat(db_api.get_tags(self.context, ('fake',), (item1_id,)), - matchers.ListMatches([tag1], - orderless_lists=True)) - self.assertThat(db_api.get_tags(self.context, ('fake',), (item3_id,)), - matchers.ListMatches([])) - self.assertThat(db_api.get_tags(self.context, - item_ids=(item1_id, item3_id)), - matchers.ListMatches([tag1, tag3], - orderless_lists=True)) - self.assertThat(db_api.get_tags(self.context, ('fake', 'fake1'), - (item2_id, item3_id)), - matchers.ListMatches([tag2, tag3], - orderless_lists=True)) - - def test_delete_tags(self): - item1_id = fakes.random_ec2_id('fake') - item2_id = fakes.random_ec2_id('fake') - item3_id = fakes.random_ec2_id('fake1') - tag1_1 = {'item_id': item1_id, - 'key': 'key1', - 'value': 'val_a'} - tag1_2 = {'item_id': item1_id, - 'key': 'key2', - 'value': 'val_b'} - tag2_1 = {'item_id': item2_id, - 'key': 'key1', - 'value': 'val_c'} - tag2_2 = {'item_id': item2_id, - 'key': 'key2', - 'value': 'val_a'} - tag3_1 = {'item_id': item3_id, - 'key': 'key1', - 'value': 'val_b'} - tag3_2 = {'item_id': item3_id, - 'key': 'key2', - 
'value': 'val_d'} - db_api.add_tags(self.context, [tag1_1, tag2_1, tag3_1, - tag1_2, tag2_2, tag3_2]) - - def do_check(*tag_list): - self.assertThat(db_api.get_tags(self.context), - matchers.ListMatches(tag_list, - orderless_lists=True)) - db_api.add_tags(self.context, [tag1_1, tag2_1, tag3_1, - tag1_2, tag2_2, tag3_2]) - - db_api.delete_tags(self.context, []) - do_check(tag1_1, tag1_2, tag2_1, tag2_2, tag3_1, tag3_2) - - db_api.delete_tags(self.context, [item1_id]) - do_check(tag2_1, tag2_2, tag3_1, tag3_2) - - db_api.delete_tags(self.context, [item1_id, item3_id]) - do_check(tag2_1, tag2_2) - - db_api.delete_tags(self.context, [item1_id, item2_id, item3_id], - [{'key': 'key1'}, - {'value': 'val_d'}, - {'key': 'key2', - 'value': 'val_b'}]) - do_check(tag2_2) - - def test_delete_tags_isolation(self): - item_id = fakes.random_ec2_id('fake') - tag1 = {'item_id': item_id, - 'key': 'key', - 'value': 'val1'} - db_api.add_tags(self.context, [tag1]) - tag2 = {'item_id': item_id, - 'key': 'key', - 'value': 'val2'} - db_api.add_tags(self.other_context, [tag2]) - db_api.delete_tags(self.context, [item_id]) - self.assertThat(db_api.get_tags(self.other_context), - matchers.ListMatches([tag2])) diff --git a/ec2api/tests/unit/test_dhcp_options.py b/ec2api/tests/unit/test_dhcp_options.py deleted file mode 100644 index 7518dc54..00000000 --- a/ec2api/tests/unit/test_dhcp_options.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from oslo_config import cfg - -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class DhcpOptionsTestCase(base.ApiTestCase): - - def test_create_dhcp_options(self): - - def gen_opt(count, value): - return 'DhcpConfiguration.' + str(count) + '.' + value - - def gen_ec2_param_dhcp_options(dhcp_options): - dhcp_configuration = dhcp_options['dhcpConfigurationSet'] - result_param = {} - opt_count = 0 - for opt in dhcp_configuration: - opt_count += 1 - result_param[gen_opt(opt_count, 'Key')] = opt['key'] - value_count = 0 - for value in opt['valueSet']: - value_count += 1 - result_param[gen_opt(opt_count, - 'Value.' + str(value_count))] = ( - str(value['value'])) - return result_param - - def check(ec2_fake, db_fake): - self.db_api.add_item.return_value = db_fake - resp = self.execute( - 'CreateDhcpOptions', - gen_ec2_param_dhcp_options(ec2_fake)) - self.assertThat(ec2_fake, matchers.DictMatches( - resp['dhcpOptions'], orderless_lists=True)) - self.assert_any_call(self.db_api.add_item, - mock.ANY, 'dopt', - tools.purge_dict(db_fake, ('id',))) - self.db_api.reset_mock() - - check(fakes.EC2_DHCP_OPTIONS_1, fakes.DB_DHCP_OPTIONS_1) - check(fakes.EC2_DHCP_OPTIONS_2, fakes.DB_DHCP_OPTIONS_2) - - def test_create_dhcp_options_invalid_parameters(self): - self.assert_execution_error( - 'InvalidParameterValue', 'CreateDhcpOptions', - {'DhcpConfiguration.1.Key': 'InvalidParameter', - 'DhcpConfiguration.1.Value.1': 'Value'}) - - def test_delete_dhcp_options(self): - self.set_mock_db_items(fakes.DB_DHCP_OPTIONS_1) - resp = self.execute('DeleteDhcpOptions', - {'dhcpOptionsId': fakes.ID_EC2_DHCP_OPTIONS_1}) - self.assertEqual(True, resp['return']) - self.db_api.get_item_by_id.assert_any_call( - mock.ANY, - fakes.ID_EC2_DHCP_OPTIONS_1) - 
self.db_api.get_items.assert_any_call( - mock.ANY, - 'vpc') - self.db_api.delete_item.assert_called_once_with( - mock.ANY, - fakes.ID_EC2_DHCP_OPTIONS_1) - - def test_delete_dhcp_options_with_dependencies(self): - self.set_mock_db_items( - fakes.DB_DHCP_OPTIONS_1, - tools.update_dict( - fakes.DB_VPC_1, - {'dhcp_options_id': fakes.ID_EC2_DHCP_OPTIONS_1})) - self.assert_execution_error( - 'DependencyViolation', 'DeleteDhcpOptions', - {'dhcpOptionsId': fakes.ID_EC2_DHCP_OPTIONS_1}) - - def test_describe_dhcp_options(self): - self.set_mock_db_items(fakes.DB_DHCP_OPTIONS_1, - fakes.DB_DHCP_OPTIONS_2) - - resp = self.execute('DescribeDhcpOptions', {}) - self.assertThat(resp['dhcpOptionsSet'], - matchers.ListMatches([fakes.EC2_DHCP_OPTIONS_1, - fakes.EC2_DHCP_OPTIONS_2], - orderless_lists=True)) - - resp = self.execute('DescribeDhcpOptions', - {'DhcpOptionsId.1': fakes.ID_EC2_DHCP_OPTIONS_1}) - self.assertThat(resp['dhcpOptionsSet'], - matchers.ListMatches([fakes.EC2_DHCP_OPTIONS_1], - orderless_lists=True)) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_DHCP_OPTIONS_1])) - - self.check_filtering( - 'DescribeDhcpOptions', 'dhcpOptionsSet', - [('dhcp_options_id', fakes.ID_EC2_DHCP_OPTIONS_1), - ('key', 'netbios-node-type'), - ('value', '8.8.8.8')]) - self.check_tag_support( - 'DescribeDhcpOptions', 'dhcpOptionsSet', - fakes.ID_EC2_DHCP_OPTIONS_1, 'dhcpOptionsId') - - def test_associate_dhcp_options(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_DHCP_OPTIONS_1, - fakes.DB_NETWORK_INTERFACE_1) - self.neutron.list_ports.return_value = ( - {'ports': [fakes.OS_PORT_1, fakes.OS_PORT_2]}) - - def check(ec2_dhcp_options_id, db_dhcp_options_id, os_dhcp_options): - resp = self.execute('AssociateDhcpOptions', - {'dhcpOptionsId': ec2_dhcp_options_id, - 'vpcId': fakes.ID_EC2_VPC_1}) - self.assertEqual(True, resp['return']) - self.db_api.update_item.assert_any_call( - mock.ANY, - tools.update_dict( - fakes.DB_VPC_1, - {'dhcp_options_id': 
db_dhcp_options_id})) - self.assert_any_call( - self.neutron.update_port, - fakes.ID_OS_PORT_1, - {'port': self._effective_os_dhcp_options(os_dhcp_options)}) - - check(fakes.ID_EC2_DHCP_OPTIONS_1, fakes.ID_EC2_DHCP_OPTIONS_1, - fakes.OS_DHCP_OPTIONS_1) - - check('default', None, {'extra_dhcp_opts': []}) - - @tools.screen_unexpected_exception_logs - def test_associate_dhcp_options_rollback(self): - vpc = tools.update_dict( - fakes.DB_VPC_1, - {'dhcp_options_id': fakes.ID_EC2_DHCP_OPTIONS_1}) - self.set_mock_db_items( - vpc, fakes.DB_DHCP_OPTIONS_1, fakes.DB_DHCP_OPTIONS_2, - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2) - self.neutron.list_ports.return_value = ( - {'ports': [fakes.OS_PORT_1, fakes.OS_PORT_2]}) - - def update_port_func(port_id, _port_data): - if port_id == fakes.ID_OS_PORT_2: - raise Exception() - - self.neutron.update_port.side_effect = update_port_func - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'AssociateDhcpOptions', - {'dhcpOptionsId': fakes.ID_EC2_DHCP_OPTIONS_2, - 'vpcId': fakes.ID_EC2_VPC_1}) - - self.assert_any_call(self.neutron.update_port, - fakes.ID_OS_PORT_1, - {'port': fakes.OS_DHCP_OPTIONS_1}) - self.db_api.update_item.assert_any_call( - mock.ANY, vpc) - - def _effective_os_dhcp_options(self, os_dhcp_options): - CONF = cfg.CONF - dhcp_opts = { - 'extra_dhcp_opts': [{'opt_name': 'mtu', - 'opt_value': str(CONF.network_device_mtu)}]} - dhcp_opts['extra_dhcp_opts'].extend( - os_dhcp_options.get('extra_dhcp_opts', [])) - return dhcp_opts diff --git a/ec2api/tests/unit/test_ec2_validate.py b/ec2api/tests/unit/test_ec2_validate.py deleted file mode 100644 index a550b18a..00000000 --- a/ec2api/tests/unit/test_ec2_validate.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright 2012 Cloudscaling, Inc. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from oslo_utils import timeutils -import testtools - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api import exception -from ec2api.tests.unit import tools - - -class EC2ValidationTestCase(testtools.TestCase): - """Test case for various validations.""" - - def test_validate_net(self): - validator = common.Validator() - validator.ip('10.10.0.0') - validator.cidr('10.10.0.0/24') - validator.subnet_cidr('10.10.0.0/24') - validator.vpc_cidr('10.10.0.0/24') - - def check_raise_invalid_parameter(cidr): - self.assertRaises(exception.InvalidParameterValue, - validator.cidr, cidr) - - check_raise_invalid_parameter('fake') - check_raise_invalid_parameter('10.10/24') - check_raise_invalid_parameter('10.10.0.0.0/24') - check_raise_invalid_parameter('10.10.0.0') - check_raise_invalid_parameter(' 10.10.0.0/24') - check_raise_invalid_parameter('10.10.0.0/24 ') - check_raise_invalid_parameter('.10.10.0.0/24 ') - check_raise_invalid_parameter('-1.10.0.0/24') - check_raise_invalid_parameter('10.256.0.0/24') - check_raise_invalid_parameter('10.10.0.0/33') - check_raise_invalid_parameter('10.10.0.0/-1') - - self.assertRaises(exception.InvalidParameterValue, - validator.ip, '10.256.0.0') - self.assertRaises(exception.InvalidSubnetRange, - validator.subnet_cidr, '10.10.0.0/15') - self.assertRaises(exception.InvalidVpcRange, - validator.vpc_cidr, '10.10.0.0/29') - - def test_validate_id(self): - validator = common.Validator() - validator.ec2_id('i-00000001') - validator.i_id('i-00000001') - validator.ami_id('ami-00000001') - 
validator.eni_id('eni-00000001') - validator.sg_id('sg-00000001') - validator.subnet_id('subnet-00000001') - validator.igw_id('igw-00000001') - validator.rtb_id('rtb-00000001') - validator.vpc_id('vpc-00000001') - validator.vol_id('vol-00000001') - validator.snap_id('snap-00000001') - validator.dopt_id('dopt-00000001') - validator.eni_attach_id('eni-attach-00000001') - validator.eipalloc_id('eipalloc-00000001') - validator.eipassoc_id('eipassoc-00000001') - validator.rtbassoc_id('rtbassoc-00000001') - validator.vgw_id('vgw-00000001') - validator.cgw_id('cgw-00000001') - - invalid_ids = ['1234', 'a-1111', '', 'i-1111', 'i-rrr', 'foobar'] - - def check_raise_invalid_parameters(func): - for id in invalid_ids: - self.assertRaises(exception.InvalidParameterValue, func, id) - - check_raise_invalid_parameters(validator.ami_id) - check_raise_invalid_parameters(validator.eni_id) - check_raise_invalid_parameters(validator.sg_id) - check_raise_invalid_parameters(validator.subnet_id) - check_raise_invalid_parameters(validator.igw_id) - check_raise_invalid_parameters(validator.rtb_id) - check_raise_invalid_parameters(validator.vpc_id) - check_raise_invalid_parameters(validator.vol_id) - check_raise_invalid_parameters(validator.snap_id) - check_raise_invalid_parameters(validator.dopt_id) - check_raise_invalid_parameters(validator.eni_attach_id) - check_raise_invalid_parameters(validator.eipalloc_id) - check_raise_invalid_parameters(validator.eipassoc_id) - check_raise_invalid_parameters(validator.rtbassoc_id) - check_raise_invalid_parameters(validator.vgw_id) - check_raise_invalid_parameters(validator.cgw_id) - - invalid_ids = ['1234', 'a-1111', '', 'vpc-1111', 'vpc-rrr', 'foobar'] - - check_raise_invalid_parameters(validator.i_id) - - invalid_ids = ['1234', '', 'foobar'] - - check_raise_invalid_parameters(validator.ec2_id) - - def test_validate_multi(self): - validator = common.Validator() - result_sum = {'value': 0} - list_to_sum = [1, 2, 3, 4] - - def sum(value): - # 
NOTE(Alex) Because nonlocal is only in python 3.0 - result_sum['value'] += value - - validator.multi(list_to_sum, sum) - self.assertEqual(result_sum['value'], 10) - - self.assertRaises(exception.InvalidParameterValue, - validator.multi, 'not a list', sum) - - def test_validate_primitive(self): - validator = common.Validator() - validator.int(5) - validator.bool(True) - validator.str('str') - validator.str64('str') - validator.str255('str') - - def check_raise_validation_error(value, func): - self.assertRaises(exception.ValidationError, - func, value) - - check_raise_validation_error('str', validator.int) - check_raise_validation_error('str', validator.bool) - check_raise_validation_error(5, validator.str) - check_raise_validation_error('x' * 65, validator.str64) - check_raise_validation_error('x' * 256, validator.str255) - - def test_validate_security_group(self): - validator = common.Validator(params={}) - validator.security_group_str('name') - validator.security_group_str('aa #^% -=99') - validator = common.Validator(params={'vpc_id': 'vpc_id'}) - validator.security_group_str('name') - - def check_raise_validation_error(value): - self.assertRaises(exception.ValidationError, - validator.security_group_str, value) - - validator = common.Validator(params={}) - check_raise_validation_error('aa \t\x01\x02\x7f') - check_raise_validation_error('x' * 256) - - validator = common.Validator(params={'vpc_id': 'vpc_id'}) - check_raise_validation_error('aa #^% -=99') - check_raise_validation_error('x' * 256) - - def test_validate_vpn_connection_type(self): - validator = common.Validator() - validator.vpn_connection_type('ipsec.1') - - invalid_ids = ['1234', 'a-1111', '', 'vpc-1111', 'vpc-rrr', 'foobar', - 'ipsec1', 'openvpn', 'pptp', 'l2tp', 'freelan'] - for id in invalid_ids: - self.assertRaises(exception.InvalidParameterValue, - validator.vpn_connection_type, id) - - -class EC2TimestampValidationTestCase(testtools.TestCase): - """Test case for EC2 request timestamp 
validation.""" - - def test_validate_ec2_timestamp_valid(self): - params = {'Timestamp': '2011-04-22T11:29:49Z'} - expired = ec2utils.is_ec2_timestamp_expired(params) - self.assertFalse(expired) - - @tools.screen_all_logs - def test_validate_ec2_timestamp_old_format(self): - params = {'Timestamp': '2011-04-22T11:29:49'} - expired = ec2utils.is_ec2_timestamp_expired(params) - self.assertTrue(expired) - - def test_validate_ec2_timestamp_not_set(self): - params = {} - expired = ec2utils.is_ec2_timestamp_expired(params) - self.assertFalse(expired) - - def test_validate_ec2_timestamp_ms_time_regex(self): - result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123Z') - self.assertIsNotNone(result) - result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123456Z') - self.assertIsNotNone(result) - result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.1234567Z') - self.assertIsNone(result) - result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123') - self.assertIsNone(result) - result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49Z') - self.assertIsNone(result) - - @tools.screen_all_logs - def test_validate_ec2_timestamp_aws_sdk_format(self): - params = {'Timestamp': '2011-04-22T11:29:49.123Z'} - expired = ec2utils.is_ec2_timestamp_expired(params) - self.assertFalse(expired) - expired = ec2utils.is_ec2_timestamp_expired(params, expires=300) - self.assertTrue(expired) - - @tools.screen_all_logs - def test_validate_ec2_timestamp_invalid_format(self): - params = {'Timestamp': '2011-04-22T11:29:49.000P'} - expired = ec2utils.is_ec2_timestamp_expired(params) - self.assertTrue(expired) - - def test_validate_ec2_timestamp_advanced_time(self): - - # EC2 request with Timestamp in advanced time - timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250) - params = {'Timestamp': timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")} - expired = ec2utils.is_ec2_timestamp_expired(params, expires=300) - self.assertFalse(expired) - - @tools.screen_all_logs - def 
test_validate_ec2_timestamp_advanced_time_expired(self): - timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350) - params = {'Timestamp': timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")} - expired = ec2utils.is_ec2_timestamp_expired(params, expires=300) - self.assertTrue(expired) - - def test_validate_ec2_req_timestamp_not_expired(self): - params = {'Timestamp': ec2utils.isotime()} - expired = ec2utils.is_ec2_timestamp_expired(params, expires=15) - self.assertFalse(expired) - - @tools.screen_all_logs - def test_validate_ec2_req_timestamp_expired(self): - params = {'Timestamp': '2011-04-22T12:00:00Z'} - compare = ec2utils.is_ec2_timestamp_expired(params, expires=300) - self.assertTrue(compare) - - @tools.screen_all_logs - def test_validate_ec2_req_expired(self): - params = {'Expires': ec2utils.isotime()} - expired = ec2utils.is_ec2_timestamp_expired(params) - self.assertTrue(expired) - - def test_validate_ec2_req_not_expired(self): - expire = timeutils.utcnow() + datetime.timedelta(seconds=350) - params = {'Expires': expire.strftime("%Y-%m-%dT%H:%M:%SZ")} - expired = ec2utils.is_ec2_timestamp_expired(params) - self.assertFalse(expired) - - @tools.screen_all_logs - def test_validate_Expires_timestamp_invalid_format(self): - - # EC2 request with invalid Expires - params = {'Expires': '2011-04-22T11:29:49'} - expired = ec2utils.is_ec2_timestamp_expired(params) - self.assertTrue(expired) - - @tools.screen_all_logs - def test_validate_ec2_req_timestamp_Expires(self): - - # EC2 request with both Timestamp and Expires - params = {'Timestamp': '2011-04-22T11:29:49Z', - 'Expires': ec2utils.isotime()} - self.assertRaises(exception.InvalidRequest, - ec2utils.is_ec2_timestamp_expired, - params) diff --git a/ec2api/tests/unit/test_ec2utils.py b/ec2api/tests/unit/test_ec2utils.py deleted file mode 100644 index 57194a8f..00000000 --- a/ec2api/tests/unit/test_ec2utils.py +++ /dev/null @@ -1,610 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from unittest import mock - -import fixtures -from glanceclient.common import exceptions as glance_exception -from oslo_config import fixture as config_fixture -import testtools - -from ec2api.api import ec2utils -from ec2api import exception -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers - - -class EC2UtilsTestCase(testtools.TestCase): - - @mock.patch('ec2api.db.api.IMPL') - def test_get_db_item(self, db_api): - item = {'fake_key': 'fake_value'} - db_api.get_item_by_id.return_value = item - - def check_normal_flow(kind, ec2_id): - item['id'] = ec2_id - res = ec2utils.get_db_item('fake_context', ec2_id) - self.assertThat(res, matchers.DictMatches(item)) - db_api.get_item_by_id.assert_called_once_with('fake_context', - ec2_id) - db_api.reset_mock() - - check_normal_flow('vpc', 'vpc-001234af') - check_normal_flow('igw', 'igw-00000022') - - def check_not_found(kind, ex_class): - ec2_id = fakes.random_ec2_id(kind) - self.assertRaises(ex_class, - ec2utils.get_db_item, - 'fake_context', ec2_id) - db_api.get_item_by_id.assert_called_once_with('fake_context', - ec2_id) - db_api.reset_mock() - - db_api.get_item_by_id.return_value = None - check_not_found('vpc', exception.InvalidVpcIDNotFound) - check_not_found('igw', exception.InvalidInternetGatewayIDNotFound) - check_not_found('subnet', exception.InvalidSubnetIDNotFound) - check_not_found('eni', 
exception.InvalidNetworkInterfaceIDNotFound) - check_not_found('dopt', exception.InvalidDhcpOptionsIDNotFound) - check_not_found('eipalloc', exception.InvalidAllocationIDNotFound) - check_not_found('sg', exception.InvalidGroupNotFound) - check_not_found('rtb', exception.InvalidRouteTableIDNotFound) - check_not_found('i', exception.InvalidInstanceIDNotFound) - check_not_found('vol', exception.InvalidVolumeNotFound) - check_not_found('snap', exception.InvalidSnapshotNotFound) - check_not_found('ami', exception.InvalidAMIIDNotFound) - check_not_found('ari', exception.InvalidAMIIDNotFound) - check_not_found('aki', exception.InvalidAMIIDNotFound) - check_not_found('vgw', exception.InvalidVpnGatewayIDNotFound) - check_not_found('cgw', exception.InvalidCustomerGatewayIDNotFound) - check_not_found('vpn', exception.InvalidVpnConnectionIDNotFound) - - @mock.patch('ec2api.db.api.IMPL') - def test_get_db_items(self, db_api): - items = [{'id': fakes.random_ec2_id('fake'), - 'fake_key': 'fake_value'}, - {'id': fakes.random_ec2_id('fake'), - 'fake_key': 'fake_value'}] - db_api.get_items.return_value = items - db_api.get_items_by_ids.return_value = items - - def check_with_no_filter(empty_filter): - res = ec2utils.get_db_items('fake_context', 'fake', empty_filter) - self.assertThat(res, matchers.ListMatches(items)) - db_api.get_items.assert_called_once_with('fake_context', 'fake') - db_api.reset_mock() - - check_with_no_filter(None) - check_with_no_filter([]) - - def check_with_filter(item_ids): - res = ec2utils.get_db_items('fake_context', 'fake', item_ids) - self.assertThat(res, matchers.ListMatches(items)) - db_api.get_items_by_ids.assert_called_once_with( - 'fake_context', set(item_ids)) - db_api.reset_mock() - - item_ids = [i['id'] for i in items] - check_with_filter(item_ids) - check_with_filter(item_ids * 2) - - def check_not_found(kind, ex_class): - items = [{'id': fakes.random_ec2_id(kind), - 'fake_key': 'fake_value'} for _ in range(2)] - item_ids = [i['id'] for i in 
items] - item_ids.append(fakes.random_ec2_id(kind)) - db_api.get_items_by_ids.return_value = items - self.assertRaises(ex_class, ec2utils.get_db_items, - 'fake_context', kind, item_ids) - db_api.reset_mock() - - check_not_found('vpc', exception.InvalidVpcIDNotFound) - check_not_found('igw', exception.InvalidInternetGatewayIDNotFound) - check_not_found('subnet', exception.InvalidSubnetIDNotFound) - check_not_found('eni', exception.InvalidNetworkInterfaceIDNotFound) - check_not_found('dopt', exception.InvalidDhcpOptionsIDNotFound) - check_not_found('eipalloc', exception.InvalidAllocationIDNotFound) - check_not_found('sg', exception.InvalidGroupNotFound) - check_not_found('rtb', exception.InvalidRouteTableIDNotFound) - check_not_found('i', exception.InvalidInstanceIDNotFound) - check_not_found('vol', exception.InvalidVolumeNotFound) - check_not_found('snap', exception.InvalidSnapshotNotFound) - check_not_found('ami', exception.InvalidAMIIDNotFound) - check_not_found('aki', exception.InvalidAMIIDNotFound) - check_not_found('ari', exception.InvalidAMIIDNotFound) - check_not_found('vgw', exception.InvalidVpnGatewayIDNotFound) - check_not_found('cgw', exception.InvalidCustomerGatewayIDNotFound) - check_not_found('vpn', exception.InvalidVpnConnectionIDNotFound) - - """Unit test api xml conversion.""" - def test_number_conversion(self): - conv = ec2utils._try_convert - self.assertIsNone(conv('None')) - self.assertEqual(conv('True'), True) - self.assertEqual(conv('TRUE'), True) - self.assertEqual(conv('true'), True) - self.assertEqual(conv('False'), False) - self.assertEqual(conv('FALSE'), False) - self.assertEqual(conv('false'), False) - self.assertEqual(conv('0'), 0) - self.assertEqual(conv('42'), 42) - self.assertEqual(conv('3.14'), 3.14) - self.assertEqual(conv('-57.12'), -57.12) - self.assertEqual(conv('0x57'), 0x57) - self.assertEqual(conv('-0x57'), -0x57) - self.assertEqual(conv('-'), '-') - self.assertEqual(conv('-0'), 0) - self.assertEqual(conv('0.0'), 0.0) - 
self.assertEqual(conv('1e-8'), 0.0) - self.assertEqual(conv('-1e-8'), 0.0) - self.assertEqual(conv('0xDD8G'), '0xDD8G') - self.assertEqual(conv('0XDD8G'), '0XDD8G') - self.assertEqual(conv('-stringy'), '-stringy') - self.assertEqual(conv('stringy'), 'stringy') - self.assertEqual(conv('add'), 'add') - self.assertEqual(conv('remove'), 'remove') - self.assertEqual(conv(''), '') - - @mock.patch('ec2api.db.api.IMPL') - def test_os_id_to_ec2_id(self, db_api): - fake_context = base.create_context() - fake_id = fakes.random_ec2_id('fake') - fake_os_id = fakes.random_os_id() - - # no cache, item is found - db_api.get_items_ids.return_value = [(fake_id, fake_os_id)] - item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id) - self.assertEqual(fake_id, item_id) - db_api.get_items_ids.assert_called_once_with( - fake_context, 'fake', item_ids=None, item_os_ids=(fake_os_id,)) - self.assertFalse(db_api.add_item_id.called) - - # no cache, item isn't found - db_api.get_items_ids.return_value = [] - db_api.add_item_id.return_value = fake_id - item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id) - self.assertEqual(fake_id, item_id) - db_api.add_item_id.assert_called_once_with( - fake_context, 'fake', fake_os_id, None) - - # no item in cache, item isn't found - db_api.reset_mock() - ids_cache = {fakes.random_os_id(): fakes.random_ec2_id('fake')} - item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id, - ids_by_os_id=ids_cache) - self.assertEqual(fake_id, item_id) - self.assertIn(fake_os_id, ids_cache) - self.assertEqual(fake_id, ids_cache[fake_os_id]) - db_api.add_item_id.assert_called_once_with( - fake_context, 'fake', fake_os_id, None) - - # no item in cache, item is found - db_api.reset_mock() - db_api.get_items_ids.return_value = [(fake_id, fake_os_id)] - ids_cache = {} - item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id, - ids_by_os_id=ids_cache) - self.assertEqual(fake_id, item_id) - self.assertEqual({fake_os_id: 
fake_id}, ids_cache) - self.assertFalse(db_api.add_item_id.called) - - # item in cache - db_api.reset_mock() - ids_cache = {fake_os_id: fake_id} - item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id, - ids_by_os_id=ids_cache) - self.assertEqual(fake_id, item_id) - self.assertEqual({fake_os_id: fake_id}, ids_cache) - self.assertFalse(db_api.get_items_ids.called) - self.assertFalse(db_api.add_item_id.called) - - # item in items dict - items_dict = {fake_os_id: {'id': fake_id, - 'os_id': fake_os_id}} - ids_cache = {} - item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id, - items_by_os_id=items_dict, - ids_by_os_id=ids_cache) - self.assertEqual(fake_id, item_id) - self.assertFalse(db_api.get_items_ids.called) - self.assertFalse(db_api.add_item_id.called) - self.assertEqual({}, ids_cache) - - # item not in items dict, item is found - items_dict = {fake_os_id: {'id': fake_id, - 'os_id': fake_os_id}} - db_api.get_items_ids.return_value = [(fake_id, fake_os_id)] - item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id, - items_by_os_id=items_dict) - self.assertEqual(fake_id, item_id) - self.assertFalse(db_api.add_item_id.called) - - @mock.patch('glanceclient.client.Client') - @mock.patch('ec2api.db.api.IMPL') - def test_get_os_image(self, db_api, glance): - glance = glance.return_value - fake_context = base.create_context() - - os_image = fakes.OSImage(fakes.OS_IMAGE_1) - glance.images.get.return_value = os_image - # check normal flow - db_api.get_items_ids.return_value = [ - (fakes.ID_EC2_IMAGE_1, fakes.ID_OS_IMAGE_1)] - self.assertEqual( - os_image, - ec2utils.get_os_image(fake_context, fakes.ID_EC2_IMAGE_1)) - db_api.get_items_ids.assert_called_with( - mock.ANY, 'ami', item_ids=(fakes.ID_EC2_IMAGE_1,), - item_os_ids=None) - glance.images.get.assert_called_with(fakes.ID_OS_IMAGE_1) - - # check case of absence of an image in OS - glance.images.get.side_effect = glance_exception.HTTPNotFound() - self.assertRaises( - 
exception.InvalidAMIIDNotFound, - ec2utils.get_os_image, - fake_context, fakes.ID_EC2_IMAGE_1) - - # check case of an unknown image id - db_api.get_items_ids.return_value = [] - self.assertRaises( - exception.InvalidAMIIDNotFound, - ec2utils.get_os_image, - fake_context, fakes.random_ec2_id('ami')) - - # check case of creating image - db_api.get_items_ids.return_value = [(fakes.ID_EC2_IMAGE_1, None)] - self.assertIsNone(ec2utils.get_os_image(fake_context, - fakes.ID_EC2_IMAGE_1)) - - @mock.patch('neutronclient.v2_0.client.Client') - def test_get_os_public_network(self, neutron): - neutron = neutron.return_value - context = base.create_context() - conf = self.useFixture(config_fixture.Config()) - - conf.config(external_network='fake_public_network') - neutron.list_networks.return_value = {'networks': ['network_object']} - net = ec2utils.get_os_public_network(context) - self.assertEqual('network_object', net) - neutron.list_networks.assert_called_once_with( - **{'router:external': True, 'name': 'fake_public_network'}) - - neutron.list_networks.return_value = {'networks': []} - with fixtures.FakeLogger() as log: - self.assertRaises(exception.Unsupported, - ec2utils.get_os_public_network, context) - self.assertNotEqual(0, len(log.output)) - self.assertIn('fake_public_network', log.output) - - neutron.list_networks.return_value = {'networks': ['obj1', 'obj2']} - with fixtures.FakeLogger() as log: - self.assertRaises(exception.Unsupported, - ec2utils.get_os_public_network, context) - self.assertNotEqual(0, len(log.output)) - self.assertIn('fake_public_network', log.output) - - conf.config(external_network=None) - with fixtures.FakeLogger() as log: - self.assertRaises(exception.Unsupported, - ec2utils.get_os_public_network, context) - self.assertNotEqual(0, len(log.output)) - self.assertNotIn('None', log.output) - - neutron.list_networks.return_value = {'networks': []} - with fixtures.FakeLogger() as log: - self.assertRaises(exception.Unsupported, - 
ec2utils.get_os_public_network, context) - self.assertNotEqual(0, len(log.output)) - self.assertNotIn('None', log.output) - - def test_get_os_image_mappings(self): - # check virtual device mapping transformation with substitution - properties = { - 'mappings': [ - {'device': '/dev/vda', 'virtual': 'root'}, - {'device': 'vda', 'virtual': 'ami'}, - {'device': 'vdb', 'virtual': 'ephemeral0'}, - {'device': '/dev/vdb', 'virtual': 'swap'}, - {'device': '/dev/vdc', 'virtual': 'swap'}, - {'device': 'vdc', 'virtual': 'ephemeral0'}, - {'device': 'vdd'}, - {'device': '/dev/vdd', 'virtual': None}, - {'device': 'vdd', 'virtual': ''}, - {'device': '/dev/vdd', 'virtual': 'swamp'}, - {'virtual': 'ephemeral2'}, - {'device': None, 'virtual': 'ephemeral3'}, - {'device': '', 'virtual': 'ephemeral4'}, - ], - } - expected = [ - {'device_name': '/dev/vdb', - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'guest_format': 'swap', - 'virtual_name': 'swap'}, - {'device_name': '/dev/vdc', - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'virtual_name': 'ephemeral0'}, - {'device_name': None, - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'virtual_name': 'ephemeral2'}, - {'device_name': None, - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'virtual_name': 'ephemeral3'}, - {'device_name': '', - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'virtual_name': 'ephemeral4'}, - ] - result = ec2utils.get_os_image_mappings(properties) - self.assertThat(expected, matchers.ListMatches(result), verbose=True) - - # check legacy block device mapping transformation with 
substitution - properties = { - 'block_device_mapping': [ - {'device_name': '/dev/vdb', - 'virtual_name': 'ephemeral0'}, - {'device_name': 'vdc', - 'virtual_name': 'swap', - 'snapshot_id': 'fake_snapshot_id_0'}, - {'device_name': '/dev/vda', - 'snapshot_id': 'fake_snapshot_id_1', - 'delete_on_termination': True, - 'volume_size': 100}, - {'snapshot_id': 'fake_snapshot_id_2'}, - {'device_name': '/dev/vdd', - 'virtual_name': 'ephemeral2'}, - {'device_name': 'vdd', - 'volume_id': 'fake_volume_id_3', - 'delete_on_termination': False}, - {'device_name': 'vde', - 'volume_id': 'fake_volume_id_4'}, - {'device_name': '/dev/vde', - 'snapshot_id': 'fake_snapshot_id_4', - 'no_device': True}, - {'snapshot_id': 'fake_snapshot_id_5', - 'volume_id': 'fake_volume_id_5', - 'volume_size': 50}, - ], - } - expected = [ - {'device_name': '/dev/vdb', - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'virtual_name': 'ephemeral0'}, - {'device_name': 'vdc', - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'guest_format': 'swap', - 'virtual_name': 'swap'}, - {'device_name': '/dev/vda', - 'snapshot_id': 'fake_snapshot_id_1', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'delete_on_termination': True, - 'volume_size': 100}, - {'snapshot_id': 'fake_snapshot_id_2', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'delete_on_termination': False}, - {'device_name': 'vdd', - 'volume_id': 'fake_volume_id_3', - 'source_type': 'volume', - 'destination_type': 'volume', - 'delete_on_termination': False}, - {'device_name': '/dev/vde', - 'snapshot_id': 'fake_snapshot_id_4', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'no_device': True, - 'delete_on_termination': False}, - {'snapshot_id': 'fake_snapshot_id_5', - 'volume_id': 'fake_volume_id_5', - 'source_type': 'snapshot', - 
'destination_type': 'volume', - 'volume_size': 50, - 'delete_on_termination': False}, - ] - result = ec2utils.get_os_image_mappings(properties) - self.assertThat(expected, matchers.ListMatches(result), verbose=True) - - # check bdm v2 with substitution - properties = { - 'bdm_v2': True, - 'block_device_mapping': [ - {'device_name': '/dev/vdb', - 'snapshot_id': 'fake_snapshot_id_1', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'volume_size': 20, - 'delete_on_termination': True}, - {'device_name': '/dev/vdb', - 'source_type': 'blank', - 'destination_type': 'volume', - 'volume_size': 10, - 'delete_on_termination': True}, - {'device_name': '/dev/vdc', - 'snapshot_id': 'fake_snapshot_id_2', - 'source_type': 'snapshot', - 'destination_type': 'volume'}, - {'device_name': 'vdc', - 'volume_id': 'fake_volume_id_2', - 'source_type': 'volume', - 'destination_type': 'volume'}, - {'device_name': 'vdd', - 'snapshot_id': 'fake_snapshot_id_3', - 'source_type': 'snapshot', - 'destination_type': 'volume'}, - {'device_name': '/dev/vdd', - 'image_id': 'fake_image_id_1', - 'source_type': 'image', - 'destination_type': 'volume', - 'volume_size': 30}, - ], - } - expected = [ - {'device_name': '/dev/vdb', - 'source_type': 'blank', - 'destination_type': 'volume', - 'volume_size': 10, - 'delete_on_termination': True}, - {'device_name': 'vdc', - 'volume_id': 'fake_volume_id_2', - 'source_type': 'volume', - 'destination_type': 'volume', - 'delete_on_termination': False}, - {'device_name': '/dev/vdd', - 'image_id': 'fake_image_id_1', - 'source_type': 'image', - 'destination_type': 'volume', - 'volume_size': 30, - 'delete_on_termination': False}, - ] - result = ec2utils.get_os_image_mappings(properties) - self.assertThat(expected, matchers.ListMatches(result), verbose=True) - - # check bdm v2 vs vdm susbtitution - properties = { - 'mappings': [ - {'device': 'vdb', 'virtual': 'ephemeral0'}, - {'device': 'vdc', 'virtual': 'ephemeral1'}, - {'device': 'vdh', 'virtual': 
'ephemeral2'}, - ], - 'bdm_v2': True, - 'block_device_mapping': [ - {'device_name': '/dev/vda', - 'snapshot_id': 'fake_snapshot_id_1', - 'source_type': 'snapshot', - 'destination_type': 'volume'}, - {'device_name': '/dev/vdc', - 'snapshot_id': 'fake_snapshot_id_2', - 'source_type': 'snapshot', - 'destination_type': 'volume'}, - {'device_name': '/dev/vdd', - 'snapshot_id': 'fake_snapshot_id_3', - 'source_type': 'snapshot', - 'destination_type': 'volume'} - ], - } - expected = [ - {'device_name': '/dev/vdb', - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'virtual_name': 'ephemeral0'}, - {'device_name': '/dev/vdc', - 'snapshot_id': 'fake_snapshot_id_2', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'delete_on_termination': False}, - {'device_name': '/dev/vdh', - 'source_type': 'blank', - 'destination_type': 'local', - 'device_type': 'disk', - 'delete_on_termination': True, - 'boot_index': -1, - 'virtual_name': 'ephemeral2'}, - {'device_name': '/dev/vda', - 'snapshot_id': 'fake_snapshot_id_1', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'delete_on_termination': False}, - {'device_name': '/dev/vdd', - 'snapshot_id': 'fake_snapshot_id_3', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'delete_on_termination': False}, - ] - result = ec2utils.get_os_image_mappings(properties) - self.assertThat(expected, matchers.ListMatches(result), verbose=True) - - # check legacy bdm vs vdm susbtitution - properties = { - 'mappings': [ - {'device': 'vdb', 'virtual': 'ephemeral0'}, - {'device': 'vdc', 'virtual': 'ephemeral1'}, - {'device': 'vdh', 'virtual': 'ephemeral2'}, - ], - 'block_device_mapping': [ - {'device_name': '/dev/vda', - 'snapshot_id': 'fake_snapshot_id_1'}, - {'device_name': '/dev/vdc', - 'snapshot_id': 'fake_snapshot_id_2'}, - {'device_name': '/dev/vdd', - 'snapshot_id': 'fake_snapshot_id_3'} - ], - } - result = 
ec2utils.get_os_image_mappings(properties) - self.assertThat(expected, matchers.ListMatches(result), verbose=True) - - def test_block_device_strip_dev(self): - self.assertEqual(ec2utils.block_device_strip_dev('/dev/sda'), 'sda') - self.assertEqual(ec2utils.block_device_strip_dev('sda'), 'sda') - - def test_block_device_prepend_dev(self): - mapping = ['/dev/sda', 'sdb', 'sdc', 'sdd', 'sde'] - expected = ['/dev/sda', '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde'] - - for m, e in zip(mapping, expected): - prepended = ec2utils.block_device_prepend_dev(m) - self.assertEqual(e, prepended) - - def test_block_device_properties_root_device_name(self): - root_device0 = '/dev/sda' - root_device1 = '/dev/sdb' - mappings = [{'virtual': 'root', - 'device': root_device0}] - - properties0 = {'mappings': mappings} - properties1 = {'mappings': mappings, - 'root_device_name': root_device1} - - self.assertIsNone( - ec2utils.block_device_properties_root_device_name({})) - self.assertEqual( - root_device0, - ec2utils.block_device_properties_root_device_name(properties0)) - self.assertEqual( - root_device1, - ec2utils.block_device_properties_root_device_name(properties1)) diff --git a/ec2api/tests/unit/test_faults.py b/ec2api/tests/unit/test_faults.py deleted file mode 100644 index c078382d..00000000 --- a/ec2api/tests/unit/test_faults.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from oslotest import base as test_base -import webob - -from ec2api.api import faults -from ec2api import wsgi - - -class FakeResponse(object): - reason = "Test Reason" - - def __init__(self, status_code=400): - self.status_code = status_code - - def json(self): - return {} - - -class TestFaults(test_base.BaseTestCase): - """Tests covering ec2 Fault class.""" - - def test_fault_exception(self): - # Ensure the status_int is set correctly on faults. - fault = faults.Fault(webob.exc.HTTPBadRequest( - explanation='test')) - self.assertIsInstance(fault.wrapped_exc, webob.exc.HTTPBadRequest) - - def test_fault_exception_status_int(self): - # Ensure the status_int is set correctly on faults. - fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test')) - self.assertEqual(fault.wrapped_exc.status_int, 404) - - @mock.patch.object(faults, 'ec2_error_response', - return_value=FakeResponse()) - def test_fault_call(self, mock_request): - # Ensure proper EC2 response on faults. - message = 'test message' - ex = webob.exc.HTTPNotFound(explanation=message) - fault = faults.Fault(ex) - req = wsgi.Request.blank('/test') - req.GET['AWSAccessKeyId'] = "test_user_id:test_project_id" - fault(req) - mock_request.assert_called_with(mock.ANY, 'HTTPNotFound', - message=message, status=ex.status_int) diff --git a/ec2api/tests/unit/test_hacking.py b/ec2api/tests/unit/test_hacking.py deleted file mode 100644 index 3dd79396..00000000 --- a/ec2api/tests/unit/test_hacking.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ec2api.hacking import checks -from ec2api.tests.unit import base - - -class HackingTestCase(base.BaseTestCase): - def test_no_log_translations(self): - for log in checks._all_log_levels: - for hint in checks._all_hints: - bad = 'LOG.%s(%s("Bad"))' % (log, hint) - self.assertEqual( - 1, len(list(checks.no_translate_logs(bad, 'f')))) - # Catch abuses when used with a variable and not a literal - bad = 'LOG.%s(%s(msg))' % (log, hint) - self.assertEqual( - 1, len(list(checks.no_translate_logs(bad, 'f')))) diff --git a/ec2api/tests/unit/test_image.py b/ec2api/tests/unit/test_image.py deleted file mode 100644 index 5885ca45..00000000 --- a/ec2api/tests/unit/test_image.py +++ /dev/null @@ -1,985 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -from unittest import mock - -import tempfile - -from cinderclient import exceptions as cinder_exception -import eventlet -from oslo_concurrency import processutils -from oslo_serialization import jsonutils - -from ec2api.api import image as image_api -from ec2api import exception -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - -AMI_MANIFEST_XML = """ - - 2011-06-17 - - test-s3 - 0 - 0 - - - x86_64 - - - ami - sda1 - - - root - /dev/sda1 - - - ephemeral0 - sda2 - - - swap - sda3 - - - %(aki-id)s - %(ari-id)s - - - foo - foo - foo - - - foo - - - - -""" % {'aki-id': fakes.ID_EC2_IMAGE_AKI_1, - 'ari-id': fakes.ID_EC2_IMAGE_ARI_1} - -FILE_MANIFEST_XML = """ - - - foo - foo - foo - - - foo - - - - -""" - - -class ImageTestCase(base.ApiTestCase): - - @mock.patch('ec2api.api.instance._is_ebs_instance') - def _test_create_image(self, instance_status, no_reboot, is_ebs_instance): - self.set_mock_db_items(fakes.DB_INSTANCE_2) - os_instance = mock.MagicMock() - os_instance.configure_mock(id=fakes.ID_OS_INSTANCE_2, - status=instance_status) - stop_called = iter([False, True]) - os_instance.stop.side_effect = lambda: next(stop_called) - os_instance.get.side_effect = lambda: (setattr(os_instance, 'status', - 'SHUTOFF') - if next(stop_called) else None) - image_id = fakes.random_ec2_id('ami') - os_image_id = fakes.random_os_id() - os_instance.create_image.return_value = os_image_id - self.glance.images.get.return_value = fakes.OSImage( - {'id': os_image_id}, - from_get=True) - self.nova.servers.get.return_value = os_instance - is_ebs_instance.return_value = True - self.db_api.add_item.side_effect = tools.get_db_api_add_item(image_id) - - resp = self.execute('CreateImage', - {'InstanceId': fakes.ID_EC2_INSTANCE_2, - 'Name': 'fake_name', - 'Description': 'fake desc', - 'NoReboot': str(no_reboot)}) - self.assertEqual({'imageId': image_id}, - resp) - 
self.db_api.get_item_by_id.assert_called_once_with( - mock.ANY, fakes.ID_EC2_INSTANCE_2) - self.nova.servers.get.assert_called_once_with(fakes.ID_OS_INSTANCE_2) - is_ebs_instance.assert_called_once_with(mock.ANY, os_instance.id) - expected_image = {'is_public': False, - 'description': 'fake desc'} - if no_reboot: - expected_image['os_id'] = os_image_id - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'ami', expected_image) - if not no_reboot: - eventlet.sleep() - if not no_reboot: - os_instance.stop.assert_called_once_with() - os_instance.get.assert_called_once_with() - os_instance.start.assert_called_once_with() - if no_reboot: - os_instance.create_image.assert_called_once_with('fake_name') - else: - os_instance.create_image.assert_called_once_with( - 'fake_name', metadata={'ec2_id': image_id}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, {'id': image_id, - 'is_public': False, - 'description': 'fake desc', - 'os_id': os_image_id, - 'vpc_id': None}) - - self.db_api.reset_mock() - self.nova.servers.reset_mock() - - def test_create_image(self): - self._test_create_image('ACTIVE', False) - self._test_create_image('SHUTOFF', True) - - @mock.patch('ec2api.api.instance._is_ebs_instance') - def test_register_image_by_url(self, is_ebs_instance): - self.set_mock_db_items(fakes.DB_INSTANCE_2) - is_ebs_instance.return_value = True - - # Setup the mock parameters - image_id = fakes.random_ec2_id('ami') - os_image_id = fakes.random_os_id() - self.glance.images.create.return_value = fakes.OSImage( - {'id': os_image_id}, - from_get=True) - self.db_api.add_item.side_effect = tools.get_db_api_add_item(image_id) - - # Setup Import Command - import_command = 'RegisterImage' - - # Setup the import arguments - args = { - 'Name': 'TestImage123', - 'ImageLocation': - fakes.LOCATION_IMAGE_2, - 'Architecture': 'x86_64' - } - - # Execute the import image process - resp = self.execute(import_command, args) - - # Assert that the image returned is equal to what was 
expected - self.assertEqual({'imageId': image_id}, resp) - - # Assert that Glance Image Create was called - self.glance.images.create.assert_called_once_with( - name='TestImage123', - disk_format='raw', - container_format='bare', - visibility='private', - architecture='x86_64', - image_location=fakes.LOCATION_IMAGE_2) - - # Assert that Glance Image Import was called - self.glance.images.image_import.assert_called_once_with( - os_image_id, - method='web-download', - uri=fakes.LOCATION_IMAGE_2) - - # Assert that the image was created - expected_image = {'is_public': False, - 'os_id': mock.ANY, - 'description': None} - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'ami', expected_image) - - # Reset all test settings/state - self.db_api.reset_mock() - self.glance.reset_mock() - - @mock.patch('ec2api.api.instance._is_ebs_instance') - def test_create_image_invalid_parameters(self, is_ebs_instance): - self.set_mock_db_items(fakes.DB_INSTANCE_1) - is_ebs_instance.return_value = False - - self.assert_execution_error('InvalidParameterValue', 'CreateImage', - {'InstanceId': fakes.ID_EC2_INSTANCE_1, - 'Name': 'fake_name'}) - - @mock.patch('ec2api.api.image._s3_create') - def test_register_image_by_s3(self, s3_create): - s3_create.return_value = fakes.OSImage(fakes.OS_IMAGE_1) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_IMAGE_1)) - - resp = self.execute( - 'RegisterImage', - {'ImageLocation': fakes.LOCATION_IMAGE_1}) - self.assertThat(resp, matchers.DictMatches( - {'imageId': fakes.ID_EC2_IMAGE_1})) - - s3_create.assert_called_once_with( - mock.ANY, - {'name': fakes.LOCATION_IMAGE_1, - 'image_location': fakes.LOCATION_IMAGE_1}) - s3_create.reset_mock() - - resp = self.execute( - 'RegisterImage', - {'ImageLocation': fakes.LOCATION_IMAGE_1, - 'Name': 'an image name'}) - self.assertThat(resp, matchers.DictMatches( - {'imageId': fakes.ID_EC2_IMAGE_1})) - - s3_create.assert_called_once_with( - mock.ANY, - {'name': 'an image name', - 
'image_location': fakes.LOCATION_IMAGE_1}) - - @mock.patch('ec2api.api.ec2utils.get_os_image') - def test_register_image_by_bdm(self, get_os_image): - self.glance.images.create.return_value = ( - fakes.OSImage(fakes.OS_IMAGE_2)) - self.glance.images.upload.return_value = ( - fakes.OSImage(fakes.OS_IMAGE_2)) - self.cinder.volume_snapshots.get.side_effect = ( - tools.get_by_1st_arg_getter( - {fakes.ID_OS_SNAPSHOT_1: ( - fakes.OSSnapshot(fakes.OS_SNAPSHOT_1))}, - notfound_exception=cinder_exception.NotFound(404))) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_IMAGE_2)) - self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2, - fakes.DB_IMAGE_AKI_1, fakes.DB_IMAGE_ARI_1) - get_os_image.side_effect = [fakes.OSImage(fakes.OS_IMAGE_AKI_1), - fakes.OSImage(fakes.OS_IMAGE_ARI_1)] - - resp = self.execute( - 'RegisterImage', - {'RootDeviceName': fakes.ROOT_DEVICE_NAME_IMAGE_2, - 'Name': 'fake_name', - 'KernelId': fakes.ID_EC2_IMAGE_AKI_1, - 'RamdiskId': fakes.ID_EC2_IMAGE_ARI_1, - 'BlockDeviceMapping.1.DeviceName': fakes.ROOT_DEVICE_NAME_IMAGE_2, - 'BlockDeviceMapping.1.Ebs.SnapshotId': fakes.ID_EC2_SNAPSHOT_1, - 'BlockDeviceMapping.2.DeviceName': '/dev/vdf', - 'BlockDeviceMapping.2.Ebs.VolumeSize': '100', - 'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'False', - 'BlockDeviceMapping.3.DeviceName': '/dev/vdg', - 'BlockDeviceMapping.3.Ebs.SnapshotId': fakes.ID_EC2_SNAPSHOT_1, - 'BlockDeviceMapping.3.Ebs.VolumeSize': '55', - 'BlockDeviceMapping.3.Ebs.DeleteOnTermination': 'True', - 'BlockDeviceMapping.4.DeviceName': '/dev/vdh', - 'BlockDeviceMapping.4.Ebs.SnapshotId': fakes.ID_EC2_SNAPSHOT_2}) - self.assertThat(resp, matchers.DictMatches( - {'imageId': fakes.ID_EC2_IMAGE_2})) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'ami', {'os_id': fakes.ID_OS_IMAGE_2, - 'is_public': False, - 'description': None}) - self.assertEqual(1, self.glance.images.create.call_count) - self.assertEqual((), 
self.glance.images.create.call_args[0]) - self.assertIsInstance( - self.glance.images.create.call_args[1], dict) - bdm = self.glance.images.create.call_args[1].pop( - 'block_device_mapping', 'null') - self.assertEqual( - {'visibility': 'private', - 'name': 'fake_name', - 'kernel_id': fakes.ID_OS_IMAGE_AKI_1, - 'ramdisk_id': fakes.ID_OS_IMAGE_ARI_1, - 'root_device_name': fakes.ROOT_DEVICE_NAME_IMAGE_2, - 'container_format': 'bare', - 'disk_format': 'raw', - 'bdm_v2': 'True'}, - self.glance.images.create.call_args[1]) - self.assertEqual([{'boot_index': 0, - 'delete_on_termination': True, - 'destination_type': 'volume', - 'device_name': fakes.ROOT_DEVICE_NAME_IMAGE_2, - 'source_type': 'snapshot', - 'snapshot_id': fakes.ID_OS_SNAPSHOT_1, - 'volume_size': 1}, - {'boot_index': -1, - 'delete_on_termination': False, - 'destination_type': 'volume', - 'device_name': '/dev/vdf', - 'source_type': 'blank', - 'volume_size': 100}, - {'boot_index': -1, - 'delete_on_termination': True, - 'destination_type': 'volume', - 'device_name': '/dev/vdg', - 'source_type': 'snapshot', - 'snapshot_id': fakes.ID_OS_SNAPSHOT_1, - 'volume_size': 55}, - {'boot_index': -1, - 'delete_on_termination': True, - 'destination_type': 'volume', - 'device_name': '/dev/vdh', - 'source_type': 'snapshot', - 'snapshot_id': fakes.ID_OS_SNAPSHOT_2}], - jsonutils.loads(bdm)) - get_os_image.assert_has_calls( - [mock.call(mock.ANY, fakes.ID_EC2_IMAGE_AKI_1), - mock.call(mock.ANY, fakes.ID_EC2_IMAGE_ARI_1)]) - self.cinder.volume_snapshots.get.assert_any_call( - fakes.ID_OS_SNAPSHOT_1) - - def test_register_image_invalid_parameters(self): - self.assert_execution_error( - 'InvalidParameterCombination', 'RegisterImage', {}) - - def test_deregister_image(self): - self._setup_model() - - # normal flow - resp = self.execute('DeregisterImage', - {'ImageId': fakes.ID_EC2_IMAGE_1}) - self.assertThat(resp, matchers.DictMatches({'return': True})) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, 
fakes.ID_EC2_IMAGE_1) - self.glance.images.delete.assert_called_once_with( - fakes.ID_OS_IMAGE_1) - - # deregister image which failed on asynchronously creation - self.glance.reset_mock() - image_id = fakes.random_ec2_id('ami') - self.add_mock_db_items({'id': image_id, - 'os_id': None, - 'state': 'failed'}) - resp = self.execute('DeregisterImage', - {'ImageId': image_id}) - self.assertThat(resp, matchers.DictMatches({'return': True})) - self.db_api.delete_item.assert_called_with(mock.ANY, image_id) - self.assertFalse(self.glance.images.delete.called) - - def test_deregister_image_invalid_parameters(self): - self._setup_model() - - self.assert_execution_error('InvalidAMIID.NotFound', 'DeregisterImage', - {'ImageId': fakes.random_ec2_id('ami')}) - - # deregister asynchronously creating image - image_id = fakes.random_ec2_id('ami') - self.add_mock_db_items({'id': image_id, - 'os_id': None}) - self.assert_execution_error('IncorrectState', - 'DeregisterImage', - {'ImageId': image_id}) - - def test_describe_images(self): - self._setup_model() - - resp = self.execute('DescribeImages', {}) - self.assertThat( - resp, - matchers.DictMatches( - {'imagesSet': [fakes.EC2_IMAGE_1, fakes.EC2_IMAGE_2]}, - orderless_lists=True), - verbose=True) - - self.db_api.get_items.assert_any_call(mock.ANY, 'ami') - self.db_api.get_items.assert_any_call(mock.ANY, 'aki') - self.db_api.get_items.assert_any_call(mock.ANY, 'ari') - - self.db_api.get_items_by_ids = tools.CopyingMock( - side_effect=self.db_api.get_items_by_ids.side_effect) - - resp = self.execute('DescribeImages', - {'ImageId.1': fakes.ID_EC2_IMAGE_1}) - self.assertThat(resp, - matchers.DictMatches( - {'imagesSet': [fakes.EC2_IMAGE_1]}, - orderless_lists=True)) - self.db_api.get_items_by_ids.assert_any_call( - mock.ANY, set([fakes.ID_EC2_IMAGE_1])) - - self.check_filtering( - 'DescribeImages', 'imagesSet', - [('architecture', 'x86_64'), - ('block-device-mapping.device-name', '/dev/sdb2'), - ('block-device-mapping.snapshot-id', 
fakes.ID_EC2_SNAPSHOT_1), - ('block-device-mapping.volume-size', 22), - ('description', 'fake desc'), - ('image-id', fakes.ID_EC2_IMAGE_1), - ('image-type', 'machine'), - ('is-public', True), - ('kernel_id', fakes.ID_EC2_IMAGE_AKI_1,), - ('name', 'fake_name'), - ('owner-id', fakes.ID_OS_PROJECT), - ('ramdisk-id', fakes.ID_EC2_IMAGE_ARI_1), - ('root-device-name', fakes.ROOT_DEVICE_NAME_IMAGE_1), - ('root-device-type', 'instance-store'), - ('state', 'available')]) - self.check_tag_support( - 'DescribeImages', 'imagesSet', - fakes.ID_EC2_IMAGE_1, 'imageId', - ('ami', 'ari', 'aki')) - - def test_describe_images_invalid_parameters(self): - self._setup_model() - - self.assert_execution_error('InvalidAMIID.NotFound', 'DescribeImages', - {'ImageId.1': fakes.random_ec2_id('ami')}) - - self.glance.images.list.side_effect = lambda: [] - - self.assert_execution_error('InvalidAMIID.NotFound', 'DescribeImages', - {'ImageId.1': fakes.ID_EC2_IMAGE_1}) - - def test_describe_image_attributes(self): - self._setup_model() - - def do_check(attr, ec2_image_id, response): - resp = self.execute('DescribeImageAttribute', - {'ImageId': ec2_image_id, - 'Attribute': attr}) - response['imageId'] = ec2_image_id - self.assertThat(resp, - matchers.DictMatches(response, - orderless_lists=True), - verbose=True) - - do_check('launchPermission', - fakes.ID_EC2_IMAGE_2, - {'launchPermission': [{'group': 'all'}]}) - - do_check('kernel', - fakes.ID_EC2_IMAGE_1, - {'kernel': {'value': fakes.ID_EC2_IMAGE_AKI_1}}) - - do_check('ramdisk', - fakes.ID_EC2_IMAGE_1, - {'ramdisk': {'value': fakes.ID_EC2_IMAGE_ARI_1}}) - - do_check('rootDeviceName', - fakes.ID_EC2_IMAGE_1, - {'rootDeviceName': fakes.ROOT_DEVICE_NAME_IMAGE_1}) - - do_check('rootDeviceName', - fakes.ID_EC2_IMAGE_2, - {'rootDeviceName': fakes.ROOT_DEVICE_NAME_IMAGE_2}) - - do_check('blockDeviceMapping', - fakes.ID_EC2_IMAGE_1, - {'blockDeviceMapping': ( - fakes.EC2_IMAGE_1['blockDeviceMapping'])}) - - do_check('blockDeviceMapping', - 
fakes.ID_EC2_IMAGE_2, - {'blockDeviceMapping': ( - fakes.EC2_IMAGE_2['blockDeviceMapping'])}) - - def test_describe_image_attributes_invalid_parameters(self): - image_id = fakes.random_ec2_id('ami') - self.set_mock_db_items({'id': image_id, - 'os_id': None}) - self.assert_execution_error('IncorrectState', - 'DescribeImageAttribute', - {'ImageId': image_id, - 'Attribute': 'kernel'}) - - def test_modify_image_attributes(self): - self._setup_model() - - resp = self.execute('ModifyImageAttribute', - {'imageId': fakes.ID_EC2_IMAGE_1, - 'attribute': 'launchPermission', - 'operationType': 'add', - 'userGroup.1': 'all'}) - self.assertThat(resp, matchers.DictMatches({'return': True})) - self.glance.images.update.assert_called_once_with( - fakes.ID_OS_IMAGE_1, visibility='public') - - def test_modify_image_attributes_invalid_parameters(self): - image_id = fakes.random_ec2_id('ami') - self.set_mock_db_items({'id': image_id, - 'os_id': None}) - self.assert_execution_error('IncorrectState', - 'ModifyImageAttribute', - {'ImageId': image_id, - 'Attribute': 'kernel'}) - - def _setup_model(self): - self.set_mock_db_items(fakes.DB_IMAGE_1, fakes.DB_IMAGE_2, - fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2, - fakes.DB_IMAGE_AKI_1, fakes.DB_IMAGE_ARI_1, - fakes.DB_VOLUME_1, fakes. 
DB_VOLUME_2) - self.db_api.get_public_items.return_value = [] - - # NOTE(ft): glance.image.list returns an iterator, not just a list - self.glance.images.list.side_effect = ( - lambda: (fakes.OSImage(i) - for i in (fakes.OS_IMAGE_1, fakes.OS_IMAGE_2))) - self.glance.images.get.side_effect = ( - lambda os_id: (fakes.OSImage(fakes.OS_IMAGE_1, from_get=True) - if os_id == fakes.ID_OS_IMAGE_1 else - fakes.OSImage(fakes.OS_IMAGE_2, from_get=True) - if os_id == fakes.ID_OS_IMAGE_2 else - None)) - - -class ImagePrivateTestCase(base.BaseTestCase): - - def test_format_image(self): - image_ids = {fakes.ID_OS_IMAGE_1: fakes.ID_EC2_IMAGE_1, - fakes.ID_OS_IMAGE_AKI_1: fakes.ID_EC2_IMAGE_AKI_1, - fakes.ID_OS_IMAGE_ARI_1: fakes.ID_EC2_IMAGE_ARI_1} - os_image = {'id': fakes.ID_OS_IMAGE_1, - 'owner': fakes.ID_OS_PROJECT, - 'created_at': fakes.TIME_CREATE_IMAGE, - 'visibility': 'private', - 'status': 'active', - 'container_format': 'ami', - 'name': 'fake_name'} - - # check name and location attributes for an unnamed image - os_image['image_location'] = 'location' - os_image['name'] = None - - image = image_api._format_image( - 'fake_context', fakes.DB_IMAGE_1, fakes.OSImage(os_image), - None, image_ids) - - self.assertEqual('location', image['imageLocation']) - self.assertEqual('location', image['name']) - - # check name and location attributes for complete image - os_image['image_location'] = None - os_image['name'] = 'fake_name' - - image = image_api._format_image( - 'fake_context', fakes.DB_IMAGE_1, fakes.OSImage(os_image), - None, image_ids) - - self.assertEqual('None (fake_name)', image['imageLocation']) - self.assertEqual('fake_name', image['name']) - - # check ebs image type for bdm_v2 mapping type - os_image['bdm_v2'] = True - os_image['root_device_name'] = '/dev/vda' - os_image['block_device_mapping'] = [ - {'boot_index': 0, - 'snapshot_id': fakes.ID_OS_SNAPSHOT_2, - 'source_type': 'snapshot', - 'destination_type': 'volume'}] - - image = image_api._format_image( - 
'fake_context', fakes.DB_IMAGE_1, fakes.OSImage(os_image), - None, image_ids, - snapshot_ids={fakes.ID_OS_SNAPSHOT_2: fakes.ID_EC2_SNAPSHOT_2}) - - self.assertEqual('ebs', image['rootDeviceType']) - - # check instance-store image attributes with no any device mappings - os_image['bdm_v2'] = False - os_image['root_device_name'] = '/dev/vda' - os_image['block_device_mapping'] = [] - image = image_api._format_image( - 'fake_context', fakes.DB_IMAGE_1, fakes.OSImage(os_image), - None, None) - - self.assertEqual('instance-store', image['rootDeviceType']) - self.assertNotIn('blockDeviceMapping', image) - - # check Glance status translation - os_image = fakes.OSImage({'id': fakes.ID_OS_IMAGE_1}) - - def check_status_translation(status, expected): - os_image.status = status - image = image_api._format_image( - 'fake_context', fakes.DB_IMAGE_1, os_image, None, None) - self.assertEqual(expected, image['imageState'], - "Wrong '%s' Glance status translation" % status) - check_status_translation('queued', 'pending') - check_status_translation('saving', 'pending') - check_status_translation('active', 'available') - check_status_translation('killed', 'deregistered') - check_status_translation('pending_delete', 'deregistered') - check_status_translation('deleted', 'deregistered') - check_status_translation('deactivated', 'invalid') - check_status_translation('unknown-status', 'error') - - # check internal state translation - os_image.status = 'queued' - - def check_state_translation(state, expected): - os_image.image_state = state - image = image_api._format_image( - 'fake_context', fakes.DB_IMAGE_1, os_image, None, None) - self.assertEqual(expected, image['imageState'], - "Wrong '%s' internal state translation" % state) - - for state in ('downloading', 'decrypting', 'untarring', 'uploading'): - check_state_translation(state, 'pending') - for state in ('failed_download', 'failed_decrypt', 'failed_untar', - 'failed_upload'): - check_state_translation(state, 'failed') - 
os_image.status = 'active' - check_state_translation('available', 'available') - check_state_translation('unknown-state', 'available') - - def test_format_mappings(self): - db_api = self.mock_db() - # check virtual mapping formatting - properties = { - 'mappings': [ - {'virtual': 'ami', 'device': '/dev/sda'}, - {'virtual': 'root', 'device': 'sda'}, - {'virtual': 'ephemeral0', 'device': 'sdb'}, - {'virtual': 'swap', 'device': 'sdc'}, - {'virtual': 'ephemeral1', 'device': 'sdd'}, - {'virtual': 'ephemeral2', 'device': 'sde'}, - {'virtual': 'ephemeral', 'device': 'sdf'}, - {'virtual': '/dev/sdf1', 'device': 'root'}], - } - expected = [ - {'virtualName': 'ephemeral0', 'deviceName': '/dev/sdb'}, - {'virtualName': 'swap', 'deviceName': '/dev/sdc'}, - {'virtualName': 'ephemeral1', 'deviceName': '/dev/sdd'}, - {'virtualName': 'ephemeral2', 'deviceName': '/dev/sde'}, - ] - - result = image_api._format_mappings('fake_context', properties) - self.assertEqual(expected, result) - - # check bdm v2 formatting - db_api.set_mock_items(fakes.DB_IMAGE_2, fakes.DB_VOLUME_3) - properties = { - 'bdm_v2': True, - 'block_device_mapping': [ - {'boot_index': 0, - 'snapshot_id': fakes.ID_OS_SNAPSHOT_1, - 'source_type': 'snapshot', - 'destination_type': 'volume'}, - {'boot_index': None, - 'snapshot_id': fakes.ID_OS_SNAPSHOT_2, - 'source_type': 'snapshot', - 'destination_type': 'volume'}, - {'device_name': 'vdi', - 'boot_index': -1, - 'image_id': fakes.ID_OS_IMAGE_2, - 'source_type': 'image', - 'destination_type': 'volume', - 'volume_size': 20}, - {'device_name': 'vdv', - 'boot_index': -1, - 'volume_id': fakes.ID_OS_VOLUME_3, - 'source_type': 'volume', - 'destination_type': 'volume'}, - {'device_name': 'vdb', - 'boot_index': -1, - 'source_type': 'blank', - 'destination_type': 'volume', - 'volume_size': 100, - 'delete_on_termination': True}, - ], - } - expected = [ - {'deviceName': 'vdx', - 'ebs': {'snapshotId': fakes.ID_EC2_SNAPSHOT_1, - 'deleteOnTermination': False}}, - {'ebs': {'snapshotId': 
fakes.ID_EC2_SNAPSHOT_2, - 'deleteOnTermination': False}}, - {'deviceName': 'vdi', - 'ebs': {'snapshotId': fakes.ID_EC2_IMAGE_2, - 'volumeSize': 20, - 'deleteOnTermination': False}}, - {'deviceName': 'vdv', - 'ebs': {'snapshotId': fakes.ID_EC2_VOLUME_3, - 'deleteOnTermination': False}}, - {'deviceName': 'vdb', - 'ebs': {'volumeSize': 100, - 'deleteOnTermination': True}}, - ] - result = image_api._format_mappings( - 'fake_context', properties, root_device_name='vdx', - snapshot_ids={fakes.ID_OS_SNAPSHOT_1: fakes.ID_EC2_SNAPSHOT_1, - fakes.ID_OS_SNAPSHOT_2: fakes.ID_EC2_SNAPSHOT_2}) - self.assertEqual(expected, result) - - # check inheritance and generation of virtual name - properties = { - 'mappings': [ - {'device': 'vdd', 'virtual': 'ephemeral1'}, - ], - 'bdm_v2': True, - 'block_device_mapping': [ - {'device_name': '/dev/vdb', - 'source_type': 'blank', - 'destination_type': 'local', - 'guest_format': 'swap'}, - {'device_name': 'vdc', - 'source_type': 'blank', - 'destination_type': 'local', - 'volume_size': 5}, - {'device_name': 'vde', - 'source_type': 'blank', - 'destination_type': 'local'}, - ], - } - expected = [ - {'deviceName': '/dev/vdd', 'virtualName': 'ephemeral1'}, - {'deviceName': '/dev/vdb', 'virtualName': 'swap'}, - {'deviceName': 'vdc', 'virtualName': 'ephemeral0'}, - {'deviceName': 'vde', 'virtualName': 'ephemeral2'}, - ] - result = image_api._format_mappings('fake_context', properties) - self.assertEqual(expected, result) - - def test_get_db_items(self): - describer = image_api.ImageDescriber() - describer.context = base.create_context() - - # NOTE(ft): the first requested image appears is user owend and public, - # the second is absent - db_api = self.mock_db() - db_api.set_mock_items(fakes.DB_IMAGE_1) - - describer.ids = set([fakes.ID_EC2_IMAGE_1, fakes.ID_EC2_IMAGE_2]) - self.assertRaises(exception.InvalidAMIIDNotFound, - describer.get_db_items) - - def test_describe_images_being_created(self): - db_api = self.mock_db() - glance = 
self.mock_glance() - context = base.create_context() - image_id = fakes.random_ec2_id('ami') - image = {'id': image_id, - 'os_id': None, - 'is_public': False, - 'description': 'fake desc'} - db_api.set_mock_items(image) - db_api.get_public_items.return_value = [] - - # describe cases when no glance image exists - glance.images.list.return_value = [] - expected = {'imagesSet': [{'imageId': image_id, - 'description': 'fake desc', - 'imageOwnerId': fakes.ID_OS_PROJECT, - 'imageState': 'pending', - 'imageType': 'machine', - 'isPublic': False}]} - - # describe all images - result = image_api.describe_images(context) - self.assertEqual(expected, result) - - # describe the image - result = image_api.describe_images(context, image_id=[image_id]) - self.assertEqual(expected, result) - - # describe with filter - result = image_api.describe_images( - context, filter=[{'name': 'name', 'value': 'noname'}]) - self.assertEqual({'imagesSet': []}, result) - - # describe failed image - image['state'] = 'failed' - expected['imagesSet'][0]['imageState'] = 'failed' - result = image_api.describe_images(base.create_context()) - self.assertEqual(expected, result) - - # describe cases when glance image exists, db item is yet not updated - del image['state'] - os_image_id = fakes.random_os_id() - os_image = {'id': os_image_id, - 'owner': fakes.ID_OS_PROJECT, - 'status': 'active', - 'visibility': 'private', - 'ec2_id': image_id} - glance.images.list.return_value = [fakes.OSImage(os_image)] - expected['imagesSet'] = [{ - 'architecture': None, - 'creationDate': None, - 'description': 'fake desc', - 'imageId': image_id, - 'imageLocation': 'None (None)', - 'imageOwnerId': fakes.ID_OS_PROJECT, - 'imageState': 'available', - 'imageType': 'machine', - 'isPublic': False, - 'name': None, - 'rootDeviceType': 'instance-store'}] - - # describe all images - result = image_api.describe_images(context) - self.assertEqual(expected, result) - db_api.update_item.assert_called_once_with( - context, 
tools.update_dict(image, {'os_id': os_image_id})) - - # describe the image - db_api.reset_mock() - result = image_api.describe_images(context, image_id=[image_id]) - self.assertEqual(expected, result) - db_api.update_item.assert_called_once_with( - context, tools.update_dict(image, {'os_id': os_image_id})) - - -class S3TestCase(base.BaseTestCase): - - def test_s3_parse_manifest(self): - db_api = self.mock_db() - glance = self.mock_glance() - db_api.set_mock_items(fakes.DB_IMAGE_AKI_1, fakes.DB_IMAGE_ARI_1) - glance.images.get.side_effect = ( - tools.get_by_1st_arg_getter({ - fakes.ID_OS_IMAGE_AKI_1: fakes.OSImage(fakes.OS_IMAGE_AKI_1), - fakes.ID_OS_IMAGE_ARI_1: fakes.OSImage(fakes.OS_IMAGE_ARI_1)})) - - metadata, image_parts, key, iv = image_api._s3_parse_manifest( - base.create_context(), AMI_MANIFEST_XML) - - expected_metadata = { - 'disk_format': 'ami', - 'container_format': 'ami', - 'architecture': 'x86_64', - 'kernel_id': fakes.ID_OS_IMAGE_AKI_1, - 'ramdisk_id': fakes.ID_OS_IMAGE_ARI_1, - 'mappings': [ - {"device": "sda1", "virtual": "ami"}, - {"device": "/dev/sda1", "virtual": "root"}, - {"device": "sda2", "virtual": "ephemeral0"}, - {"device": "sda3", "virtual": "swap"}]} - self.assertThat(metadata, - matchers.DictMatches(expected_metadata, - orderless_lists=True)) - self.assertThat(image_parts, - matchers.ListMatches(['foo'])) - self.assertEqual('foo', key) - self.assertEqual('foo', iv) - db_api.get_items_ids.assert_any_call( - mock.ANY, 'aki', item_ids=(fakes.ID_EC2_IMAGE_AKI_1,), - item_os_ids=None) - db_api.get_items_ids.assert_any_call( - mock.ANY, 'ari', item_ids=(fakes.ID_EC2_IMAGE_ARI_1,), - item_os_ids=None) - - def test_s3_create_image_locations(self): - self.configure(image_decryption_dir=None) - glance = self.mock_glance() - _handle, tempf = tempfile.mkstemp() - fake_context = base.create_context() - - @mock.patch('ec2api.api.image._s3_untarzip_image') - @mock.patch('ec2api.api.image._s3_decrypt_image') - 
@mock.patch('ec2api.api.image._s3_download_file') - @mock.patch('ec2api.api.image._s3_conn') - def do_test(s3_conn, s3_download_file, s3_decrypt_image, - s3_untarzip_image): - (s3_conn.return_value. - get_object.return_value) = {'Body': FILE_MANIFEST_XML} - s3_download_file.return_value = tempf - s3_untarzip_image.return_value = tempf - os_image_id = fakes.random_os_id() - (glance.images.create.return_value) = ( - fakes.OSImage({'id': os_image_id, - 'status': 'queued'})) - - data = [ - ({'image_location': 'testbucket_1/test.img.manifest.xml'}, - 'testbucket_1', 'test.img.manifest.xml'), - ({'image_location': '/testbucket_2/test.img.manifest.xml'}, - 'testbucket_2', 'test.img.manifest.xml')] - for mdata, bucket, manifest in data: - image = image_api._s3_create(fake_context, mdata) - eventlet.sleep() - self.glance.images.update.assert_called_with( - os_image_id, image_state='available') - self.glance.images.upload.assert_any_call( - os_image_id, mock.ANY) - s3_conn.return_value.get_object.assert_called_with( - Bucket=bucket, Key=manifest) - s3_download_file.assert_called_with( - mock.ANY, bucket, 'foo', mock.ANY) - s3_decrypt_image.assert_called_with( - fake_context, mock.ANY, 'foo', 'foo', mock.ANY) - s3_untarzip_image.assert_called_with(mock.ANY, mock.ANY) - - do_test() - - @mock.patch('ec2api.api.image.eventlet.spawn_n') - def test_s3_create_bdm(self, spawn_n): - glance = self.mock_glance() - metadata = {'image_location': 'fake_bucket/fake_manifest', - 'root_device_name': '/dev/sda1', - 'block_device_mapping': [ - {'device_name': '/dev/sda1', - 'snapshot_id': fakes.ID_OS_SNAPSHOT_1, - 'delete_on_termination': True}, - {'device_name': '/dev/sda2', - 'virtual_name': 'ephemeral0'}, - {'device_name': '/dev/sdb0', - 'no_device': True}]} - fake_context = base.create_context() - with mock.patch('ec2api.api.image._s3_conn') as s3_conn: - - (s3_conn.return_value. 
- get_object.return_value) = {'Body': FILE_MANIFEST_XML} - - image_api._s3_create(fake_context, metadata) - - glance.images.create.assert_called_once_with( - disk_format='ami', container_format='ami', - visibility='private', architecture='x86_64', - image_state='pending', root_device_name='/dev/sda1', - block_device_mapping=[{'device_name': '/dev/sda1', - 'snapshot_id': fakes.ID_OS_SNAPSHOT_1, - 'delete_on_termination': True}, - {'device_name': '/dev/sda2', - 'virtual_name': 'ephemeral0'}, - {'device_name': '/dev/sdb0', - 'no_device': True}], - image_location='fake_bucket/fake_manifest') - - def test_s3_malicious_tarballs(self): - self.assertRaises( - exception.EC2InvalidException, - image_api._s3_test_for_malicious_tarball, - "/unused", os.path.join(os.path.dirname(__file__), 'abs.tar.gz')) - self.assertRaises( - exception.EC2InvalidException, - image_api._s3_test_for_malicious_tarball, - "/unused", os.path.join(os.path.dirname(__file__), 'rel.tar.gz')) - - def test_decrypt_text(self): - public_key = os.path.join(os.path.dirname(__file__), 'test_cert.pem') - private_key = os.path.join(os.path.dirname(__file__), - 'test_private_key.pem') - subject = "/C=RU/ST=Moscow/L=Moscow/O=Progmatic/CN=RootCA" - certificate_file = processutils.execute('openssl', - 'req', '-x509', '-new', - '-key', private_key, - '-days', '365', - '-out', public_key, - '-subj', subject) - text = "some @#!%^* test text" - process_input = text.encode("ascii") - enc, _err = processutils.execute('openssl', - 'rsautl', - '-certin', - '-encrypt', - '-inkey', public_key, - process_input=process_input, - binary=True) - self.assertRaises(exception.EC2Exception, image_api._decrypt_text, enc) - self.configure(x509_root_private_key=private_key) - dec = image_api._decrypt_text(enc) - self.assertIsInstance(dec, bytes) - dec = dec.decode('ascii') - self.assertEqual(text, dec) diff --git a/ec2api/tests/unit/test_instance.py b/ec2api/tests/unit/test_instance.py deleted file mode 100644 index b9393a29..00000000 
--- a/ec2api/tests/unit/test_instance.py +++ /dev/null @@ -1,2137 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 -import copy -import datetime -import itertools -import random -from unittest import mock - -from novaclient import exceptions as nova_exception - -from ec2api.api import instance as instance_api -import ec2api.clients -from ec2api import exception -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class InstanceTestCase(base.ApiTestCase): - - def setUp(self): - super(InstanceTestCase, self).setUp() - self.network_interface_api = self.mock( - 'ec2api.api.instance.network_interface_api') - self.address_api = self.mock('ec2api.api.address') - self.security_group_api = self.mock( - 'ec2api.api.instance.security_group_api') - self.utils_generate_uid = self.mock( - 'ec2api.api.instance._utils_generate_uid') - - self.fake_flavor = mock.Mock() - self.fake_flavor.configure_mock(name='fake_flavor', - id='fakeFlavorId') - self.nova.flavors.get.return_value = self.fake_flavor - self.nova.flavors.list.return_value = [self.fake_flavor] - - @mock.patch('ec2api.api.instance.describe_instances') - @mock.patch('ec2api.api.instance.InstanceEngineNeutron.' 
- 'get_vpc_default_security_group_id') - def test_run_instances(self, get_vpc_default_security_group_id, - describe_instances): - """Run instance with various network interface settings.""" - self.set_mock_db_items( - fakes.DB_SUBNET_1, fakes.DB_NETWORK_INTERFACE_1, fakes.DB_IMAGE_1) - self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_1) - self.network_interface_api.create_network_interface.return_value = ( - {'networkInterface': fakes.EC2_NETWORK_INTERFACE_1}) - - self.db_api.add_item.return_value = fakes.DB_INSTANCE_1 - self.nova.servers.create.return_value = ( - fakes.OSInstance({ - 'id': fakes.ID_OS_INSTANCE_1, - 'flavor': {'id': 'fakeFlavorId'}, - 'image': {'id': fakes.ID_OS_IMAGE_1}})) - self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1 - - get_vpc_default_security_group_id.return_value = None - - def do_check(params, create_network_interface_kwargs=None, - delete_on_termination=None): - delete_port_on_termination = ( - create_network_interface_kwargs is not None - if delete_on_termination is None - else delete_on_termination) - eni = fakes.gen_ec2_network_interface( - fakes.ID_EC2_NETWORK_INTERFACE_1, - fakes.EC2_SUBNET_1, - [fakes.IP_NETWORK_INTERFACE_1], - description=fakes.DESCRIPTION_NETWORK_INTERFACE_1, - ec2_instance_id=fakes.ID_EC2_INSTANCE_1, - device_index=0, - delete_on_termination=delete_port_on_termination) - expected_reservation = fakes.gen_ec2_reservation( - fakes.ID_EC2_RESERVATION_1, - [tools.patch_dict( - fakes.gen_ec2_instance( - fakes.ID_EC2_INSTANCE_1, - private_ip_address=fakes.IP_NETWORK_INTERFACE_1, - ec2_network_interfaces=[eni], - image_id=fakes.ID_EC2_IMAGE_1, - reservation_id=fakes.ID_EC2_RESERVATION_1), - {'privateDnsName': None}, - ['rootDeviceType', 'rootDeviceName'])]) - describe_instances.return_value = { - 'reservationSet': [expected_reservation]} - - params.update({'ImageId': fakes.ID_EC2_IMAGE_1, - 'InstanceType': 'fake_flavor', - 'MinCount': '1', 'MaxCount': '1'}) - resp = 
self.execute('RunInstances', params) - - self.assertThat(resp, matchers.DictMatches(expected_reservation)) - if create_network_interface_kwargs is not None: - (self.network_interface_api. - create_network_interface.assert_called_once_with( - mock.ANY, fakes.ID_EC2_SUBNET_1, - **create_network_interface_kwargs)) - self.nova.servers.create.assert_called_once_with( - fakes.EC2_INSTANCE_1['privateDnsName'], - fakes.ID_OS_IMAGE_1, self.fake_flavor, - min_count=1, max_count=1, - availability_zone=None, - block_device_mapping_v2=[], - security_groups=None, - nics=[{'port-id': fakes.ID_OS_PORT_1}], - key_name=None, userdata=None) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'i', tools.purge_dict(fakes.DB_INSTANCE_1, ('id',))) - (self.network_interface_api. - _attach_network_interface_item.assert_called_once_with( - mock.ANY, fakes.DB_NETWORK_INTERFACE_1, - fakes.ID_EC2_INSTANCE_1, 0, - delete_on_termination=delete_port_on_termination)) - describe_instances.assert_called_once_with( - mock.ANY, [fakes.ID_EC2_INSTANCE_1]) - - self.network_interface_api.reset_mock() - self.nova.servers.reset_mock() - self.db_api.reset_mock() - describe_instances.reset_mock() - - do_check({'SubnetId': fakes.ID_EC2_SUBNET_1}, - create_network_interface_kwargs={}) - do_check({'SubnetId': fakes.ID_EC2_SUBNET_1, - 'SecurityGroupId.1': fakes.ID_EC2_SECURITY_GROUP_1, - 'SecurityGroupId.2': fakes.ID_EC2_SECURITY_GROUP_2}, - create_network_interface_kwargs={ - 'security_group_id': [fakes.ID_EC2_SECURITY_GROUP_1, - fakes.ID_EC2_SECURITY_GROUP_2]}) - do_check({'SubnetId': fakes.ID_EC2_SUBNET_1, - 'PrivateIpAddress': fakes.IP_FIRST_SUBNET_1}, - create_network_interface_kwargs={ - 'private_ip_address': fakes.IP_FIRST_SUBNET_1}) - - do_check({'NetworkInterface.1.DeviceIndex': '0', - 'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1, - 'NetworkInterface.1.SecurityGroupId.1': ( - fakes.ID_EC2_SECURITY_GROUP_1), - 'NetworkInterface.1.PrivateIpAddress.1': ( - fakes.IP_FIRST_SUBNET_1)}, - 
create_network_interface_kwargs={ - 'security_group_id': [fakes.ID_EC2_SECURITY_GROUP_1], - 'private_ip_address': [fakes.IP_FIRST_SUBNET_1]}) - - do_check({'NetworkInterface.1.DeviceIndex': '0', - 'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1, - 'NetworkInterface.1.DeleteOnTermination': 'False'}, - create_network_interface_kwargs={}, - delete_on_termination=False) - do_check({'NetworkInterface.1.DeviceIndex': '0', - 'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1, - 'NetworkInterface.1.SecurityGroupId.1': ( - fakes.ID_EC2_SECURITY_GROUP_1), - 'NetworkInterface.1.DeleteOnTermination': 'False'}, - create_network_interface_kwargs={ - 'security_group_id': [fakes.ID_EC2_SECURITY_GROUP_1]}, - delete_on_termination=False) - - do_check({'NetworkInterface.1.DeviceIndex': '0', - 'NetworkInterface.1.NetworkInterfaceId': ( - fakes.ID_EC2_NETWORK_INTERFACE_1)}) - - @mock.patch('ec2api.api.instance.describe_instances') - @mock.patch('ec2api.api.instance.InstanceEngineNeutron.' - 'get_vpc_default_security_group_id') - def test_run_instances_multiple_networks(self, - get_vpc_default_security_group_id, - describe_instances): - """Run 2 instances at once on 2 subnets in all combinations.""" - self._build_multiple_data_model() - - self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_1) - get_vpc_default_security_group_id.return_value = None - - ec2_instances = [ - tools.patch_dict( - fakes.gen_ec2_instance( - ec2_instance_id, launch_index=l_i, - ec2_network_interfaces=eni_pair, - reservation_id=fakes.ID_EC2_RESERVATION_1), - {'privateDnsName': None}, - ['rootDeviceType', 'rootDeviceName']) - for l_i, (ec2_instance_id, eni_pair) in enumerate(zip( - self.IDS_EC2_INSTANCE, - zip(*[iter(self.EC2_ATTACHED_ENIS)] * 2)))] - ec2_reservation = fakes.gen_ec2_reservation(fakes.ID_EC2_RESERVATION_1, - ec2_instances) - describe_instances.return_value = {'reservationSet': [ec2_reservation]} - - self.set_mock_db_items( - fakes.DB_IMAGE_1, fakes.DB_SUBNET_1, 
fakes.DB_SUBNET_2, - *self.DB_DETACHED_ENIS) - self.network_interface_api.create_network_interface.side_effect = ( - [{'networkInterface': eni} - for eni in self.EC2_DETACHED_ENIS]) - self.nova.servers.create.side_effect = [ - fakes.OSInstance({ - 'id': os_instance_id, - 'flavor': {'id': 'fakeFlavorId'}}) - for os_instance_id in self.IDS_OS_INSTANCE] - self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1 - self.db_api.add_item.side_effect = self.DB_INSTANCES - - resp = self.execute( - 'RunInstances', - {'ImageId': fakes.ID_EC2_IMAGE_1, - 'InstanceType': 'fake_flavor', - 'MinCount': '2', - 'MaxCount': '2', - 'NetworkInterface.1.DeviceIndex': '0', - 'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1, - 'NetworkInterface.2.DeviceIndex': '1', - 'NetworkInterface.2.SubnetId': fakes.ID_EC2_SUBNET_2, - 'NetworkInterface.2.DeleteOnTermination': 'False'}) - - self.assertThat(resp, matchers.DictMatches(ec2_reservation), - verbose=True) - - self.network_interface_api.create_network_interface.assert_has_calls([ - mock.call(mock.ANY, ec2_subnet_id) - for ec2_subnet_id in self.IDS_EC2_SUBNET_BY_PORT]) - self.nova.servers.create.assert_has_calls([ - mock.call( - '%s-%s' % (fakes.ID_EC2_RESERVATION_1, launch_index), - fakes.ID_OS_IMAGE_1, self.fake_flavor, - min_count=1, max_count=1, - availability_zone=None, - block_device_mapping_v2=[], - security_groups=None, - nics=[{'port-id': port_id} - for port_id in port_ids], - key_name=None, userdata=None) - for launch_index, port_ids in enumerate( - zip(*[iter(self.IDS_OS_PORT)] * 2))]) - (self.network_interface_api. 
- _attach_network_interface_item.assert_has_calls([ - mock.call(mock.ANY, eni, ec2_instance_id, dev_ind, - delete_on_termination=dot) - for eni, ec2_instance_id, dev_ind, dot in zip( - self.DB_DETACHED_ENIS, - itertools.chain(*map(lambda i: [i] * 2, - self.IDS_EC2_INSTANCE)), - [0, 1] * 2, - [True, False, True, False])])) - self.db_api.add_item.assert_has_calls([ - mock.call(mock.ANY, 'i', tools.purge_dict(db_instance, ['id'])) - for db_instance in self.DB_INSTANCES]) - - @mock.patch('ec2api.api.instance._parse_block_device_mapping') - @mock.patch('ec2api.api.instance.describe_instances') - @mock.patch('ec2api.api.instance.InstanceEngineNeutron.' - 'get_ec2_classic_os_network') - def test_run_instances_other_parameters(self, get_ec2_classic_os_network, - describe_instances, - parse_block_device_mapping): - self.set_mock_db_items(fakes.DB_IMAGE_1) - self.glance.images.get.side_effect = ( - tools.get_by_1st_arg_getter({ - fakes.ID_OS_IMAGE_1: fakes.OSImage(fakes.OS_IMAGE_1)})) - get_ec2_classic_os_network.return_value = {'id': fakes.random_os_id()} - user_data = base64.b64decode(fakes.USER_DATA_INSTANCE_2) - parse_block_device_mapping.return_value = [] - - def do_check(extra_kwargs={}, extra_db_instance={}): - describe_instances.side_effect = [ - {'reservationSet': []}, - {'reservationSet': [{'foo': 'bar'}]}] - - self.execute( - 'RunInstances', - {'ImageId': fakes.ID_EC2_IMAGE_1, - 'InstanceType': 'fake_flavor', - 'MinCount': '1', 'MaxCount': '1', - 'SecurityGroup.1': 'default', - 'Placement.AvailabilityZone': 'fake_zone', - 'ClientToken': 'fake_client_token', - 'BlockDeviceMapping.1.DeviceName': '/dev/vdd', - 'BlockDeviceMapping.1.Ebs.SnapshotId': ( - fakes.ID_EC2_SNAPSHOT_1), - 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'False', - 'UserData': fakes.USER_DATA_INSTANCE_2}) - - self.nova.servers.create.assert_called_once_with( - mock.ANY, mock.ANY, mock.ANY, min_count=1, max_count=1, - userdata=user_data, key_name=None, - block_device_mapping_v2=[], - 
availability_zone='fake_zone', security_groups=['default'], - **extra_kwargs) - self.nova.servers.reset_mock() - db_instance = {'os_id': mock.ANY, - 'vpc_id': None, - 'reservation_id': mock.ANY, - 'launch_index': 0, - 'client_token': 'fake_client_token'} - db_instance.update(extra_db_instance) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'i', db_instance) - self.db_api.reset_mock() - parse_block_device_mapping.assert_called_once_with( - mock.ANY, - [{'device_name': '/dev/vdd', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1, - 'delete_on_termination': False}}]) - parse_block_device_mapping.reset_mock() - - do_check( - extra_kwargs={ - 'nics': [ - {'net-id': get_ec2_classic_os_network.return_value['id']}], - }, - extra_db_instance={'vpc_id': None}) - - @mock.patch('ec2api.api.instance.describe_instances') - def test_idempotent_run(self, describe_instances): - self.set_mock_db_items() - - # NOTE(ft): check select corresponding instance by client_token - describe_instances.return_value = { - 'reservationSet': [{'key': 'value'}]} - - resp = self.execute('RunInstances', - {'MinCount': '1', 'MaxCount': '1', - 'ImageId': fakes.ID_EC2_IMAGE_1, - 'InstanceType': 'fake_flavor', - 'ClientToken': 'client-token-1'}) - self.assertEqual({'key': 'value'}, resp) - describe_instances.assert_called_once_with( - mock.ANY, filter=[{'name': 'client-token', - 'value': ['client-token-1']}]) - - # NOTE(ft): check pass to general run_instances logic if no - # corresponding client_token is found - describe_instances.return_value = {'reservationSet': []} - - self.assert_execution_error( - 'InvalidAMIID.NotFound', 'RunInstances', - {'MinCount': '1', 'MaxCount': '1', - 'ImageId': fakes.ID_EC2_IMAGE_1, - 'InstanceType': 'fake_flavor', - 'ClientToken': 'client-token-2'}) - - def test_run_instances_rollback(self): - self.set_mock_db_items(fakes.DB_IMAGE_1, fakes.DB_SUBNET_1, - fakes.DB_NETWORK_INTERFACE_1) - self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_1) - - 
self.network_interface_api.create_network_interface.return_value = ( - {'networkInterface': fakes.EC2_NETWORK_INTERFACE_1}) - self.db_api.add_item.return_value = fakes.DB_INSTANCE_1 - self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1 - self.nova.servers.create.return_value = ( - fakes.OSInstance({'id': fakes.ID_OS_INSTANCE_1, - 'flavor': {'id': 'fakeFlavorId'}, - 'image': {'id': fakes.ID_OS_IMAGE_1}})) - (self.network_interface_api. - _attach_network_interface_item.side_effect) = Exception() - - @tools.screen_unexpected_exception_logs - def do_check(params, new_port=True): - mock_manager = mock.MagicMock() - mock_manager.attach_mock(self.network_interface_api, - 'network_interface_api') - mock_manager.attach_mock(self.neutron, 'neutron') - mock_manager.attach_mock(self.nova.servers, 'nova_servers') - - params.update({'ImageId': fakes.ID_EC2_IMAGE_1, - 'InstanceType': 'fake_flavor', - 'MinCount': '1', 'MaxCount': '1'}) - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'RunInstances', params) - - calls = [mock.call.nova_servers.delete(fakes.ID_OS_INSTANCE_1)] - if new_port: - calls.append( - mock.call.network_interface_api.delete_network_interface( - mock.ANY, - network_interface_id=fakes.ID_EC2_NETWORK_INTERFACE_1)) - mock_manager.assert_has_calls(calls) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, fakes.ID_EC2_INSTANCE_1) - - self.network_interface_api.reset_mock() - self.neutron.reset_mock() - self.nova.servers.reset_mock() - self.db_api.reset_mock() - - do_check({'SubnetId': fakes.ID_EC2_SUBNET_1}) - - do_check({'NetworkInterface.1.DeviceIndex': '0', - 'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1}) - - do_check({'NetworkInterface.1.DeviceIndex': '0', - 'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1, - 'NetworkInterface.1.DeleteOnTermination': 'False'}) - - do_check({'NetworkInterface.1.DeviceIndex': '0', - 'NetworkInterface.1.NetworkInterfaceId': ( - fakes.ID_EC2_NETWORK_INTERFACE_1)}, - new_port=False) - - 
@mock.patch('ec2api.api.instance.describe_instances') - def test_run_instances_multiply_rollback(self, describe_instances): - instances = [{'id': fakes.random_ec2_id('i'), - 'os_id': fakes.random_os_id()} - for dummy in range(3)] - os_instances = [fakes.OSInstance({'id': inst['os_id']}) - for inst in instances] - self.nova_admin.servers.list.return_value = os_instances[:2] - network_interfaces = [{'id': fakes.random_ec2_id('eni'), - 'os_id': fakes.random_os_id()} - for dummy in range(3)] - - self.set_mock_db_items(fakes.DB_IMAGE_1, fakes.DB_SUBNET_1, - *network_interfaces) - self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_1) - - self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1 - - def do_check(): - self.network_interface_api.create_network_interface.side_effect = [ - {'networkInterface': {'networkInterfaceId': eni['id']}} - for eni in network_interfaces] - self.db_api.add_item.side_effect = instances - self.nova.servers.create.side_effect = os_instances - expected_reservation = { - 'reservationId': fakes.ID_EC2_RESERVATION_1, - 'instancesSet': [{'instanceId': inst['id']} - for inst in instances[:2]]} - describe_instances.return_value = { - 'reservationSet': [expected_reservation]} - - resp = self.execute('RunInstances', - {'ImageId': fakes.ID_EC2_IMAGE_1, - 'InstanceType': 'fake_flavor', - 'MinCount': '2', 'MaxCount': '3', - 'SubnetId': fakes.ID_EC2_SUBNET_1}) - self.assertThat(resp, matchers.DictMatches(expected_reservation)) - - self.nova.servers.delete.assert_called_once_with( - instances[2]['os_id']) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, instances[2]['id']) - - self.nova.servers.reset_mock() - self.db_api.reset_mock() - - (self.network_interface_api. - _attach_network_interface_item.side_effect) = [ - None, None, Exception()] - with tools.ScreeningLogger(log_name='ec2api.api'): - do_check() - (self.network_interface_api.delete_network_interface. 
- assert_called_once_with( - mock.ANY, network_interface_id=network_interfaces[2]['id'])) - - def test_run_instances_invalid_parameters(self): - self.assert_execution_error('InvalidParameterValue', 'RunInstances', - {'ImageId': fakes.ID_EC2_IMAGE_1, - 'MinCount': '0', 'MaxCount': '0'}) - - self.assert_execution_error('InvalidParameterValue', 'RunInstances', - {'ImageId': fakes.ID_EC2_IMAGE_1, - 'MinCount': '1', 'MaxCount': '0'}) - - self.assert_execution_error('InvalidParameterValue', 'RunInstances', - {'ImageId': fakes.ID_EC2_IMAGE_1, - 'MinCount': '0', 'MaxCount': '1'}) - - self.assert_execution_error('InvalidParameterValue', 'RunInstances', - {'ImageId': fakes.ID_EC2_IMAGE_1, - 'MinCount': '2', 'MaxCount': '1'}) - - @mock.patch('ec2api.api.ec2utils.check_and_create_default_vpc') - @mock.patch('ec2api.api.instance.describe_instances') - @mock.patch('ec2api.api.instance.InstanceEngineNeutron.' - 'get_vpc_default_security_group_id') - def test_run_instances_without_network_parameters( - self, get_vpc_default_security_group_id, describe_instances, - check_and_create): - """Run instance without network interface settings.""" - self.configure(disable_ec2_classic=True) - self.set_mock_db_items(fakes.DB_IMAGE_2, - fakes.DB_SUBNET_DEFAULT, - fakes.DB_NETWORK_INTERFACE_DEFAULT) - - check_and_create.return_value = fakes.DB_VPC_DEFAULT - - self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_2) - self.network_interface_api.create_network_interface.return_value = ( - {'networkInterface': fakes.EC2_NETWORK_INTERFACE_DEFAULT}) - - self.db_api.add_item.return_value = fakes.DB_INSTANCE_DEFAULT - self.nova.servers.create.return_value = ( - fakes.OSInstance({ - 'id': fakes.ID_OS_INSTANCE_DEFAULT, - 'flavor': {'id': 'fakeFlavorId'}, - 'image': {'id': fakes.ID_OS_IMAGE_2}})) - self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_DEFAULT - - get_vpc_default_security_group_id.return_value = None - - describe_instances.return_value = { - 'reservationSet': 
[fakes.EC2_RESERVATION_DEFAULT]} - - params = {'ImageId': fakes.ID_EC2_IMAGE_2, - 'InstanceType': 'fake_flavor', - 'MinCount': '1', 'MaxCount': '1'} - resp = self.execute('RunInstances', params) - - self.assertThat(resp, matchers.DictMatches( - fakes.EC2_RESERVATION_DEFAULT)) - check_and_create.assert_called_once_with(mock.ANY) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'i', - tools.purge_dict(fakes.DB_INSTANCE_DEFAULT, ('id',))) - self.nova.servers.create.assert_called_once_with( - fakes.EC2_INSTANCE_DEFAULT['privateDnsName'], - fakes.ID_OS_IMAGE_2, self.fake_flavor, - min_count=1, max_count=1, - availability_zone=None, - block_device_mapping_v2=[], - security_groups=None, - nics=[{'port-id': fakes.ID_OS_PORT_DEFAULT}], - key_name=None, userdata=None) - (self.network_interface_api.create_network_interface. - assert_called_once_with(mock.ANY, fakes.ID_EC2_SUBNET_DEFAULT)) - (self.network_interface_api._attach_network_interface_item. - assert_called_once_with( - mock.ANY, fakes.DB_NETWORK_INTERFACE_DEFAULT, - fakes.ID_EC2_INSTANCE_DEFAULT, 0, - delete_on_termination=True)) - - def test_run_instances_inconsistent_default_vpc(self): - """Run instance without network interface settings. 
""" - """No default vpc""" - self.configure(disable_ec2_classic=True) - self.set_mock_db_items(fakes.DB_IMAGE_2) - self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_2) - - params = {'ImageId': fakes.ID_EC2_IMAGE_2, - 'InstanceType': 'fake_flavor', - 'MinCount': '1', 'MaxCount': '1'} - - with mock.patch('ec2api.api.ec2utils.check_and_create_default_vpc' - ) as check_and_create: - check_and_create.return_value = None - self.assert_execution_error('VPCIdNotSpecified', - 'RunInstances', params) - - self.add_mock_db_items(fakes.DB_VPC_DEFAULT) - self.assert_execution_error('MissingInput', 'RunInstances', params) - - @mock.patch.object(fakes.OSInstance, 'delete', autospec=True) - @mock.patch.object(fakes.OSInstance, 'get', autospec=True) - def test_terminate_instances(self, os_instance_get, os_instance_delete): - """Terminate 2 instances in one request.""" - self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2) - os_instances = [fakes.OSInstance(fakes.OS_INSTANCE_1), - fakes.OSInstance(fakes.OS_INSTANCE_2)] - self.nova.servers.get.side_effect = os_instances - - resp = self.execute('TerminateInstances', - {'InstanceId.1': fakes.ID_EC2_INSTANCE_1, - 'InstanceId.2': fakes.ID_EC2_INSTANCE_2}) - - fake_state_change = {'previousState': {'code': 0, - 'name': 'pending'}, - 'currentState': {'code': 0, - 'name': 'pending'}} - self.assertThat( - resp, - matchers.DictMatches( - {'instancesSet': [ - tools.update_dict({'instanceId': fakes.ID_EC2_INSTANCE_1}, - fake_state_change), - tools.update_dict({'instanceId': fakes.ID_EC2_INSTANCE_2}, - fake_state_change)]})) - self.assertEqual(2, self.nova.servers.get.call_count) - self.nova.servers.get.assert_any_call(fakes.ID_OS_INSTANCE_1) - self.nova.servers.get.assert_any_call(fakes.ID_OS_INSTANCE_2) - self.assertFalse(self.db_api.delete_item.called) - self.assertEqual(2, os_instance_delete.call_count) - self.assertEqual(2, os_instance_get.call_count) - for call_num, inst_id in enumerate(os_instances): - 
self.assertEqual(mock.call(inst_id), - os_instance_delete.call_args_list[call_num]) - self.assertEqual(mock.call(inst_id), - os_instance_get.call_args_list[call_num]) - - def test_terminate_instances_multiple_networks(self): - """Terminate an instance with various combinations of ports.""" - self._build_multiple_data_model() - - fake_state_change = {'previousState': {'code': 16, - 'name': 'running'}, - 'currentState': {'code': 16, - 'name': 'running'}} - ec2_terminate_instances_result = { - 'instancesSet': [ - tools.update_dict({'instanceId': fakes.ID_EC2_INSTANCE_1}, - fake_state_change), - tools.update_dict({'instanceId': fakes.ID_EC2_INSTANCE_2}, - fake_state_change)]} - self.nova.servers.get.side_effect = ( - lambda ec2_id: fakes.OSInstance({'id': ec2_id, - 'vm_state': 'active'})) - - self.set_mock_db_items(*self.DB_INSTANCES) - - resp = self.execute('TerminateInstances', - {'InstanceId.1': fakes.ID_EC2_INSTANCE_1, - 'InstanceId.2': fakes.ID_EC2_INSTANCE_2}) - - self.assertThat( - resp, matchers.DictMatches(ec2_terminate_instances_result)) - self.assertFalse(self.db_api.delete_item.called) - - def test_terminate_instances_invalid_parameters(self): - self.assert_execution_error( - 'InvalidInstanceID.NotFound', 'TerminateInstances', - {'InstanceId.1': fakes.random_ec2_id('i')}) - - @mock.patch('ec2api.api.instance._get_os_instances_by_instances') - def _test_instances_operation(self, operation, os_instance_operation, - valid_state, invalid_state, - get_os_instances_by_instances): - os_instance_1 = fakes.OSInstance(fakes.OS_INSTANCE_1) - os_instance_2 = fakes.OSInstance(fakes.OS_INSTANCE_2) - for inst in (os_instance_1, os_instance_2): - setattr(inst, 'OS-EXT-STS:vm_state', valid_state) - - self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2) - get_os_instances_by_instances.return_value = [os_instance_1, - os_instance_2] - - resp = self.execute(operation, - {'InstanceId.1': fakes.ID_EC2_INSTANCE_1, - 'InstanceId.2': fakes.ID_EC2_INSTANCE_2}) - 
self.assertEqual({'return': True}, resp) - self.assertEqual([mock.call(os_instance_1), mock.call(os_instance_2)], - os_instance_operation.mock_calls) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_INSTANCE_1, fakes.ID_EC2_INSTANCE_2])) - get_os_instances_by_instances.assert_called_once_with( - mock.ANY, [fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2], exactly=True) - - setattr(os_instance_2, 'OS-EXT-STS:vm_state', invalid_state) - os_instance_operation.reset_mock() - self.assert_execution_error('IncorrectInstanceState', 'StartInstances', - {'InstanceId.1': fakes.ID_EC2_INSTANCE_1, - 'InstanceId.2': fakes.ID_EC2_INSTANCE_2}) - self.assertEqual(0, os_instance_operation.call_count) - - @mock.patch.object(fakes.OSInstance, 'start', autospec=True) - def test_start_instances(self, os_instance_start): - self._test_instances_operation('StartInstances', os_instance_start, - instance_api.vm_states_STOPPED, - instance_api.vm_states_ACTIVE) - - @mock.patch.object(fakes.OSInstance, 'stop', autospec=True) - def test_stop_instances(self, os_instance_stop): - self._test_instances_operation('StopInstances', os_instance_stop, - instance_api.vm_states_ACTIVE, - instance_api.vm_states_STOPPED) - - @mock.patch.object(fakes.OSInstance, 'reboot', autospec=True) - def test_reboot_instances(self, os_instance_reboot): - self._test_instances_operation('RebootInstances', os_instance_reboot, - instance_api.vm_states_ACTIVE, - instance_api.vm_states_BUILDING) - - @mock.patch('oslo_utils.timeutils.utcnow') - def _test_instance_get_operation(self, operation, getter, key, utcnow): - self.set_mock_db_items(fakes.DB_INSTANCE_2) - os_instance_2 = fakes.OSInstance(fakes.OS_INSTANCE_2) - self.nova.servers.get.return_value = os_instance_2 - getter.return_value = 'fake_data' - utcnow.return_value = datetime.datetime(2015, 1, 19, 23, 34, 45, 123) - resp = self.execute(operation, - {'InstanceId': fakes.ID_EC2_INSTANCE_2}) - expected_data = 
(base64.b64encode(getter.return_value. - encode("latin-1")) - .decode("utf-8")) - self.assertEqual({'instanceId': fakes.ID_EC2_INSTANCE_2, - 'timestamp': '2015-01-19T23:34:45.000Z', - key: expected_data}, - resp) - self.db_api.get_item_by_id.assert_called_once_with( - mock.ANY, fakes.ID_EC2_INSTANCE_2) - self.nova.servers.get.assert_called_once_with(fakes.ID_OS_INSTANCE_2) - getter.assert_called_once_with(os_instance_2) - - @mock.patch.object(fakes.OSInstance, 'get_password', autospec=True) - def test_get_password_data(self, get_password): - self._test_instance_get_operation('GetPasswordData', - get_password, 'passwordData') - - @mock.patch.object(fakes.OSInstance, 'get_console_output', autospec=True) - def test_console_output(self, get_console_output): - self._test_instance_get_operation('GetConsoleOutput', - get_console_output, 'output') - - def test_describe_instances(self): - """Describe 2 instances, one of which is vpc instance.""" - self.set_mock_db_items( - fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2, - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2, - fakes.DB_IMAGE_1, fakes.DB_IMAGE_2, - fakes.DB_IMAGE_ARI_1, fakes.DB_IMAGE_AKI_1, - fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, fakes.DB_VOLUME_3) - self.nova_admin.servers.list.return_value = [ - fakes.OSInstance_full(fakes.OS_INSTANCE_1), - fakes.OSInstance_full(fakes.OS_INSTANCE_2)] - self.nova_admin.servers.get.return_value = ( - fakes.OSInstance_full(fakes.OS_INSTANCE_1)) - self.cinder.volumes.list.return_value = [ - fakes.OSVolume(fakes.OS_VOLUME_1), - fakes.OSVolume(fakes.OS_VOLUME_2), - fakes.OSVolume(fakes.OS_VOLUME_3)] - self.network_interface_api.describe_network_interfaces.side_effect = ( - lambda *args, **kwargs: copy.deepcopy({ - 'networkInterfaceSet': [fakes.EC2_NETWORK_INTERFACE_1, - fakes.EC2_NETWORK_INTERFACE_2]})) - self.security_group_api.describe_security_groups.return_value = { - 'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_1, - fakes.EC2_SECURITY_GROUP_3]} - - resp = 
self.execute('DescribeInstances', {}) - - self.assertThat(resp, matchers.DictMatches( - {'reservationSet': [fakes.EC2_RESERVATION_1, - fakes.EC2_RESERVATION_2]}, - orderless_lists=True)) - self.nova_admin.servers.list.assert_called_once_with( - search_opts={'all_tenants': True, - 'project_id': fakes.ID_OS_PROJECT}) - self.cinder.volumes.list.assert_called_once_with(search_opts=None) - - self.nova_admin.reset_mock() - self.db_api.get_items_by_ids = tools.CopyingMock( - return_value=[fakes.DB_INSTANCE_1]) - resp = self.execute('DescribeInstances', - {'InstanceId.1': fakes.ID_EC2_INSTANCE_1}) - self.assertThat(resp, matchers.DictMatches( - {'reservationSet': [fakes.EC2_RESERVATION_1]}, - orderless_lists=True)) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_INSTANCE_1])) - (self.network_interface_api.describe_network_interfaces. - assert_called_with(mock.ANY)) - self.assertFalse(self.nova_admin.servers.list.called) - self.nova_admin.servers.get.assert_called_once_with( - fakes.ID_OS_INSTANCE_1) - - self.check_filtering( - 'DescribeInstances', 'reservationSet', - [('availability-zone', fakes.NAME_AVAILABILITY_ZONE), - ('block-device-mapping.delete-on-termination', False), - ('block-device-mapping.device-name', - fakes.ROOT_DEVICE_NAME_INSTANCE_2), - ('block-device-mapping.status', 'attached'), - ('block-device-mapping.volume-id', fakes.ID_EC2_VOLUME_2), - ('client-token', fakes.CLIENT_TOKEN_INSTANCE_2), - # TODO(ft): support filtering by none/empty value - # ('dns-name', ''), - ('group-id', fakes.ID_EC2_SECURITY_GROUP_1), - ('group-name', fakes.NAME_DEFAULT_OS_SECURITY_GROUP), - ('image-id', fakes.ID_EC2_IMAGE_1), - ('instance-id', fakes.ID_EC2_INSTANCE_2), - ('instance-state-code', 0), - ('instance-state-name', 'pending'), - ('instance-type', 'fake_flavor'), - ('instance.group-id', fakes.ID_EC2_SECURITY_GROUP_1), - ('instance.group-name', fakes.NAME_DEFAULT_OS_SECURITY_GROUP), - ('ip-address', fakes.IP_ADDRESS_2), - ('kernel-id', 
fakes.ID_EC2_IMAGE_AKI_1), - ('key-name', fakes.NAME_KEY_PAIR), - ('launch-index', 0), - ('launch-time', fakes.TIME_CREATE_INSTANCE_2), - ('owner-id', fakes.ID_OS_PROJECT), - ('private-dns-name', '%s-%s' % (fakes.ID_EC2_RESERVATION_1, 0)), - ('private-ip-address', fakes.IP_NETWORK_INTERFACE_2), - ('ramdisk-id', fakes.ID_EC2_IMAGE_ARI_1), - ('reservation-id', fakes.ID_EC2_RESERVATION_1), - ('root-device-name', fakes.ROOT_DEVICE_NAME_INSTANCE_1), - ('root-device-type', 'ebs'), - ('subnet-id', fakes.ID_EC2_SUBNET_2), - ('vpc-id', fakes.ID_EC2_VPC_1), - ('network-interface.description', - fakes.DESCRIPTION_NETWORK_INTERFACE_2), - ('network-interface.subnet-id', fakes.ID_EC2_SUBNET_2), - ('network-interface.vpc-id', fakes.ID_EC2_VPC_1), - ('network-interface.network-interface.id', - fakes.ID_EC2_NETWORK_INTERFACE_2), - ('network-interface.owner-id', fakes.ID_OS_PROJECT), - ('network-interface.requester-managed', False), - ('network-interface.status', 'in-use'), - ('network-interface.mac-address', fakes.MAC_ADDRESS), - ('network-interface.source-destination-check', True), - ('network-interface.group-id', fakes.ID_EC2_SECURITY_GROUP_1), - ('network-interface.group-name', - fakes.NAME_DEFAULT_OS_SECURITY_GROUP), - ('network-interface.attachment.attachment-id', - fakes.ID_EC2_NETWORK_INTERFACE_2_ATTACH), - ('network-interface.attachment.instance-id', - fakes.ID_EC2_INSTANCE_1), - ('network-interface.attachment.instance-owner-id', - fakes.ID_OS_PROJECT), - ('network-interface.addresses.private-ip-address', - fakes.IP_NETWORK_INTERFACE_2_EXT_1), - ('network-interface.attachment.device-index', 0), - ('network-interface.attachment.status', 'attached'), - ('network-interface.attachment.attach-time', - fakes.TIME_ATTACH_NETWORK_INTERFACE), - ('network-interface.attachment.delete-on-termination', False), - ('network-interface.addresses.primary', False), - ('network-interface.addresses.association.public-ip', - fakes.IP_ADDRESS_2), - 
('network-interface.addresses.association.ip-owner-id', - fakes.ID_OS_PROJECT), - ('association.public-ip', fakes.IP_ADDRESS_2), - ('association.ip-owner-id', fakes.ID_OS_PROJECT)]) - self.check_tag_support( - 'DescribeInstances', ['reservationSet', 'instancesSet'], - fakes.ID_EC2_INSTANCE_1, 'instanceId') - - def test_describe_instances_ec2_classic(self): - self.set_mock_db_items( - fakes.DB_INSTANCE_2, fakes.DB_IMAGE_1, fakes.DB_IMAGE_2, - fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, fakes.DB_VOLUME_3) - self.nova_admin.servers.list.return_value = [ - fakes.OSInstance_full(fakes.OS_INSTANCE_2)] - self.cinder.volumes.list.return_value = [ - fakes.OSVolume(fakes.OS_VOLUME_1), - fakes.OSVolume(fakes.OS_VOLUME_2), - fakes.OSVolume(fakes.OS_VOLUME_3)] - self.security_group_api.describe_security_groups.return_value = { - 'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_1, - fakes.EC2_SECURITY_GROUP_3]} - - resp = self.execute('DescribeInstances', {}) - - self.assertThat(resp, matchers.DictMatches( - {'reservationSet': [fakes.EC2_RESERVATION_2]}, - orderless_lists=True)) - - def test_describe_instances_mutliple_networks(self): - """Describe 2 instances with various combinations of network.""" - self._build_multiple_data_model() - - self.set_mock_db_items(*self.DB_INSTANCES) - describe_network_interfaces = ( - self.network_interface_api.describe_network_interfaces) - self.security_group_api.describe_security_groups.return_value = { - 'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_1, - fakes.EC2_SECURITY_GROUP_3]} - - def do_check(ips_by_instance=[], ec2_enis_by_instance=[], - ec2_instance_ips=[]): - describe_network_interfaces.return_value = copy.deepcopy( - {'networkInterfaceSet': list( - itertools.chain(*ec2_enis_by_instance))}) - self.nova_admin.servers.list.return_value = [ - fakes.OSInstance_full({ - 'id': os_id, - 'flavor': {'id': 'fakeFlavorId'}, - 'addresses': { - subnet_name: [{'addr': addr, - 'version': 4, - 'OS-EXT-IPS:type': 'fixed'}] - for subnet_name, addr in ips}, 
- 'root_device_name': '/dev/vda', - 'hostname': '%s-%s' % (fakes.ID_EC2_RESERVATION_1, l_i)}) - for l_i, (os_id, ips) in enumerate(zip( - self.IDS_OS_INSTANCE, - ips_by_instance))] - - resp = self.execute('DescribeInstances', {}) - - instances = [fakes.gen_ec2_instance( - inst_id, launch_index=l_i, private_ip_address=ip, - ec2_network_interfaces=enis, - reservation_id=fakes.ID_EC2_RESERVATION_1) - for l_i, (inst_id, ip, enis) in enumerate(zip( - self.IDS_EC2_INSTANCE, - ec2_instance_ips, - ec2_enis_by_instance))] - reservation_set = [fakes.gen_ec2_reservation( - fakes.ID_EC2_RESERVATION_1, instances)] - self.assertThat({'reservationSet': reservation_set}, - matchers.DictMatches(resp, orderless_lists=True), - verbose=True) - - def ip_info(ind): - return (self.EC2_ATTACHED_ENIS[ind]['subnetId'], - self.EC2_ATTACHED_ENIS[ind]['privateIpAddress']) - - # NOTE(ft): 2 instances; the first has 2 correct ports; - # the second has the first port attached by EC2 API but later detached - # by OpenStack and the second port created through EC2 API but - # attached by OpenStack only - do_check( - ips_by_instance=[[ip_info(0), ip_info(1)], [ip_info(3)]], - ec2_enis_by_instance=[ - [self.EC2_ATTACHED_ENIS[0], self.EC2_ATTACHED_ENIS[1]], - []], - ec2_instance_ips=[fakes.IP_FIRST_SUBNET_1, fakes.IP_LAST_SUBNET_2]) - - # NOTE(ft): 2 instances: the first has the first port attached by - # OpenStack only, the second port is attached correctly; - # the second instance has one port created and attached by OpenStack - # only - do_check( - ips_by_instance=[[ip_info(0), ip_info(1)], [ip_info(3)]], - ec2_enis_by_instance=[[self.EC2_ATTACHED_ENIS[1]], []], - ec2_instance_ips=[None, fakes.IP_LAST_SUBNET_2]) - - @mock.patch('ec2api.api.instance._remove_instances') - def test_describe_instances_auto_remove(self, remove_instances): - self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2, - fakes.DB_VOLUME_2) - self.nova_admin.servers.list.return_value = [ - 
fakes.OSInstance_full(fakes.OS_INSTANCE_2)] - self.cinder.volumes.list.return_value = [ - fakes.OSVolume(fakes.OS_VOLUME_2)] - self.security_group_api.describe_security_groups.return_value = { - 'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_3]} - - resp = self.execute('DescribeInstances', {}) - - self.assertThat(resp, - matchers.DictMatches( - {'reservationSet': [fakes.EC2_RESERVATION_2]}, - orderless_lists=True)) - remove_instances.assert_called_once_with( - mock.ANY, [fakes.DB_INSTANCE_1]) - - @mock.patch('ec2api.api.instance._format_instance') - def test_describe_instances_sorting(self, format_instance): - db_instances = [ - {'id': fakes.random_ec2_id('i'), - 'os_id': fakes.random_os_id(), - 'vpc_id': None, - 'launch_index': i, - 'reservation_id': fakes.ID_EC2_RESERVATION_1} - for i in range(5)] - random.shuffle(db_instances) - self.set_mock_db_items(*db_instances) - os_instances = [ - fakes.OSInstance_full({'id': inst['os_id']}) - for inst in db_instances] - self.nova_admin.servers.list.return_value = os_instances - format_instance.side_effect = ( - lambda context, instance, *args: ( - {'instanceId': instance['id'], - 'amiLaunchIndex': instance['launch_index']})) - - resp = self.execute('DescribeInstances', {}) - self.assertEqual( - [0, 1, 2, 3, 4], - [inst['amiLaunchIndex'] - for inst in resp['reservationSet'][0]['instancesSet']]) - - def test_describe_instances_invalid_parameters(self): - self.assert_execution_error( - 'InvalidInstanceID.NotFound', 'DescribeInstances', - {'InstanceId.1': fakes.random_ec2_id('i')}) - - self.set_mock_db_items(fakes.DB_INSTANCE_2) - self.assert_execution_error( - 'InvalidInstanceID.NotFound', 'DescribeInstances', - {'InstanceId.1': fakes.ID_EC2_INSTANCE_2, - 'InstanceId.2': fakes.random_ec2_id('i')}) - - def test_describe_instance_attributes(self): - self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2, - fakes.DB_IMAGE_ARI_1, fakes.DB_IMAGE_AKI_1, - fakes.DB_VOLUME_2) - self.nova_admin.servers.get.side_effect = ( - 
tools.get_by_1st_arg_getter({ - fakes.ID_OS_INSTANCE_1: ( - fakes.OSInstance_full(fakes.OS_INSTANCE_1)), - fakes.ID_OS_INSTANCE_2: ( - fakes.OSInstance_full(fakes.OS_INSTANCE_2))})) - self.cinder.volumes.list.return_value = [ - fakes.OSVolume(fakes.OS_VOLUME_2)] - self.security_group_api.describe_security_groups.return_value = { - 'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_1, - fakes.EC2_SECURITY_GROUP_3]} - - def do_check(instance_id, attribute, expected): - resp = self.execute('DescribeInstanceAttribute', - {'InstanceId': instance_id, - 'Attribute': attribute}) - expected.update({'instanceId': instance_id}) - self.assertThat(resp, matchers.DictMatches(expected)) - - do_check(fakes.ID_EC2_INSTANCE_2, 'blockDeviceMapping', - {'rootDeviceType': 'ebs', - 'blockDeviceMapping': ( - fakes.EC2_INSTANCE_2['blockDeviceMapping'])}) - do_check(fakes.ID_EC2_INSTANCE_2, 'groupSet', - {'groupSet': fakes.EC2_RESERVATION_2['groupSet']}) - do_check(fakes.ID_EC2_INSTANCE_2, 'instanceType', - {'instanceType': {'value': 'fake_flavor'}}) - do_check(fakes.ID_EC2_INSTANCE_1, 'kernel', - {'kernel': {'value': fakes.ID_EC2_IMAGE_AKI_1}}) - do_check(fakes.ID_EC2_INSTANCE_1, 'ramdisk', - {'ramdisk': {'value': fakes.ID_EC2_IMAGE_ARI_1}}) - do_check(fakes.ID_EC2_INSTANCE_2, 'rootDeviceName', - {'rootDeviceName': { - 'value': fakes.ROOT_DEVICE_NAME_INSTANCE_2}}) - do_check(fakes.ID_EC2_INSTANCE_2, 'userData', - {'userData': {'value': fakes.USER_DATA_INSTANCE_2}}) - - def _build_multiple_data_model(self): - # NOTE(ft): generate necessary fake data - # We need 4 detached ports in 2 subnets. - # Sequence of all ports list is s1i1, s2i1, s1i2, s2i2, - # where sNiM - port info of instance iM on subnet sN. - # We generate port ids but use subnet and instance ids since - # fakes contain enough ids for subnets an instances, but not for ports. 
- instances_count = 2 - subnets_count = 2 - ports_count = instances_count * subnets_count - ids_ec2_eni = [fakes.random_ec2_id('eni') for _ in range(ports_count)] - ids_os_port = [fakes.random_os_id() for _ in range(ports_count)] - - ids_ec2_subnet = (fakes.ID_EC2_SUBNET_1, fakes.ID_EC2_SUBNET_2) - ids_ec2_subnet_by_port = ids_ec2_subnet * 2 - ips = (fakes.IP_FIRST_SUBNET_1, fakes.IP_FIRST_SUBNET_2, - fakes.IP_LAST_SUBNET_1, fakes.IP_LAST_SUBNET_2) - - ids_ec2_instance = [fakes.ID_EC2_INSTANCE_1, fakes.ID_EC2_INSTANCE_2] - ids_ec2_instance_by_port = list( - itertools.chain(*map(lambda i: [i] * subnets_count, - ids_ec2_instance))) - ids_os_instance = [fakes.ID_OS_INSTANCE_1, fakes.ID_OS_INSTANCE_2] - - dots_by_port = [True, False] * instances_count - db_attached_enis = [ - fakes.gen_db_network_interface( - ec2_id, os_id, fakes.ID_EC2_VPC_1, - subnet_ec2_id, ip, - instance_id=instance_ec2_id, - device_index=dev_ind, - delete_on_termination=dot) - for (ec2_id, os_id, subnet_ec2_id, ip, instance_ec2_id, dev_ind, - dot) in zip( - ids_ec2_eni, - ids_os_port, - ids_ec2_subnet_by_port, - ips, - ids_ec2_instance_by_port, - list(range(subnets_count)) * instances_count, - dots_by_port)] - db_detached_enis = [ - fakes.gen_db_network_interface( - ec2_id, os_id, fakes.ID_EC2_VPC_1, - subnet_ec2_id, ip) - for ec2_id, os_id, subnet_ec2_id, ip in zip( - ids_ec2_eni, - ids_os_port, - ids_ec2_subnet_by_port, - ips)] - ec2_attached_enis = [ - fakes.gen_ec2_network_interface( - db_eni['id'], - None, # ec2_subnet - [db_eni['private_ip_address']], - ec2_instance_id=ec2_instance_id, - device_index=dev_ind, - delete_on_termination=dot, - ec2_subnet_id=ec2_subnet_id, - ec2_vpc_id=fakes.ID_EC2_VPC_1) - for db_eni, dot, ec2_subnet_id, ec2_instance_id, dev_ind in zip( - db_attached_enis, - dots_by_port, - ids_ec2_subnet_by_port, - ids_ec2_instance_by_port, - list(range(subnets_count)) * instances_count)] - ec2_detached_enis = [ - fakes.gen_ec2_network_interface( - db_eni['id'], - None, # 
ec2_subnet - [db_eni['private_ip_address']], - ec2_subnet_id=ec2_subnet_id, - ec2_vpc_id=fakes.ID_EC2_VPC_1) - for db_eni, ec2_subnet_id in zip( - db_detached_enis, - ids_ec2_subnet_by_port)] - db_instances = [ - {'id': db_id, - 'os_id': os_id, - 'vpc_id': fakes.ID_EC2_VPC_1, - 'reservation_id': fakes.ID_EC2_RESERVATION_1, - 'launch_index': l_i} - for l_i, (db_id, os_id) in enumerate(zip( - ids_ec2_instance, - ids_os_instance))] - - self.IDS_EC2_SUBNET = ids_ec2_subnet - self.IDS_OS_PORT = ids_os_port - self.IDS_OS_INSTANCE = ids_os_instance - self.IDS_EC2_INSTANCE = ids_ec2_instance - self.IDS_EC2_SUBNET_BY_PORT = ids_ec2_subnet_by_port - self.DB_DETACHED_ENIS = db_detached_enis - self.EC2_ATTACHED_ENIS = ec2_attached_enis - self.EC2_DETACHED_ENIS = ec2_detached_enis - self.DB_INSTANCES = db_instances - - -# TODO(ft): add tests for get_vpc_default_security_group_id, - -class InstancePrivateTestCase(base.BaseTestCase): - - def test_merge_network_interface_parameters(self): - fake_context = base.create_context() - engine = instance_api.InstanceEngineNeutron() - - self.assertRaises( - exception.InvalidParameterCombination, - engine.merge_network_interface_parameters, - fake_context, None, 'subnet-1', None, None, - [{'device_index': 0, 'private_ip_address': '10.10.10.10'}]) - self.assertRaises( - exception.InvalidParameterCombination, - engine.merge_network_interface_parameters, - fake_context, None, None, '10.10.10.10', None, - [{'device_index': 0, 'subnet_id': 'subnet-1'}]) - self.assertRaises( - exception.InvalidParameterCombination, - engine.merge_network_interface_parameters, - fake_context, ['default'], None, None, None, - [{'device_index': 0, 'subnet_id': 'subnet-1'}]) - self.assertRaises( - exception.InvalidParameterCombination, - engine.merge_network_interface_parameters, - fake_context, None, None, None, ['sg-1'], - [{'device_index': 0, 'subnet_id': 'subnet-1'}]) - self.assertRaises( - exception.InvalidParameterCombination, - 
engine.merge_network_interface_parameters, - fake_context, None, 'subnet-1', None, None, - [{'device_index': 1, 'associate_public_ip_address': True}]) - self.assertRaises( - exception.InvalidParameterCombination, - engine.merge_network_interface_parameters, - fake_context, None, 'subnet-1', None, None, - [{'device_index': 0, 'associate_public_ip_address': True}, - {'device_index': 1, 'subnet_id': 'subnet-2'}]) - self.assertRaises( - exception.InvalidParameterCombination, - engine.merge_network_interface_parameters, - fake_context, None, 'subnet-1', None, None, - [{'device_index': 0}]) - - self.assertRaises( - exception.InvalidParameterCombination, - engine.merge_network_interface_parameters, - fake_context, ['default'], 'subnet-1', None, None, None) - self.assertRaises( - exception.InvalidParameterCombination, - engine.merge_network_interface_parameters, - fake_context, None, None, '10.10.10.10', None, None) - self.assertRaises( - exception.InvalidParameterCombination, - engine.merge_network_interface_parameters, - fake_context, None, None, None, ['sg-1'], None) - - self.assertEqual( - (None, [{'device_index': 0, - 'subnet_id': 'subnet-1'}]), - engine.merge_network_interface_parameters( - fake_context, None, 'subnet-1', None, None, None)) - self.assertEqual( - (None, [{'device_index': 0, - 'subnet_id': 'subnet-1', - 'private_ip_address': '10.10.10.10'}]), - engine.merge_network_interface_parameters( - fake_context, None, 'subnet-1', '10.10.10.10', None, None)) - self.assertEqual( - (None, [{'device_index': 0, - 'subnet_id': 'subnet-1', - 'private_ip_address': '10.10.10.10', - 'security_group_id': ['sg-1']}]), - engine.merge_network_interface_parameters( - fake_context, None, 'subnet-1', '10.10.10.10', ['sg-1'], None)) - self.assertEqual( - (None, [{'device_index': 0, - 'subnet_id': 'subnet-1', - 'security_group_id': ['sg-1']}]), - engine.merge_network_interface_parameters( - fake_context, None, 'subnet-1', None, ['sg-1'], None)) - - self.assertEqual( - (None, 
[{'device_index': 0, - 'subnet_id': 'subnet-1'}]), - engine.merge_network_interface_parameters( - fake_context, None, None, None, None, - [{'device_index': 0, 'subnet_id': 'subnet-1'}])) - self.assertEqual((['default'], []), - engine.merge_network_interface_parameters( - fake_context, ['default'], None, None, None, - None)) - self.assertEqual((None, []), - engine.merge_network_interface_parameters( - fake_context, None, None, None, None, None)) - - self.configure(disable_ec2_classic=True) - self.db_api = self.mock_db() - self.db_api.set_mock_items(fakes.DB_VPC_DEFAULT, - fakes.DB_SUBNET_DEFAULT) - - self.assertEqual( - (None, [{'device_index': 0, - 'subnet_id': fakes.ID_EC2_SUBNET_DEFAULT}]), - engine.merge_network_interface_parameters( - fake_context, None, None, None, None, None)) - self.assertEqual( - (None, [{'device_index': 0, - 'subnet_id': fakes.ID_EC2_SUBNET_DEFAULT, - 'security_group_id': ['sg-id'], - 'associate_public_ip_address': True}]), - engine.merge_network_interface_parameters( - fake_context, None, None, None, None, - [{'device_index': 0, - 'associate_public_ip_address': True, - 'security_group_id': ['sg-id']}])) - - with mock.patch('ec2api.api.security_group.describe_security_groups' - ) as describe_sg: - - describe_sg.return_value = { - 'securityGroupInfo': [{'groupId': 'sg-named-id'}] - } - self.assertEqual((None, [{'device_index': 0, - 'subnet_id': fakes.ID_EC2_SUBNET_DEFAULT, - 'security_group_id': ['sg-id', - 'sg-named-id'], - 'private_ip_address': 'private-ip'}]), - engine.merge_network_interface_parameters( - fake_context, ['sg-name'], None, - 'private-ip', ['sg-id'], None)) - describe_sg.assert_called_once_with(mock.ANY, - group_name=['sg-name']) - - def test_check_network_interface_parameters(self): - engine = instance_api.InstanceEngineNeutron() - - self.assertRaises( - exception.InvalidParameterValue, - engine.check_network_interface_parameters, - [{'subnet_id': 'subnet-1'}], False) - self.assertRaises( - 
exception.InvalidParameterValue, - engine.check_network_interface_parameters, - [{'device_index': 0, 'subnet_id': 'subnet-1'}, - {'device_index': 0, 'subnet_id': 'subnet-2'}], False) - self.assertRaises( - exception.InvalidParameterValue, - engine.check_network_interface_parameters, - [{'device_index': 0, 'private_ip_address': '10.10.10.10'}], False) - self.assertRaises( - exception.InvalidParameterCombination, - engine.check_network_interface_parameters, - [{'device_index': 0, - 'network_interface_id': 'eni-1', - 'subnet_id': 'subnet-1'}], - False) - self.assertRaises( - exception.InvalidParameterCombination, - engine.check_network_interface_parameters, - [{'device_index': 0, - 'network_interface_id': 'eni-1', - 'private_ip_address': '10.10.10.10'}], - False) - self.assertRaises( - exception.InvalidParameterCombination, - engine.check_network_interface_parameters, - [{'device_index': 0, - 'network_interface_id': 'eni-1', - 'security_group_id': ['sg-1']}], - False) - self.assertRaises( - exception.InvalidParameterCombination, - engine.check_network_interface_parameters, - [{'device_index': 0, - 'network_interface_id': 'eni-1', - 'delete_on_termination': True}], - False) - self.assertRaises( - exception.InvalidParameterCombination, - engine.check_network_interface_parameters, - [{'device_index': 0, 'network_interface_id': 'eni-1'}], - True) - self.assertRaises( - exception.InvalidParameterCombination, - engine.check_network_interface_parameters, - [{'device_index': 0, - 'subnet_id': 'subnet-1', - 'private_ip_address': '10.10.10.10'}], - True) - self.assertRaises( - exception.UnsupportedOperation, - engine.check_network_interface_parameters, - [{'device_index': 1, 'subnet_id': 'subnet-1'}], False) - - engine.check_network_interface_parameters( - [{'device_index': 0, 'subnet_id': 'subnet-1'}], False) - engine.check_network_interface_parameters( - [{'device_index': 0, - 'subnet_id': 'subnet-1', - 'private_ip_address': '10.10.10.10', - 'security_group_id': ['sg-1'], - 
'delete_on_termination': True}], - False) - engine.check_network_interface_parameters( - [{'device_index': 0, 'network_interface_id': 'eni-1'}], False) - engine.check_network_interface_parameters( - [{'device_index': 0, - 'subnet_id': 'subnet-1', - 'security_group_id': ['sg-1'], - 'delete_on_termination': True}, - {'device_index': 1, - 'subnet_id': 'subnet-2'}], - True) - engine.check_network_interface_parameters([], False) - - @mock.patch('ec2api.db.api.IMPL') - def test_parse_network_interface_parameters(self, db_api): - engine = instance_api.InstanceEngineNeutron() - context = base.create_context() - db_api.get_item_by_id.side_effect = tools.get_db_api_get_item_by_id( - fakes.DB_SUBNET_1, - tools.update_dict(fakes.DB_SUBNET_2, - {'vpc_id': fakes.ID_EC2_VPC_2}), - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2) - - resp = engine.parse_network_interface_parameters( - context, - [{'device_index': 1, - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1}, - {'device_index': 0, - 'subnet_id': fakes.ID_EC2_SUBNET_1, - 'delete_on_termination': False, - 'security_group_id': [fakes.ID_EC2_SECURITY_GROUP_1]}]) - self.assertEqual( - (fakes.ID_EC2_VPC_1, - [{'device_index': 0, - 'create_args': (fakes.ID_EC2_SUBNET_1, - {'security_group_id': ( - [fakes.ID_EC2_SECURITY_GROUP_1])}), - 'delete_on_termination': False}, - {'device_index': 1, - 'network_interface': fakes.DB_NETWORK_INTERFACE_1, - 'delete_on_termination': False}]), - resp) - resp = engine.parse_network_interface_parameters( - context, - [{'device_index': 0, - 'subnet_id': fakes.ID_EC2_SUBNET_1, - 'associate_public_ip_address': True}]) - self.assertEqual( - (fakes.ID_EC2_VPC_1, - [{'device_index': 0, - 'create_args': (fakes.ID_EC2_SUBNET_1, {}), - 'delete_on_termination': True}]), - resp) - - # NOTE(ft): a network interface has being attached twice - self.assertRaises( - exception.InvalidParameterValue, - engine.parse_network_interface_parameters, context, - [{'device_index': 0, - 
'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1}, - {'device_index': 1, - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1}]) - # NOTE(ft): a network interface is in use - self.assertRaises( - exception.InvalidNetworkInterfaceInUse, - engine.parse_network_interface_parameters, context, - [{'device_index': 0, - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_2}]) - # NOTE(ft): specified objects are belonging to different VPCs - self.assertRaises( - exception.InvalidParameterValue, - engine.parse_network_interface_parameters, context, - [{'device_index': 0, - 'subnet_id': fakes.ID_EC2_SUBNET_1}, - {'device_index': 1, - 'subnet_id': fakes.ID_EC2_SUBNET_2}]) - self.assertRaises( - exception.InvalidParameterValue, - engine.parse_network_interface_parameters, context, - [{'device_index': 0, - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1}, - {'device_index': 1, - 'subnet_id': fakes.ID_EC2_SUBNET_2}]) - - @mock.patch('ec2api.api.ec2utils.get_os_image') - def test_parse_image_parameters(self, get_os_image): - fake_context = base.create_context() - - # NOTE(ft): check normal flow - os_image = fakes.OSImage(fakes.OS_IMAGE_1) - get_os_image.side_effect = [os_image] - self.assertEqual( - (os_image), - instance_api._parse_image_parameters( - fake_context, fakes.ID_EC2_IMAGE_1)) - get_os_image.assert_has_calls( - [mock.call(fake_context, fakes.ID_EC2_IMAGE_1)]) - - get_os_image.side_effect = None - get_os_image.return_value = os_image - get_os_image.reset_mock() - self.assertEqual( - (os_image), - instance_api._parse_image_parameters( - fake_context, fakes.ID_EC2_IMAGE_1)) - get_os_image.assert_called_once_with( - fake_context, fakes.ID_EC2_IMAGE_1) - - # NOTE(ft): check cases of not available image - os_image = fakes.OSImage({ - 'id': fakes.random_os_id(), - 'status': None}) - get_os_image.return_value = os_image - - self.assertRaises( - exception.InvalidAMIIDUnavailable, - instance_api._parse_image_parameters, - fake_context, 
fakes.random_ec2_id('ami')) - - os_image.status = 'active' - os_image.image_state = 'decrypting' - - self.assertRaises( - exception.InvalidAMIIDUnavailable, - instance_api._parse_image_parameters, - fake_context, fakes.random_ec2_id('ami')) - - @mock.patch('ec2api.db.api.IMPL') - def test_parse_block_device_mapping(self, db_api): - fake_context = base.create_context() - - db_api.get_item_by_id.side_effect = tools.get_db_api_get_item_by_id( - fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, fakes.DB_VOLUME_3, - fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2) - - res = instance_api._parse_block_device_mapping(fake_context, []) - self.assertEqual([], res) - - res = instance_api._parse_block_device_mapping( - fake_context, [{'device_name': '/dev/vdf', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}}, - {'device_name': '/dev/vdg', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2, - 'volume_size': 111, - 'delete_on_termination': False}}, - {'device_name': '/dev/vdh', - 'ebs': {'snapshot_id': fakes.ID_EC2_VOLUME_1}}, - {'device_name': '/dev/vdi', - 'ebs': {'snapshot_id': fakes.ID_EC2_VOLUME_2, - 'delete_on_termination': True}}, - {'device_name': '/dev/sdb1', - 'ebs': {'volume_size': 55}}]) - - expected = [{'snapshot_id': fakes.ID_OS_SNAPSHOT_1, - 'device_name': '/dev/vdf', - 'source_type': 'snapshot', - 'destination_type': 'volume'}, - {'snapshot_id': fakes.ID_OS_SNAPSHOT_2, - 'volume_size': 111, - 'device_name': '/dev/vdg', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'delete_on_termination': False}, - {'volume_id': fakes.ID_OS_VOLUME_1, - 'device_name': '/dev/vdh', - 'source_type': 'volume', - 'destination_type': 'volume'}, - {'volume_id': fakes.ID_OS_VOLUME_2, - 'device_name': '/dev/vdi', - 'source_type': 'volume', - 'destination_type': 'volume', - 'delete_on_termination': True}, - {'volume_size': 55, - 'device_name': '/dev/sdb1', - 'destination_type': 'volume'}] - - self.assertThat(expected, - matchers.ListMatches(res, orderless_lists=True), - verbose=True) - - res 
= instance_api._parse_block_device_mapping( - fake_context, [{'device_name': '/dev/vdf', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}}, - {'device_name': '/dev/vdf', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}}]) - expected = [{'snapshot_id': fakes.ID_OS_SNAPSHOT_2, - 'device_name': '/dev/vdf', - 'source_type': 'snapshot', - 'destination_type': 'volume'}] - self.assertThat(expected, - matchers.ListMatches(res, orderless_lists=True), - verbose=True) - - self.assertRaises( - exception.InvalidBlockDeviceMapping, - instance_api._parse_block_device_mapping, - fake_context, - [{'device_name': '/dev/vdf', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}}, - {'device_name': 'vdf', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}}]) - - @mock.patch('ec2api.db.api.IMPL') - def test_build_block_device_mapping(self, db_api): - fake_context = base.create_context() - db_api.get_item_by_id.side_effect = tools.get_db_api_get_item_by_id( - fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2, - fakes.DB_VOLUME_1, fakes.DB_VOLUME_2) - - # check bdm attributes' population - bdms = [ - {'device_name': '/dev/sda1', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}}, - {'device_name': '/dev/vdb', - 'ebs': {'snapshot_id': fakes.ID_EC2_VOLUME_1, - 'delete_on_termination': False}}, - {'device_name': 'vdc', - 'ebs': {'volume_size': 100}}, - ] - expected = [ - {'device_name': '/dev/sda1', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'uuid': fakes.ID_OS_SNAPSHOT_1, - 'delete_on_termination': True, - 'boot_index': 0}, - {'device_name': '/dev/vdb', - 'source_type': 'volume', - 'destination_type': 'volume', - 'uuid': fakes.ID_OS_VOLUME_1, - 'delete_on_termination': False, - 'boot_index': -1}, - {'device_name': 'vdc', - 'source_type': 'blank', - 'destination_type': 'volume', - 'volume_size': 100, - 'delete_on_termination': True, - 'boot_index': -1}, - ] - result = instance_api._build_block_device_mapping( - fake_context, bdms, fakes.OSImage(fakes.OS_IMAGE_1)) - 
self.assertEqual(expected, result) - - fake_image_template = { - 'id': fakes.random_os_id(), - 'root_device_name': '/dev/vda', - 'bdm_v2': True, - 'block_device_mapping': []} - - # check merging with image bdms - fake_image_template['block_device_mapping'] = [ - {'boot_index': 0, - 'device_name': '/dev/vda', - 'source_type': 'snapshot', - 'snapshot_id': fakes.ID_OS_SNAPSHOT_1, - 'delete_on_termination': True, - 'disk_bus': None}, - {'device_name': 'vdb', - 'source_type': 'snapshot', - 'snapshot_id': fakes.random_os_id(), - 'volume_size': 50}, - {'device_name': '/dev/vdc', - 'source_type': 'blank', - 'volume_size': 10}, - ] - bdms = [ - {'device_name': '/dev/vda', - 'ebs': {'volume_size': 15}}, - {'device_name': 'vdb', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2, - 'delete_on_termination': False}}, - {'device_name': '/dev/vdc', - 'ebs': {'volume_size': 20}}, - ] - expected = [ - {'device_name': '/dev/vda', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'uuid': fakes.ID_OS_SNAPSHOT_1, - 'delete_on_termination': True, - 'volume_size': 15, - 'boot_index': 0}, - {'device_name': 'vdb', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'uuid': fakes.ID_OS_SNAPSHOT_2, - 'delete_on_termination': False, - 'boot_index': -1}, - {'device_name': '/dev/vdc', - 'source_type': 'blank', - 'destination_type': 'volume', - 'volume_size': 20, - 'delete_on_termination': False}, - ] - result = instance_api._build_block_device_mapping( - fake_context, bdms, fakes.OSImage(fake_image_template)) - self.assertEqual(expected, result) - - # check result order for adjusting some bdm of all - fake_image_template['block_device_mapping'] = [ - {'device_name': '/dev/vdc', - 'source_type': 'blank', - 'volume_size': 10}, - {'device_name': '/dev/vde', - 'source_type': 'blank', - 'volume_size': 10}, - {'device_name': '/dev/vdf', - 'source_type': 'blank', - 'volume_size': 10}, - {'boot_index': -1, - 'source_type': 'blank', - 'volume_size': 10}, - ] - bdms = [ - 
{'device_name': '/dev/vdh', - 'ebs': {'volume_size': 15}}, - {'device_name': '/dev/vde', - 'ebs': {'volume_size': 15}}, - {'device_name': '/dev/vdb', - 'ebs': {'volume_size': 15}}, - ] - expected = [ - {'device_name': '/dev/vdh', - 'source_type': 'blank', - 'destination_type': 'volume', - 'volume_size': 15, - 'delete_on_termination': True, - 'boot_index': -1}, - {'device_name': '/dev/vde', - 'source_type': 'blank', - 'destination_type': 'volume', - 'volume_size': 15, - 'delete_on_termination': False}, - {'device_name': '/dev/vdb', - 'source_type': 'blank', - 'destination_type': 'volume', - 'volume_size': 15, - 'delete_on_termination': True, - 'boot_index': -1}, - ] - result = instance_api._build_block_device_mapping( - fake_context, bdms, fakes.OSImage(fake_image_template)) - self.assertEqual(expected, result) - - # check conflict of short and full device names - fake_image_template['block_device_mapping'] = [ - {'device_name': '/dev/vdc', - 'source_type': 'blank', - 'volume_size': 10}, - ] - bdms = [ - {'device_name': 'vdc', - 'ebs': {'volume_size': 15}}, - ] - self.assertRaises(exception.InvalidBlockDeviceMapping, - instance_api._build_block_device_mapping, - fake_context, bdms, - fakes.OSImage(fake_image_template)) - - # opposit combination of the same case - fake_image_template['block_device_mapping'] = [ - {'device_name': 'vdc', - 'source_type': 'blank', - 'volume_size': 10}, - ] - bdms = [ - {'device_name': '/dev/vdc', - 'ebs': {'volume_size': 15}}, - ] - self.assertRaises(exception.InvalidBlockDeviceMapping, - instance_api._build_block_device_mapping, - fake_context, bdms, - fakes.OSImage(fake_image_template)) - - # check fault on root device snapshot changing - fake_image_template['block_device_mapping'] = [ - {'boot_index': 0, - 'source_type': 'snapshot', - 'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}, - ] - bdms = [ - {'device_name': '/dev/vda', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}}, - ] - self.assertRaises(exception.InvalidBlockDeviceMapping, - 
instance_api._build_block_device_mapping, - fake_context, bdms, - fakes.OSImage(fake_image_template)) - - # same case for legacy bdm - fake_image_template['block_device_mapping'] = [ - {'device_name': '/dev/vda', - 'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}, - ] - fake_image_template['bdm_v2'] = False - bdms = [ - {'device_name': '/dev/vda', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}}, - ] - self.assertRaises(exception.InvalidBlockDeviceMapping, - instance_api._build_block_device_mapping, - fake_context, bdms, - fakes.OSImage(fake_image_template)) - - # same case for legacy bdm with short names - fake_image_template['block_device_mapping'] = [ - {'device_name': 'vda', - 'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}, - ] - fake_image_template['bdm_v2'] = False - bdms = [ - {'device_name': 'vda', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}}, - ] - self.assertRaises(exception.InvalidBlockDeviceMapping, - instance_api._build_block_device_mapping, - fake_context, bdms, - fakes.OSImage(fake_image_template)) - - fake_image_template['bdm_v2'] = True - - # check fault on reduce volume size - fake_image_template['block_device_mapping'] = [ - {'device_name': 'vdc', - 'source_type': 'blank', - 'volume_size': 15}, - ] - bdms = [ - {'device_name': '/dev/vdc', - 'ebs': {'volume_size': 10}}, - ] - self.assertRaises(exception.InvalidBlockDeviceMapping, - instance_api._build_block_device_mapping, - fake_context, bdms, - fakes.OSImage(fake_image_template)) - - # check fault on set snapshot id if bdm doesn't have one - fake_image_template['block_device_mapping'] = [ - {'device_name': 'vdc', - 'source_type': 'blank', - 'volume_size': 10}, - ] - bdms = [ - {'device_name': '/dev/vdc', - 'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}}, - ] - self.assertRaises(exception.InvalidBlockDeviceMapping, - instance_api._build_block_device_mapping, - fake_context, bdms, - fakes.OSImage(fake_image_template)) - - @mock.patch('cinderclient.client.Client') - @mock.patch('novaclient.client.Client') 
- @mock.patch('ec2api.db.api.IMPL') - def test_format_instance(self, db_api, nova, cinder): - nova = nova.return_value - fake_context = base.create_context() - fake_flavors = {'fakeFlavorId': 'fake_flavor'} - - instance = {'id': fakes.random_ec2_id('i'), - 'os_id': fakes.random_os_id(), - 'launch_index': 0} - os_instance = fakes.OSInstance_full({'id': instance['os_id'], - 'flavor': {'id': 'fakeFlavorId'}}) - - # NOTE(ft): check instance state formatting - setattr(os_instance, 'OS-EXT-STS:vm_state', 'active') - formatted_instance = instance_api._format_instance( - fake_context, instance, os_instance, [], {}, - None, None, fake_flavors, []) - self.assertEqual({'name': 'running', 'code': 16}, - formatted_instance['instanceState']) - - setattr(os_instance, 'OS-EXT-STS:vm_state', 'stopped') - formatted_instance = instance_api._format_instance( - fake_context, instance, os_instance, [], {}, - None, None, fake_flavors, []) - self.assertEqual({'name': 'stopped', 'code': 80}, - formatted_instance['instanceState']) - - # NOTE(ft): check auto creating of DB item for unknown OS images - os_instance.image = {'id': fakes.random_os_id()} - kernel_id = fakes.random_os_id() - ramdisk_id = fakes.random_os_id() - setattr(os_instance, 'OS-EXT-SRV-ATTR:kernel_id', kernel_id) - setattr(os_instance, 'OS-EXT-SRV-ATTR:ramdisk_id', ramdisk_id) - formatted_instance = instance_api._format_instance( - fake_context, instance, os_instance, [], {}, - None, None, fake_flavors, []) - db_api.add_item_id.assert_has_calls( - [mock.call(mock.ANY, 'ami', os_instance.image['id'], None), - mock.call(mock.ANY, 'aki', kernel_id, None), - mock.call(mock.ANY, 'ari', ramdisk_id, None)], - any_order=True) - - @mock.patch('cinderclient.client.Client') - def test_format_instance_bdm(self, cinder): - id_os_instance_1 = fakes.random_os_id() - id_os_instance_2 = fakes.random_os_id() - cinder = cinder.return_value - cinder.volumes.list.return_value = [ - fakes.OSVolume({'id': '2', - 'status': 'attached', - 
'attachments': [{'device': '/dev/sdb1', - 'server_id': id_os_instance_1}]}), - fakes.OSVolume({'id': '5', - 'status': 'attached', - 'attachments': [{'device': '/dev/sdb3', - 'server_id': id_os_instance_1}]}), - fakes.OSVolume({'id': '21', - 'status': 'attached', - 'attachments': [{'device': 'vda', - 'server_id': id_os_instance_2}]}), - ] - os_instance_1 = fakes.OSInstance_full({ - 'id': id_os_instance_1, - 'volumes_attached': [{'id': '2', - 'delete_on_termination': False}, - {'id': '5', - 'delete_on_termination': True}], - 'root_device_name': '/dev/sdb1'}) - os_instance_2 = fakes.OSInstance_full({ - 'id': id_os_instance_2, - 'volumes_attached': [{'id': '21', - 'delete_on_termination': False}], - 'root_device_name': '/dev/sdc1'}) - - db_volumes_1 = {'2': {'id': 'vol-00000002'}, - '5': {'id': 'vol-00000005'}} - - fake_context = base.create_context() - - result = {} - instance_api._cloud_format_instance_bdm( - fake_context, os_instance_1, result, db_volumes_1) - self.assertThat( - result, - matchers.DictMatches({ - 'rootDeviceType': 'ebs', - 'blockDeviceMapping': [ - {'deviceName': '/dev/sdb1', - 'ebs': {'status': 'attached', - 'deleteOnTermination': False, - 'volumeId': 'vol-00000002', - }}, - {'deviceName': '/dev/sdb3', - 'ebs': {'status': 'attached', - 'deleteOnTermination': True, - 'volumeId': 'vol-00000005', - }}]}, - orderless_lists=True), verbose=True) - - result = {} - with mock.patch('ec2api.db.api.IMPL') as db_api: - db_api.get_items.return_value = [{'id': 'vol-00000015', - 'os_id': '21'}] - instance_api._cloud_format_instance_bdm( - fake_context, os_instance_2, result) - self.assertThat( - result, - matchers.DictMatches({ - 'rootDeviceType': 'instance-store', - 'blockDeviceMapping': [ - {'deviceName': 'vda', - 'ebs': {'status': 'attached', - 'deleteOnTermination': False, - 'volumeId': 'vol-00000015', - }}]})) - - @mock.patch('cinderclient.client.Client') - def test_format_instance_bdm_while_attaching_volume(self, cinder): - id_os_instance = 
fakes.random_os_id() - cinder = cinder.return_value - cinder.volumes.list.return_value = [ - fakes.OSVolume({'id': '2', - 'status': 'attaching', - 'attachments': [{'device': '/dev/sdb1', - 'server_id': id_os_instance}]})] - os_instance = fakes.OSInstance_full({ - 'id': id_os_instance, - 'volumes_attached': [{'id': '2', - 'delete_on_termination': False}], - 'root_device_name': '/dev/vda'}) - fake_context = base.create_context() - - result = {} - instance_api._cloud_format_instance_bdm( - fake_context, os_instance, result, - {'2': {'id': 'vol-00000002'}}) - self.assertThat( - result, - matchers.DictMatches({ - 'rootDeviceType': 'instance-store', - 'blockDeviceMapping': [ - {'deviceName': '/dev/sdb1', - 'ebs': {'status': 'attaching', - 'deleteOnTermination': False, - 'volumeId': 'vol-00000002', - }}]})) - - def test_format_instance_bdm_no_bdm(self): - context = base.create_context() - os_instance_id = fakes.random_os_id() - os_instance = fakes.OSInstance_full({'id': os_instance_id}) - - res = {} - setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', None) - instance_api._cloud_format_instance_bdm( - context, os_instance, res, {}, {os_instance_id: []}) - self.assertEqual({}, res) - - res = {} - setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '') - instance_api._cloud_format_instance_bdm( - context, os_instance, res, {}, {os_instance_id: []}) - self.assertEqual({}, res) - - res = {} - setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '/dev/vdd') - instance_api._cloud_format_instance_bdm( - context, os_instance, res, {}, {os_instance_id: []}) - self.assertEqual({'rootDeviceType': 'instance-store'}, res) - - @mock.patch('ec2api.api.instance._remove_instances') - @mock.patch('novaclient.client.Client') - def test_get_os_instances_by_instances(self, nova, remove_instances): - nova = nova.return_value - fake_context = base.create_context() - os_instance_1 = fakes.OSInstance(fakes.OS_INSTANCE_1) - os_instance_2 = fakes.OSInstance(fakes.OS_INSTANCE_2) - - 
def do_check(exactly_flag=None, specify_nova_client=False): - nova.servers.get.side_effect = [os_instance_1, - nova_exception.NotFound(404), - os_instance_2] - absent_instance = {'id': fakes.random_ec2_id('i'), - 'os_id': fakes.random_os_id()} - - params = (fake_context, [fakes.DB_INSTANCE_1, absent_instance, - fakes.DB_INSTANCE_2], - exactly_flag, nova if specify_nova_client else False) - if exactly_flag: - self.assertRaises(exception.InvalidInstanceIDNotFound, - instance_api._get_os_instances_by_instances, - *params) - else: - res = instance_api._get_os_instances_by_instances(*params) - self.assertEqual([os_instance_1, os_instance_2], - res) - remove_instances.assert_called_once_with(fake_context, - [absent_instance]) - remove_instances.reset_mock() - - do_check(exactly_flag=True) - # NOTE(ft): stop to return fake data by the mocked client and create - # a new one to pass it into the function - nova.servers.side_effect = None - nova = mock.Mock() - do_check(specify_nova_client=True) - - @mock.patch('ec2api.api.network_interface.delete_network_interface') - @mock.patch('ec2api.api.network_interface._detach_network_interface_item') - @mock.patch('ec2api.db.api.IMPL') - def test_remove_instances(self, db_api, detach_network_interface_item, - delete_network_interface): - fake_context = base.create_context() - - instances = [{'id': fakes.random_ec2_id('i')} - for dummy in range(4)] - network_interfaces = [ - {'id': fakes.random_ec2_id('eni'), - 'instance_id': inst['id'], - 'delete_on_termination': num in (0, 1, 4, 6)} - for num, inst in enumerate(itertools.chain( - *(list(zip(instances[:3], instances[:3])) + - [[{'id': fakes.random_ec2_id('i')}] * 2])))] - network_interfaces.extend({'id': fakes.random_ec2_id('eni')} - for dummy in range(2)) - - instances_to_remove = instances[:2] + [instances[3]] - network_interfaces_to_delete = network_interfaces[0:2] - network_interfaces_to_detach = network_interfaces[0:4] - - db_api.get_items.side_effect = 
tools.get_db_api_get_items( - *network_interfaces) - - instance_api._remove_instances(fake_context, instances_to_remove) - - for eni in network_interfaces_to_detach: - detach_network_interface_item.assert_any_call(fake_context, - eni) - for eni in network_interfaces_to_delete: - delete_network_interface.assert_any_call(fake_context, - eni['id']) - - @mock.patch('cinderclient.client.Client') - def test_get_os_volumes(self, cinder): - cinder = cinder.return_value - context = base.create_context() - os_volume_ids = [fakes.random_os_id() for _i in range(5)] - os_instance_ids = [fakes.random_os_id() for _i in range(2)] - os_volumes = [ - fakes.OSVolume( - {'id': os_volume_ids[0], - 'status': 'attached', - 'attachments': [{'server_id': os_instance_ids[0]}]}), - fakes.OSVolume( - {'id': os_volume_ids[1], - 'status': 'attaching', - 'attachments': []}), - fakes.OSVolume( - {'id': os_volume_ids[2], - 'status': 'detaching', - 'attachments': [{'server_id': os_instance_ids[0]}]}), - fakes.OSVolume( - {'id': os_volume_ids[3], - 'status': 'attached', - 'attachments': [{'server_id': os_instance_ids[1]}]}), - fakes.OSVolume( - {'id': os_volume_ids[4], - 'status': 'available', - 'attachments': []}), - ] - cinder.volumes.list.return_value = os_volumes - res = instance_api._get_os_volumes(context) - self.assertIn(os_instance_ids[0], res) - self.assertIn(os_instance_ids[1], res) - self.assertEqual([os_volumes[0], os_volumes[2]], - res[os_instance_ids[0]]) - self.assertEqual([os_volumes[3]], res[os_instance_ids[1]]) - cinder.volumes.list.assert_called_once_with(search_opts=None) - - context.is_os_admin = True - instance_api._get_os_volumes(context) - cinder.volumes.list.assert_called_with( - search_opts={'all_tenants': True, - 'project_id': context.project_id}) - - @mock.patch('ec2api.clients.nova', wraps=ec2api.clients.nova) - @mock.patch('ec2api.context.get_os_admin_context') - @mock.patch('cinderclient.client.Client') - @mock.patch('novaclient.client.Client') - def 
test_is_ebs_instance(self, nova, cinder, get_os_admin_context, - nova_client_getter): - nova = nova.return_value - cinder = cinder.return_value - context = base.create_context() - os_instance = fakes.OSInstance_full({'id': fakes.random_os_id()}) - - nova.servers.get.return_value = os_instance - cinder.volumes.list.return_value = [] - self.assertFalse(instance_api._is_ebs_instance(context, - os_instance.id)) - - cinder.volumes.list.return_value = [ - fakes.OSVolume( - {'id': fakes.random_os_id(), - 'status': 'attached', - 'attachments': [{'device': '/dev/vda', - 'server_id': os_instance.id}]})] - setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '') - self.assertFalse(instance_api._is_ebs_instance(context, - os_instance.id)) - - setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '/dev/vda') - cinder.volumes.list.return_value = [] - self.assertFalse(instance_api._is_ebs_instance(context, - os_instance.id)) - - cinder.volumes.list.return_value = [ - fakes.OSVolume( - {'id': fakes.random_os_id(), - 'status': 'attached', - 'attachments': [{'device': '/dev/vda', - 'server_id': fakes.random_os_id()}]})] - self.assertFalse(instance_api._is_ebs_instance(context, - os_instance.id)) - - cinder.volumes.list.return_value = [ - fakes.OSVolume( - {'id': fakes.random_os_id(), - 'status': 'attached', - 'attachments': [{'device': '/dev/vdb', - 'server_id': os_instance.id}]})] - self.assertFalse(instance_api._is_ebs_instance(context, - os_instance.id)) - - cinder.volumes.list.return_value = [ - fakes.OSVolume( - {'id': fakes.random_os_id(), - 'status': 'attached', - 'attachments': [{'device': '/dev/vda', - 'server_id': os_instance.id}]})] - self.assertTrue(instance_api._is_ebs_instance(context, - os_instance.id)) - nova_client_getter.assert_called_with( - get_os_admin_context.return_value) - cinder.volumes.list.assert_called_with(search_opts=None) - - cinder.volumes.list.return_value = [ - fakes.OSVolume( - {'id': fakes.random_os_id(), - 'status': 'attached', - 
'attachments': [{'device': 'vda', - 'server_id': os_instance.id}]})] - self.assertTrue(instance_api._is_ebs_instance(context, - os_instance.id)) diff --git a/ec2api/tests/unit/test_integrated_scenario.py b/ec2api/tests/unit/test_integrated_scenario.py deleted file mode 100644 index 1c226f1f..00000000 --- a/ec2api/tests/unit/test_integrated_scenario.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from ec2api.api import image as image_api -from ec2api.api import instance as instance_api -from ec2api.api import snapshot as snapshot_api -from ec2api.api import volume as volume_api -from ec2api.db import api as db_api -from ec2api import exception -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes - - -class DBItemsAutoCreationTestCase(base.DbTestCase): - - def setUp(self): - super(DBItemsAutoCreationTestCase, self).setUp() - self.mock_all_os() - self.context = base.create_context() - - def assert_image_project(self, expected_project_id, image_id): - if expected_project_id: - context = mock.NonCallableMock(project_id=expected_project_id) - else: - context = self.context - image_item = db_api.get_item_by_id(context, image_id) - if expected_project_id: - self.assertIsNotNone(image_item) - else: - self.assertIsNone(image_item) - - def test_describe_new_instance_then_its_volume(self): - os_instance_id = fakes.random_os_id() - os_volume_id = 
fakes.random_os_id() - os_instance = { - 'id': os_instance_id, - 'flavor': {'id': 'fake'}, - 'volumes_attached': [{'id': os_volume_id}], - } - os_volume = { - 'id': os_volume_id, - 'status': 'in-use', - 'attachments': [{'device': '/dev/vdb', - 'server_id': os_instance_id}], - } - self.nova_admin.servers.list.return_value = [ - fakes.OSInstance_full(os_instance)] - self.cinder.volumes.list.return_value = [ - fakes.OSVolume(os_volume)] - - reservations = instance_api.describe_instances(self.context) - instance = reservations['reservationSet'][0]['instancesSet'][0] - volume_id = instance['blockDeviceMapping'][0]['ebs']['volumeId'] - volume_api.describe_volumes(self.context, [volume_id]) - - def _test_describe_new_images(self, image_project_id=None, - aki_image_project_id=None, - with_id_mapping=False): - os_image_id = fakes.random_os_id() - os_aki_image_id = fakes.random_os_id() - os_image = { - 'id': os_image_id, - 'owner': image_project_id, - 'is_public': True, - 'container_format': 'ami', - 'kernel_id': os_aki_image_id, - } - os_aki_image = { - 'id': os_aki_image_id, - 'owner': aki_image_project_id, - 'is_public': True, - 'container_format': 'aki', - } - self.glance.images.list.return_value = ( - [fakes.OSImage(os_image), fakes.OSImage(os_aki_image)] - if with_id_mapping else - [fakes.OSImage(os_aki_image), fakes.OSImage(os_image)]) - - images = image_api.describe_images(self.context) - image = next(i for i in images['imagesSet'] - if i['imageType'] == 'machine') - aki_image = next(i for i in images['imagesSet'] - if i['imageType'] == 'kernel') - self.assertEqual(image_project_id, image['imageOwnerId']) - self.assert_image_project( - (image_project_id - if image_project_id == fakes.ID_OS_PROJECT else - None), - image['imageId']) - self.assertEqual(aki_image_project_id, aki_image['imageOwnerId']) - self.assert_image_project( - (aki_image_project_id - if aki_image_project_id == fakes.ID_OS_PROJECT else - None), - aki_image['imageId']) - - def 
test_describe_new_alien_images(self): - alien_project_id = fakes.random_os_id() - self._test_describe_new_images( - image_project_id=alien_project_id, - aki_image_project_id=alien_project_id, - with_id_mapping=False) - - def test_describe_new_local_images(self): - self._test_describe_new_images( - image_project_id=fakes.ID_OS_PROJECT, - aki_image_project_id=fakes.ID_OS_PROJECT, - with_id_mapping=False) - - def test_describe_new_local_ami_alien_aki_images(self): - alien_project_id = fakes.random_os_id() - self._test_describe_new_images( - image_project_id=fakes.ID_OS_PROJECT, - aki_image_project_id=alien_project_id, - with_id_mapping=False) - - def test_describe_new_alien_ami_local_aki_images(self): - alien_project_id = fakes.random_os_id() - self._test_describe_new_images( - image_project_id=alien_project_id, - aki_image_project_id=fakes.ID_OS_PROJECT, - with_id_mapping=False) - - def test_describe_new_alien_images_with_mappings(self): - alien_project_id = fakes.random_os_id() - self._test_describe_new_images( - image_project_id=alien_project_id, - aki_image_project_id=alien_project_id, - with_id_mapping=True) - - def test_describe_new_local_images_with_mappings(self): - self._test_describe_new_images( - image_project_id=fakes.ID_OS_PROJECT, - aki_image_project_id=fakes.ID_OS_PROJECT, - with_id_mapping=True) - - def test_describe_new_local_ami_alien_aki_images_with_mappings(self): - alien_project_id = fakes.random_os_id() - self._test_describe_new_images( - image_project_id=fakes.ID_OS_PROJECT, - aki_image_project_id=alien_project_id, - with_id_mapping=True) - - def test_describe_new_alien_ami_local_aki_images_with_mappings(self): - alien_project_id = fakes.random_os_id() - self._test_describe_new_images( - image_project_id=alien_project_id, - aki_image_project_id=fakes.ID_OS_PROJECT, - with_id_mapping=True) - - def _get_new_ebs_image(self, image_project_id=None, - bdm_image_project_id=None): - os_image_id = fakes.random_os_id() - os_snapshot_id = 
fakes.random_os_id() - os_bdm_image_id = fakes.random_os_id() - os_image = { - 'id': os_image_id, - 'owner': image_project_id, - 'is_public': True, - 'container_format': 'ami', - 'bdm_v2': True, - 'block_device_mapping': [{'device_name': '/dev/vds', - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'snapshot_id': os_snapshot_id}], - } - if os_bdm_image_id: - os_image['block_device_mapping'].append({ - 'device_name': '/dev/vdi', - 'source_type': 'image', - 'destination_type': 'volume', - 'image_id': os_bdm_image_id, - 'size': 100}) - os_snapshot = { - 'id': os_snapshot_id, - } - os_bdm_image = { - 'id': os_bdm_image_id, - 'owner': bdm_image_project_id, - 'is_public': True, - } - os_images = [fakes.OSImage(os_image)] - if bdm_image_project_id: - os_images.append(fakes.OSImage(os_bdm_image)) - self.glance.images.list.return_value = os_images - self.cinder.volume_snapshots.list.return_value = ( - [fakes.OSSnapshot(os_snapshot)] - if image_project_id == fakes.ID_OS_PROJECT else - []) - - images = image_api.describe_images(self.context) - return next(i for i in images['imagesSet'] - if i['blockDeviceMapping']) - - def _find_snapshot_id_in_bdm(self, image, device_name): - return next(bdm['ebs']['snapshotId'] - for bdm in image['blockDeviceMapping'] - if bdm['deviceName'] == device_name) - - def test_describe_new_local_snapshot_from_new_image(self): - image = self._get_new_ebs_image(image_project_id=fakes.ID_OS_PROJECT) - snapshot_id = self._find_snapshot_id_in_bdm(image, '/dev/vds') - snapshot_api.describe_snapshots(self.context, [snapshot_id]) - - def test_describe_new_alien_snapshot_from_new_image(self): - image = self._get_new_ebs_image(image_project_id=fakes.random_os_id()) - snapshot_id = self._find_snapshot_id_in_bdm(image, '/dev/vds') - self.assertRaises(exception.InvalidSnapshotNotFound, - snapshot_api.describe_snapshots, - self.context, [snapshot_id]) - - def test_describe_new_local_bdm_image_from_local_image(self): - image = self._get_new_ebs_image( 
- image_project_id=fakes.ID_OS_PROJECT, - bdm_image_project_id=fakes.ID_OS_PROJECT) - image_id = self._find_snapshot_id_in_bdm(image, '/dev/vdi') - image_api.describe_images(self.context, image_id=[image_id]) - self.assert_image_project(fakes.ID_OS_PROJECT, image_id) - - def test_describe_new_alien_bdm_image_from_new_local_image(self): - alien_project_id = fakes.random_os_id() - image = self._get_new_ebs_image( - image_project_id=fakes.ID_OS_PROJECT, - bdm_image_project_id=alien_project_id) - image_id = self._find_snapshot_id_in_bdm(image, '/dev/vdi') - image_api.describe_images(self.context, image_id=[image_id]) - self.assert_image_project(None, image_id) - - def test_describe_new_alien_bdm_image_from_new_alien_image(self): - alien_project_id = fakes.random_os_id() - image = self._get_new_ebs_image( - image_project_id=alien_project_id, - bdm_image_project_id=alien_project_id) - image_id = self._find_snapshot_id_in_bdm(image, '/dev/vdi') - image_api.describe_images(self.context, image_id=[image_id]) - self.assert_image_project(None, image_id) - - def _test_describe_new_instance_then_its_image(self, image_project_id): - os_instance_id = fakes.random_os_id() - os_image_id = fakes.random_os_id() - os_instance = { - 'id': os_instance_id, - 'flavor': {'id': 'fake'}, - 'image': {'id': os_image_id}, - } - os_image = { - 'id': os_image_id, - 'owner': image_project_id, - 'visibility': 'public', - } - self.nova_admin.servers.list.return_value = [ - fakes.OSInstance_full(os_instance)] - self.glance.images.list.return_value = [fakes.OSImage(os_image)] - - reservations = instance_api.describe_instances(self.context) - instance = reservations['reservationSet'][0]['instancesSet'][0] - image_id = instance['imageId'] - image = (image_api.describe_images(self.context, image_id=[image_id]) - ['imagesSet'][0]) - self.assertEqual(image_id, image['imageId']) - self.assertEqual(image_project_id, image['imageOwnerId']) - expected_project_id = (fakes.ID_OS_PROJECT - if image_project_id == 
fakes.ID_OS_PROJECT else - None) - self.assert_image_project(expected_project_id, image['imageId']) - - def test_describe_new_instance_then_its_local_image(self): - self._test_describe_new_instance_then_its_image(fakes.ID_OS_PROJECT) - - def test_describe_new_instance_then_its_alien_image(self): - self._test_describe_new_instance_then_its_image(fakes.random_os_id()) - - def test_describe_new_instance_then_its_alien_image_attribute(self): - os_instance_id = fakes.random_os_id() - os_image_id = fakes.random_os_id() - alien_project_id = fakes.random_os_id() - os_instance = { - 'id': os_instance_id, - 'flavor': {'id': 'fake'}, - 'image': {'id': os_image_id}, - } - os_image = { - 'id': os_image_id, - 'owner': alien_project_id, - 'is_public': True, - } - self.nova_admin.servers.list.return_value = [ - fakes.OSInstance_full(os_instance)] - self.glance.images.get.return_value = fakes.OSImage(os_image) - - reservations = instance_api.describe_instances(self.context) - instance = reservations['reservationSet'][0]['instancesSet'][0] - image_id = instance['imageId'] - - # NOTE(ft): ensure that InvalidAMIID.NotFound is not raised - self.assertRaises(exception.AuthFailure, - image_api.describe_image_attribute, - self.context, image_id, 'description') diff --git a/ec2api/tests/unit/test_internet_gateway.py b/ec2api/tests/unit/test_internet_gateway.py deleted file mode 100644 index f49b3f0e..00000000 --- a/ec2api/tests/unit/test_internet_gateway.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -from neutronclient.common import exceptions as neutron_exception -from unittest import mock - -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class IgwTestCase(base.ApiTestCase): - - def setUp(self): - super(IgwTestCase, self).setUp() - self.DB_IGW_1_DETACHED = fakes.gen_db_igw(fakes.ID_EC2_IGW_1) - self.DB_IGW_2_ATTACHED = fakes.gen_db_igw(fakes.ID_EC2_IGW_2, - fakes.ID_EC2_VPC_2) - - def test_create_igw(self): - self.db_api.add_item.return_value = fakes.DB_IGW_2 - - resp = self.execute('CreateInternetGateway', {}) - - self.assertIn('internetGateway', resp) - igw = resp['internetGateway'] - self.assertThat(fakes.EC2_IGW_2, matchers.DictMatches(igw)) - self.db_api.add_item.assert_called_with( - mock.ANY, 'igw', {}) - - def test_attach_igw(self): - self.configure(external_network=fakes.NAME_OS_PUBLIC_NETWORK) - self.set_mock_db_items(fakes.DB_IGW_1, fakes.DB_IGW_2, fakes.DB_VPC_2, - fakes.DB_VPN_GATEWAY_1, fakes.DB_VPN_GATEWAY_2) - self.neutron.list_networks.return_value = ( - {'networks': [{'id': fakes.ID_OS_PUBLIC_NETWORK}]}) - - def do_check(): - resp = self.execute( - 'AttachInternetGateway', - {'VpcId': fakes.ID_EC2_VPC_2, - 'InternetGatewayId': fakes.ID_EC2_IGW_2}) - - self.assertEqual(True, resp['return']) - self.db_api.update_item.assert_called_once_with( - mock.ANY, self.DB_IGW_2_ATTACHED) - - do_check() - self.neutron.add_gateway_router.assert_called_once_with( - fakes.ID_OS_ROUTER_2, - {'network_id': fakes.ID_OS_PUBLIC_NETWORK}) - self.neutron.list_networks.assert_called_once_with( - **{'router:external': True, - 'name': fakes.NAME_OS_PUBLIC_NETWORK}) - - # VPN gateway is already attached - self.db_api.reset_mock() - self.neutron.reset_mock() - vgw_2 = tools.update_dict(fakes.DB_VPN_GATEWAY_2, - {'vpc_id': fakes.ID_EC2_VPC_2}) - 
self.add_mock_db_items(vgw_2) - do_check() - self.assertFalse(self.neutron.add_gateway_router.called) - - def test_attach_igw_invalid_parameters(self): - def do_check(error_code): - self.assert_execution_error( - error_code, 'AttachInternetGateway', - {'VpcId': fakes.ID_EC2_VPC_2, - 'InternetGatewayId': fakes.ID_EC2_IGW_2}) - - self.assertEqual(0, self.neutron.add_gateway_router.call_count) - self.assertEqual(0, self.db_api.update_item.call_count) - - self.neutron.reset_mock() - self.db_api.reset_mock() - - self.set_mock_db_items(fakes.DB_VPC_2) - do_check('InvalidInternetGatewayID.NotFound') - - self.set_mock_db_items(fakes.DB_IGW_2) - do_check('InvalidVpcID.NotFound') - - self.set_mock_db_items(self.DB_IGW_2_ATTACHED, fakes.DB_VPC_2) - do_check('Resource.AlreadyAssociated') - - self.set_mock_db_items( - fakes.DB_IGW_2, fakes.DB_VPC_2, - fakes.gen_db_igw(fakes.ID_EC2_IGW_1, fakes.ID_EC2_VPC_2)) - do_check('InvalidParameterValue') - - @tools.screen_unexpected_exception_logs - def test_attach_igw_rollback(self): - self.configure(external_network=fakes.NAME_OS_PUBLIC_NETWORK) - self.set_mock_db_items(fakes.DB_IGW_1, fakes.DB_IGW_2, fakes.DB_VPC_2) - self.neutron.list_networks.return_value = ( - {'networks': [{'id': fakes.ID_OS_PUBLIC_NETWORK}]}) - self.neutron.add_gateway_router.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'AttachInternetGateway', - {'VpcId': fakes.ID_EC2_VPC_2, - 'InternetGatewayId': fakes.ID_EC2_IGW_2}) - - self.db_api.update_item.assert_any_call( - mock.ANY, fakes.DB_IGW_2) - - def test_detach_igw(self): - self.set_mock_db_items(fakes.DB_IGW_1, fakes.DB_VPC_1) - - def do_check(): - resp = self.execute( - 'DetachInternetGateway', - {'VpcId': fakes.ID_EC2_VPC_1, - 'InternetGatewayId': fakes.ID_EC2_IGW_1}) - self.assertEqual(True, resp['return']) - self.db_api.update_item.assert_called_once_with( - mock.ANY, self.DB_IGW_1_DETACHED) - - do_check() - self.neutron.remove_gateway_router.assert_called_once_with( - 
fakes.ID_OS_ROUTER_1) - - # VPN gateway is still attached - self.db_api.reset_mock() - self.neutron.reset_mock() - self.add_mock_db_items(fakes.DB_VPN_GATEWAY_1) - do_check() - self.assertFalse(self.neutron.remove_gateway_router.called) - - def test_detach_igw_invalid_parameters(self): - def do_check(error_code): - self.assert_execution_error( - error_code, 'DetachInternetGateway', - {'VpcId': fakes.ID_EC2_VPC_1, - 'InternetGatewayId': fakes.ID_EC2_IGW_1}) - - self.assertEqual(0, self.neutron.remove_gateway_router.call_count) - self.assertEqual(0, self.db_api.update_item.call_count) - - self.neutron.reset_mock() - self.db_api.reset_mock() - - self.set_mock_db_items(fakes.DB_VPC_1) - do_check('InvalidInternetGatewayID.NotFound') - - self.set_mock_db_items(fakes.DB_IGW_1) - do_check('InvalidVpcID.NotFound') - - self.set_mock_db_items(self.DB_IGW_1_DETACHED, fakes.DB_VPC_1) - do_check('Gateway.NotAttached') - - def test_detach_igw_no_router(self): - self.set_mock_db_items(fakes.DB_IGW_1, fakes.DB_VPC_1) - self.neutron.remove_gateway_router.side_effect = ( - neutron_exception.NotFound) - - resp = self.execute( - 'DetachInternetGateway', - {'VpcId': fakes.ID_EC2_VPC_1, - 'InternetGatewayId': fakes.ID_EC2_IGW_1}) - - self.assertEqual(True, resp['return']) - self.neutron.remove_gateway_router.assert_called_once_with( - fakes.ID_OS_ROUTER_1) - - @tools.screen_unexpected_exception_logs - def test_detach_igw_rollback(self): - self.set_mock_db_items(fakes.DB_IGW_1, fakes.DB_VPC_1) - self.neutron.remove_gateway_router.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DetachInternetGateway', - {'VpcId': fakes.EC2_VPC_1['vpcId'], - 'InternetGatewayId': fakes.EC2_IGW_1['internetGatewayId']}) - - self.db_api.update_item.assert_any_call( - mock.ANY, fakes.DB_IGW_1) - - def test_delete_igw(self): - self.set_mock_db_items(fakes.DB_IGW_2) - - resp = self.execute( - 'DeleteInternetGateway', - {'InternetGatewayId': fakes.ID_EC2_IGW_2}) - - 
self.assertEqual(True, resp['return']) - self.db_api.get_item_by_id.assert_called_once_with(mock.ANY, - fakes.ID_EC2_IGW_2) - self.db_api.delete_item.assert_called_once_with(mock.ANY, - fakes.ID_EC2_IGW_2) - - def test_delete_igw_invalid_parameters(self): - def do_check(error_code): - self.assert_execution_error( - error_code, 'DeleteInternetGateway', - {'InternetGatewayId': fakes.ID_EC2_IGW_1}) - - self.assertEqual(0, self.db_api.delete_item.call_count) - - self.neutron.reset_mock() - self.db_api.reset_mock() - - self.set_mock_db_items() - do_check('InvalidInternetGatewayID.NotFound') - - self.set_mock_db_items(fakes.DB_IGW_1) - do_check('DependencyViolation') - - def test_describe_igw(self): - self.set_mock_db_items(fakes.DB_IGW_1, fakes.DB_IGW_2) - - resp = self.execute('DescribeInternetGateways', {}) - self.assertThat(resp['internetGatewaySet'], - matchers.ListMatches([fakes.EC2_IGW_1, - fakes.EC2_IGW_2])) - - resp = self.execute('DescribeInternetGateways', - {'InternetGatewayId.1': fakes.ID_EC2_IGW_2}) - self.assertThat(resp['internetGatewaySet'], - matchers.ListMatches([fakes.EC2_IGW_2])) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_IGW_2])) - - self.check_filtering( - 'DescribeInternetGateways', 'internetGatewaySet', - [('internet-gateway-id', fakes.ID_EC2_IGW_2), - ('attachment.state', 'available'), - ('attachment.vpc-id', fakes.ID_EC2_VPC_1)]) - self.check_tag_support( - 'DescribeInternetGateways', 'internetGatewaySet', - fakes.ID_EC2_IGW_2, 'internetGatewayId') - - @mock.patch('ec2api.api.ec2utils.check_and_create_default_vpc') - def test_describe_internet_gateways_no_default_vpc(self, check_and_create): - self.configure(disable_ec2_classic=True) - - def mock_check_and_create(context): - self.set_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_IGW_DEFAULT) - check_and_create.side_effect = mock_check_and_create - - resp = self.execute('DescribeInternetGateways', {}) - self.assertEqual(resp['internetGatewaySet'], - 
[fakes.EC2_IGW_DEFAULT]) - - check_and_create.assert_called_once_with(mock.ANY) diff --git a/ec2api/tests/unit/test_key_pair.py b/ec2api/tests/unit/test_key_pair.py deleted file mode 100644 index 4ca00ea0..00000000 --- a/ec2api/tests/unit/test_key_pair.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 -from unittest import mock - - -from novaclient import exceptions as nova_exception - -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class KeyPairCase(base.ApiTestCase): - - @mock.patch('ec2api.api.key_pair._generate_key_pair') - def test_create_key_pair(self, _generate_key_pair): - _generate_key_pair.return_value = ( - fakes.PRIVATE_KEY_KEY_PAIR, fakes.PUBLIC_KEY_KEY_PAIR) - self.nova.keypairs.create.return_value = ( - fakes.NovaKeyPair(fakes.OS_KEY_PAIR)) - resp = self.execute('CreateKeyPair', {'KeyName': fakes.NAME_KEY_PAIR}) - self.assertThat(fakes.EC2_KEY_PAIR, matchers.DictMatches(resp)) - _generate_key_pair.assert_called_once_with() - - def test_create_key_pair_invalid(self): - self.nova.keypairs.create.side_effect = ( - nova_exception.Conflict(409)) - self.assert_execution_error( - 'InvalidKeyPair.Duplicate', 'CreateKeyPair', - {'KeyName': fakes.NAME_KEY_PAIR}) - self.assert_execution_error( - 'ValidationError', 'CreateKeyPair', {'KeyName': 'k' * 256}) - 
self.nova.keypairs.create.side_effect = ( - nova_exception.OverLimit(413)) - self.assert_execution_error( - 'ResourceLimitExceeded', 'CreateKeyPair', - {'KeyName': fakes.NAME_KEY_PAIR}) - - def test_import_key_pair(self): - self.nova.keypairs.create.return_value = ( - fakes.NovaKeyPair(fakes.OS_KEY_PAIR)) - resp = self.execute('ImportKeyPair', - {'KeyName': fakes.NAME_KEY_PAIR, - 'PublicKeyMaterial': base64.b64encode( - fakes.PUBLIC_KEY_KEY_PAIR.encode("ascii") - ).decode("ascii")}) - self.assertThat( - tools.purge_dict(fakes.EC2_KEY_PAIR, {'keyMaterial'}), - matchers.DictMatches(resp)) - self.nova.keypairs.create.assert_called_once_with( - fakes.NAME_KEY_PAIR, fakes.PUBLIC_KEY_KEY_PAIR) - - def test_import_key_pair_invalid(self): - self.nova.keypairs.create.side_effect = ( - nova_exception.OverLimit(413)) - self.assert_execution_error( - 'ResourceLimitExceeded', 'ImportKeyPair', - {'KeyName': fakes.NAME_KEY_PAIR, - 'PublicKeyMaterial': base64.b64encode( - fakes.PUBLIC_KEY_KEY_PAIR.encode("ascii") - ).decode("ascii")}) - - def test_delete_key_pair(self): - self.nova.keypairs.delete.return_value = True - self.execute('DeleteKeyPair', {'KeyName': fakes.NAME_KEY_PAIR}) - self.nova.keypairs.delete.assert_called_once_with(fakes.NAME_KEY_PAIR) - self.nova.keypairs.delete.side_effect = nova_exception.NotFound(404) - self.execute('DeleteKeyPair', {'KeyName': 'keyname1'}) - self.nova.keypairs.delete.assert_any_call('keyname1') - - def test_describe_key_pairs(self): - self.nova.keypairs.list.return_value = [fakes.NovaKeyPair( - fakes.OS_KEY_PAIR)] - resp = self.execute('DescribeKeyPairs', {}) - self.assertThat(resp['keySet'], - matchers.ListMatches([ - tools.purge_dict(fakes.EC2_KEY_PAIR, - {'keyMaterial'})])) - self.nova.keypairs.list.assert_called_once_with() - - self.check_filtering( - 'DescribeKeyPairs', 'keySet', - [('fingerprint', fakes.FINGERPRINT_KEY_PAIR), - ('key-name', fakes.NAME_KEY_PAIR)]) - - def test_describe_key_pairs_invalid(self): - 
self.nova.keypairs.list.return_value = [fakes.NovaKeyPair( - fakes.OS_KEY_PAIR)] - self.assert_execution_error( - 'InvalidKeyPair.NotFound', 'DescribeKeyPairs', - {'KeyName.1': 'badname'}) - self.nova.keypairs.list.assert_called_once_with() diff --git a/ec2api/tests/unit/test_metadata.py b/ec2api/tests/unit/test_metadata.py deleted file mode 100644 index 1c9d2692..00000000 --- a/ec2api/tests/unit/test_metadata.py +++ /dev/null @@ -1,410 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture as config_fixture -from oslotest import base as test_base -import testtools -import webob - -from ec2api import exception -from ec2api import metadata -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers - - -class ProxyTestCase(test_base.BaseTestCase): - - def setUp(self): - super(ProxyTestCase, self).setUp() - self.handler = metadata.MetadataRequestHandler() - conf = self.useFixture(config_fixture.Config()) - conf.config(group='metadata', - nova_metadata_ip='9.9.9.9', - nova_metadata_port=8775, - nova_metadata_protocol='http', - nova_metadata_insecure=True, - auth_ca_cert=None, - nova_client_cert='nova_cert', - nova_client_priv_key='nova_priv_key', - metadata_proxy_shared_secret='secret') - - @mock.patch('ec2api.metadata.api.get_version_list') - def test_callable(self, get_version_list): - get_version_list.return_value = 'foo' - request = webob.Request.blank('/') - response = request.get_response(self.handler) - self.assertEqual(200, response.status_int) - self.assertEqual('foo', response.body.decode("utf-8")) - - @mock.patch('ec2api.metadata.api.get_version_list') - def test_root(self, get_version_list): - get_version_list.return_value = 'fake_version' - request = webob.Request.blank('/') - response = request.get_response(self.handler) - self.assertEqual('fake_version', response.body.decode("utf-8")) - response_ctype = response.headers['Content-Type'] - self.assertTrue(response_ctype.startswith("text/plain")) - get_version_list.assert_called_with() - - request = webob.Request.blank('/foo/../') - response = request.get_response(self.handler) - self.assertEqual('fake_version', response.body.decode("utf-8")) - - @mock.patch.object(metadata.MetadataRequestHandler, '_get_metadata') - @mock.patch.object(metadata.MetadataRequestHandler, '_get_requester') - def test_version_root(self, get_requester, 
get_metadata): - get_requester.return_value = mock.sentinel.requester - get_metadata.return_value = 'fake' - request = webob.Request.blank('/latest') - response = request.get_response(self.handler) - self.assertEqual('fake', response.body.decode("utf-8")) - response_ctype = response.headers['Content-Type'] - self.assertTrue(response_ctype.startswith("text/plain")) - get_requester.assert_called_with(mock.ANY) - get_metadata.assert_called_with(['latest'], mock.sentinel.requester) - - get_metadata.side_effect = exception.EC2MetadataNotFound() - request = webob.Request.blank('/latest') - response = request.get_response(self.handler) - self.assertEqual(404, response.status_int) - - with mock.patch.object(metadata, 'LOG') as log: - get_metadata.side_effect = Exception() - request = webob.Request.blank('/latest') - response = request.get_response(self.handler) - self.assertEqual(500, response.status_int) - self.assertEqual(len(log.mock_calls), 2) - - def test_get_requester(self): - expected = {'os_instance_id': mock.sentinel.os_instance_id, - 'project_id': mock.sentinel.project_id, - 'private_ip': mock.sentinel.private_ip} - req = mock.Mock(headers={}) - - req.headers['X-Instance-ID'] = mock.sentinel.os_instance_id - - @mock.patch.object(metadata.MetadataRequestHandler, - '_unpack_neutron_request') - def do_test1(unpack_request): - unpack_request.return_value = (mock.sentinel.os_instance_id, - mock.sentinel.project_id, - mock.sentinel.private_ip) - - retval = self.handler._get_requester(req) - self.assertEqual(expected, retval) - unpack_request.assert_called_with(req) - - do_test1() - - req.headers['X-Metadata-Provider'] = mock.sentinel.provider_id - - @mock.patch('ec2api.metadata.api.' 
- 'get_os_instance_and_project_id_by_provider_id') - @mock.patch('ec2api.context.get_os_admin_context') - @mock.patch.object(metadata.MetadataRequestHandler, - '_unpack_nsx_request') - def do_test2(unpack_request, get_context, get_ids): - unpack_request.return_value = (mock.sentinel.provider_id, - mock.sentinel.private_ip) - get_context.return_value = base.create_context(is_os_admin=True) - get_ids.return_value = (mock.sentinel.os_instance_id, - mock.sentinel.project_id) - - retval = self.handler._get_requester(req) - self.assertEqual(expected, retval) - unpack_request.assert_called_with(req) - get_context.assert_called_with() - get_ids.assert_called_with(get_context.return_value, - mock.sentinel.provider_id, - mock.sentinel.private_ip) - - do_test2() - - @mock.patch('ec2api.metadata.api.get_metadata_item') - @mock.patch('ec2api.context.get_os_admin_context') - def test_get_metadata(self, get_context, get_metadata_item): - get_context.return_value = base.create_context(is_os_admin=True) - requester = {'os_instance_id': mock.sentinel.os_instance_id, - 'project_id': mock.sentinel.project_id, - 'private_ip': mock.sentinel.private_ip} - get_metadata_item.return_value = 'fake_item' - self.handler.cache_region = 'fake_region' - - retval = self.handler._get_metadata(['fake_ver', 'fake_attr'], - requester) - self.assertEqual('fake_item', retval) - get_context.assert_called_with() - get_metadata_item.assert_called_with( - get_context.return_value, ['fake_ver', 'fake_attr'], - mock.sentinel.os_instance_id, mock.sentinel.private_ip, - 'fake_region') - self.assertEqual(mock.sentinel.project_id, - get_context.return_value.project_id) - - @mock.patch.object(metadata.MetadataRequestHandler, '_proxy_request') - def test_proxy_call(self, proxy): - req = mock.Mock(path_info='/openstack') - proxy.return_value = 'value' - - with mock.patch.object(metadata.MetadataRequestHandler, - '_get_requester'): - retval = self.handler(req) - self.assertEqual(retval, 'value') - - 
@mock.patch.object(metadata, 'LOG') - @mock.patch.object(metadata.MetadataRequestHandler, '_proxy_request') - def test_proxy_call_internal_server_error(self, proxy, log): - req = mock.Mock(path_info='/openstack') - proxy.side_effect = Exception() - retval = self.handler(req) - self.assertIsInstance(retval, webob.exc.HTTPInternalServerError) - self.assertEqual(len(log.mock_calls), 2) - - proxy.side_effect = exception.EC2MetadataException() - retval = self.handler(req) - self.assertIsInstance(retval, webob.exc.HTTPInternalServerError) - - @mock.patch.object(metadata.MetadataRequestHandler, '_proxy_request') - def test_proxy_call_no_instance(self, proxy): - req = mock.Mock(path_info='/openstack') - proxy.side_effect = exception.EC2MetadataNotFound() - with mock.patch.object(metadata.MetadataRequestHandler, - '_get_requester'): - retval = self.handler(req) - self.assertIsInstance(retval, webob.exc.HTTPNotFound) - - @mock.patch.object(metadata.MetadataRequestHandler, - '_build_proxy_request_headers') - def _proxy_request_test_helper(self, build_headers, - response_code=200, method='GET'): - hdrs = {'X-Forwarded-For': '8.8.8.8'} - body = 'body' - - req = mock.Mock(path_info='/openstack', query_string='', headers=hdrs, - method=method, body=body) - resp = mock.MagicMock(status=response_code) - req.response = resp - build_headers.return_value = hdrs - with mock.patch('httplib2.Http') as mock_http: - resp.__getitem__.return_value = "text/plain" - mock_http.return_value.request.return_value = (resp, 'content') - - retval = self.handler._proxy_request(req, mock.sentinel.requester) - mock_http.assert_called_once_with( - ca_certs=None, disable_ssl_certificate_validation=True) - mock_http.assert_has_calls([ - mock.call().add_certificate( - cfg.CONF.metadata.nova_client_priv_key, - cfg.CONF.metadata.nova_client_cert, - "%s:%s" % (cfg.CONF.metadata.nova_metadata_ip, - cfg.CONF.metadata.nova_metadata_port) - ), - mock.call().request( - 'http://9.9.9.9:8775/openstack', - 
method=method, - headers={ - 'X-Forwarded-For': '8.8.8.8', - }, - body=body - )] - ) - build_headers.assert_called_once_with(mock.sentinel.requester) - - return retval - - def test_proxy_request_post(self): - response = self._proxy_request_test_helper(method='POST') - self.assertEqual(response.content_type, "text/plain") - self.assertEqual(response.body, 'content') - - def test_proxy_request_200(self): - response = self._proxy_request_test_helper(response_code=200) - self.assertEqual(response.content_type, "text/plain") - self.assertEqual(response.body, 'content') - - def test_proxy_request_400(self): - self.assertIsInstance( - self._proxy_request_test_helper(response_code=400), - webob.exc.HTTPBadRequest) - - def test_proxy_request_403(self): - self.assertIsInstance( - self._proxy_request_test_helper(response_code=403), - webob.exc.HTTPForbidden) - - def test_proxy_request_404(self): - self.assertIsInstance( - self._proxy_request_test_helper(response_code=404), - webob.exc.HTTPNotFound) - - def test_proxy_request_409(self): - self.assertIsInstance( - self._proxy_request_test_helper(response_code=409), - webob.exc.HTTPConflict) - - def test_proxy_request_500(self): - self.assertIsInstance( - self._proxy_request_test_helper(response_code=500), - webob.exc.HTTPInternalServerError) - - def test_proxy_request_other_code(self): - with testtools.ExpectedException(Exception): - self._proxy_request_test_helper(response_code=302) - - @mock.patch.object(metadata.MetadataRequestHandler, '_sign_instance_id') - def test_build_proxy_request_headers(self, sign_instance_id): - sign_instance_id.return_value = mock.sentinel.signed - requester = {'os_instance_id': mock.sentinel.os_instance_id, - 'project_id': mock.sentinel.project_id, - 'private_ip': mock.sentinel.private_ip} - result = self.handler._build_proxy_request_headers(requester) - expected = {'X-Forwarded-For': mock.sentinel.private_ip, - 'X-Instance-ID': mock.sentinel.os_instance_id, - 'X-Tenant-ID': 
mock.sentinel.project_id, - 'X-Instance-ID-Signature': mock.sentinel.signed} - self.assertThat(result, matchers.DictMatches(expected)) - - def test_sign_instance_id(self): - self.assertEqual( - '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4', - self.handler._sign_instance_id('foo') - ) - - def test_unpack_neutron_request(self): - sign = ( - '97e7709481495f1a3a589e5ee03f8b5d51a3e0196768e300c441b58fe0382f4d') - req = mock.Mock(headers={'X-Instance-ID': 'fake_instance_id', - 'X-Tenant-ID': 'fake_project_id', - 'X-Forwarded-For': 'fake_instance_ip', - 'X-Instance-ID-Signature': sign}) - retval = self.handler._unpack_neutron_request(req) - self.assertEqual( - ('fake_instance_id', 'fake_project_id', 'fake_instance_ip'), - retval) - - req.headers['X-Instance-ID-Signature'] = 'fake' - self.assertRaises(webob.exc.HTTPForbidden, - self.handler._unpack_neutron_request, req) - - req.headers.pop('X-Instance-ID-Signature') - self.assertRaises(webob.exc.HTTPForbidden, - self.handler._unpack_neutron_request, req) - - req.headers.pop('X-Tenant-ID') - self.assertRaises(webob.exc.HTTPBadRequest, - self.handler._unpack_neutron_request, req) - - req.headers.pop('X-Forwarded-For') - self.assertRaises(exception.EC2MetadataInvalidAddress, - self.handler._unpack_neutron_request, req) - - def test_unpack_nsx_request(self): - sign = ( - '344aa301e652d6c214c4f4a475a43c9f69d9f2d26d87e939c6bac3f21a9d2476') - req = mock.Mock(headers={'X-Metadata-Provider': 'fake_provider_id', - 'X-Forwarded-For': 'fake_instance_ip', - 'X-Metadata-Provider-Signature': sign}) - retval = self.handler._unpack_nsx_request(req) - self.assertEqual(('fake_provider_id', 'fake_instance_ip'), retval) - - req.headers['X-Forwarded-For'] = 'fake_instance_ip,fake_router_ip' - retval = self.handler._unpack_nsx_request(req) - self.assertEqual(('fake_provider_id', 'fake_instance_ip'), retval) - - req.headers['X-Metadata-Provider-Signature'] = 'fake' - self.assertRaises(webob.exc.HTTPForbidden, - 
self.handler._unpack_nsx_request, req) - - req.headers.pop('X-Metadata-Provider-Signature') - self.assertRaises(webob.exc.HTTPForbidden, - self.handler._unpack_nsx_request, req) - - with config_fixture.Config() as conf: - conf.config(group='metadata', - metadata_proxy_shared_secret=None) - retval = self.handler._unpack_nsx_request(req) - self.assertEqual(('fake_provider_id', 'fake_instance_ip'), retval) - - req.headers.pop('X-Metadata-Provider') - self.assertRaises(webob.exc.HTTPBadRequest, - self.handler._unpack_nsx_request, req) - - req.headers.pop('X-Forwarded-For') - self.assertRaises(webob.exc.HTTPBadRequest, - self.handler._unpack_nsx_request, req) - - @mock.patch('ec2api.utils.constant_time_compare') - def test_usage_of_constant_time_compare(self, constant_time_compare): - sign = ( - '97e7709481495f1a3a589e5ee03f8b5d51a3e0196768e300c441b58fe0382f4d') - req = mock.Mock(headers={'X-Instance-ID': 'fake_instance_id', - 'X-Tenant-ID': 'fake_project_id', - 'X-Forwarded-For': 'fake_instance_ip', - 'X-Instance-ID-Signature': sign}) - self.handler._unpack_neutron_request(req) - self.assertEqual(1, constant_time_compare.call_count) - - @mock.patch('novaclient.client.Client') - @mock.patch('ec2api.db.api.IMPL') - @mock.patch('ec2api.metadata.api.instance_api') - @mock.patch('ec2api.metadata.MetadataRequestHandler._validate_signature') - def test_get_metadata_items(self, validate, instance_api, db_api, nova): - FAKE_USER_DATA = u'fake_user_data-' + chr(1071) - nova.return_value.servers.list.return_value = [ - fakes.OSInstance(fakes.OS_INSTANCE_1)] - keypair = mock.Mock(public_key=fakes.PUBLIC_KEY_KEY_PAIR) - keypair.configure_mock(name=fakes.NAME_KEY_PAIR) - nova.return_value.keypairs.get.return_value = keypair - db_api.get_items_ids.return_value = [ - (fakes.ID_EC2_INSTANCE_1, fakes.ID_OS_INSTANCE_1)] - instance_api.describe_instances.return_value = { - 'reservationSet': [fakes.EC2_RESERVATION_1]} - userDataValue = base64.b64encode(FAKE_USER_DATA.encode('utf-8')) - 
instance_api.describe_instance_attribute.return_value = { - 'instanceId': fakes.ID_EC2_INSTANCE_1, - 'userData': {'value': userDataValue}} - - def _test_metadata_path(relpath): - # recursively confirm a http 200 from all meta-data elements - # available at relpath. - headers = {'X-Instance-ID': fakes.ID_EC2_INSTANCE_1, - 'X-Tenant-ID': fakes.ID_OS_PROJECT, - 'X-Forwarded-For': fakes.IP_NETWORK_INTERFACE_2} - request = webob.Request.blank( - relpath, headers=headers) - - response = request.get_response(self.handler) - self.assertEqual(200, response.status_int) - for item in response.body.decode("utf-8").split('\n'): - if 'public-keys' in relpath: - # meta-data/public-keys/0=keyname refers to - # meta-data/public-keys/0 - item = item.split('=')[0] - if item.endswith('/'): - path = relpath + '/' + item - _test_metadata_path(path) - continue - - path = relpath + '/' + item - request = webob.Request.blank( - path, headers=headers) - response = request.get_response(self.handler) - self.assertEqual(200, response.status_int, message=path) - - _test_metadata_path('/latest') diff --git a/ec2api/tests/unit/test_metadata_api.py b/ec2api/tests/unit/test_metadata_api.py deleted file mode 100644 index 19844502..00000000 --- a/ec2api/tests/unit/test_metadata_api.py +++ /dev/null @@ -1,346 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -import copy -from unittest import mock - -from novaclient import exceptions as nova_exception -from oslo_cache import core as cache_core -from oslo_config import cfg - -from ec2api import exception -from ec2api.metadata import api -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - -CONF = cfg.CONF -FAKE_USER_DATA = u'fake_user_data-' + chr(1071) - - -class MetadataApiTestCase(base.ApiTestCase): - # TODO(ft): 'execute' feature isn't used here, but some mocks and - # fake context are. ApiTestCase should be split to some classes to use - # its feature optimally - - def setUp(self): - super(MetadataApiTestCase, self).setUp() - self.instance_api = self.mock('ec2api.metadata.api.instance_api') - - self.set_mock_db_items(fakes.DB_INSTANCE_1) - self.instance_api.describe_instances.return_value = { - 'reservationSet': [fakes.EC2_RESERVATION_1]} - userDataValue = base64.b64encode(FAKE_USER_DATA.encode('utf-8')) - self.instance_api.describe_instance_attribute.return_value = { - 'instanceId': fakes.ID_EC2_INSTANCE_1, - 'userData': {'value': userDataValue}} - self.configure(enabled=False, group='cache') - self._init_cache_region() - - self.fake_context = base.create_context() - - def _init_cache_region(self): - self.cache_region = cache_core.create_region() - cache_core.configure_cache_region(CONF, self.cache_region) - - def test_get_version_list(self): - retval = api.get_version_list() - self.assertEqual('\n'.join(api.VERSIONS + ['latest']), retval) - - def test_get_instance_and_project_id_by_provider_id(self): - self.neutron.list_subnets.return_value = { - 'subnets': [fakes.OS_SUBNET_1, fakes.OS_SUBNET_2]} - self.neutron.list_ports.return_value = { - 'ports': [fakes.OS_PORT_2]} - self.assertEqual( - (fakes.ID_OS_INSTANCE_1, fakes.ID_OS_PROJECT), - api.get_os_instance_and_project_id_by_provider_id( - self.fake_context, mock.sentinel.provider_id, - 
fakes.IP_NETWORK_INTERFACE_2)) - self.neutron.list_subnets.assert_called_with( - advanced_service_providers=[mock.sentinel.provider_id], - fields=['network_id']) - self.neutron.list_ports.assert_called_with( - fixed_ips=('ip_address=%s' % fakes.IP_NETWORK_INTERFACE_2), - network_id=[fakes.ID_OS_NETWORK_1, fakes.ID_OS_NETWORK_2], - fields=['device_id', 'tenant_id']) - - self.neutron.list_ports.return_value = {'ports': []} - self.assertRaises(exception.EC2MetadataNotFound, - api.get_os_instance_and_project_id_by_provider_id, - self.fake_context, mock.sentinel.provider_id, - fakes.IP_NETWORK_INTERFACE_2) - - self.neutron.list_subnets.return_value = {'subnets': []} - self.assertRaises(exception.EC2MetadataNotFound, - api.get_os_instance_and_project_id_by_provider_id, - self.fake_context, mock.sentinel.provider_id, - fakes.IP_NETWORK_INTERFACE_2) - - def test_get_version_root(self): - retval = api.get_metadata_item(self.fake_context, ['2009-04-04'], - fakes.ID_OS_INSTANCE_1, - fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertEqual('meta-data/\nuser-data', retval) - - self.assertRaises( - exception.EC2MetadataNotFound, - api.get_metadata_item, self.fake_context, ['9999-99-99'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - - self.db_api.get_items_ids.assert_called_with( - self.fake_context, 'i', item_ids=None, - item_os_ids=(fakes.ID_OS_INSTANCE_1,)) - self.instance_api.describe_instances.assert_called_with( - self.fake_context, [fakes.ID_EC2_INSTANCE_1]) - self.instance_api.describe_instance_attribute.assert_called_with( - self.fake_context, fakes.ID_EC2_INSTANCE_1, 'userData') - - def test_invalid_path(self): - self.assertRaises(exception.EC2MetadataNotFound, - api.get_metadata_item, self.fake_context, - ['9999-99-99', 'user-data-invalid'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - - def test_mismatch_project_id(self): - self.fake_context.project_id = fakes.random_os_id() - 
self.assertRaises( - exception.EC2MetadataNotFound, - api.get_metadata_item, self.fake_context, ['2009-04-04'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - - def test_non_existing_instance(self): - self.instance_api.describe_instances.return_value = { - 'reservationSet': []} - self.assertRaises( - exception.EC2MetadataNotFound, - api.get_metadata_item, self.fake_context, ['2009-04-04'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - - def test_user_data(self): - retval = api.get_metadata_item( - self.fake_context, ['2009-04-04', 'user-data'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertEqual(FAKE_USER_DATA, retval) - - def test_no_user_data(self): - self.instance_api.describe_instance_attribute.return_value = { - 'instanceId': fakes.ID_EC2_INSTANCE_1} - self.assertRaises( - exception.EC2MetadataNotFound, - api.get_metadata_item, self.fake_context, - ['2009-04-04', 'user-data'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - - def test_security_groups(self): - self.instance_api.describe_instances.return_value = { - 'reservationSet': [fakes.EC2_RESERVATION_2]} - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'security-groups'], - fakes.ID_OS_INSTANCE_2, fakes.IP_NETWORK_INTERFACE_1, - self.cache_region) - self.assertEqual('\n'.join(['groupname3']), - retval) - - def test_local_hostname(self): - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'local-hostname'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertEqual(fakes.EC2_INSTANCE_1['privateDnsName'], retval) - - def test_local_ipv4(self): - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'local-ipv4'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertEqual(fakes.IP_NETWORK_INTERFACE_2, 
retval) - - def test_local_ipv4_from_address(self): - self.instance_api.describe_instances.return_value = { - 'reservationSet': [fakes.EC2_RESERVATION_2]} - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'local-ipv4'], - fakes.ID_OS_INSTANCE_2, fakes.IP_NETWORK_INTERFACE_1, - self.cache_region) - self.assertEqual(fakes.IP_NETWORK_INTERFACE_1, retval) - - def test_pubkey_name(self): - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'public-keys'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertEqual('0=%s' % fakes.NAME_KEY_PAIR, retval) - - def test_pubkey(self): - self.nova.servers.get.return_value = ( - fakes.OSInstance(fakes.OS_INSTANCE_1)) - self.nova.keypairs.keypair_prefix = 'os_keypairs' - self.nova.keypairs._get.return_value = ( - fakes.NovaKeyPair(fakes.OS_KEY_PAIR)) - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'public-keys', '0', 'openssh-key'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertEqual(fakes.PUBLIC_KEY_KEY_PAIR, retval) - self.nova.servers.get.assert_called_once_with(fakes.ID_OS_INSTANCE_1) - self.nova.keypairs._get.assert_called_once_with( - '/os_keypairs/%s?user_id=%s' % (fakes.NAME_KEY_PAIR, - fakes.ID_OS_USER), - 'keypair') - - self.nova.keypairs._get.side_effect = nova_exception.NotFound(404) - self.assertRaises( - exception.EC2MetadataNotFound, - api.get_metadata_item, - self.fake_context, - ['2009-04-04', 'meta-data', 'public-keys', '0', 'openssh-key'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - - def test_image_type_ramdisk(self): - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'ramdisk-id'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertEqual(fakes.ID_EC2_IMAGE_ARI_1, retval) - - def test_image_type_kernel(self): - retval = 
api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'kernel-id'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertEqual(fakes.ID_EC2_IMAGE_AKI_1, retval) - - def test_check_version(self): - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'block-device-mapping'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertIsNotNone(retval) - - self.assertRaises( - exception.EC2MetadataNotFound, - api.get_metadata_item, self.fake_context, - ['2007-08-29', 'meta-data', 'block-device-mapping'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - - def test_format_instance_mapping(self): - retval = api._build_block_device_mappings( - 'fake_context', fakes.EC2_INSTANCE_1, fakes.ID_OS_INSTANCE_1) - self.assertThat(retval, - matchers.DictMatches( - {'ami': 'vda', - 'root': fakes.ROOT_DEVICE_NAME_INSTANCE_1})) - - retval = api._build_block_device_mappings( - 'fake_context', fakes.EC2_INSTANCE_2, fakes.ID_OS_INSTANCE_2) - expected = {'ami': 'sdb1', - 'root': fakes.ROOT_DEVICE_NAME_INSTANCE_2} - expected.update(fakes.EC2_BDM_METADATA_INSTANCE_2) - self.assertThat(retval, - matchers.DictMatches(expected)) - - def test_metadata_cache(self): - self.configure(enabled=True, group='cache') - self.configure(backend='oslo_cache.dict', group='cache') - self._init_cache_region() - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'local-ipv4'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - self.assertEqual(fakes.IP_NETWORK_INTERFACE_2, retval) - self.nova.servers.get.assert_called_once_with(fakes.ID_OS_INSTANCE_1) - self.nova.servers.get.reset_mock() - - retval = api.get_metadata_item( - self.fake_context, - ['2009-04-04', 'meta-data', 'instance-id'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - self.cache_region) - 
self.assertEqual(fakes.ID_EC2_INSTANCE_1, retval) - self.nova.servers.get.assert_not_called() - - -class MetadataApiIntegralTestCase(base.ApiTestCase): - # TODO(ft): 'execute' feature isn't used here, but some mocks and - # fake context are. ApiTestCase should be split to some classes to use - # its feature optimally - - @mock.patch('ec2api.metadata.api.cache_core.create_region') - @mock.patch('ec2api.api.instance.security_group_api') - @mock.patch('ec2api.api.instance.network_interface_api') - def test_get_metadata_integral(self, network_interface_api, - security_group_api, create_region): - fake_context = base.create_context(is_os_admin=True) - - self.set_mock_db_items( - fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2, - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2, - fakes.DB_IMAGE_1, fakes.DB_IMAGE_2, - fakes.DB_IMAGE_ARI_1, fakes.DB_IMAGE_AKI_1, - fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, fakes.DB_VOLUME_3) - self.nova_admin.servers.list.return_value = [ - fakes.OSInstance_full(fakes.OS_INSTANCE_1), - fakes.OSInstance_full(fakes.OS_INSTANCE_2)] - self.nova_admin.servers.get.side_effect = tools.get_by_1st_arg_getter({ - fakes.ID_OS_INSTANCE_1: fakes.OSInstance_full(fakes.OS_INSTANCE_1), - fakes.ID_OS_INSTANCE_2: fakes.OSInstance_full(fakes.OS_INSTANCE_2) - }) - self.nova_admin.keypairs._get.return_value = ( - fakes.NovaKeyPair(fakes.OS_KEY_PAIR)) - self.cinder.volumes.list.return_value = [ - fakes.OSVolume(fakes.OS_VOLUME_1), - fakes.OSVolume(fakes.OS_VOLUME_2), - fakes.OSVolume(fakes.OS_VOLUME_3)] - network_interface_api.describe_network_interfaces.side_effect = ( - lambda *args, **kwargs: copy.deepcopy({ - 'networkInterfaceSet': [fakes.EC2_NETWORK_INTERFACE_1, - fakes.EC2_NETWORK_INTERFACE_2]})) - security_group_api.describe_security_groups.return_value = { - 'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_1, - fakes.EC2_SECURITY_GROUP_3]} - create_region.get.return_value = cache_core.NO_VALUE - - retval = api.get_metadata_item( - fake_context, ['latest', 
'meta-data', 'instance-id'], - fakes.ID_OS_INSTANCE_1, fakes.IP_NETWORK_INTERFACE_2, - create_region) - self.assertEqual(fakes.ID_EC2_INSTANCE_1, retval) - - retval = api.get_metadata_item( - fake_context, ['latest', 'meta-data', 'instance-id'], - fakes.ID_OS_INSTANCE_2, '10.200.1.15', - create_region) - self.assertEqual(fakes.ID_EC2_INSTANCE_2, retval) diff --git a/ec2api/tests/unit/test_middleware.py b/ec2api/tests/unit/test_middleware.py deleted file mode 100644 index 6db24590..00000000 --- a/ec2api/tests/unit/test_middleware.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from lxml import etree -from unittest import mock - -from oslo_config import cfg -from oslo_context import context -from oslo_serialization import jsonutils -from oslotest import base as test_base -import requests -import webob.dec -import webob.exc - -from ec2api import api as ec2 -from ec2api import exception -from ec2api.tests.unit import tools -from ec2api import wsgi - -CONF = cfg.CONF - - -@webob.dec.wsgify -def conditional_forbid(req): - """Helper wsgi app returns 403 if param 'die' is 1.""" - if 'die' in req.params and req.params['die'] == '1': - raise webob.exc.HTTPForbidden() - return 'OK' - - -class ExecutorTestCase(test_base.BaseTestCase): - def setUp(self): - super(ExecutorTestCase, self).setUp() - self.executor = ec2.Executor() - - def _execute(self, invoke): - class Fake(object): - pass - fake_ec2_request = Fake() - fake_ec2_request.invoke = invoke - - fake_wsgi_request = Fake() - - fake_wsgi_request.environ = { - 'ec2api.context': mock.Mock( - request_id=context.generate_request_id()), - 'ec2.request': fake_ec2_request, - } - return self.executor(fake_wsgi_request) - - def _extract_message(self, result): - tree = etree.fromstring(result.body) - return tree.findall('./Errors')[0].find('Error/Message').text - - def _extract_code(self, result): - tree = etree.fromstring(result.body) - return tree.findall('./Errors')[0].find('Error/Code').text - - def test_instance_not_found(self): - def not_found(context): - raise exception.InvalidInstanceIDNotFound(id='i-01') - result = self._execute(not_found) - self.assertIn('i-01', self._extract_message(result)) - self.assertEqual('InvalidInstanceID.NotFound', - self._extract_code(result)) - - def test_instance_not_found_none(self): - def not_found(context): - raise exception.InvalidInstanceIDNotFound(id=None) - - # NOTE(mikal): we want no exception to be raised here, which was what - # was happening in bug/1080406 - result = self._execute(not_found) - self.assertIn('None', self._extract_message(result)) - 
self.assertEqual('InvalidInstanceID.NotFound', - self._extract_code(result)) - - def test_snapshot_not_found(self): - def not_found(context): - raise exception.InvalidSnapshotNotFound(id='snap-01') - result = self._execute(not_found) - self.assertIn('snap-01', self._extract_message(result)) - self.assertEqual('InvalidSnapshot.NotFound', - self._extract_code(result)) - - def test_volume_not_found(self): - def not_found(context): - raise exception.InvalidVolumeNotFound(id='vol-01') - result = self._execute(not_found) - self.assertIn('vol-01', self._extract_message(result)) - self.assertEqual('InvalidVolume.NotFound', self._extract_code(result)) - - -class FakeResponse(object): - reason = "Test Reason" - - def __init__(self, status_code=400): - self.status_code = status_code - - def json(self): - return {} - - -class KeystoneAuthTestCase(test_base.BaseTestCase): - def setUp(self): - super(KeystoneAuthTestCase, self).setUp() - self.kauth = ec2.EC2KeystoneAuth(conditional_forbid) - - def _validate_ec2_error(self, response, http_status, ec2_code): - self.assertEqual(response.status_code, http_status, - 'Expected HTTP status %s' % http_status) - root_e = etree.XML(response.body) - self.assertEqual(root_e.tag, 'Response', - "Top element must be Response.") - errors_e = root_e.find('Errors') - error_e = errors_e[0] - code_e = error_e.find('Code') - self.assertIsNotNone(code_e, "Code element must be present.") - self.assertEqual(code_e.text, ec2_code) - - def test_no_signature(self): - req = wsgi.Request.blank('/test') - resp = self.kauth(req) - self._validate_ec2_error(resp, 400, 'AuthFailure') - - def test_no_key_id(self): - req = wsgi.Request.blank('/test') - req.GET['Signature'] = 'test-signature' - resp = self.kauth(req) - self._validate_ec2_error(resp, 400, 'AuthFailure') - - @mock.patch.object(requests, 'request', return_value=FakeResponse()) - def test_communication_failure(self, mock_request): - req = wsgi.Request.blank('/test') - req.GET['Signature'] = 
'test-signature' - req.GET['AWSAccessKeyId'] = 'test-key-id' - resp = self.kauth(req) - self._validate_ec2_error(resp, 400, 'AuthFailure') - mock_request.assert_called_with('POST', - CONF.keystone_ec2_tokens_url, - data=mock.ANY, headers=mock.ANY) - - @tools.screen_all_logs - @mock.patch.object(requests, 'request', return_value=FakeResponse(200)) - def test_no_result_data(self, mock_request): - req = wsgi.Request.blank('/test') - req.GET['Signature'] = 'test-signature' - req.GET['AWSAccessKeyId'] = 'test-key-id' - resp = self.kauth(req) - self._validate_ec2_error(resp, 400, 'AuthFailure') - mock_request.assert_called_with('POST', - CONF.keystone_ec2_tokens_url, - data=mock.ANY, headers=mock.ANY) - - fake_request = mock.NonCallableMock(status_code=200, headers={}) - fake_request.json.return_value = {'token': {}} - mock_request.return_value = fake_request - resp = self.kauth(req) - self._validate_ec2_error(resp, 400, 'AuthFailure') - - fake_request.json.return_value = {'access': {}} - resp = self.kauth(req) - self._validate_ec2_error(resp, 400, 'AuthFailure') - - @tools.screen_unexpected_exception_logs - @mock.patch.object(requests, 'request', return_value=FakeResponse(200)) - def test_params_for_keystone_call(self, mock_request): - req = wsgi.Request.blank('/test') - req.GET['Signature'] = 'test-signature' - req.GET['AWSAccessKeyId'] = 'test-key-id' - self.kauth(req) - mock_request.assert_called_with( - 'POST', CONF.keystone_ec2_tokens_url, - data=mock.ANY, headers=mock.ANY) - - data = jsonutils.loads(mock_request.call_args[1]['data']) - expected_data = { - 'ec2Credentials': { - 'access': 'test-key-id', - 'headers': {'Host': 'localhost:80'}, - 'host': 'localhost:80', - 'verb': 'GET', - 'params': {'AWSAccessKeyId': 'test-key-id'}, - 'signature': 'test-signature', - 'path': '/test', - 'body_hash': 'e3b0c44298fc1c149afbf4c8996fb924' - '27ae41e4649b934ca495991b7852b855'}} - self.assertEqual(expected_data, data) diff --git a/ec2api/tests/unit/test_network_interface.py 
b/ec2api/tests/unit/test_network_interface.py deleted file mode 100644 index aba6c527..00000000 --- a/ec2api/tests/unit/test_network_interface.py +++ /dev/null @@ -1,679 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import copy -from unittest import mock - -from neutronclient.common import exceptions as neutron_exception - -from ec2api.api import ec2utils -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class NetworkInterfaceTestCase(base.ApiTestCase): - - def test_create_network_interface(self): - self.set_mock_db_items(fakes.DB_SUBNET_1, fakes.DB_VPC_1, - fakes.DB_SECURITY_GROUP_1) - self.db_api.add_item.return_value = fakes.DB_NETWORK_INTERFACE_1 - self.neutron.show_subnet.return_value = {'subnet': fakes.OS_SUBNET_1} - self.neutron.create_port.return_value = {'port': fakes.OS_PORT_1} - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - - def check_response(resp, auto_ips=False): - self.assertThat(fakes.EC2_NETWORK_INTERFACE_1, - matchers.DictMatches(resp['networkInterface'])) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'eni', - tools.purge_dict(fakes.DB_NETWORK_INTERFACE_1, ('id',))) - if auto_ips: - self.neutron.create_port.assert_called_once_with( - {'port': - {'network_id': fakes.ID_OS_NETWORK_1, - 'fixed_ips': - 
[{'subnet_id': fakes.ID_OS_SUBNET_1}], - 'security_groups': [fakes.ID_OS_SECURITY_GROUP_1]}}) - else: - self.neutron.create_port.assert_called_once_with( - {'port': - {'network_id': fakes.ID_OS_NETWORK_1, - 'fixed_ips': - [{'ip_address': fakes.IP_NETWORK_INTERFACE_1}], - 'security_groups': [fakes.ID_OS_SECURITY_GROUP_1]}}) - self.neutron.update_port.assert_called_once_with( - fakes.ID_OS_PORT_1, - {'port': {'name': - fakes.ID_EC2_NETWORK_INTERFACE_1}}) - self.neutron.reset_mock() - self.db_api.reset_mock() - self.neutron.list_security_groups.return_value = ( - {'security_groups': [ - copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - - resp = self.execute( - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_1, - 'PrivateIpAddress': fakes.IP_NETWORK_INTERFACE_1, - 'Description': fakes.DESCRIPTION_NETWORK_INTERFACE_1}) - check_response(resp) - - resp = self.execute( - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_1, - 'PrivateIpAddresses.1.PrivateIpAddress': ( - fakes.IP_NETWORK_INTERFACE_1), - 'PrivateIpAddresses.1.Primary': True, - 'Description': fakes.DESCRIPTION_NETWORK_INTERFACE_1}) - check_response(resp) - - resp = self.execute( - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_1, - 'Description': fakes.DESCRIPTION_NETWORK_INTERFACE_1}) - check_response(resp, True) - - resp = self.execute( - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_1, - 'SecondaryPrivateIpAddressCount': '1', - 'Description': fakes.DESCRIPTION_NETWORK_INTERFACE_1}) - check_response(resp, True) - - resp = self.execute( - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_1, - 'SecondaryPrivateIpAddressCount': '0', - 'Description': fakes.DESCRIPTION_NETWORK_INTERFACE_1}) - check_response(resp, True) - - def test_create_network_interface_multiple_ips(self): - self.set_mock_db_items(fakes.DB_SUBNET_2, fakes.DB_VPC_1, - fakes.DB_SECURITY_GROUP_1) - self.db_api.add_item.return_value = fakes.DB_NETWORK_INTERFACE_2 - 
self.neutron.show_subnet.return_value = {'subnet': fakes.OS_SUBNET_2} - self.neutron.create_port.return_value = {'port': fakes.OS_PORT_2} - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - created_ec2_network_interface = tools.patch_dict( - fakes.EC2_NETWORK_INTERFACE_2, - {'privateIpAddressesSet': [ - tools.purge_dict(s, ['association']) - for s in fakes.EC2_NETWORK_INTERFACE_2[ - 'privateIpAddressesSet']]}, - ['association']) - - def check_response(resp): - self.assertThat(created_ec2_network_interface, - matchers.DictMatches(resp['networkInterface'])) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'eni', - tools.purge_dict(fakes.DB_NETWORK_INTERFACE_2, - ('id', - 'device_index', - 'instance_id', - 'delete_on_termination', - 'attach_time'))) - self.neutron.update_port.assert_called_once_with( - fakes.ID_OS_PORT_2, - {'port': {'name': - fakes.ID_EC2_NETWORK_INTERFACE_2}}) - self.neutron.reset_mock() - self.db_api.reset_mock() - self.neutron.list_security_groups.return_value = ( - {'security_groups': [ - copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - - resp = self.execute( - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_2, - 'SecondaryPrivateIpAddressCount': '3', - 'Description': fakes.DESCRIPTION_NETWORK_INTERFACE_2}) - self.neutron.create_port.assert_called_once_with( - {'port': {'network_id': fakes.ID_OS_NETWORK_2, - 'fixed_ips': [{'subnet_id': fakes.ID_OS_SUBNET_2}, - {'subnet_id': fakes.ID_OS_SUBNET_2}, - {'subnet_id': fakes.ID_OS_SUBNET_2}], - 'security_groups': [fakes.ID_OS_SECURITY_GROUP_1]}}) - check_response(resp) - - resp = self.execute( - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_2, - 'PrivateIpAddress': fakes.IPS_NETWORK_INTERFACE_2[0], - 'PrivateIpAddresses.1.PrivateIpAddress': - fakes.IPS_NETWORK_INTERFACE_2[1], - 'PrivateIpAddresses.1.Primary': False, - 'PrivateIpAddresses.2.PrivateIpAddress': - fakes.IPS_NETWORK_INTERFACE_2[2], - 
'PrivateIpAddresses.2.Primary': False, - 'Description': fakes.DESCRIPTION_NETWORK_INTERFACE_2}) - self.neutron.create_port.assert_called_once_with( - {'port': - {'network_id': fakes.ID_OS_NETWORK_2, - 'fixed_ips': [ - {'ip_address': fakes.IPS_NETWORK_INTERFACE_2[0]}, - {'ip_address': fakes.IPS_NETWORK_INTERFACE_2[1]}, - {'ip_address': fakes.IPS_NETWORK_INTERFACE_2[2]}], - 'security_groups': [fakes.ID_OS_SECURITY_GROUP_1]}}) - check_response(resp) - - resp = self.execute( - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_2, - 'PrivateIpAddresses.1.PrivateIpAddress': - fakes.IPS_NETWORK_INTERFACE_2[0], - 'PrivateIpAddresses.1.Primary': True, - 'PrivateIpAddresses.2.PrivateIpAddress': - fakes.IPS_NETWORK_INTERFACE_2[1], - 'PrivateIpAddresses.2.Primary': False, - 'PrivateIpAddresses.3.PrivateIpAddress': - fakes.IPS_NETWORK_INTERFACE_2[2], - 'PrivateIpAddresses.3.Primary': False, - 'Description': fakes.DESCRIPTION_NETWORK_INTERFACE_2}) - self.neutron.create_port.assert_called_once_with( - {'port': - {'network_id': fakes.ID_OS_NETWORK_2, - 'fixed_ips': [ - {'ip_address': fakes.IPS_NETWORK_INTERFACE_2[0]}, - {'ip_address': fakes.IPS_NETWORK_INTERFACE_2[1]}, - {'ip_address': fakes.IPS_NETWORK_INTERFACE_2[2]}], - 'security_groups': [fakes.ID_OS_SECURITY_GROUP_1]}}) - check_response(resp) - - resp = self.execute( - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_2, - 'PrivateIpAddress': fakes.IPS_NETWORK_INTERFACE_2[0], - 'PrivateIpAddresses.1.PrivateIpAddress': - fakes.IPS_NETWORK_INTERFACE_2[1], - 'PrivateIpAddresses.1.Primary': False, - 'SecondaryPrivateIpAddressCount': '1', - 'Description': fakes.DESCRIPTION_NETWORK_INTERFACE_2}) - self.neutron.create_port.assert_called_once_with( - {'port': - {'network_id': fakes.ID_OS_NETWORK_2, - 'fixed_ips': [ - {'ip_address': fakes.IPS_NETWORK_INTERFACE_2[0]}, - {'ip_address': fakes.IPS_NETWORK_INTERFACE_2[1]}, - {'subnet_id': fakes.ID_OS_SUBNET_2}], - 'security_groups': [fakes.ID_OS_SECURITY_GROUP_1]}}) - 
check_response(resp) - - def test_create_network_interface_invalid_parameters(self): - def do_check(args, error_code): - self.neutron.reset_mock() - self.db_api.reset_mock() - self.assert_execution_error( - error_code, 'CreateNetworkInterface', args) - - self.set_mock_db_items() - do_check({'SubnetId': fakes.ID_EC2_SUBNET_2}, - 'InvalidSubnetID.NotFound') - self.db_api.get_item_by_id.assert_called_once_with( - mock.ANY, fakes.ID_EC2_SUBNET_2) - - self.set_mock_db_items(fakes.DB_SUBNET_1, fakes.DB_VPC_1) - self.neutron.show_subnet.return_value = {'subnet': fakes.OS_SUBNET_1} - do_check({'SubnetId': fakes.ID_EC2_SUBNET_1, - 'PrivateIpAddress': fakes.IP_NETWORK_INTERFACE_2}, - 'InvalidParameterValue') - - for cls in [neutron_exception.OverQuotaClient, - neutron_exception.IpAddressGenerationFailureClient]: - self.neutron.create_port.side_effect = cls() - do_check({'SubnetId': fakes.ID_EC2_SUBNET_1, - 'PrivateIpAddress': fakes.IP_NETWORK_INTERFACE_1}, - 'InsufficientFreeAddressesInSubnet') - - for cls in [neutron_exception.IpAddressInUseClient, - neutron_exception.BadRequest]: - self.neutron.create_port.side_effect = cls() - do_check({'SubnetId': fakes.ID_EC2_SUBNET_1, - 'PrivateIpAddress': fakes.IP_NETWORK_INTERFACE_1}, - 'InvalidParameterValue') - - @tools.screen_unexpected_exception_logs - @mock.patch('ec2api.api.dhcp_options._add_dhcp_opts_to_port') - def test_create_network_interface_rollback(self, _add_dhcp_opts_to_port): - self.set_mock_db_items( - tools.update_dict( - fakes.DB_VPC_1, - {'dhcp_options_id': fakes.ID_EC2_DHCP_OPTIONS_1}), - fakes.DB_SUBNET_1, fakes.DB_DHCP_OPTIONS_1) - self.db_api.add_item.return_value = fakes.DB_NETWORK_INTERFACE_1 - self.neutron.show_subnet.return_value = {'subnet': fakes.OS_SUBNET_1} - self.neutron.create_port.return_value = {'port': fakes.OS_PORT_1} - _add_dhcp_opts_to_port.side_effect = Exception() - - self.assert_execution_error(self.ANY_EXECUTE_ERROR, - 'CreateNetworkInterface', - {'SubnetId': fakes.ID_EC2_SUBNET_1}) - - 
self.neutron.delete_port.assert_called_once_with(fakes.ID_OS_PORT_1) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, fakes.ID_EC2_NETWORK_INTERFACE_1) - - def test_delete_network_interface(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1) - resp = self.execute( - 'DeleteNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1}) - self.assertEqual(True, resp['return']) - self.db_api.get_item_by_id.assert_any_call( - mock.ANY, - fakes.ID_EC2_NETWORK_INTERFACE_1) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, - fakes.ID_EC2_NETWORK_INTERFACE_1) - self.neutron.delete_port.assert_called_once_with( - fakes.ID_OS_PORT_1) - - def test_delete_network_interface_obsolete(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1) - self.neutron.delete_port.side_effect = ( - neutron_exception.PortNotFoundClient()) - resp = self.execute( - 'DeleteNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1}) - self.assertEqual(True, resp['return']) - - def test_delete_network_interface_no_network_interface(self): - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidNetworkInterfaceID.NotFound', 'DeleteNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1}) - self.assertEqual(0, self.neutron.delete_port.call_count) - - def test_delete_network_interface_is_in_use(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_2) - self.assert_execution_error( - 'InvalidParameterValue', 'DeleteNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2}) - self.assertEqual(0, self.neutron.delete_port.call_count) - - def test_delete_network_interface_with_public_ip(self): - detached_network_interface_2 = fakes.gen_db_network_interface( - fakes.ID_EC2_NETWORK_INTERFACE_2, - fakes.ID_OS_PORT_2, - fakes.ID_EC2_VPC_1, - fakes.ID_EC2_SUBNET_2, - fakes.IP_NETWORK_INTERFACE_2) - self.set_mock_db_items(detached_network_interface_2, - fakes.DB_ADDRESS_1, 
fakes.DB_ADDRESS_2) - resp = self.execute( - 'DeleteNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2}) - self.assertEqual(True, resp['return']) - self.db_api.get_item_by_id.assert_any_call( - mock.ANY, - fakes.ID_EC2_NETWORK_INTERFACE_2) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, - fakes.ID_EC2_NETWORK_INTERFACE_2) - self.neutron.delete_port.assert_called_once_with( - fakes.ID_OS_PORT_2) - self.db_api.update_item.assert_called_once_with( - mock.ANY, - tools.purge_dict(fakes.DB_ADDRESS_2, - ['network_interface_id', - 'private_ip_address'])) - - @tools.screen_unexpected_exception_logs - def test_delete_network_interface_rollback(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1) - self.neutron.delete_port.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DeleteNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1}) - - self.db_api.restore_item.assert_called_once_with( - mock.ANY, 'eni', fakes.DB_NETWORK_INTERFACE_1) - - def test_describe_network_interfaces(self): - self.set_mock_db_items( - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2, - fakes.DB_ADDRESS_1, fakes.DB_ADDRESS_2, - fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2, - fakes.DB_SECURITY_GROUP_1) - self.neutron.list_ports.return_value = ( - {'ports': [fakes.OS_PORT_1, fakes.OS_PORT_2]}) - self.neutron.list_floatingips.return_value = ( - {'floatingips': [fakes.OS_FLOATING_IP_1, - fakes.OS_FLOATING_IP_2]}) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - - resp = self.execute('DescribeNetworkInterfaces', {}) - self.assertThat(resp['networkInterfaceSet'], - matchers.ListMatches( - [fakes.EC2_NETWORK_INTERFACE_1, - fakes.EC2_NETWORK_INTERFACE_2], - orderless_lists=True), - verbose=True) - - self.db_api.get_items_by_ids = tools.CopyingMock( - return_value=[fakes.DB_NETWORK_INTERFACE_1]) - resp = self.execute( - 
'DescribeNetworkInterfaces', - {'NetworkInterfaceId.1': fakes.ID_EC2_NETWORK_INTERFACE_1}) - self.assertThat(resp['networkInterfaceSet'], - matchers.ListMatches( - [fakes.EC2_NETWORK_INTERFACE_1])) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_NETWORK_INTERFACE_1])) - - self.check_filtering( - 'DescribeNetworkInterfaces', 'networkInterfaceSet', - [('addresses.private-ip-address', - fakes.IP_NETWORK_INTERFACE_2_EXT_1,), - ('addresses.primary', False), - ('addresses.association.public-ip', fakes.IP_ADDRESS_2), - ('addresses.association.owner-id', fakes.ID_OS_PROJECT), - ('association.association-id', fakes.ID_EC2_ASSOCIATION_2), - ('association.allocation-id', fakes.ID_EC2_ADDRESS_2), - ('association.ip-owner-id', fakes.ID_OS_PROJECT), - ('association.public-ip', fakes.IP_ADDRESS_2), - ('attachment.attachment-id', - fakes.ID_EC2_NETWORK_INTERFACE_2_ATTACH), - ('attachment.instance-id', fakes.ID_EC2_INSTANCE_1), - ('attachment.instance-owner-id', fakes.ID_OS_PROJECT), - ('attachment.device-index', 0), - ('attachment.status', 'attached'), - ('attachment.attach.time', fakes.TIME_ATTACH_NETWORK_INTERFACE), - ('attachment.delete-on-termination', False), - ('description', fakes.DESCRIPTION_NETWORK_INTERFACE_1), - ('group-id', fakes.ID_EC2_SECURITY_GROUP_1), - ('group-name', fakes.NAME_DEFAULT_OS_SECURITY_GROUP), - ('mac-address', fakes.MAC_ADDRESS), - ('network-interface-id', fakes.ID_EC2_NETWORK_INTERFACE_1), - ('owner-id', fakes.ID_OS_PROJECT), - ('private-ip-address', fakes.IP_NETWORK_INTERFACE_1), - ('requester-managed', False), - ('source-dest-check', True), - ('status', 'available'), - ('vpc-id', fakes.ID_EC2_VPC_1), - ('subnet-id', fakes.ID_EC2_SUBNET_2)]) - self.check_tag_support( - 'DescribeNetworkInterfaces', 'networkInterfaceSet', - fakes.ID_EC2_NETWORK_INTERFACE_1, 'networkInterfaceId') - - def test_describe_network_interface_attribute(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1) - - resp = self.execute( 
- 'DescribeNetworkInterfaceAttribute', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'Attribute': 'description'}) - self.assertEqual(fakes.ID_EC2_NETWORK_INTERFACE_1, - resp['networkInterfaceId']) - self.assertEqual(fakes.DESCRIPTION_NETWORK_INTERFACE_1, - resp['description'].get('value', None)) - - def test_modify_network_interface_attribute(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1, - fakes.DB_NETWORK_INTERFACE_2) - - self.execute( - 'ModifyNetworkInterfaceAttribute', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'Description.Value': 'New description'}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, - tools.update_dict(fakes.DB_NETWORK_INTERFACE_1, - {'description': 'New description'})) - - self.db_api.reset_mock() - self.execute( - 'ModifyNetworkInterfaceAttribute', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'Attachment.AttachmentId': ( - fakes.ID_EC2_NETWORK_INTERFACE_2_ATTACH), - 'Attachment.DeleteOnTermination': 'True'}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, - tools.update_dict(fakes.DB_NETWORK_INTERFACE_2, - {'delete_on_termination': True})) - - def test_modify_network_interface_attribute_invalid_parameters(self): - self.assert_execution_error( - 'InvalidParameterCombination', 'ModifyNetworkInterfaceAttribute', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'Description.Value': 'New description', - 'SourceDestCheck.Value': 'True'}) - - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1, - fakes.DB_NETWORK_INTERFACE_2) - - self.assert_execution_error( - 'MissingParameter', 'ModifyNetworkInterfaceAttribute', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'Attachment.DeleteOnTermination': 'True'}) - - self.assert_execution_error( - 'MissingParameter', 'ModifyNetworkInterfaceAttribute', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'Attachment.AttachmentId': ( - fakes.ID_EC2_NETWORK_INTERFACE_2_ATTACH)}) - - 
self.assert_execution_error( - 'InvalidAttachmentID.NotFound', 'ModifyNetworkInterfaceAttribute', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'Attachment.AttachmentId': ( - fakes.ID_EC2_NETWORK_INTERFACE_2_ATTACH), - 'Attachment.DeleteOnTermination': 'True'}) - - self.assert_execution_error( - 'InvalidAttachmentID.NotFound', 'ModifyNetworkInterfaceAttribute', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'Attachment.AttachmentId': ec2utils.change_ec2_id_kind( - fakes.ID_EC2_NETWORK_INTERFACE_1, 'eni-attach'), - 'Attachment.DeleteOnTermination': 'True'}) - - def test_reset_network_interface_attribute(self): - self.execute( - 'ResetNetworkInterfaceAttribute', - {'NetworkInterfaceId': - fakes.ID_EC2_NETWORK_INTERFACE_1, - 'Attribute': 'sourceDestCheck'}) - - def test_attach_network_interface(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1, - fakes.DB_INSTANCE_1) - self.neutron.show_port.return_value = ( - {'port': fakes.OS_PORT_1}) - self.isotime.return_value = fakes.TIME_ATTACH_NETWORK_INTERFACE - self.execute( - 'AttachNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1, - 'DeviceIndex': '1'}) - self.nova.servers.interface_attach.assert_called_once_with( - fakes.ID_OS_INSTANCE_1, fakes.ID_OS_PORT_1, None, None) - self.db_api.update_item.assert_called_once_with( - mock.ANY, - tools.update_dict( - fakes.DB_NETWORK_INTERFACE_1, - {'device_index': 1, - 'instance_id': fakes.ID_EC2_INSTANCE_1, - 'delete_on_termination': False, - 'attach_time': fakes.TIME_ATTACH_NETWORK_INTERFACE})) - - def test_attach_network_interface_invalid_parameters(self): - # NOTE(ft): eni is already attached - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_2) - self.assert_execution_error( - 'InvalidParameterValue', 'AttachNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'InstanceId': fakes.ID_EC2_INSTANCE_2, - 'DeviceIndex': '1'}) - - # NOTE(ft): device 
index is in use - self.set_mock_db_items( - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2, - fakes.DB_INSTANCE_1) - self.assert_execution_error( - 'InvalidParameterValue', 'AttachNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1, - 'DeviceIndex': '0'}) - - @tools.screen_unexpected_exception_logs - def test_attach_network_interface_rollback(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1, - fakes.DB_INSTANCE_1) - self.neutron.show_port.return_value = ( - {'port': fakes.OS_PORT_2}) - self.isotime.return_value = fakes.TIME_ATTACH_NETWORK_INTERFACE - self.nova.servers.interface_attach.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'AttachNetworkInterface', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'InstanceId': fakes.ID_EC2_INSTANCE_1, - 'DeviceIndex': '1'}) - - self.db_api.update_item.assert_any_call( - mock.ANY, fakes.DB_NETWORK_INTERFACE_1) - - def test_detach_network_interface(self): - network_interface = tools.update_dict(fakes.DB_NETWORK_INTERFACE_2, - {'device_index': 1}) - self.set_mock_db_items(network_interface) - self.neutron.show_port.return_value = ( - {'port': fakes.OS_PORT_2}) - self.execute( - 'DetachNetworkInterface', - {'AttachmentId': ec2utils.change_ec2_id_kind( - fakes.ID_EC2_NETWORK_INTERFACE_2, 'eni-attach')}) - self.neutron.update_port.assert_called_once_with( - fakes.ID_OS_PORT_2, - {'port': {'device_id': '', - 'device_owner': ''}} - ) - self.db_api.update_item.assert_called_once_with( - mock.ANY, - tools.purge_dict(fakes.DB_NETWORK_INTERFACE_2, - {'device_index', - 'instance_id', - 'delete_on_termination', - 'attach_time'})) - - def test_detach_network_interface_invalid_parameters(self): - # NOTE(ft): eni is not found - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidAttachmentID.NotFound', 'DetachNetworkInterface', - {'AttachmentId': ec2utils.change_ec2_id_kind( - 
fakes.ID_EC2_NETWORK_INTERFACE_2, 'eni-attach')}) - - # NOTE(ft): eni is attached with device index = 0 - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_2) - self.assert_execution_error( - 'OperationNotPermitted', 'DetachNetworkInterface', - {'AttachmentId': ec2utils.change_ec2_id_kind( - fakes.ID_EC2_NETWORK_INTERFACE_2, 'eni-attach')}) - - @tools.screen_unexpected_exception_logs - def test_detach_network_interface_rollback(self): - network_interface = tools.update_dict(fakes.DB_NETWORK_INTERFACE_2, - {'device_index': 1}) - self.set_mock_db_items(network_interface) - self.neutron.show_port.return_value = ( - {'port': fakes.OS_PORT_2}) - self.neutron.update_port.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DetachNetworkInterface', - {'AttachmentId': fakes.ID_EC2_NETWORK_INTERFACE_2_ATTACH}) - - self.db_api.update_item.assert_any_call( - mock.ANY, network_interface) - - def test_assign_unassign_private_ip_addresses(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1, fakes.DB_SUBNET_1) - self.neutron.show_subnet.return_value = ( - {'subnet': fakes.OS_SUBNET_1}) - self.neutron.show_port.return_value = ( - {'port': copy.deepcopy(fakes.OS_PORT_1)}) - self.execute( - 'AssignPrivateIpAddresses', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'PrivateIpAddress.1': '10.10.1.5', - 'PrivateIpAddress.2': '10.10.1.6', - }) - self.neutron.update_port.assert_called_once_with( - fakes.ID_OS_PORT_1, - {'port': - {'fixed_ips': [ - {'subnet_id': fakes.ID_OS_SUBNET_1, - 'ip_address': fakes.IP_NETWORK_INTERFACE_1}, - {'ip_address': '10.10.1.5'}, - {'ip_address': '10.10.1.6'}]}}) - self.execute( - 'UnassignPrivateIpAddresses', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'PrivateIpAddress.1': '10.10.1.5', - 'PrivateIpAddress.2': '10.10.1.6', - }) - self.neutron.update_port.assert_any_call( - fakes.ID_OS_PORT_1, - {'port': - {'fixed_ips': [ - {'subnet_id': fakes.ID_OS_SUBNET_1, - 'ip_address': 
fakes.IP_NETWORK_INTERFACE_1}]}}) - - def test_assign_private_ip_addresses_invalid_parameters(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_1, fakes.DB_SUBNET_1) - self.neutron.show_subnet.return_value = ( - {'subnet': fakes.OS_SUBNET_1}) - self.neutron.show_port.return_value = ( - {'port': copy.deepcopy(fakes.OS_PORT_1)}) - - def do_check(error_code): - self.assert_execution_error( - error_code, 'AssignPrivateIpAddresses', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'PrivateIpAddress.1': '10.10.1.5', - 'PrivateIpAddress.2': '10.10.1.6', - }) - - self.neutron.update_port.side_effect = ( - neutron_exception.IpAddressGenerationFailureClient()) - do_check('InsufficientFreeAddressesInSubnet') - - self.neutron.update_port.side_effect = ( - neutron_exception.IpAddressInUseClient()) - do_check('InvalidParameterValue') - - self.neutron.update_port.side_effect = ( - neutron_exception.BadRequest()) - do_check('InvalidParameterValue') - - def test_unassign_private_ip_addresses_invalid_parameters(self): - self.set_mock_db_items(fakes.DB_NETWORK_INTERFACE_2, fakes.DB_SUBNET_2) - self.neutron.show_subnet.return_value = ( - {'subnet': fakes.OS_SUBNET_2}) - self.neutron.show_port.return_value = ( - {'port': copy.deepcopy(fakes.OS_PORT_2)}) - - self.assert_execution_error( - 'InvalidParameterValue', 'UnassignPrivateIpAddresses', - {'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'PrivateIpAddress.1': '10.10.2.55'}) diff --git a/ec2api/tests/unit/test_private_key.pem b/ec2api/tests/unit/test_private_key.pem deleted file mode 100644 index b59d29c3..00000000 --- a/ec2api/tests/unit/test_private_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA1+OySRkwliGtujemv7E4cqhJCxlF+4YULhmZaTWpmKUvKz5c -VC6wx/5A6Zz0XrZtuiFDgS7sIAGODcF18xpyKnp2Ign6zgCQpg0s+rss9q5vrSai -JR5iXdqA/vfemS9ptRyz6LSCI9O+EL/U7AaRHVBMQJEM2TfQeuowgi7XboKs8ljl -N4LpAu3HdtaFjHlBnegr7iJ2lq7rTj9deu1tJf/EbrLOclB2LwafT2kC0HQL2E0O 
-OphKu60qBR7sZ4M4tdUftCGZEAdd8+vvRd+YVK9TDtcYtSFSXh86a16504bPylCL -b/dF2bupLMe/zGLZIZ/hKbYPyfH6k5HFJIC12wIDAQABAoIBAH07F7BPbF+qKZxb -q96GbrgT5ksJ3g6JOCuFrffZqQdiynnLMsOiUemxEvZwlVBbgkr2ALJvBYmLXVud -XU4niRIa92vHXjUhHscz3WOUMADoLt/CCUx+05CdrzY3kmhJmIf2nmXeT594tEgC -/v/qz0Kx0YmimlFmjwi90GWzxkPTeKW1s8H9J0Xi/UX3urIfcCzRChumxY+f/sBq -CWJua3D6o3wx3B9oU0msC0R7ZIBY4PFhKyomcVyynD0HNe80NomyeRuJGm7NHHRB -MsVggZVgsFGkOZh00UkcaUHOKS+/7iyQAXuqms2amxGqLoxq/U11uoRW0NxOlC/7 -YYElIAECgYEA8cXXQjt2KWWNWIn2L0mWm+EGF/ihkvGmEXZVcuiVnuwyNxf+VAMe -f1i3EF+Hqlmyv46j4ReTbdpeW7bv7VW0iMgW68K3eFiPznBzlRA/n3XULAy41oyt -9kGhxzy6565iQMF0FK4viAuDeA6EwBkcu5MAQNOZXAUoGk02+GKPN6ECgYEA5Jfw -OeHcodteQdP0KEvChjGrZ7LNz3QU0kxvBaOjIg4ElEvJbq6EzSBxdugT7L+blDEU -umu8RgRn2Z73608nNtagBAKjg+VQsmnXxC92Ht1VFJF+uLLpmOSWG32Q/9e4nPUM -bFW9PyakF9LOM+lnqomKBhY1/LDAf77sWiQpS/sCgYAuh9n+2DzMiMvkP2EPBsWi -qHMox+QoyLMiZzjYzaSGGoUrj0WWW6dR8PwCfbA5e9vn/AbUOlpYaQ+B7TpN3hHJ -xWCL7USsN7ctjvzfsmncQawc8jHcsOSGIWmGU8zQ7AHi3ph9pmxlbXnW8ExiQDME -cq04zMCWMjPeo/+xXB6eIQKBgF+zUGobKdBFU6/BeY1JMlYWA0l1rP41/eWRBEXb -HRfLwJUJKXqB660o8Pez72uFSDABYEkvg3HYtFWCXQ6RY7xsnC8xn50/aspWz3Md -35jKVq02wFO4610MDd/ScNr7SBnF6X6NYp5GohorMhK/m5vk2vjzYYS5xs10c+TF -ENjzAoGAMH+O2S7LFjBKNuIpaqdIj8bjFwUhKgOG74/YodhxxgYIcWaHf2xnImuX -L/dam7seSo4MFYXX60lph6tutTyppkxeS64cz3X8cYN+CJQseWVestPkwArcF4m4 -M3mwEPCKoTqFUzaGbFbEl+TzEfQaKZwLn+AbsuMo6PBoAuD2fM4= ------END RSA PRIVATE KEY----- diff --git a/ec2api/tests/unit/test_route_table.py b/ec2api/tests/unit/test_route_table.py deleted file mode 100644 index 37987ed9..00000000 --- a/ec2api/tests/unit/test_route_table.py +++ /dev/null @@ -1,1162 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -from unittest import mock - -from oslotest import base as test_base - -from ec2api.api import common -from ec2api.api import ec2utils -from ec2api.api import route_table as route_table_api -from ec2api import exception -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class RouteTableTestCase(base.ApiTestCase): - - def test_route_table_create(self): - self.set_mock_db_items(fakes.DB_VPC_1) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_ROUTE_TABLE_1)) - resp = self.execute('CreateRouteTable', - {'VpcId': fakes.ID_EC2_VPC_1}) - self.assertThat( - resp['routeTable'], - matchers.DictMatches(tools.purge_dict(fakes.EC2_ROUTE_TABLE_1, - ('associationSet',)))) - self.db_api.add_item.assert_called_once_with( - mock.ANY, - 'rtb', - {'vpc_id': fakes.ID_EC2_VPC_1, - 'routes': [{'destination_cidr_block': fakes.CIDR_VPC_1, - 'gateway_id': None}]}) - self.db_api.get_item_by_id.assert_called_once_with( - mock.ANY, fakes.ID_EC2_VPC_1) - - def test_route_table_create_invalid_parameters(self): - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidVpcID.NotFound', 'CreateRouteTable', - {'VpcId': fakes.ID_EC2_VPC_1}) - - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_create_route(self, routes_updater): - self.set_mock_db_items( - fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_2, - fakes.DB_VPC_1, fakes.DB_IGW_1, fakes.DB_VPN_GATEWAY_1, - fakes.DB_NETWORK_INTERFACE_1, 
fakes.DB_NETWORK_INTERFACE_2) - - def do_check(params, route_table, rollback_route_table_state, - update_target=route_table_api.HOST_TARGET): - resp = self.execute('CreateRoute', params) - self.assertEqual(True, resp['return']) - - self.db_api.update_item.assert_called_once_with( - mock.ANY, route_table) - routes_updater.assert_called_once_with( - mock.ANY, mock.ANY, route_table, - update_target=update_target) - - self.db_api.update_item.reset_mock() - routes_updater.reset_mock() - - route_table = copy.deepcopy(fakes.DB_ROUTE_TABLE_1) - route_table['routes'].append({'gateway_id': fakes.ID_EC2_IGW_1, - 'destination_cidr_block': '0.0.0.0/0'}) - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': '0.0.0.0/0', - 'GatewayId': fakes.ID_EC2_IGW_1}, - route_table, fakes.DB_ROUTE_TABLE_1) - - route_table = copy.deepcopy(fakes.DB_ROUTE_TABLE_1) - route_table['routes'].append({'gateway_id': fakes.ID_EC2_VPN_GATEWAY_1, - 'destination_cidr_block': '0.0.0.0/0'}) - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': '0.0.0.0/0', - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_1}, - route_table, fakes.DB_ROUTE_TABLE_1, - update_target=route_table_api.VPN_TARGET) - - route_table = copy.deepcopy(fakes.DB_ROUTE_TABLE_1) - route_table['routes'].append({ - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'destination_cidr_block': '192.168.75.0/24'}) - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': '192.168.75.0/24', - 'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1}, - route_table, fakes.DB_ROUTE_TABLE_1) - - route_table = copy.deepcopy(fakes.DB_ROUTE_TABLE_1) - route_table['routes'].append({ - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'destination_cidr_block': '192.168.80.0/24'}) - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': '192.168.80.0/24', - 'InstanceId': fakes.ID_EC2_INSTANCE_1}, - route_table, fakes.DB_ROUTE_TABLE_1) - - # 
NOTE(ft): check idempotent calls - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': fakes.CIDR_EXTERNAL_NETWORK, - 'InstanceId': fakes.ID_EC2_INSTANCE_1}, - fakes.DB_ROUTE_TABLE_2, fakes.DB_ROUTE_TABLE_2) - - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '0.0.0.0/0', - 'GatewayId': fakes.ID_EC2_IGW_1}, - fakes.DB_ROUTE_TABLE_2, fakes.DB_ROUTE_TABLE_2) - - def test_create_route_invalid_parameters(self): - id_ec2_eni_vpc_2 = fakes.random_ec2_id('eni') - eni_vpc_2 = fakes.gen_db_network_interface( - id_ec2_eni_vpc_2, fakes.random_os_id(), - fakes.ID_EC2_VPC_2, fakes.random_ec2_id('subnet'), '10.20.0.10', - instance_id=fakes.ID_EC2_INSTANCE_2) - - eni_2_in_instance_1 = fakes.gen_db_network_interface( - fakes.random_ec2_id('eni'), fakes.random_os_id(), - fakes.ID_EC2_VPC_1, fakes.random_ec2_id('subnet'), '10.10.3.15', - instance_id=fakes.ID_EC2_INSTANCE_1) - - self.set_mock_db_items( - fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_2, - fakes.DB_VPC_1, eni_vpc_2, fakes.DB_IGW_1, fakes.DB_IGW_2, - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2, - fakes.DB_VPN_GATEWAY_2) - - def do_check(params, error_code): - self.assert_execution_error(error_code, 'CreateRoute', params) - - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': 'not_a_cidr', - 'GatewayId': fakes.ID_EC2_IGW_1}, - 'InvalidParameterValue') - - do_check({'RouteTableId': fakes.random_ec2_id('rtb'), - 'DestinationCidrBlock': fakes.CIDR_VPC_1, - 'GatewayId': fakes.ID_EC2_IGW_1}, - 'InvalidRouteTableID.NotFound') - - # NOTE(ft): redefine vpc local route - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': fakes.CIDR_VPC_1, - 'GatewayId': fakes.ID_EC2_IGW_1}, - 'InvalidParameterValue') - - # NOTE(ft): create route for cidr lesser than vpc cidr - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': fakes.IP_NETWORK_INTERFACE_1 + '/24', - 'GatewayId': 
fakes.ID_EC2_IGW_1}, - 'InvalidParameterValue') - - # NOTE(ft): redefine existed route by route with another attributes - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '0.0.0.0/0', - 'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1}, - 'RouteAlreadyExists') - - # NOTE(ft): missed traffic receiver - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '0.0.0.0/0'}, - 'MissingParameter') - - # NOTE(ft): more than one traffic receiver - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '0.0.0.0/0', - 'NetworkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'GatewayId': fakes.ID_EC2_IGW_1}, - 'InvalidParameterCombination') - - # NOTE(ft): unknown internet gateway - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '192.168.100.0/0', - 'GatewayId': fakes.random_ec2_id('igw')}, - 'InvalidInternetGatewayID.NotFound') - - # NOTE(ft): gateway from different vpc - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '192.168.100.0/0', - 'GatewayId': fakes.ID_EC2_IGW_2}, - 'InvalidParameterValue') - - # NOTE(ft): unknown vpn gateway - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '192.168.100.0/0', - 'GatewayId': fakes.random_ec2_id('vgw')}, - 'InvalidVpnGatewayID.NotFound') - - # NOTE(ft): vpn gateway from different vpc - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '192.168.100.0/0', - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_2}, - 'InvalidGatewayID.NotFound') - - # NOTE(ft): network interface from different vpc - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '192.168.100.0/0', - 'NetworkInterfaceId': id_ec2_eni_vpc_2}, - 'InvalidParameterValue') - - # NOTE(ft): not vpc instance - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '192.168.100.0/0', - 'InstanceId': fakes.ID_EC2_INSTANCE_2}, - 
'InvalidParameterValue') - - # NOTE(ft): multiple network interfaces in instance - self.add_mock_db_items(eni_2_in_instance_1) - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '192.168.100.0/0', - 'InstanceId': fakes.ID_EC2_INSTANCE_1}, - 'InvalidInstanceID') - - # NOTE(ft): different vpc instance - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '192.168.100.0/0', - 'InstanceId': fakes.ID_EC2_INSTANCE_2}, - 'InvalidParameterValue') - - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_create_or_replace_route_rollback(self, routes_updater): - self.set_mock_db_items( - fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_2, - fakes.DB_VPC_1, fakes.DB_IGW_1, - fakes.gen_db_igw(fakes.ID_EC2_IGW_2, fakes.ID_EC2_VPC_1)) - routes_updater.side_effect = Exception() - - with tools.ScreeningLogger(log_name='ec2api.api'): - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'CreateRoute', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': '0.0.0.0/0', - 'GatewayId': fakes.ID_EC2_IGW_1}) - - self.db_api.update_item.assert_called_with(mock.ANY, - fakes.DB_ROUTE_TABLE_1) - - with tools.ScreeningLogger(log_name='ec2api.api'): - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'ReplaceRoute', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '0.0.0.0/0', - 'GatewayId': fakes.ID_EC2_IGW_2}) - - self.db_api.update_item.assert_called_with(mock.ANY, - fakes.DB_ROUTE_TABLE_2) - - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_replace_route(self, routes_updater): - route_table = copy.deepcopy(fakes.DB_ROUTE_TABLE_1) - route_table['routes'].append({'gateway_id': fakes.ID_EC2_IGW_1, - 'destination_cidr_block': '0.0.0.0/0'}) - self.set_mock_db_items( - route_table, fakes.DB_VPC_1, fakes.DB_IGW_1, - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2) - - resp = self.execute('ReplaceRoute', - 
{'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': '0.0.0.0/0', - 'NetworkInterfaceId': - fakes.ID_EC2_NETWORK_INTERFACE_1}) - self.assertEqual(True, resp['return']) - - route_table = copy.deepcopy(fakes.DB_ROUTE_TABLE_1) - route_table['routes'].append({ - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'destination_cidr_block': '0.0.0.0/0'}) - self.db_api.update_item.assert_called_once_with(mock.ANY, route_table) - routes_updater.assert_called_once_with( - mock.ANY, mock.ANY, route_table, - update_target=route_table_api.HOST_TARGET) - - def test_replace_route_invalid_parameters(self): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, - fakes.DB_VPC_1, fakes.DB_IGW_1) - - self.assert_execution_error( - 'InvalidParameterValue', 'ReplaceRoute', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'DestinationCidrBlock': '11.22.33.0/24', - 'GatewayId': fakes.ID_EC2_IGW_1}) - - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_delete_route(self, routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2) - resp = self.execute('DeleteRoute', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': - fakes.CIDR_EXTERNAL_NETWORK}) - self.assertEqual(True, resp['return']) - route_table = copy.deepcopy(fakes.DB_ROUTE_TABLE_2) - route_table['routes'] = [ - r for r in route_table['routes'] - if r['destination_cidr_block'] != fakes.CIDR_EXTERNAL_NETWORK] - self.db_api.update_item.assert_called_once_with(mock.ANY, route_table) - routes_updater.assert_called_once_with( - mock.ANY, mock.ANY, route_table, - update_target=route_table_api.HOST_TARGET) - - def test_delete_route_invalid_parameters(self): - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidRouteTableID.NotFound', 'DeleteRoute', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '11.22.33.0/24'}) - - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2) - self.assert_execution_error( - 
'InvalidRoute.NotFound', 'DeleteRoute', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': '11.22.33.0/24'}) - - self.assert_execution_error( - 'InvalidParameterValue', 'DeleteRoute', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': fakes.CIDR_VPC_1}) - - @tools.screen_unexpected_exception_logs - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_delete_route_rollback(self, routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2) - routes_updater.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DeleteRoute', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'DestinationCidrBlock': fakes.CIDR_EXTERNAL_NETWORK}) - - self.db_api.update_item.assert_any_call( - mock.ANY, fakes.DB_ROUTE_TABLE_2) - - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_enable_vgw_route_propagation(self, routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, fakes.DB_VPN_GATEWAY_1) - resp = self.execute('EnableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - self.assertEqual({'return': True}, resp) - route_table_1_updated = tools.update_dict( - fakes.DB_ROUTE_TABLE_1, - {'propagating_gateways': [fakes.ID_EC2_VPN_GATEWAY_1]}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, route_table_1_updated) - routes_updater.assert_called_once_with( - mock.ANY, mock.ANY, route_table_1_updated, - update_target=route_table_api.VPN_TARGET) - - self.db_api.reset_mock() - self.set_mock_db_items( - fakes.DB_ROUTE_TABLE_2, - tools.update_dict(fakes.DB_VPN_GATEWAY_2, - {'vpc_id': fakes.ID_EC2_VPC_1})) - resp = self.execute('EnableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - self.assertEqual({'return': True}, resp) - db_route_table_2 = copy.deepcopy(fakes.DB_ROUTE_TABLE_2) - 
db_route_table_2['propagating_gateways'].append( - fakes.ID_EC2_VPN_GATEWAY_2) - self.db_api.update_item.assert_called_once_with( - mock.ANY, db_route_table_2) - - def test_enable_vgw_route_propagation_idempotent(self): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2, fakes.DB_VPN_GATEWAY_1) - resp = self.execute('EnableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - self.assertEqual({'return': True}, resp) - self.assertFalse(self.db_api.update_item.called) - - def test_enable_vgw_route_propagation_invalid_parameters(self): - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1) - self.assert_execution_error( - 'InvalidRouteTableID.NotFound', 'EnableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1) - self.assert_execution_error( - 'InvalidVpnGatewayID.NotFound', 'EnableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, fakes.DB_VPN_GATEWAY_2) - self.assert_execution_error( - 'Gateway.NotAttached', 'EnableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - - self.set_mock_db_items( - fakes.DB_ROUTE_TABLE_1, - tools.update_dict(fakes.DB_VPN_GATEWAY_2, - {'vpc_id': fakes.ID_EC2_VPC_2})) - self.assert_execution_error( - 'Gateway.NotAttached', 'EnableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - - @tools.screen_unexpected_exception_logs - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_enable_vgw_route_propagation_rollback(self, routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, fakes.DB_VPN_GATEWAY_1) - routes_updater.side_effect = Exception() - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 
'EnableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - self.db_api.update_item.assert_called_with( - mock.ANY, fakes.DB_ROUTE_TABLE_1) - - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_disable_vgw_route_propagation(self, routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2, fakes.DB_VPN_GATEWAY_1) - resp = self.execute('DisableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - self.assertEqual({'return': True}, resp) - route_table_1_updated = tools.purge_dict( - fakes.DB_ROUTE_TABLE_2, ('propagating_gateways',)) - self.db_api.update_item.assert_called_once_with( - mock.ANY, route_table_1_updated) - routes_updater.assert_called_once_with( - mock.ANY, mock.ANY, route_table_1_updated, - update_target=route_table_api.VPN_TARGET) - - self.db_api.reset_mock() - routes_updater.reset_mock() - db_route_table_2 = copy.deepcopy(fakes.DB_ROUTE_TABLE_2) - db_route_table_2['propagating_gateways'].append( - fakes.ID_EC2_VPN_GATEWAY_2) - self.set_mock_db_items(db_route_table_2) - resp = self.execute('DisableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - self.assertEqual({'return': True}, resp) - self.db_api.update_item.assert_called_once_with( - mock.ANY, fakes.DB_ROUTE_TABLE_2) - self.assertFalse(routes_updater.called) - - def test_disable_vgw_route_propagation_idempotent(self): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2) - resp = self.execute('DisableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - self.assertEqual({'return': True}, resp) - self.assertFalse(self.db_api.update_item.called) - - def test_disable_vgw_route_propagation_invalid_parameters(self): - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidRouteTableID.NotFound', 
'DisableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - - @tools.screen_unexpected_exception_logs - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_disable_vgw_route_propagation_rollbadk(self, routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2, fakes.DB_VPN_GATEWAY_1) - routes_updater.side_effect = Exception() - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DisableVgwRoutePropagation', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'GatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - self.db_api.update_item.assert_called_with( - mock.ANY, fakes.DB_ROUTE_TABLE_2) - - @mock.patch('ec2api.api.route_table._update_subnet_routes') - def test_associate_route_table(self, routes_updater): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1, - fakes.DB_SUBNET_1) - resp = self.execute('AssociateRouteTable', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'SubnetId': fakes.ID_EC2_SUBNET_1}) - self.assertEqual(fakes.ID_EC2_SUBNET_1.replace('subnet', 'rtbassoc'), - resp['associationId']) - subnet_1 = tools.update_dict( - fakes.DB_SUBNET_1, - {'route_table_id': fakes.ID_EC2_ROUTE_TABLE_1}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, subnet_1) - routes_updater.assert_called_once_with( - mock.ANY, mock.ANY, subnet_1, fakes.DB_ROUTE_TABLE_1) - - def test_associate_route_table_invalid_parameters(self): - def do_check(params, error_code): - self.assert_execution_error( - error_code, 'AssociateRouteTable', params) - - self.set_mock_db_items() - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'SubnetId': fakes.ID_EC2_SUBNET_1}, - 'InvalidRouteTableID.NotFound') - - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1) - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'SubnetId': fakes.ID_EC2_SUBNET_1}, - 'InvalidSubnetID.NotFound') - - id_ec2_subnet_vpc_2 = fakes.random_ec2_id('subnet') - db_subnet_vpc_2 = {'id': 
id_ec2_subnet_vpc_2, - 'os_id': fakes.random_os_id(), - 'vpc_id': fakes.ID_EC2_VPC_2} - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, db_subnet_vpc_2) - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'SubnetId': id_ec2_subnet_vpc_2}, - 'InvalidParameterValue') - - subnet_2 = tools.update_dict( - fakes.DB_SUBNET_2, - {'route_table_id': fakes.ID_EC2_ROUTE_TABLE_2}) - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, subnet_2) - do_check({'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'SubnetId': fakes.ID_EC2_SUBNET_2}, - 'Resource.AlreadyAssociated') - - @tools.screen_unexpected_exception_logs - @mock.patch('ec2api.api.route_table._update_subnet_routes') - def test_associate_route_table_rollback(self, routes_updater): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1, - fakes.DB_SUBNET_1) - routes_updater.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'AssociateRouteTable', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'SubnetId': fakes.ID_EC2_SUBNET_1}) - - self.db_api.update_item.assert_any_call(mock.ANY, fakes.DB_SUBNET_1) - - @mock.patch('ec2api.api.route_table._update_subnet_routes') - def test_replace_route_table_association(self, routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2, fakes.DB_ROUTE_TABLE_3, - fakes.DB_SUBNET_2) - resp = self.execute( - 'ReplaceRouteTableAssociation', - {'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_3, - 'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2}) - self.assertEqual(fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_2, - resp['newAssociationId']) - subnet_2 = tools.update_dict( - fakes.DB_SUBNET_2, - {'route_table_id': fakes.ID_EC2_ROUTE_TABLE_2}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, subnet_2) - routes_updater.assert_called_once_with( - mock.ANY, mock.ANY, subnet_2, fakes.DB_ROUTE_TABLE_2) - - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - def test_replace_route_table_association_main(self, routes_updater): 
- self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_2, - fakes.DB_VPC_1) - resp = self.execute('ReplaceRouteTableAssociation', - {'AssociationId': - fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_1, - 'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2}) - self.assertEqual(fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_1, - resp['newAssociationId']) - vpc = tools.update_dict( - fakes.DB_VPC_1, - {'route_table_id': fakes.ID_EC2_ROUTE_TABLE_2}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, vpc) - routes_updater.assert_called_once_with( - mock.ANY, mock.ANY, fakes.DB_ROUTE_TABLE_2, - default_associations_only=True) - - def test_replace_route_table_association_invalid_parameters(self): - def do_check(params, error_code): - self.assert_execution_error( - error_code, 'ReplaceRouteTableAssociation', params) - - self.set_mock_db_items() - do_check({'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_1, - 'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1}, - 'InvalidRouteTableID.NotFound') - - # NOTE(ft): association with vpc is obsolete - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1) - do_check({'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_1, - 'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1}, - 'InvalidAssociationID.NotFound') - - # NOTE(ft): association with subnet is obsolete (no subnet) - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_3) - do_check({'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_3, - 'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_3}, - 'InvalidAssociationID.NotFound') - - # NOTE(ft): association with subnet is obsolete (subnet is - # disassociated) - self.set_mock_db_items( - fakes.DB_ROUTE_TABLE_3, - tools.purge_dict(fakes.DB_SUBNET_2, ['route_table_id'])) - do_check({'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_3, - 'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_3}, - 'InvalidAssociationID.NotFound') - - # NOTE(ft): association belongs to different vpc - id_ec2_subnet_vpc_2 = fakes.random_ec2_id('subnet') - db_subnet_vpc_2 = {'id': 
id_ec2_subnet_vpc_2, - 'os_id': fakes.random_os_id(), - 'vpc_id': fakes.ID_EC2_VPC_2, - 'route_table_id': fakes.random_ec2_id('rtb')} - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2, db_subnet_vpc_2) - do_check({'AssociationId': ec2utils.change_ec2_id_kind( - id_ec2_subnet_vpc_2, 'rtbassoc'), - 'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2}, - 'InvalidParameterValue') - - @mock.patch('ec2api.api.route_table._update_routes_in_associated_subnets') - @mock.patch('ec2api.api.route_table._update_subnet_routes') - def test_replace_route_table_association_rollback(self, routes_updater, - multiply_routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_2, - fakes.DB_ROUTE_TABLE_3, fakes.DB_SUBNET_2, - fakes.DB_VPC_1) - multiply_routes_updater.side_effect = Exception() - - with tools.ScreeningLogger(log_name='ec2api.api'): - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'ReplaceRouteTableAssociation', - {'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_1, - 'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2}) - - self.db_api.update_item.assert_any_call( - mock.ANY, fakes.DB_VPC_1) - - self.db_api.reset_mock() - routes_updater.side_effect = Exception() - - with tools.ScreeningLogger(log_name='ec2api.api'): - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'ReplaceRouteTableAssociation', - {'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_3, - 'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2}) - - self.db_api.update_item.assert_any_call( - mock.ANY, fakes.DB_SUBNET_2) - - @mock.patch('ec2api.api.route_table._update_subnet_routes') - def test_disassociate_route_table(self, routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_3, - fakes.DB_SUBNET_2, fakes.DB_VPC_1) - resp = self.execute( - 'DisassociateRouteTable', - {'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_3}) - self.assertEqual(True, resp['return']) - subnet_1 = tools.purge_dict(fakes.DB_SUBNET_2, ('route_table_id',)) - 
self.db_api.update_item.assert_called_once_with( - mock.ANY, subnet_1) - routes_updater.assert_called_once_with( - mock.ANY, mock.ANY, subnet_1, fakes.DB_ROUTE_TABLE_1) - - def test_disassociate_route_table_invalid_parameter(self): - def do_check(params, error_code): - self.assert_execution_error( - error_code, 'DisassociateRouteTable', params) - - self.set_mock_db_items() - do_check({'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_1}, - 'InvalidAssociationID.NotFound') - - self.set_mock_db_items( - tools.purge_dict(fakes.DB_SUBNET_1, ['route_table_id'])) - do_check({'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_2}, - 'InvalidAssociationID.NotFound') - - self.set_mock_db_items(fakes.DB_VPC_1) - do_check({'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_1}, - 'InvalidParameterValue') - - @tools.screen_unexpected_exception_logs - @mock.patch('ec2api.api.route_table._update_subnet_routes') - def test_disassociate_route_table_rollback(self, routes_updater): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_3, - fakes.DB_SUBNET_2, fakes.DB_VPC_1) - routes_updater.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DisassociateRouteTable', - {'AssociationId': fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_3}) - - self.db_api.update_item.assert_any_call( - mock.ANY, fakes.DB_SUBNET_2) - - def test_delete_route_table(self): - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2, fakes.DB_VPC_1, - fakes.DB_SUBNET_1, fakes.DB_SUBNET_2) - resp = self.execute('DeleteRouteTable', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2}) - self.assertEqual(True, resp['return']) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, - fakes.ID_EC2_ROUTE_TABLE_2) - - def test_delete_route_table_invalid_parameters(self): - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidRouteTableID.NotFound', 'DeleteRouteTable', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1}) - - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_1, 
fakes.DB_VPC_1) - self.assert_execution_error( - 'DependencyViolation', 'DeleteRouteTable', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_1}) - - subnet = tools.update_dict( - fakes.DB_SUBNET_2, - {'route_table_id': fakes.ID_EC2_ROUTE_TABLE_2}) - self.set_mock_db_items(fakes.DB_ROUTE_TABLE_2, fakes.DB_VPC_1, subnet) - self.assert_execution_error( - 'DependencyViolation', 'DeleteRouteTable', - {'RouteTableId': fakes.ID_EC2_ROUTE_TABLE_2}) - - def test_describe_route_tables(self): - self.set_mock_db_items( - fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_2, - fakes.DB_ROUTE_TABLE_3, fakes.DB_SUBNET_1, fakes.DB_SUBNET_2, - fakes.DB_VPC_1, fakes.DB_VPC_2, fakes.DB_IGW_1, fakes.DB_IGW_2, - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2, - fakes.DB_INSTANCE_1, fakes.DB_VPN_GATEWAY_1, - fakes.DB_VPN_CONNECTION_1) - self.nova.servers.get.return_value = ( - mock.NonCallableMock(status='ACTIVE')) - - resp = self.execute('DescribeRouteTables', {}) - self.assertThat(resp['routeTableSet'], - matchers.ListMatches([fakes.EC2_ROUTE_TABLE_1, - fakes.EC2_ROUTE_TABLE_2, - fakes.EC2_ROUTE_TABLE_3], - orderless_lists=True)) - - resp = self.execute('DescribeRouteTables', - {'RouteTableId.1': fakes.ID_EC2_ROUTE_TABLE_1}) - self.assertThat(resp['routeTableSet'], - matchers.ListMatches([fakes.EC2_ROUTE_TABLE_1])) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_ROUTE_TABLE_1])) - - self.check_filtering( - 'DescribeRouteTables', 'routeTableSet', - [('association.route-table-association-id', - fakes.ID_EC2_ROUTE_TABLE_ASSOCIATION_1), - ('association.route-table-id', fakes.ID_EC2_ROUTE_TABLE_1), - ('association.subnet-id', fakes.ID_EC2_SUBNET_2), - ('association.main', True), - ('route-table-id', fakes.ID_EC2_ROUTE_TABLE_1), - ('route.destination-cidr-block', fakes.CIDR_EXTERNAL_NETWORK), - ('route.gateway-id', 'local'), - ('route.instance-id', fakes.ID_EC2_INSTANCE_1), - ('route.origin', 'CreateRouteTable'), - ('route.state', 'active'), - 
('vpc-id', fakes.ID_EC2_VPC_1)]) - self.check_tag_support( - 'DescribeRouteTables', 'routeTableSet', - fakes.ID_EC2_ROUTE_TABLE_1, 'routeTableId') - - def test_describe_route_tables_variations(self): - igw_1 = tools.purge_dict(fakes.DB_IGW_1, ('vpc_id',)) - igw_2 = tools.update_dict(fakes.DB_IGW_2, - {'vpc_id': fakes.ID_EC2_VPC_2}) - subnet_1 = tools.update_dict( - fakes.DB_SUBNET_1, - {'route_table_id': fakes.ID_EC2_ROUTE_TABLE_1}) - subnet_2 = tools.update_dict( - fakes.DB_SUBNET_2, - {'route_table_id': fakes.ID_EC2_ROUTE_TABLE_2}) - route_table_1 = copy.deepcopy(fakes.DB_ROUTE_TABLE_1) - route_table_1['routes'].append( - {'destination_cidr_block': '0.0.0.0/0', - 'gateway_id': fakes.ID_EC2_IGW_2}) - route_table_1['routes'].append( - {'destination_cidr_block': '192.168.77.0/24', - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1}) - deleted_eni_id = fakes.random_ec2_id('eni') - route_table_1['routes'].append( - {'destination_cidr_block': '192.168.99.0/24', - 'network_interface_id': deleted_eni_id}) - route_table_2 = copy.deepcopy(fakes.DB_ROUTE_TABLE_2) - route_table_2['routes'].append( - {'destination_cidr_block': '192.168.88.0/24', - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_2}) - route_table_2['routes'].append( - {'destination_cidr_block': '192.168.111.0/24', - 'gateway_id': fakes.ID_EC2_VPN_GATEWAY_1}) - route_table_2['routes'].append( - {'destination_cidr_block': '192.168.122.0/24', - 'gateway_id': fakes.ID_EC2_VPN_GATEWAY_2}) - self.set_mock_db_items( - route_table_1, route_table_2, fakes.DB_VPC_1, fakes.DB_VPC_2, - igw_1, igw_2, subnet_1, subnet_2, - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2, - fakes.DB_VPN_GATEWAY_2) - self.nova.servers.get.return_value = ( - mock.NonCallableMock(status='DOWN')) - resp = self.execute('DescribeRouteTables', {}) - ec2_route_table_1 = copy.deepcopy(fakes.EC2_ROUTE_TABLE_1) - ec2_route_table_1['routeSet'].append({ - 'destinationCidrBlock': '0.0.0.0/0', - 'gatewayId': fakes.ID_EC2_IGW_2, - 
'state': 'blackhole', - 'origin': 'CreateRoute'}) - ec2_route_table_1['routeSet'].append({ - 'destinationCidrBlock': '192.168.77.0/24', - 'networkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_1, - 'state': 'blackhole', - 'origin': 'CreateRoute'}) - ec2_route_table_1['routeSet'].append({ - 'destinationCidrBlock': '192.168.99.0/24', - 'networkInterfaceId': deleted_eni_id, - 'state': 'blackhole', - 'origin': 'CreateRoute'}) - ec2_route_table_1['associationSet'].append({ - 'routeTableAssociationId': - fakes.ID_EC2_SUBNET_1.replace('subnet', 'rtbassoc'), - 'routeTableId': fakes.ID_EC2_ROUTE_TABLE_1, - 'subnetId': fakes.ID_EC2_SUBNET_1, - 'main': False}) - ec2_route_table_2 = copy.deepcopy(fakes.EC2_ROUTE_TABLE_2) - ec2_route_table_2['routeSet'][1]['state'] = 'blackhole' - del ec2_route_table_2['routeSet'][2] - ec2_route_table_2['routeSet'][2]['state'] = 'blackhole' - ec2_route_table_2['routeSet'].append({ - 'destinationCidrBlock': '192.168.88.0/24', - 'networkInterfaceId': fakes.ID_EC2_NETWORK_INTERFACE_2, - 'instanceId': fakes.ID_EC2_INSTANCE_1, - 'instanceOwnerId': fakes.ID_OS_PROJECT, - 'state': 'blackhole', - 'origin': 'CreateRoute'}) - ec2_route_table_2['routeSet'].append({ - 'destinationCidrBlock': '192.168.111.0/24', - 'gatewayId': fakes.ID_EC2_VPN_GATEWAY_1, - 'state': 'blackhole', - 'origin': 'CreateRoute'}) - ec2_route_table_2['routeSet'].append({ - 'destinationCidrBlock': '192.168.122.0/24', - 'gatewayId': fakes.ID_EC2_VPN_GATEWAY_2, - 'state': 'blackhole', - 'origin': 'CreateRoute'}) - ec2_route_table_2['associationSet'] = [{ - 'routeTableAssociationId': - fakes.ID_EC2_SUBNET_2.replace('subnet', 'rtbassoc'), - 'routeTableId': fakes.ID_EC2_ROUTE_TABLE_2, - 'subnetId': fakes.ID_EC2_SUBNET_2, - 'main': False}] - self.assertThat(resp['routeTableSet'], - matchers.ListMatches([ec2_route_table_1, - ec2_route_table_2])) - - def test_format_route_table(self): - id_db_ec2_vpn_gateway_3 = fakes.random_ec2_id('vgw') - db_route_table_1 = tools.update_dict( - 
fakes.DB_ROUTE_TABLE_1, - {'propagating_gateways': [fakes.ID_EC2_VPN_GATEWAY_1, - fakes.ID_EC2_VPN_GATEWAY_2, - id_db_ec2_vpn_gateway_3]}) - db_route_table_1['routes'].extend( - [{'gateway_id': fakes.ID_EC2_VPN_GATEWAY_1, - 'destination_cidr_block': fakes.CIDR_VPN_1_STATIC}, - {'gateway_id': fakes.ID_EC2_VPN_GATEWAY_2, - 'destination_cidr_block': '192.168.201.0/24'}]) - vpn_connection_3 = tools.update_dict( - fakes.DB_VPN_CONNECTION_1, - {'customer_gateway_id': fakes.random_ec2_id('cgw')}) - vpn_connection_3['cidrs'].append('192.168.120.0/24') - ec2_route_table_1 = tools.patch_dict( - fakes.EC2_ROUTE_TABLE_1, - {'propagatingVgwSet': [{'gatewayId': fakes.ID_EC2_VPN_GATEWAY_1}, - {'gatewayId': fakes.ID_EC2_VPN_GATEWAY_2}, - {'gatewayId': id_db_ec2_vpn_gateway_3}]}, - ('associationSet',)) - ec2_route_table_1['routeSet'].extend( - [{'gatewayId': fakes.ID_EC2_VPN_GATEWAY_1, - 'destinationCidrBlock': fakes.CIDR_VPN_1_STATIC, - 'origin': 'CreateRoute', - 'state': 'active'}, - {'gatewayId': fakes.ID_EC2_VPN_GATEWAY_2, - 'destinationCidrBlock': '192.168.201.0/24', - 'origin': 'CreateRoute', - 'state': 'blackhole'}, - {'gatewayId': fakes.ID_EC2_VPN_GATEWAY_1, - 'destinationCidrBlock': fakes.CIDR_VPN_1_PROPAGATED_1, - 'origin': 'EnableVgwRoutePropagation', - 'state': 'active'}, - {'gatewayId': fakes.ID_EC2_VPN_GATEWAY_1, - 'destinationCidrBlock': '192.168.120.0/24', - 'origin': 'EnableVgwRoutePropagation', - 'state': 'active'}, - {'gatewayId': fakes.ID_EC2_VPN_GATEWAY_2, - 'destinationCidrBlock': fakes.CIDR_VPN_2_PROPAGATED_1, - 'origin': 'EnableVgwRoutePropagation', - 'state': 'blackhole'}, - {'gatewayId': fakes.ID_EC2_VPN_GATEWAY_2, - 'destinationCidrBlock': fakes.CIDR_VPN_2_PROPAGATED_2, - 'origin': 'EnableVgwRoutePropagation', - 'state': 'blackhole'}]) - - self.assertThat( - route_table_api._format_route_table( - base.create_context(), db_route_table_1, - gateways={gw['id']: gw - for gw in (fakes.DB_VPN_GATEWAY_1, - fakes.DB_VPN_GATEWAY_2, - fakes.DB_IGW_1)}, - 
vpn_connections_by_gateway_id={ - fakes.ID_EC2_VPN_GATEWAY_1: [fakes.DB_VPN_CONNECTION_1, - vpn_connection_3], - fakes.ID_EC2_VPN_GATEWAY_2: [fakes.DB_VPN_CONNECTION_2]}), - matchers.DictMatches(ec2_route_table_1, orderless_lists=True), - verbose=True) - - def test_get_subnet_host_routes_and_gateway_ip(self): - self.set_mock_db_items( - fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2, - fakes.DB_IGW_1, fakes.DB_VPN_GATEWAY_1, fakes.DB_VPN_GATEWAY_2, - fakes.DB_VPN_CONNECTION_1) - - route_table_1 = copy.deepcopy(fakes.DB_ROUTE_TABLE_1) - route_table_1['routes'].extend([ - {'destination_cidr_block': '192.168.111.0/24', - 'gateway_id': fakes.ID_EC2_VPN_GATEWAY_1}, - {'destination_cidr_block': '192.168.222.0/24', - 'gateway_id': fakes.ID_EC2_VPN_GATEWAY_2}, - {'destination_cidr_block': '0.0.0.0/0', - 'gateway_id': fakes.random_ec2_id('igw')}, - {'destination_cidr_block': '192.168.200.0/24', - 'gateway_id': fakes.random_ec2_id('vgw')}]) - host_routes, gateway_ip = ( - route_table_api._get_subnet_host_routes_and_gateway_ip( - mock.ANY, route_table_1, fakes.CIDR_SUBNET_1)) - - self.assertThat(host_routes, - matchers.ListMatches([ - {'destination': fakes.CIDR_VPC_1, - 'nexthop': fakes.IP_GATEWAY_SUBNET_1}, - {'destination': '192.168.111.0/24', - 'nexthop': fakes.IP_GATEWAY_SUBNET_1}, - {'destination': '192.168.222.0/24', - 'nexthop': '127.0.0.1'}, - {'destination': '192.168.200.0/24', - 'nexthop': '127.0.0.1'}, - {'destination': '169.254.169.254/32', - 'nexthop': fakes.IP_GATEWAY_SUBNET_1}])) - self.assertIsNone(gateway_ip) - - host_routes, gateway_ip = ( - route_table_api._get_subnet_host_routes_and_gateway_ip( - mock.ANY, fakes.DB_ROUTE_TABLE_2, fakes.CIDR_SUBNET_1)) - self.assertEqual(fakes.IP_GATEWAY_SUBNET_1, gateway_ip) - - self.assertThat(host_routes, - matchers.ListMatches([ - {'destination': fakes.CIDR_VPC_1, - 'nexthop': fakes.IP_GATEWAY_SUBNET_1}, - {'destination': fakes.CIDR_EXTERNAL_NETWORK, - 'nexthop': fakes.IP_NETWORK_INTERFACE_2}, - 
{'destination': '0.0.0.0/0', - 'nexthop': fakes.IP_GATEWAY_SUBNET_1}, - {'destination': fakes.CIDR_VPN_1_PROPAGATED_1, - 'nexthop': fakes.IP_GATEWAY_SUBNET_1}])) - self.assertEqual(fakes.IP_GATEWAY_SUBNET_1, gateway_ip) - - @mock.patch('ec2api.api.route_table.' - '_get_subnet_host_routes_and_gateway_ip') - @mock.patch('ec2api.api.route_table._get_active_route_destinations') - def test_update_host_routes(self, destinations_getter, routes_getter): - self.neutron.show_subnet.side_effect = tools.get_by_1st_arg_getter( - {fakes.ID_OS_SUBNET_1: {'subnet': fakes.OS_SUBNET_1}, - fakes.ID_OS_SUBNET_2: {'subnet': fakes.OS_SUBNET_2}}) - routes_getter.side_effect = [ - ('fake_routes', fakes.IP_GATEWAY_SUBNET_1), - ('fake_routes', None)] - destinations_getter.return_value = {'fake': 'objects'} - - route_table_api._update_host_routes( - base.create_context(), self.neutron, common.OnCrashCleaner(), - fakes.DB_ROUTE_TABLE_1, [fakes.DB_SUBNET_1, fakes.DB_SUBNET_2]) - - destinations_getter.assert_called_once_with( - mock.ANY, fakes.DB_ROUTE_TABLE_1) - self.assertEqual(2, routes_getter.call_count) - routes_getter.assert_any_call( - mock.ANY, fakes.DB_ROUTE_TABLE_1, fakes.CIDR_SUBNET_1, - {'fake': 'objects'}) - routes_getter.assert_any_call( - mock.ANY, fakes.DB_ROUTE_TABLE_1, fakes.CIDR_SUBNET_2, - {'fake': 'objects'}) - self.assertEqual(2, self.neutron.update_subnet.call_count) - self.neutron.update_subnet.assert_any_call( - fakes.ID_OS_SUBNET_1, - {'subnet': {'host_routes': 'fake_routes', - 'gateway_ip': fakes.IP_GATEWAY_SUBNET_1}}) - self.neutron.update_subnet.assert_any_call( - fakes.ID_OS_SUBNET_2, - {'subnet': {'host_routes': 'fake_routes', - 'gateway_ip': None}}) - - self.neutron.reset_mock() - - routes_getter.side_effect = None - routes_getter.return_value = ('fake_routes', fakes.IP_GATEWAY_SUBNET_2) - try: - with common.OnCrashCleaner() as cleaner: - route_table_api._update_host_routes( - base.create_context(), self.neutron, cleaner, - fakes.DB_ROUTE_TABLE_1, 
[fakes.DB_SUBNET_1]) - raise Exception('fake_exception') - except Exception as ex: - if str(ex) != 'fake_exception': - raise - - self.neutron.update_subnet.assert_any_call( - fakes.ID_OS_SUBNET_1, - {'subnet': {'host_routes': fakes.OS_SUBNET_1['host_routes'], - 'gateway_ip': fakes.IP_GATEWAY_SUBNET_1}}) - - @mock.patch('ec2api.api.vpn_connection._update_vpn_routes') - @mock.patch('ec2api.api.route_table._update_host_routes') - def test_update_routes_in_associated_subnets(self, routes_updater, - update_vpn_routes): - subnet_default_rtb = {'id': fakes.random_ec2_id('subnet'), - 'vpc_id': fakes.ID_EC2_VPC_1} - subnet_rtb_1 = {'id': fakes.random_ec2_id('subnet'), - 'vpc_id': fakes.ID_EC2_VPC_1, - 'route_table_id': fakes.ID_EC2_ROUTE_TABLE_1} - subnet_rtb_2 = {'id': fakes.random_ec2_id('subnet'), - 'vpc_id': fakes.ID_EC2_VPC_1, - 'route_table_id': fakes.ID_EC2_ROUTE_TABLE_2} - subnet_vpc_2 = {'id': fakes.random_ec2_id('subnet'), - 'vpc_id': fakes.ID_EC2_VPC_2} - self.set_mock_db_items(subnet_default_rtb, subnet_rtb_1, subnet_rtb_2, - subnet_vpc_2, fakes.DB_VPC_1) - - def do_check(rtb, subnets, default_associations_only=None, - host_only=None): - self.db_api.reset_mock() - routes_updater.reset_mock() - update_vpn_routes.reset_mock() - route_table_api._update_routes_in_associated_subnets( - base.create_context(), 'fake_cleaner', rtb, - default_associations_only=default_associations_only, - update_target=(route_table_api.HOST_TARGET - if host_only else - None)) - - self.db_api.get_items.assert_any_call( - mock.ANY, 'subnet') - routes_updater.assert_called_once_with( - mock.ANY, self.neutron, 'fake_cleaner', rtb, subnets) - if host_only: - self.assertFalse(update_vpn_routes.called) - else: - update_vpn_routes.assert_called_once_with( - mock.ANY, self.neutron, 'fake_cleaner', rtb, subnets) - - do_check(fakes.DB_ROUTE_TABLE_2, [subnet_rtb_2], host_only=True) - self.db_api.get_item_by_id.assert_called_once_with( - mock.ANY, fakes.ID_EC2_VPC_1) - - 
do_check(fakes.DB_ROUTE_TABLE_1, [subnet_default_rtb, subnet_rtb_1]) - self.db_api.get_item_by_id.assert_called_once_with( - mock.ANY, fakes.ID_EC2_VPC_1) - - do_check(fakes.DB_ROUTE_TABLE_1, [subnet_default_rtb], - default_associations_only=True) - self.assertFalse(self.db_api.get_item_by_id.called) - - routes_updater.reset_mock() - update_vpn_routes.reset_mock() - route_table_api._update_routes_in_associated_subnets( - mock.MagicMock(), 'fake_cleaner', fakes.DB_ROUTE_TABLE_1, - update_target=route_table_api.VPN_TARGET) - routes_updater.assert_called_once_with( - mock.ANY, self.neutron, 'fake_cleaner', - fakes.DB_ROUTE_TABLE_1, [subnet_default_rtb, subnet_rtb_1]) - update_vpn_routes.assert_called_once_with( - mock.ANY, self.neutron, 'fake_cleaner', - fakes.DB_ROUTE_TABLE_1, [subnet_default_rtb, subnet_rtb_1]) - - def test_get_router_destinations(self): - self.set_mock_db_items(fakes.DB_IGW_1, fakes.DB_NETWORK_INTERFACE_2) - route_table_2 = copy.deepcopy(fakes.DB_ROUTE_TABLE_2) - fake_igw_id = fakes.random_ec2_id('igw') - fake_vgw_id = fakes.random_ec2_id('vgw') - fake_eni_id = fakes.random_ec2_id('eni') - route_table_2['routes'].extend([ - {'gateway_id': fake_igw_id, - 'destination_cidr_block': 'fake'}, - {'gateway_id': fake_vgw_id, - 'destination_cidr_block': 'fake'}, - {'network_interface_id': fake_eni_id, - 'destination_cidr_block': 'fake'}]) - host_routes = route_table_api._get_active_route_destinations( - 'fake_context', route_table_2) - self.assertThat(host_routes, matchers.DictMatches({ - fakes.ID_EC2_IGW_1: fakes.DB_IGW_1, - fakes.ID_EC2_NETWORK_INTERFACE_2: - fakes.DB_NETWORK_INTERFACE_2})) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, [fakes.ID_EC2_NETWORK_INTERFACE_2, fakes.ID_EC2_IGW_1, - fake_igw_id, fake_vgw_id, fake_eni_id, - fakes.ID_EC2_VPN_GATEWAY_1]) - - @mock.patch('ec2api.api.vpn_connection._update_vpn_routes') - @mock.patch('ec2api.api.route_table._update_host_routes') - def test_update_subnet_routes(self, 
host_routes_updater, - update_vpn_routes): - route_table_api._update_subnet_routes( - base.create_context(), 'fake_cleaner', fakes.DB_SUBNET_1, - fakes.DB_ROUTE_TABLE_1) - host_routes_updater.assert_called_once_with( - mock.ANY, self.neutron, 'fake_cleaner', fakes.DB_ROUTE_TABLE_1, - [fakes.DB_SUBNET_1]) - update_vpn_routes.assert_called_once_with( - mock.ANY, self.neutron, 'fake_cleaner', fakes.DB_ROUTE_TABLE_1, - [fakes.DB_SUBNET_1]) - - @mock.patch('ec2api.api.ec2utils.check_and_create_default_vpc') - def test_describe_route_tables_no_default_vpc(self, check_and_create): - self.configure(disable_ec2_classic=True) - - def mock_check_and_create(context): - self.set_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_IGW_DEFAULT, - fakes.DB_ROUTE_TABLE_DEFAULT) - check_and_create.side_effect = mock_check_and_create - - resp = self.execute('DescribeRouteTables', {}) - self.assertEqual(resp['routeTableSet'], - [fakes.EC2_ROUTE_TABLE_DEFAULT]) - - check_and_create.assert_called_once_with(mock.ANY) - - -class RouteTableValidatorTestCase(test_base.BaseTestCase): - - def test_validate_igw_or_vgw_id(self): - validator = route_table_api.Validator() - validator.igw_or_vgw_id(fakes.random_ec2_id('igw')) - validator.igw_or_vgw_id(fakes.random_ec2_id('vgw')) - - invalid_ids = ['1234', 'a-1111', '', 'i-1111', 'i-rrr', 'foobar', - fakes.random_ec2_id('eni'), fakes.random_ec2_id('i'), - fakes.random_ec2_id('rtb'), fakes.random_ec2_id('vpn')] - - for id in invalid_ids: - self.assertRaises(exception.InvalidParameterValue, - validator.igw_or_vgw_id, id) diff --git a/ec2api/tests/unit/test_s3.py b/ec2api/tests/unit/test_s3.py deleted file mode 100644 index 300afde6..00000000 --- a/ec2api/tests/unit/test_s3.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unittets for S3 objectstore clone. -""" -from botocore import exceptions as botocore_exception -import botocore.session -import fixtures -from oslo_config import cfg -from oslo_config import fixture as config_fixture -from oslotest import base as test_base - -from ec2api.s3 import s3server - -CONF = cfg.CONF - - -class S3APITestCase(test_base.BaseTestCase): - """Test objectstore through S3 API.""" - - def setUp(self): - """Setup users, projects, and start a test server.""" - super(S3APITestCase, self).setUp() - tempdir = self.useFixture(fixtures.TempDir()) - conf = self.useFixture(config_fixture.Config()) - conf.config(buckets_path=tempdir.path, - s3_listen='127.0.0.1', - s3_listen_port=0) - - self.server = s3server.get_wsgi_server() - # NOTE(ft): this requires eventlet.monkey_patch, which is called in - # tests/unit/__init__.py. 
Remove it out from there if you get these - # tests rid of server run - self.server.start() - self.addCleanup(self.server.stop) - - s3_url = 'http://' + CONF.s3_listen + ':' + str(self.server.port) - region = 'FakeRegion' - connection_data = { - 'config_file': (None, 'AWS_CONFIG_FILE', None, None), - 'region': ('region', 'BOTO_DEFAULT_REGION', region, None), - } - session = botocore.session.get_session(connection_data) - conn = session.create_client( - 's3', region_name=region, endpoint_url=s3_url, - aws_access_key_id='fake', aws_secret_access_key='fake') - self.conn = conn - - def get_http_connection(*args): - """Get a new S3 connection, don't attempt to reuse connections.""" - return self.conn.new_http_connection(*args) - - self.conn.get_http_connection = get_http_connection - - def _ensure_no_buckets(self, buckets): - self.assertEqual(len(buckets['Buckets']), 0, - "Bucket list was not empty") - return True - - def _ensure_one_bucket(self, buckets, name): - self.assertEqual(len(buckets['Buckets']), 1, - "Bucket list didn't have exactly one element in it") - self.assertEqual(buckets['Buckets'][0]['Name'], name, "Wrong name") - return True - - def test_list_buckets(self): - # Make sure we started with no buckets. - self._ensure_no_buckets(self.conn.list_buckets()) - - def test_create_and_delete_bucket(self): - # Test bucket creation and deletion. - bucket_name = 'testbucket' - - self.conn.create_bucket(Bucket=bucket_name) - self._ensure_one_bucket(self.conn.list_buckets(), bucket_name) - self.conn.delete_bucket(Bucket=bucket_name) - self._ensure_no_buckets(self.conn.list_buckets()) - - def test_create_bucket_and_key_and_delete_key(self): - # Test key operations on buckets. 
- bucket_name = 'testbucket' - key_name = 'somekey' - key_contents = b'somekey' - - self.conn.create_bucket(Bucket=bucket_name) - self.conn.put_object(Bucket=bucket_name, Key=key_name, - Body=key_contents) - - # make sure the contents are correct - key = self.conn.get_object(Bucket=bucket_name, Key=key_name) - self.assertEqual(key['Body'].read(), key_contents, - "Bad contents") - - # delete the key - self.conn.delete_object(Bucket=bucket_name, Key=key_name) - - self.assertRaises(botocore_exception.ClientError, self.conn.get_object, - Bucket=bucket_name, Key=key_name) - - def test_unknown_bucket(self): - bucket_name = 'falalala' - self.assertRaises(botocore_exception.ClientError, - self.conn.head_bucket, - Bucket=bucket_name) - self.assertRaises(botocore_exception.ClientError, - self.conn.list_objects, - Bucket=bucket_name, MaxKeys=0) diff --git a/ec2api/tests/unit/test_security_group.py b/ec2api/tests/unit/test_security_group.py deleted file mode 100644 index 896d129b..00000000 --- a/ec2api/tests/unit/test_security_group.py +++ /dev/null @@ -1,571 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import copy -from unittest import mock - -from neutronclient.common import exceptions as neutron_exception - -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class SecurityGroupTestCase(base.ApiTestCase): - - def setUp(self): - super(SecurityGroupTestCase, self).setUp() - - def test_create_security_group(self): - self.set_mock_db_items(fakes.DB_VPC_1, - fakes.DB_SECURITY_GROUP_1) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - self.db_api.add_item.return_value = fakes.DB_SECURITY_GROUP_2 - self.neutron.create_security_group.return_value = ( - {'security_group': copy.deepcopy(fakes.OS_SECURITY_GROUP_2)}) - - resp = self.execute( - 'CreateSecurityGroup', - {'GroupName': 'groupname', - 'GroupDescription': 'Group description'}) - secgroup_body = ( - {'security_group': {'name': 'groupname', - 'description': 'Group description'}}) - self.neutron.create_security_group.assert_called_once_with( - secgroup_body) - db_group = tools.purge_dict(fakes.DB_SECURITY_GROUP_2, ('id',)) - db_group['vpc_id'] = None - self.db_api.add_item.assert_called_once_with(mock.ANY, 'sg', db_group) - self.neutron.create_security_group.reset_mock() - self.db_api.add_item.reset_mock() - - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - resp = self.execute( - 'CreateSecurityGroup', - {'VpcId': fakes.ID_EC2_VPC_1, - 'GroupName': 'groupname', - 'GroupDescription': 'Group description'}) - self.assertEqual(fakes.ID_EC2_SECURITY_GROUP_2, resp['groupId']) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'sg', - tools.purge_dict(fakes.DB_SECURITY_GROUP_2, ('id',))) - self.neutron.create_security_group.assert_called_once_with( - secgroup_body) - self.neutron.create_security_group.reset_mock() - self.db_api.add_item.reset_mock() - - 
self.configure(disable_ec2_classic=True) - self.add_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_SECURITY_GROUP_DEFAULT) - self.neutron.create_security_group.return_value = ( - {'security_group': copy.deepcopy(fakes.OS_SECURITY_GROUP_5)}) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1), - fakes.OS_SECURITY_GROUP_DEFAULT]}) - self.db_api.add_item.return_value = fakes.DB_SECURITY_GROUP_5 - - resp = self.execute( - 'CreateSecurityGroup', - {'GroupName': 'groupname2', - 'GroupDescription': 'Group description'}) - self.assertEqual(fakes.ID_EC2_SECURITY_GROUP_5, resp['groupId']) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'sg', - tools.purge_dict(fakes.DB_SECURITY_GROUP_5, ('id',))) - secgroup_body = ( - {'security_group': {'name': 'groupname2', - 'description': 'Group description'}}) - self.neutron.create_security_group.assert_called_once_with( - secgroup_body) - - def test_create_security_group_invalid(self): - - def do_check(args, error_code): - self.neutron.reset_mock() - self.db_api.reset_mock() - self.assert_execution_error( - error_code, 'CreateSecurityGroup', args) - - self.set_mock_db_items() - do_check({'VpcId': fakes.ID_EC2_VPC_1, - 'GroupName': 'groupname', - 'GroupDescription': 'Group description'}, - 'InvalidVpcID.NotFound') - self.db_api.get_item_by_id.assert_called_once_with(mock.ANY, - fakes.ID_EC2_VPC_1) - - do_check({'VpcId': fakes.ID_EC2_VPC_1, - 'GroupName': 'aa #^% -=99', - 'GroupDescription': 'Group description'}, - 'ValidationError') - - do_check({'VpcId': fakes.ID_EC2_VPC_1, - 'GroupName': 'groupname', - 'GroupDescription': 'aa #^% -=99'}, - 'ValidationError') - - do_check({'GroupName': 'aa \t\x01\x02\x7f', - 'GroupDescription': 'Group description'}, - 'ValidationError') - - do_check({'GroupName': 'groupname', - 'GroupDescription': 'aa \t\x01\x02\x7f'}, - 'ValidationError') - - do_check({'GroupName': 'x' * 256, - 'GroupDescription': 'Group description'}, - 
'ValidationError') - - do_check({'GroupName': 'groupname', - 'GroupDescription': 'x' * 256}, - 'ValidationError') - - do_check({'GroupName': 'groupname'}, - 'MissingParameter') - - do_check({'GroupDescription': 'description'}, - 'MissingParameter') - - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_2, fakes.DB_VPC_1) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [fakes.OS_SECURITY_GROUP_2]}) - do_check({'VpcId': fakes.ID_EC2_VPC_1, - 'GroupName': fakes.OS_SECURITY_GROUP_2['name'], - 'GroupDescription': 'description'}, - 'InvalidGroup.Duplicate') - - def test_create_security_group_over_quota(self): - self.neutron.create_security_group.side_effect = ( - neutron_exception.OverQuotaClient(413)) - self.assert_execution_error( - 'ResourceLimitExceeded', 'CreateSecurityGroup', - {'VpcId': fakes.ID_EC2_VPC_1, - 'GroupName': 'groupname', - 'GroupDescription': 'Group description'}) - secgroup_body = ( - {'security_group': {'name': 'groupname', - 'description': 'Group description'}}) - self.neutron.create_security_group.assert_called_once_with( - secgroup_body) - - @tools.screen_unexpected_exception_logs - def test_create_security_group_rollback(self): - self.set_mock_db_items(fakes.DB_VPC_1) - self.db_api.add_item.side_effect = Exception() - self.neutron.create_security_group.return_value = ( - {'security_group': copy.deepcopy(fakes.OS_SECURITY_GROUP_1)}) - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'CreateSecurityGroup', - {'VpcId': fakes.ID_EC2_VPC_1, - 'GroupName': 'groupname', - 'GroupDescription': 'Group description'}) - self.neutron.delete_security_group.assert_called_once_with( - fakes.ID_OS_SECURITY_GROUP_1) - - def test_delete_security_group(self): - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1) - resp = self.execute( - 'DeleteSecurityGroup', - {'GroupId': - fakes.ID_EC2_SECURITY_GROUP_1}) - self.assertEqual(True, resp['return']) - self.db_api.get_item_by_id.assert_any_call( - mock.ANY, - fakes.ID_EC2_SECURITY_GROUP_1) - 
self.db_api.delete_item.assert_called_once_with( - mock.ANY, - fakes.ID_EC2_SECURITY_GROUP_1) - self.neutron.delete_security_group.assert_called_once_with( - fakes.ID_OS_SECURITY_GROUP_1) - - self.db_api.delete_item.reset_mock() - self.neutron.delete_security_group.reset_mock() - - self.configure(disable_ec2_classic=True) - self.add_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_SECURITY_GROUP_DEFAULT, - fakes.DB_SECURITY_GROUP_2, - fakes.DB_SECURITY_GROUP_6) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1), - fakes.OS_SECURITY_GROUP_2, - fakes.OS_SECURITY_GROUP_4, - fakes.OS_SECURITY_GROUP_DEFAULT]}) - self.assert_execution_error( - 'InvalidGroup.NotFound', 'DeleteSecurityGroup', - {'GroupName': 'groupname2'}) - - self.db_api.delete_item.reset_mock() - self.neutron.delete_security_group.reset_mock() - - self.add_mock_db_items(fakes.DB_SECURITY_GROUP_5) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1), - fakes.OS_SECURITY_GROUP_2, - fakes.OS_SECURITY_GROUP_4, - fakes.OS_SECURITY_GROUP_5, - fakes.OS_SECURITY_GROUP_DEFAULT]}) - resp = self.execute( - 'DeleteSecurityGroup', - {'GroupName': 'groupname2'}) - self.assertEqual(True, resp['return']) - self.db_api.get_item_by_id.assert_any_call( - mock.ANY, - fakes.ID_EC2_SECURITY_GROUP_5) - self.db_api.delete_item.assert_called_with( - mock.ANY, - fakes.ID_EC2_SECURITY_GROUP_5) - self.neutron.delete_security_group.assert_called_once_with( - fakes.ID_OS_SECURITY_GROUP_5) - - # NOTE(Alex) This test is disabled because it checks using non-AWS id. 
- @base.skip_not_implemented - def test_delete_security_group_nova_os_id(self): - self.nova.security_groups.list.return_value = ( - [fakes.OS_SECURITY_GROUP_1, - fakes.OS_SECURITY_GROUP_2]) - resp = self.execute( - 'DeleteSecurityGroup', - {'GroupId': - fakes.ID_OS_SECURITY_GROUP_2}) - self.assertEqual(True, resp['return']) - self.nova.security_groups.delete.assert_called_once_with( - fakes.ID_OS_SECURITY_GROUP_2) - - def test_delete_security_group_invalid(self): - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, fakes.DB_VPC_1) - self.neutron.show_security_group.return_value = ( - {'security_group': fakes.OS_SECURITY_GROUP_1}) - self.assert_execution_error( - 'CannotDelete', 'DeleteSecurityGroup', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_1}) - self.assert_execution_error( - 'InvalidGroup.NotFound', 'DeleteSecurityGroup', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_2}) - self.assertEqual(0, self.neutron.delete_port.call_count) - self.assert_execution_error( - 'InvalidGroup.NotFound', 'DeleteSecurityGroup', - {'GroupName': 'badname'}) - self.assertEqual(0, self.neutron.delete_port.call_count) - self.assert_execution_error( - 'MissingParameter', 'DeleteSecurityGroup', {}) - self.assertEqual(0, self.neutron.delete_port.call_count) - - def test_delete_security_group_is_in_use(self): - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1) - self.neutron.delete_security_group.side_effect = ( - neutron_exception.Conflict()) - self.assert_execution_error( - 'DependencyViolation', 'DeleteSecurityGroup', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_1}) - self.assertEqual(0, self.db_api.delete_item.call_count) - - def test_describe_security_groups(self): - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_SECURITY_GROUP_2, - fakes.DB_SECURITY_GROUP_3, - fakes.DB_SECURITY_GROUP_4, - fakes.DB_SECURITY_GROUP_5,) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1), - fakes.OS_SECURITY_GROUP_2, - 
fakes.OS_SECURITY_GROUP_3, - fakes.OS_SECURITY_GROUP_4, - fakes.OS_SECURITY_GROUP_5]}) - - resp = self.execute('DescribeSecurityGroups', {}) - self.assertThat(resp['securityGroupInfo'], - matchers.ListMatches( - [fakes.EC2_SECURITY_GROUP_1, - fakes.EC2_SECURITY_GROUP_2, - fakes.EC2_SECURITY_GROUP_3, - fakes.EC2_SECURITY_GROUP_4, - fakes.EC2_SECURITY_GROUP_5], - orderless_lists=True)) - - resp = self.execute('DescribeSecurityGroups', - {'GroupName.1': 'groupname2'}) - self.assertThat(resp['securityGroupInfo'], - matchers.ListMatches( - [fakes.EC2_SECURITY_GROUP_4], - orderless_lists=True)) - self.assertEqual(0, self.db_api.delete_item.call_count) - - self.db_api.get_items_by_ids = tools.CopyingMock( - return_value=[fakes.DB_SECURITY_GROUP_4]) - - resp = self.execute('DescribeSecurityGroups', - {'GroupId.1': fakes.ID_EC2_SECURITY_GROUP_4}) - self.assertThat(resp['securityGroupInfo'], - matchers.ListMatches( - [fakes.EC2_SECURITY_GROUP_4], - orderless_lists=True)) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_SECURITY_GROUP_4])) - self.assertEqual(0, self.db_api.delete_item.call_count) - - self.check_filtering( - 'DescribeSecurityGroups', 'securityGroupInfo', - [('vpc-id', fakes.ID_EC2_VPC_1), - ('group-name', fakes.NAME_DEFAULT_OS_SECURITY_GROUP), - ('group-id', fakes.ID_EC2_SECURITY_GROUP_1), - ('description', fakes.EC2_SECURITY_GROUP_1['groupDescription']), - ('ip-permission.protocol', 'tcp'), - ('ip-permission.to-port', 10), - ('ip-permission.from-port', 10), - ('ip-permission.cidr', '192.168.1.0/24'), - # TODO(andrey-mp): declare this data in fakes - # ('ip-permission.group-id', fakes.ID_EC2_SECURITY_GROUP_1), - # ('ip-permission.group-name', 'default'), - # ('ip-permission.user-id', fakes.ID_OS_PROJECT), - ('owner-id', fakes.ID_OS_PROJECT)]) - self.check_tag_support( - 'DescribeSecurityGroups', 'securityGroupInfo', - fakes.ID_EC2_SECURITY_GROUP_4, 'groupId') - - self.configure(disable_ec2_classic=True) - 
self.add_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_SECURITY_GROUP_6) - resp = self.execute('DescribeSecurityGroups', - {'GroupName.1': 'groupname2'}) - self.assertThat(resp['securityGroupInfo'], - matchers.ListMatches( - [fakes.EC2_SECURITY_GROUP_5], - orderless_lists=True)) - - @mock.patch('ec2api.api.ec2utils.check_and_create_default_vpc') - def test_describe_security_groups_no_default_vpc(self, check_and_create): - self.configure(disable_ec2_classic=True) - - def mock_check_and_create(context): - self.set_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_SECURITY_GROUP_DEFAULT) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [fakes.OS_SECURITY_GROUP_DEFAULT]}) - check_and_create.side_effect = mock_check_and_create - - resp = self.execute('DescribeSecurityGroups', {}) - self.assertThat(resp['securityGroupInfo'], - matchers.ListMatches([ - fakes.EC2_SECURITY_GROUP_DEFAULT], - orderless_lists=True)) - - check_and_create.assert_called_once_with(mock.ANY) - - def test_repair_default_security_group(self): - self.db_api.add_item.return_value = fakes.DB_SECURITY_GROUP_1 - self.neutron.create_security_group.return_value = ( - {'security_group': copy.deepcopy(fakes.OS_SECURITY_GROUP_1)}) - self.set_mock_db_items(fakes.DB_VPC_1, - fakes.DB_SECURITY_GROUP_1, - fakes.DB_SECURITY_GROUP_2) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [fakes.OS_SECURITY_GROUP_2]}) - - resp = self.execute('DescribeSecurityGroups', {}) - self.db_api.restore_item.assert_called_once_with( - mock.ANY, 'sg', - {'id': fakes.ID_EC2_VPC_1.replace('vpc', 'sg'), - 'os_id': fakes.ID_OS_SECURITY_GROUP_1, - 'vpc_id': fakes.ID_EC2_VPC_1}) - secgroup_body = ( - {'security_group': {'name': fakes.ID_EC2_VPC_1, - 'description': 'Default VPC security group'}}) - self.neutron.create_security_group.assert_called_once_with( - secgroup_body) - - def test_authorize_security_group_invalid(self): - - def check_response(error_code, protocol, from_port, to_port, 
cidr, - group_id=fakes.ID_EC2_SECURITY_GROUP_2): - params = {'IpPermissions.1.FromPort': str(from_port), - 'IpPermissions.1.ToPort': str(to_port), - 'IpPermissions.1.IpProtocol': protocol} - if group_id is not None: - params['GroupId'] = group_id - if cidr is not None: - params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr - self.assert_execution_error( - error_code, 'AuthorizeSecurityGroupIngress', params) - self.neutron.reset_mock() - self.db_api.reset_mock() - - self.execute( - 'AuthorizeSecurityGroupIngress', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_2, - 'IpPermissions.1.FromPort': '-1', - 'IpPermissions.1.ToPort': '-1', - 'IpPermissions.1.IpProtocol': 'icmp', - 'IpPermissions.1.IpRanges.1.CidrIp': '0.0.0.0/0'}) - # Duplicate rule - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_SECURITY_GROUP_2) - self.neutron.create_security_group_rule.side_effect = ( - neutron_exception.Conflict) - check_response('InvalidPermission.Duplicate', 'icmp', - -1, -1, '0.0.0.0/0') - # Over quota - self.neutron.create_security_group_rule.side_effect = ( - neutron_exception.OverQuotaClient) - check_response('RulesPerSecurityGroupLimitExceeded', 'icmp', -1, -1, - '0.0.0.0/0') - # Invalid CIDR address - check_response('InvalidParameterValue', 'tcp', 80, 81, '0.0.0.0/0444') - # Missing ports - check_response('InvalidParameterValue', 'tcp', -1, -1, '0.0.0.0/0') - # from port cannot be greater than to port - check_response('InvalidParameterValue', 'tcp', 100, 1, '0.0.0.0/0') - # For tcp, negative values are not allowed - check_response('InvalidParameterValue', 'tcp', -1, 1, '0.0.0.0/0') - # For tcp, valid port range 1-65535 - check_response('InvalidParameterValue', 'tcp', 1, 65599, '0.0.0.0/0') - # Invalid protocol - check_response('InvalidParameterValue', 'xyz', 1, 14, '0.0.0.0/0') - # Invalid port - check_response('InvalidParameterValue', 'tcp', " ", "gg", '0.0.0.0/0') - # Invalid icmp port - check_response('InvalidParameterValue', 'icmp', " ", "gg", '0.0.0.0/0') - # 
Invalid CIDR Address - check_response('InvalidParameterValue', 'icmp', -1, -1, '0.0.0.0') - # Invalid CIDR Address - check_response('InvalidParameterValue', 'icmp', 5, 10, '0.0.0.0/') - # Invalid Cidr ports - check_response('InvalidParameterValue', 'icmp', 1, 256, '0.0.0.0/0') - # Missing group - check_response('MissingParameter', 'tcp', 1, 255, '0.0.0.0/0', None) - # Missing cidr - check_response('MissingParameter', 'tcp', 1, 255, None) - # Invalid remote group - self.assert_execution_error( - 'InvalidGroup.NotFound', 'AuthorizeSecurityGroupIngress', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_2, - 'IpPermissions.1.IpProtocol': 'icmp', - 'IpPermissions.1.Groups.1.GroupName': 'somegroup', - 'IpPermissions.1.Groups.1.UserId': 'i-99999999'}) - - def test_authorize_security_group_ingress_ip_ranges(self): - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_SECURITY_GROUP_2) - self.neutron.create_security_group_rule.return_value = ( - {'security_group_rule': [fakes.OS_SECURITY_GROUP_RULE_1]}) - self.execute( - 'AuthorizeSecurityGroupIngress', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_2, - 'IpPermissions.1.FromPort': '10', - 'IpPermissions.1.ToPort': '10', - 'IpPermissions.1.IpProtocol': 'tcp', - 'IpPermissions.1.IpRanges.1.CidrIp': '192.168.1.0/24'}) - self.neutron.create_security_group_rule.assert_called_once_with( - {'security_group_rule': - tools.purge_dict(fakes.OS_SECURITY_GROUP_RULE_1, - {'id', 'remote_group_id', 'tenant_id'})}) - # NOTE(Alex): Openstack extension, AWS-incompability - # IPv6 is not supported by Amazon. 
- self.execute( - 'AuthorizeSecurityGroupIngress', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_2, - 'IpPermissions.1.FromPort': '10', - 'IpPermissions.1.ToPort': '10', - 'IpPermissions.1.IpProtocol': 'tcp', - 'IpPermissions.1.IpRanges.1.CidrIp': '::/0'}) - self.neutron.create_security_group_rule.assert_called_with( - {'security_group_rule': - tools.patch_dict( - fakes.OS_SECURITY_GROUP_RULE_1, {'remote_ip_prefix': '::/0'}, - {'id', 'remote_group_id', 'tenant_id'})}) - - self.configure(disable_ec2_classic=True) - self.add_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_SECURITY_GROUP_4, - fakes.DB_SECURITY_GROUP_5, - fakes.DB_SECURITY_GROUP_6) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [fakes.OS_SECURITY_GROUP_4, - fakes.OS_SECURITY_GROUP_5]}) - - self.execute( - 'AuthorizeSecurityGroupIngress', - {'GroupName': 'groupname2', - 'IpPermissions.1.FromPort': '10', - 'IpPermissions.1.ToPort': '10', - 'IpPermissions.1.IpProtocol': 'tcp', - 'IpPermissions.1.IpRanges.1.CidrIp': '::/0'}) - security_group_rule = { - 'direction': 'ingress', - 'ethertype': 'IPv4', - 'port_range_min': 10, - 'port_range_max': 10, - 'protocol': 'tcp', - 'remote_ip_prefix': '::/0', - 'security_group_id': fakes.ID_OS_SECURITY_GROUP_5} - self.neutron.create_security_group_rule.assert_called_with( - {'security_group_rule': security_group_rule}) - - def test_authorize_security_group_egress_groups(self): - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_SECURITY_GROUP_2) - self.neutron.create_security_group_rule.return_value = ( - {'security_group_rule': [fakes.OS_SECURITY_GROUP_RULE_1]}) - self.execute( - 'AuthorizeSecurityGroupEgress', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_2, - 'IpPermissions.1.FromPort': '10', - 'IpPermissions.1.IpProtocol': '100', - 'IpPermissions.1.Groups.1.GroupId': - fakes.ID_EC2_SECURITY_GROUP_1}) - self.neutron.create_security_group_rule.assert_called_once_with( - {'security_group_rule': - 
tools.purge_dict(fakes.OS_SECURITY_GROUP_RULE_2, - {'id', 'remote_ip_prefix', 'tenant_id', - 'port_range_max'})}) - - def test_revoke_security_group_ingress_ip_ranges(self): - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_SECURITY_GROUP_2) - self.neutron.show_security_group.return_value = { - 'security_group': fakes.OS_SECURITY_GROUP_2} - self.neutron.delete_security_group_rule.return_value = True - self.execute( - 'RevokeSecurityGroupIngress', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_2, - 'IpPermissions.1.FromPort': '10', - 'IpPermissions.1.ToPort': '10', - 'IpPermissions.1.IpProtocol': 'tcp', - 'IpPermissions.1.IpRanges.1.CidrIp': '192.168.1.0/24'}) - self.neutron.show_security_group.assert_called_once_with( - fakes.ID_OS_SECURITY_GROUP_2) - self.neutron.delete_security_group_rule.assert_called_once_with( - fakes.OS_SECURITY_GROUP_RULE_1['id']) - - def test_revoke_security_group_egress_groups(self): - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_SECURITY_GROUP_2) - self.neutron.show_security_group.return_value = { - 'security_group': fakes.OS_SECURITY_GROUP_2} - self.neutron.delete_security_group_rule.return_value = True - self.execute( - 'RevokeSecurityGroupEgress', - {'GroupId': fakes.ID_EC2_SECURITY_GROUP_2, - 'IpPermissions.1.FromPort': '10', - 'IpPermissions.1.IpProtocol': '100', - 'IpPermissions.1.Groups.1.GroupId': - fakes.ID_EC2_SECURITY_GROUP_1}) - self.neutron.show_security_group.assert_called_once_with( - fakes.ID_OS_SECURITY_GROUP_2) - self.neutron.delete_security_group_rule.assert_called_once_with( - fakes.OS_SECURITY_GROUP_RULE_2['id']) diff --git a/ec2api/tests/unit/test_snapshot.py b/ec2api/tests/unit/test_snapshot.py deleted file mode 100644 index c12c63b2..00000000 --- a/ec2api/tests/unit/test_snapshot.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class SnapshotTestCase(base.ApiTestCase): - - def test_describe_snapshots(self): - self.cinder.volume_snapshots.list.return_value = [ - fakes.OSSnapshot(fakes.OS_SNAPSHOT_1), - fakes.OSSnapshot(fakes.OS_SNAPSHOT_2)] - - self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2, - fakes.DB_VOLUME_2) - - resp = self.execute('DescribeSnapshots', {}) - self.assertThat(resp, matchers.DictMatches( - {'snapshotSet': [fakes.EC2_SNAPSHOT_1, fakes.EC2_SNAPSHOT_2]}, - orderless_lists=True)) - - self.db_api.get_items.assert_any_call(mock.ANY, 'vol') - - self.db_api.get_items_by_ids = tools.CopyingMock( - return_value=[fakes.DB_SNAPSHOT_1]) - resp = self.execute('DescribeSnapshots', - {'SnapshotId.1': fakes.ID_EC2_SNAPSHOT_1}) - self.assertThat(resp, matchers.DictMatches( - {'snapshotSet': [fakes.EC2_SNAPSHOT_1]}, - orderless_lists=True)) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_SNAPSHOT_1])) - - self.check_filtering( - 'DescribeSnapshots', 'snapshotSet', - [ - # TODO(ft): declare a constant for the description in fakes - ('description', 'fake description'), - ('owner-id', fakes.ID_OS_PROJECT), - ('progress', '100%'), - ('snapshot-id', fakes.ID_EC2_SNAPSHOT_1), - ('start-time', 
fakes.TIME_CREATE_SNAPSHOT_2), - ('status', 'completed'), - ('volume-id', fakes.ID_EC2_VOLUME_2), - # TODO(ft): declare a constant for the volume size in fakes - ('volume-size', 1) - ]) - self.check_tag_support( - 'DescribeSnapshots', 'snapshotSet', - fakes.ID_EC2_SNAPSHOT_1, 'snapshotId') - - def test_describe_snapshots_auto_remove(self): - self.cinder.volume_snapshots.list.return_value = [] - - self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_VOLUME_2) - - resp = self.execute('DescribeSnapshots', {}) - self.assertThat(resp, matchers.DictMatches( - {'snapshotSet': []}, - orderless_lists=True)) - - self.db_api.get_items.assert_any_call(mock.ANY, 'vol') - self.db_api.get_items.assert_any_call(mock.ANY, 'snap') - self.db_api.delete_item.assert_any_call(mock.ANY, - fakes.ID_EC2_SNAPSHOT_1) - - def test_describe_snapshots_invalid_parameters(self): - self.cinder.volume_snapshots.list.return_value = [ - fakes.OSSnapshot(fakes.OS_SNAPSHOT_1), - fakes.OSSnapshot(fakes.OS_SNAPSHOT_2)] - - self.assert_execution_error( - 'InvalidSnapshot.NotFound', 'DescribeSnapshots', - {'SnapshotId.1': fakes.random_ec2_id('snap')}) - - self.cinder.volume_snapshots.list.side_effect = lambda: [] - - self.assert_execution_error( - 'InvalidSnapshot.NotFound', 'DescribeSnapshots', - {'SnapshotId.1': fakes.ID_EC2_SNAPSHOT_1}) - - def test_create_snapshot_from_volume(self): - self.cinder.volume_snapshots.create.return_value = ( - fakes.OSSnapshot(fakes.OS_SNAPSHOT_1)) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_SNAPSHOT_1)) - self.set_mock_db_items(fakes.DB_VOLUME_2) - self.cinder.volumes.get.side_effect = ( - lambda vol_id: ( - fakes.OSVolume(fakes.OS_VOLUME_2) - if vol_id == fakes.ID_OS_VOLUME_2 - else None)) - - resp = self.execute( - 'CreateSnapshot', - {'VolumeId': fakes.ID_EC2_VOLUME_2}) - self.assertThat(fakes.EC2_SNAPSHOT_1, matchers.DictMatches(resp)) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'snap', - 
tools.purge_dict(fakes.DB_SNAPSHOT_1, ('id',))) - - self.cinder.volume_snapshots.create.assert_called_once_with( - fakes.ID_OS_VOLUME_2, force=True) - - def test_format_snapshot_maps_status(self): - fake_snapshot = fakes.OSSnapshot(fakes.OS_SNAPSHOT_1) - self.cinder.volume_snapshots.list.return_value = [fake_snapshot] - self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_VOLUME_2) - - fake_snapshot.status = 'new' - resp = self.execute('DescribeSnapshots', {}) - self.assertEqual('pending', resp['snapshotSet'][0]['status']) - - fake_snapshot.status = 'creating' - resp = self.execute('DescribeSnapshots', {}) - self.assertEqual('pending', resp['snapshotSet'][0]['status']) - - fake_snapshot.status = 'available' - resp = self.execute('DescribeSnapshots', {}) - self.assertEqual('completed', resp['snapshotSet'][0]['status']) - - fake_snapshot.status = 'active' - resp = self.execute('DescribeSnapshots', {}) - self.assertEqual('completed', resp['snapshotSet'][0]['status']) - - fake_snapshot.status = 'deleting' - resp = self.execute('DescribeSnapshots', {}) - self.assertEqual('pending', resp['snapshotSet'][0]['status']) - - fake_snapshot.status = 'error' - resp = self.execute('DescribeSnapshots', {}) - self.assertEqual('error', resp['snapshotSet'][0]['status']) - - fake_snapshot.status = 'banana' - resp = self.execute('DescribeSnapshots', {}) - self.assertEqual('banana', resp['snapshotSet'][0]['status']) diff --git a/ec2api/tests/unit/test_subnet.py b/ec2api/tests/unit/test_subnet.py deleted file mode 100644 index e34900fc..00000000 --- a/ec2api/tests/unit/test_subnet.py +++ /dev/null @@ -1,318 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from neutronclient.common import exceptions as neutron_exception -from unittest import mock - -from ec2api.api import common -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class SubnetTestCase(base.ApiTestCase): - - def setUp(self): - super(SubnetTestCase, self).setUp() - self.vpn_gateway_api = self.mock('ec2api.api.subnet.vpn_gateway_api') - - def test_create_subnet(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_SUBNET_1)) - self.neutron.create_network.side_effect = ( - tools.get_neutron_create('network', fakes.ID_OS_NETWORK_1, - {'status': 'available'})) - self.neutron.create_subnet.side_effect = ( - tools.get_neutron_create('subnet', fakes.ID_OS_SUBNET_1)) - subnet_1 = tools.purge_dict(fakes.DB_SUBNET_1, ('os_vpnservice_id',)) - - def check_response(resp): - self.assertThat(fakes.EC2_SUBNET_1, matchers.DictMatches( - resp['subnet'])) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'subnet', - tools.purge_dict(subnet_1, ('id',))) - self.neutron.create_network.assert_called_once_with( - {'network': {'name': 'subnet-0'}}) - self.neutron.update_network.assert_called_once_with( - fakes.ID_OS_NETWORK_1, - {'network': {'name': fakes.ID_EC2_SUBNET_1}}) - self.neutron.create_subnet.assert_called_once_with( - {'subnet': tools.purge_dict(fakes.OS_SUBNET_1, - ('id', 'name', 'gateway_ip'))}) - self.neutron.update_subnet.assert_called_once_with( - 
fakes.ID_OS_SUBNET_1, - {'subnet': {'name': fakes.ID_EC2_SUBNET_1, - 'gateway_ip': None}}) - self.neutron.add_interface_router.assert_called_once_with( - fakes.ID_OS_ROUTER_1, - {'subnet_id': fakes.ID_OS_SUBNET_1}) - self.vpn_gateway_api._start_vpn_in_subnet.assert_called_once_with( - mock.ANY, self.neutron, mock.ANY, subnet_1, - fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1) - self.assertIsInstance( - self.vpn_gateway_api._start_vpn_in_subnet.call_args[0][2], - common.OnCrashCleaner) - - resp = self.execute('CreateSubnet', {'VpcId': fakes.ID_EC2_VPC_1, - 'CidrBlock': fakes.CIDR_SUBNET_1}) - check_response(resp) - - self.neutron.reset_mock() - self.db_api.reset_mock() - self.vpn_gateway_api.reset_mock() - - resp = self.execute('CreateSubnet', {'VpcId': fakes.ID_EC2_VPC_1, - 'CidrBlock': fakes.CIDR_SUBNET_1, - 'AvailabilityZone': 'nova'}) - check_response(resp) - - def test_create_subnet_invalid_parameters(self): - def do_check(args, error_code): - self.neutron.reset_mock() - self.db_api.reset_mock() - self.assert_execution_error(error_code, 'CreateSubnet', args) - self.assertEqual(0, self.neutron.create_network.call_count) - self.assertEqual(0, self.neutron.create_subnet.call_count) - self.assertEqual(0, self.neutron.add_interface_router.call_count) - - self.set_mock_db_items() - do_check({'VpcId': fakes.ID_EC2_VPC_1, - 'CidrBlock': fakes.CIDR_SUBNET_1}, - 'InvalidVpcID.NotFound') - self.db_api.get_item_by_id.assert_called_once_with(mock.ANY, - fakes.ID_EC2_VPC_1) - - self.set_mock_db_items(fakes.DB_VPC_1) - do_check({'VpcId': fakes.ID_EC2_VPC_1, - 'CidrBlock': 'invalid_cidr'}, - 'InvalidParameterValue') - self.assertEqual(0, self.db_api.get_item_by_id.call_count) - - do_check({'VpcId': fakes.ID_EC2_VPC_1, - 'CidrBlock': '10.10.0.0/30'}, - 'InvalidSubnet.Range') - self.assertEqual(0, self.db_api.get_item_by_id.call_count) - - do_check({'VpcId': fakes.ID_EC2_VPC_1, - 'CidrBlock': '10.20.0.0/24'}, - 'InvalidSubnet.Range') - 
self.db_api.get_item_by_id.assert_called_once_with(mock.ANY, - fakes.ID_EC2_VPC_1) - - def test_create_subnet_overlapped(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1) - self.neutron.create_network.side_effect = ( - tools.get_neutron_create('network', fakes.ID_OS_NETWORK_1, - {'status': 'available'})) - self.neutron.create_subnet.side_effect = ( - tools.get_neutron_create('subnet', fakes.ID_OS_SUBNET_1)) - self.neutron.add_interface_router.side_effect = ( - neutron_exception.BadRequest()) - - self.assert_execution_error('InvalidSubnet.Conflict', 'CreateSubnet', - {'VpcId': fakes.ID_EC2_VPC_1, - 'CidrBlock': fakes.CIDR_SUBNET_1}) - - def test_create_subnet_overlimit(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1) - self.neutron.create_network.side_effect = ( - tools.get_neutron_create('network', fakes.ID_OS_NETWORK_1, - {'status': 'available'})) - self.neutron.create_subnet.side_effect = ( - tools.get_neutron_create('subnet', fakes.ID_OS_SUBNET_1)) - - def test_overlimit(func): - self.neutron.reset_mock() - saved_side_effect = func.side_effect - func.side_effect = neutron_exception.OverQuotaClient - - self.assert_execution_error('SubnetLimitExceeded', 'CreateSubnet', - {'VpcId': fakes.ID_EC2_VPC_1, - 'CidrBlock': fakes.CIDR_SUBNET_1}) - func.side_effect = saved_side_effect - - test_overlimit(self.neutron.create_network) - test_overlimit(self.neutron.create_subnet) - - @tools.screen_unexpected_exception_logs - def test_create_subnet_rollback(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_SUBNET_1)) - self.neutron.create_network.side_effect = ( - tools.get_neutron_create('network', fakes.ID_OS_NETWORK_1, - {'status': 'available'})) - self.neutron.create_subnet.side_effect = ( - tools.get_neutron_create('subnet', fakes.ID_OS_SUBNET_1)) - self.neutron.update_network.side_effect = Exception() - - 
self.assert_execution_error(self.ANY_EXECUTE_ERROR, 'CreateSubnet', - {'VpcId': fakes.ID_EC2_VPC_1, - 'CidrBlock': fakes.CIDR_SUBNET_1}) - - self.neutron.assert_has_calls([ - mock.call.remove_interface_router( - fakes.ID_OS_ROUTER_1, {'subnet_id': fakes.ID_OS_SUBNET_1}), - mock.call.delete_subnet(fakes.ID_OS_SUBNET_1), - mock.call.delete_network(fakes.ID_OS_NETWORK_1)]) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, fakes.ID_EC2_SUBNET_1) - - def test_delete_subnet(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_SUBNET_1) - self.neutron.show_subnet.return_value = ( - {'subnet': fakes.OS_SUBNET_1}) - - resp = self.execute('DeleteSubnet', - {'SubnetId': fakes.ID_EC2_SUBNET_1}) - - self.assertEqual(True, resp['return']) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, - fakes.ID_EC2_SUBNET_1) - self.neutron.remove_interface_router.assert_called_once_with( - fakes.ID_OS_ROUTER_1, - {'subnet_id': fakes.ID_OS_SUBNET_1}) - self.neutron.delete_network.assert_called_once_with( - fakes.ID_OS_NETWORK_1) - self.assertTrue( - self.neutron.mock_calls.index( - mock.call.delete_network(fakes.ID_OS_NETWORK_1)) > - self.neutron.mock_calls.index( - mock.call.remove_interface_router( - fakes.ID_OS_ROUTER_1, - {'subnet_id': fakes.ID_OS_SUBNET_1}))) - self.vpn_gateway_api._stop_vpn_in_subnet.assert_called_once_with( - mock.ANY, self.neutron, mock.ANY, fakes.DB_SUBNET_1) - self.assertIsInstance( - self.vpn_gateway_api._stop_vpn_in_subnet.call_args[0][2], - common.OnCrashCleaner) - - def test_delete_subnet_inconsistent_os(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_SUBNET_1) - self.neutron.remove_interface_router.side_effect = ( - neutron_exception.NotFound()) - self.neutron.show_subnet.return_value = ( - {'subnet': fakes.OS_SUBNET_1}) - self.neutron.delete_network.side_effect = ( - neutron_exception.NetworkInUseClient()) - - resp = self.execute('DeleteSubnet', - {'SubnetId': fakes.ID_EC2_SUBNET_1}) - self.assertEqual(True, 
resp['return']) - - self.neutron.show_subnet.side_effect = neutron_exception.NotFound() - - resp = self.execute('DeleteSubnet', - {'SubnetId': fakes.ID_EC2_SUBNET_1}) - self.assertEqual(True, resp['return']) - - def test_delete_subnet_invalid_parameters(self): - self.set_mock_db_items() - self.neutron.show_subnet.return_value = fakes.OS_SUBNET_1 - self.neutron.show_network.return_value = fakes.OS_NETWORK_1 - - self.assert_execution_error('InvalidSubnetID.NotFound', 'DeleteSubnet', - {'SubnetId': fakes.ID_EC2_SUBNET_1}) - self.assertEqual(0, self.neutron.delete_network.call_count) - self.assertEqual(0, self.neutron.delete_subnet.call_count) - self.assertEqual(0, self.neutron.remove_interface_router.call_count) - - @mock.patch('ec2api.api.network_interface.describe_network_interfaces') - def test_delete_subnet_not_empty(self, describe_network_interfaces): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_SUBNET_1) - describe_network_interfaces.return_value = ( - {'networkInterfaceSet': [fakes.EC2_NETWORK_INTERFACE_1]}) - self.assert_execution_error('DependencyViolation', 'DeleteSubnet', - {'SubnetId': fakes.ID_EC2_SUBNET_1}) - - @tools.screen_unexpected_exception_logs - def test_delete_subnet_rollback(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_SUBNET_1) - self.neutron.show_subnet.side_effect = Exception() - - self.assert_execution_error(self.ANY_EXECUTE_ERROR, 'DeleteSubnet', - {'SubnetId': fakes.ID_EC2_SUBNET_1}) - - self.db_api.restore_item.assert_called_once_with( - mock.ANY, 'subnet', fakes.DB_SUBNET_1) - self.neutron.add_interface_router.assert_called_once_with( - fakes.ID_OS_ROUTER_1, {'subnet_id': fakes.ID_OS_SUBNET_1}) - - def test_describe_subnets(self): - self.set_mock_db_items(fakes.DB_SUBNET_1, fakes.DB_SUBNET_2) - self.neutron.list_subnets.return_value = ( - {'subnets': [fakes.OS_SUBNET_1, fakes.OS_SUBNET_2]}) - self.neutron.list_networks.return_value = ( - {'networks': [fakes.OS_NETWORK_1, fakes.OS_NETWORK_2]}) - - resp = 
self.execute('DescribeSubnets', {}) - self.assertThat(resp['subnetSet'], - matchers.ListMatches([fakes.EC2_SUBNET_1, - fakes.EC2_SUBNET_2])) - - self.db_api.get_items_by_ids = tools.CopyingMock( - return_value=[fakes.DB_SUBNET_2]) - resp = self.execute('DescribeSubnets', - {'SubnetId.1': fakes.ID_EC2_SUBNET_2}) - self.assertThat(resp['subnetSet'], - matchers.ListMatches([fakes.EC2_SUBNET_2])) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_SUBNET_2])) - - self.check_filtering( - 'DescribeSubnets', 'subnetSet', - [ - # TODO(ft): declare a constant for the count in fakes - ('available-ip-address-count', 253), - ('cidr', fakes.CIDR_SUBNET_2), - ('cidrBlock', fakes.CIDR_SUBNET_2), - ('cidr-block', fakes.CIDR_SUBNET_2), - ('subnet-id', fakes.ID_EC2_SUBNET_2), - ('state', 'available'), - ('vpc-id', fakes.ID_EC2_VPC_1)]) - self.check_tag_support( - 'DescribeSubnets', 'subnetSet', - fakes.ID_EC2_SUBNET_2, 'subnetId') - - def test_describe_subnets_not_consistent_os_subnet(self): - self.set_mock_db_items(fakes.DB_SUBNET_1, fakes.DB_SUBNET_2) - self.neutron.list_subnets.return_value = ( - {'subnets': [fakes.OS_SUBNET_2]}) - self.neutron.list_networks.return_value = ( - {'networks': [fakes.OS_NETWORK_1]}) - - resp = self.execute('DescribeSubnets', {}) - self.assertEqual([], resp['subnetSet']) - - @mock.patch('ec2api.api.ec2utils.check_and_create_default_vpc') - def test_describe_subnets_no_default_vpc(self, check_and_create): - self.configure(disable_ec2_classic=True) - - def mock_check_and_create(context): - self.set_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_SUBNET_DEFAULT) - self.neutron.list_subnets.return_value = ( - {'subnets': [fakes.OS_SUBNET_DEFAULT]}) - self.neutron.list_networks.return_value = ( - {'networks': [fakes.OS_NETWORK_DEFAULT]}) - check_and_create.side_effect = mock_check_and_create - - resp = self.execute('DescribeSubnets', {}) - self.assertEqual(resp['subnetSet'], [fakes.EC2_SUBNET_DEFAULT]) - - 
check_and_create.assert_called_once_with(mock.ANY) diff --git a/ec2api/tests/unit/test_tag.py b/ec2api/tests/unit/test_tag.py deleted file mode 100644 index 91d1fa11..00000000 --- a/ec2api/tests/unit/test_tag.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from ec2api.api import ec2utils -from ec2api.api import tag as tag_api -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers - - -class TagTestCase(base.ApiTestCase): - - def test_create_tags(self): - self.db_api.get_item_by_id.return_value = {'id': 'fake'} - - # NOTE(ft): check create several tags for several resources - resp = self.execute('CreateTags', - {'ResourceId.1': fakes.ID_EC2_VPC_1, - 'ResourceId.2': fakes.ID_EC2_SUBNET_1, - 'Tag.1.Key': 'private', - 'Tag.1.Value': '', - 'Tag.2.Key': 'admin', - 'Tag.2.Value': 'John Smith'}) - self.assertEqual({'return': True}, resp) - self.assertEqual(1, self.db_api.add_tags.call_count) - self.assertEqual(2, len(self.db_api.add_tags.call_args)) - self.assertThat(self.db_api.add_tags.call_args[0][1], - matchers.ListMatches( - [{'item_id': fakes.ID_EC2_VPC_1, - 'key': 'private', - 'value': ''}, - {'item_id': fakes.ID_EC2_SUBNET_1, - 'key': 'private', - 'value': ''}, - {'item_id': fakes.ID_EC2_VPC_1, - 'key': 'admin', - 'value': 'John Smith'}, - {'item_id': fakes.ID_EC2_SUBNET_1, - 'key': 'admin', - 
'value': 'John Smith'}], - orderless_lists=True)) - - # NOTE(ft): check a tag can be created for all valid resource types - resource_ids = [fakes.random_ec2_id(r_t) - for r_t in ['dopt', 'ami', 'aki', 'ari', 'cgw', 'i', - 'igw', 'eni', 'rtb', 'snap', 'subnet', - 'sg', 'vgw', 'vol', 'vpc', 'vpn']] - self.assertEqual(len(resource_ids), len(tag_api.RESOURCE_TYPES)) - - params = {'ResourceId.%s' % num: r_id - for num, r_id in enumerate(resource_ids)} - params.update({'Tag.1.Key': 'tag', - 'Tag.1.Value': 'value'}) - resp = self.execute('CreateTags', params) - self.assertEqual({'return': True}, resp) - - # NOTE(ft): check create a tag for non-existing images - self.db_api.get_item_by_id.return_value = None - resp = self.execute('CreateTags', - {'ResourceId.1': fakes.ID_EC2_IMAGE_1, - 'ResourceId.2': fakes.ID_EC2_IMAGE_AKI_1, - 'ResourceId.3': fakes.ID_EC2_IMAGE_ARI_1, - 'Tag.1.Key': 'Oracle RAC node', - 'Tag.1.Value': ''}) - self.assertEqual({'return': True}, resp) - - def test_create_tags_invalid_parameters(self): - # NOTE(ft): check tag validity checks - self.assert_execution_error('InvalidParameterValue', 'CreateTags', - {'ResourceId.1': fakes.ID_EC2_VPC_1, - 'Tag.1.Value': ''}) - - self.assert_execution_error('InvalidParameterValue', 'CreateTags', - {'ResourceId.1': fakes.ID_EC2_VPC_1, - 'Tag.1.Key': ''}) - - self.assert_execution_error('InvalidParameterValue', 'CreateTags', - {'ResourceId.1': fakes.ID_EC2_VPC_1, - 'Tag.1.Key': 'a' * 128}) - - self.assert_execution_error('InvalidParameterValue', 'CreateTags', - {'ResourceId.1': fakes.ID_EC2_VPC_1, - 'Tag.1.Key': 'fake-key', - 'Tag.1.Value': 'a' * 256}) - - # NOTE(ft): check resource type check - self.assert_execution_error( - 'InvalidID', 'CreateTags', - {'ResourceId.1': fakes.random_ec2_id('fake'), - 'Tag.1.Key': 'fake-key', - 'Tag.1.Value': 'fake-value'}) - - # NOTE(ft): check resource existence check - self.db_api.get_item_by_id.return_value = None - for r_id in tag_api.RESOURCE_TYPES: - if r_id in ('ami', 'ari', 
'aki'): - continue - exc_class = ec2utils.NOT_FOUND_EXCEPTION_MAP[r_id] - try: - error_code = exc_class.ec2_code - except AttributeError: - error_code = exc_class.__name__ - self.assert_execution_error( - error_code, 'CreateTags', - {'ResourceId.1': fakes.random_ec2_id(r_id), - 'Tag.1.Key': 'fake-key', - 'Tag.1.Value': 'fake-value'}) - - def test_delete_tag(self): - resp = self.execute('DeleteTags', - {'ResourceId.1': fakes.ID_EC2_VPC_1, - 'ResourceId.2': fakes.ID_EC2_SUBNET_1, - 'Tag.1.Key': 'key1', - 'Tag.2.Value': 'value2', - 'Tag.3.Key': 'key3', - 'Tag.3.Value': 'value3'}) - self.assertEqual({'return': True}, resp) - self.db_api.delete_tags.assert_called_once_with( - mock.ANY, [fakes.ID_EC2_VPC_1, fakes.ID_EC2_SUBNET_1], - [{'key': 'key1'}, - {'value': 'value2'}, - {'key': 'key3', - 'value': 'value3'}]) - - resp = self.execute('DeleteTags', - {'ResourceId.1': fakes.ID_EC2_VPC_1}) - self.assertEqual({'return': True}, resp) - self.db_api.delete_tags.assert_called_with( - mock.ANY, [fakes.ID_EC2_VPC_1], None) - - def test_describe_tags(self): - self.db_api.get_tags.return_value = [{'item_id': fakes.ID_EC2_VPC_1, - 'key': 'key1', - 'value': ''}, - {'item_id': fakes.ID_EC2_VPC_2, - 'key': 'key2', - 'value': 'value2'}, - {'item_id': fakes.ID_EC2_VPC_2, - 'key': 'key1', - 'value': 'value3'} - ] - resp = self.execute('DescribeTags', {}) - self.assertThat(resp, - matchers.DictMatches( - {'tagSet': [{'resourceType': 'vpc', - 'resourceId': fakes.ID_EC2_VPC_1, - 'key': 'key1', - 'value': None}, - {'resourceType': 'vpc', - 'resourceId': fakes.ID_EC2_VPC_2, - 'key': 'key2', - 'value': 'value2'}, - {'resourceType': 'vpc', - 'resourceId': fakes.ID_EC2_VPC_2, - 'key': 'key1', - 'value': 'value3'} - ]}, - orderless_lists=True), - verbose=True) - - self.check_filtering( - 'DescribeTags', 'tagSet', - [('resource-type', 'vpc'), - ('resource-id', fakes.ID_EC2_VPC_1), - ('key', 'key1'), - ('value', 'value2')]) - - # NOTE(ft): check all resource types are displayed correctly - for 
r_id, r_type in [('dopt', 'dhcp-options'), - ('ami', 'image'), - ('aki', 'image'), - ('ari', 'image'), - ('cgw', 'customer-gateway'), - ('i', 'instance'), - ('igw', 'internet-gateway'), - ('eni', 'network-interface'), - ('rtb', 'route-table'), - ('snap', 'snapshot'), - ('subnet', 'subnet'), - ('sg', 'security-group'), - ('vgw', 'vpn-gateway'), - ('vol', 'volume'), - ('vpc', 'vpc'), - ('vpn', 'vpn-connection')]: - item_id = fakes.random_ec2_id(r_id) - self.db_api.get_tags.return_value = [{'item_id': item_id, - 'key': 'fake-key', - 'value': 'fake-value'}] - resp = self.execute('DescribeTags', {}) - self.assertEqual({'tagSet': [{'resourceType': r_type, - 'resourceId': item_id, - 'key': 'fake-key', - 'value': 'fake-value'}]}, - resp) diff --git a/ec2api/tests/unit/test_tools.py b/ec2api/tests/unit/test_tools.py deleted file mode 100644 index ebefe144..00000000 --- a/ec2api/tests/unit/test_tools.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import fixtures -from oslo_log import log as logging -from oslotest import base as test_base -import testtools - -from ec2api import exception -from ec2api.tests.unit import base -from ec2api.tests.unit import tools - - -class TestToolsTestCase(testtools.TestCase): - - def test_update_dict(self): - d1 = {'a': 1, 'b': 2} - d2 = {'b': 22, 'c': 33} - res = tools.update_dict(d1, {}) - self.assertEqual({'a': 1, 'b': 2}, res) - res = tools.update_dict(d1, d2) - self.assertEqual({'a': 1, 'b': 22, 'c': 33}, res) - self.assertEqual({'a': 1, 'b': 2}, d1) - - def test_purge_dict(self): - d1 = {'a': 1, 'b': 2, 'c': 3} - res = tools.purge_dict(d1, ()) - self.assertEqual({'a': 1, 'b': 2, 'c': 3}, res) - res = tools.purge_dict(d1, ('b', 'c')) - self.assertEqual({'a': 1}, res) - self.assertEqual({'a': 1, 'b': 2, 'c': 3}, d1) - - def test_patch_dict(self): - d1 = {'a': 1, 'b': 2, 'c': 3} - d2 = {'c': 33, 'd': 44, 'e': 55} - res = tools.patch_dict(d1, d2, ('b', 'e')) - self.assertEqual({'a': 1, 'c': 33, 'd': 44}, res) - self.assertEqual({'a': 1, 'b': 2, 'c': 3}, d1) - - -class TestBaseTestCase(base.ApiTestCase): - - def test_validate_exception_format_is_enabled_for_tests(self): - with tools.ScreeningLogger(): - self.assertRaises(KeyError, exception.InvalidVpcRange, - fake='value') - with tools.ScreeningLogger(): - self.assertRaises(TypeError, exception.InvalidID, {'id': 'value'}) - - -class LoggingTestCase(test_base.BaseTestCase): - - def test_hide_logs(self): - with fixtures.FakeLogger() as logger: - with tools.ScreeningLogger(): - LOG = logging.getLogger('ec2api.api') - LOG.critical('critical message') - LOG.error('error message') - LOG.warning('warning message') - self.assertEqual(0, len(logger.output)) - - def test_screen_logs(self): - with fixtures.FakeLogger() as logger: - with tools.ScreeningLogger(log_name='ec2api.api'): - LOG1 = logging.getLogger('ec2api.api') - LOG1.error('error message') - LOG2 = logging.getLogger('ec2api.api.vpc') - LOG2.warning('warning message') - 
self.assertIn('warning message', logger.output) - self.assertNotIn('error message', logger.output) - - def test_show_logs_on_unhandled_exception(self): - with fixtures.FakeLogger() as logger: - try: - with tools.ScreeningLogger(): - LOG = logging.getLogger('ec2api.api') - LOG.error('error message') - raise Exception() - except Exception: - pass - self.assertIn('error message', logger.output) diff --git a/ec2api/tests/unit/test_volume.py b/ec2api/tests/unit/test_volume.py deleted file mode 100644 index 6a345494..00000000 --- a/ec2api/tests/unit/test_volume.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from unittest import mock - -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class VolumeTestCase(base.ApiTestCase): - - def test_describe_volumes(self): - self.cinder.volumes.list.return_value = [ - fakes.OSVolume(fakes.OS_VOLUME_1), - fakes.OSVolume(fakes.OS_VOLUME_2), - fakes.OSVolume(fakes.OS_VOLUME_3)] - self.nova_admin.servers.list.return_value = [ - fakes.OSInstance_full(fakes.OS_INSTANCE_1), - fakes.OSInstance_full(fakes.OS_INSTANCE_2)] - - self.set_mock_db_items(fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, - fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2, - fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_VOLUME_3)) - - resp = self.execute('DescribeVolumes', {}) - self.assertThat(resp, matchers.DictMatches( - {'volumeSet': [fakes.EC2_VOLUME_1, fakes.EC2_VOLUME_2, - fakes.EC2_VOLUME_3]}, - orderless_lists=True)) - - self.db_api.get_items.assert_any_call(mock.ANY, 'vol') - - self.db_api.get_items_by_ids = tools.CopyingMock( - return_value=[fakes.DB_VOLUME_1]) - resp = self.execute('DescribeVolumes', - {'VolumeId.1': fakes.ID_EC2_VOLUME_1}) - self.assertThat(resp, matchers.DictMatches( - {'volumeSet': [fakes.EC2_VOLUME_1]}, - orderless_lists=True)) - self.db_api.get_items_by_ids.assert_any_call( - mock.ANY, set([fakes.ID_EC2_VOLUME_1])) - - self.check_filtering( - 'DescribeVolumes', 'volumeSet', - [('availability-zone', fakes.NAME_AVAILABILITY_ZONE), - ('create-time', fakes.TIME_CREATE_VOLUME_2), - ('encrypted', False), - # TODO(ft): declare a constant for the volume size in fakes - ('size', 1), - ('snapshot-id', fakes.ID_EC2_SNAPSHOT_1), - ('status', 'available'), - ('volume-id', fakes.ID_EC2_VOLUME_1), - # TODO(ft): support filtering by none/empty value - # ('volume-type', ''), - ('attachment.delete-on-termination', False), - ('attachment.device', fakes.ROOT_DEVICE_NAME_INSTANCE_2), - 
('attachment.instance-id', fakes.ID_EC2_INSTANCE_2), - ('attachment.status', 'attached')]) - self.check_tag_support( - 'DescribeVolumes', 'volumeSet', - fakes.ID_EC2_VOLUME_1, 'volumeId') - - def test_describe_volumes_auto_remove(self): - self.cinder.volumes.list.return_value = [] - self.nova.servers.list.return_value = [] - self.set_mock_db_items(fakes.DB_VOLUME_1, fakes.DB_VOLUME_2) - resp = self.execute('DescribeVolumes', {}) - self.assertThat(resp, matchers.DictMatches( - {'volumeSet': []})) - - self.db_api.delete_item.assert_any_call( - mock.ANY, fakes.ID_EC2_VOLUME_1) - self.db_api.delete_item.assert_any_call( - mock.ANY, fakes.ID_EC2_VOLUME_2) - - def test_describe_volumes_invalid_parameters(self): - self.cinder.volumes.list.return_value = [ - fakes.OSVolume(fakes.OS_VOLUME_1), - fakes.OSVolume(fakes.OS_VOLUME_2)] - self.nova.servers.list.return_value = [ - fakes.OSInstance_full(fakes.OS_INSTANCE_2)] - - self.assert_execution_error( - 'InvalidVolume.NotFound', 'DescribeVolumes', - {'VolumeId.1': fakes.random_ec2_id('vol')}) - - self.cinder.volumes.list.side_effect = lambda: [] - - self.assert_execution_error( - 'InvalidVolume.NotFound', 'DescribeVolumes', - {'VolumeId.1': fakes.ID_EC2_VOLUME_1}) - - def test_create_volume(self): - self.cinder.volumes.create.return_value = ( - fakes.OSVolume(fakes.OS_VOLUME_1)) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_VOLUME_1)) - - resp = self.execute( - 'CreateVolume', - {'AvailabilityZone': fakes.NAME_AVAILABILITY_ZONE}) - self.assertThat(fakes.EC2_VOLUME_1, matchers.DictMatches(resp)) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'vol', - tools.purge_dict(fakes.DB_VOLUME_1, ('id',))) - - self.cinder.volumes.create.assert_called_once_with( - None, snapshot_id=None, volume_type=None, - availability_zone=fakes.NAME_AVAILABILITY_ZONE) - - def test_create_volume_from_snapshot(self): - self.cinder.volumes.create.return_value = ( - fakes.OSVolume(fakes.OS_VOLUME_3)) - 
self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_VOLUME_3)) - self.set_mock_db_items(fakes.DB_SNAPSHOT_1) - - resp = self.execute( - 'CreateVolume', - {'AvailabilityZone': fakes.NAME_AVAILABILITY_ZONE, - 'SnapshotId': fakes.ID_EC2_SNAPSHOT_1}) - self.assertThat(fakes.EC2_VOLUME_3, matchers.DictMatches(resp)) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'vol', - tools.purge_dict(fakes.DB_VOLUME_3, ('id',))) - - self.cinder.volumes.create.assert_called_once_with( - None, snapshot_id=fakes.ID_OS_SNAPSHOT_1, volume_type=None, - availability_zone=fakes.NAME_AVAILABILITY_ZONE) - - def test_delete_volume(self): - self.set_mock_db_items(fakes.DB_VOLUME_1) - resp = self.execute('DeleteVolume', - {'VolumeId': fakes.ID_EC2_VOLUME_1}) - self.assertEqual({'return': True}, resp) - self.cinder.volumes.delete.assert_called_once_with( - fakes.ID_OS_VOLUME_1) - self.assertFalse(self.db_api.delete_item.called) - - def test_format_volume_maps_status(self): - fake_volume = fakes.OSVolume(fakes.OS_VOLUME_1) - self.cinder.volumes.list.return_value = [fake_volume] - self.nova.servers.list.return_value = [] - self.set_mock_db_items(fakes.DB_VOLUME_1) - - fake_volume.status = 'creating' - resp = self.execute('DescribeVolumes', {}) - self.assertEqual('creating', resp['volumeSet'][0]['status']) - - fake_volume.status = 'attaching' - resp = self.execute('DescribeVolumes', {}) - self.assertEqual('in-use', resp['volumeSet'][0]['status']) - - fake_volume.status = 'detaching' - resp = self.execute('DescribeVolumes', {}) - self.assertEqual('in-use', resp['volumeSet'][0]['status']) - - fake_volume.status = 'banana' - resp = self.execute('DescribeVolumes', {}) - self.assertEqual('banana', resp['volumeSet'][0]['status']) - - def test_attach_volume(self): - self.set_mock_db_items(fakes.DB_INSTANCE_2, fakes.DB_VOLUME_3) - os_volume = fakes.OSVolume(fakes.OS_VOLUME_3) - os_volume.attachments.append({'device': '/dev/vdf', - 'server_id': fakes.ID_OS_INSTANCE_2}) 
- os_volume.status = 'attaching' - self.cinder.volumes.get.return_value = os_volume - - resp = self.execute('AttachVolume', - {'VolumeId': fakes.ID_EC2_VOLUME_3, - 'InstanceId': fakes.ID_EC2_INSTANCE_2, - 'Device': '/dev/vdf'}) - self.assertEqual({'deleteOnTermination': False, - 'device': '/dev/vdf', - 'instanceId': fakes.ID_EC2_INSTANCE_2, - 'status': 'attaching', - 'volumeId': fakes.ID_EC2_VOLUME_3}, - resp) - self.nova.volumes.create_server_volume.assert_called_once_with( - fakes.ID_OS_INSTANCE_2, fakes.ID_OS_VOLUME_3, '/dev/vdf') - - @mock.patch.object(fakes.OSVolume, 'get', autospec=True) - def test_detach_volume(self, os_volume_get): - self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2, - fakes.DB_VOLUME_2) - os_volume = fakes.OSVolume(fakes.OS_VOLUME_2) - self.cinder.volumes.get.return_value = os_volume - os_volume_get.side_effect = ( - lambda vol: setattr(vol, 'status', 'detaching')) - - resp = self.execute('DetachVolume', - {'VolumeId': fakes.ID_EC2_VOLUME_2}) - self.assertEqual({'device': os_volume.attachments[0]['device'], - 'instanceId': fakes.ID_EC2_INSTANCE_2, - 'status': 'detaching', - 'volumeId': fakes.ID_EC2_VOLUME_2}, - resp) - self.nova.volumes.delete_server_volume.assert_called_once_with( - fakes.ID_OS_INSTANCE_2, fakes.ID_OS_VOLUME_2) - self.cinder.volumes.get.assert_called_once_with(fakes.ID_OS_VOLUME_2) - - def test_detach_volume_invalid_parameters(self): - self.set_mock_db_items(fakes.DB_VOLUME_1) - self.cinder.volumes.get.return_value = ( - fakes.OSVolume(fakes.OS_VOLUME_1)) - - self.assert_execution_error('IncorrectState', 'DetachVolume', - {'VolumeId': fakes.ID_EC2_VOLUME_1}) diff --git a/ec2api/tests/unit/test_vpc.py b/ec2api/tests/unit/test_vpc.py deleted file mode 100644 index a7756d60..00000000 --- a/ec2api/tests/unit/test_vpc.py +++ /dev/null @@ -1,395 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import copy -from unittest import mock - -import fixtures -from neutronclient.common import exceptions as neutron_exception - -from ec2api.api import vpc as vpc_api -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class VpcTestCase(base.ApiTestCase): - - @mock.patch('ec2api.api.vpc._create_vpc') - def test_create_vpc(self, create_vpc): - create_vpc.return_value = fakes.DB_VPC_1 - - def check_response(response): - self.assertIn('vpc', response) - vpc = resp['vpc'] - self.assertThat(fakes.EC2_VPC_1, matchers.DictMatches(vpc)) - create_vpc.assert_called_once_with(mock.ANY, fakes.CIDR_VPC_1) - - create_vpc.reset_mock() - - resp = self.execute('CreateVpc', {'CidrBlock': fakes.CIDR_VPC_1}) - check_response(resp) - - resp = self.execute('CreateVpc', {'CidrBlock': fakes.CIDR_VPC_1, - 'instanceTenancy': 'default'}) - check_response(resp) - - def test_create_vpc_invalid_cidr(self): - self.neutron.create_router.side_effect = ( - tools.get_neutron_create('router', fakes.ID_OS_ROUTER_1)) - self.db_api.add_item.side_effect = tools.get_db_api_add_item( - fakes.ID_EC2_VPC_1) - - def do_check(args, error_code): - self.assert_execution_error(error_code, 'CreateVpc', args) - self.assertEqual(0, self.neutron.create_router.call_count) - - self.neutron.reset_mock() - self.db_api.reset_mock() - - do_check({'CidrBlock': 'bad_cidr'}, 
'InvalidParameterValue') - do_check({'CidrBlock': '10.0.0.0/8'}, 'InvalidVpc.Range') - - def test_create_vpc_overlimit(self): - self.neutron.create_router.side_effect = ( - neutron_exception.OverQuotaClient) - self.db_api.add_item.side_effect = tools.get_db_api_add_item( - fakes.ID_EC2_VPC_1) - - self.assert_execution_error('VpcLimitExceeded', 'CreateVpc', - {'CidrBlock': fakes.CIDR_VPC_1}) - self.neutron.create_router.assert_called_with({'router': {}}) - self.assertEqual(0, self.db_api.add_item.call_count) - - def test_delete_vpc(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1, - fakes.DB_SECURITY_GROUP_1) - - resp = self.execute('DeleteVpc', {'VpcId': fakes.ID_EC2_VPC_1}) - - self.assertEqual(True, resp['return']) - self.neutron.delete_router.assert_called_once_with( - fakes.ID_OS_ROUTER_1) - self.db_api.delete_item.assert_any_call( - mock.ANY, - fakes.ID_EC2_VPC_1) - self.db_api.delete_item.assert_any_call( - mock.ANY, - fakes.ID_EC2_ROUTE_TABLE_1) - self.db_api.delete_item.assert_any_call( - mock.ANY, - fakes.ID_EC2_SECURITY_GROUP_1) - - def test_delete_vpc_not_found(self): - self.set_mock_db_items() - - self.assert_execution_error('InvalidVpcID.NotFound', 'DeleteVpc', - {'VpcId': fakes.ID_EC2_VPC_1}) - self.assertEqual(0, self.neutron.delete_router.call_count) - self.assertEqual(0, self.db_api.delete_item.call_count) - - def test_delete_vpc_dependency_violation(self): - def do_check(): - self.assert_execution_error('DependencyViolation', 'DeleteVpc', - {'VpcId': fakes.ID_EC2_VPC_1}) - self.assertEqual(0, self.neutron.delete_router.call_count) - self.assertEqual(0, self.db_api.delete_item.call_count) - - self.neutron.reset_mock() - self.db_api.reset_mock() - - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_IGW_1, fakes.DB_VPC_1, ) - do_check() - - self.neutron.list_security_groups.return_value = ( - 
{'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_2, - fakes.DB_VPC_1) - do_check() - - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_SECURITY_GROUP_2, fakes.DB_VPC_1) - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1), - fakes.OS_SECURITY_GROUP_2]}) - do_check() - - self.neutron.list_security_groups.return_value = ( - {'security_groups': [copy.deepcopy(fakes.OS_SECURITY_GROUP_1)]}) - self.set_mock_db_items(fakes.DB_SECURITY_GROUP_1, - fakes.DB_VPN_GATEWAY_1, fakes.DB_VPC_1, ) - do_check() - - def test_delete_vpc_not_conststent_os_vpc(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1) - - def check_response(resp): - self.assertEqual(True, resp['return']) - self.neutron.delete_router.assert_called_once_with( - fakes.ID_OS_ROUTER_1) - self.db_api.delete_item.assert_any_call( - mock.ANY, - fakes.ID_EC2_VPC_1) - self.db_api.delete_item.assert_any_call( - mock.ANY, - fakes.ID_EC2_ROUTE_TABLE_1) - - self.neutron.reset_mock() - self.db_api.reset_mock() - - self.neutron.delete_router.side_effect = neutron_exception.NotFound - resp = self.execute('DeleteVpc', {'VpcId': fakes.ID_EC2_VPC_1}) - check_response(resp) - - self.neutron.delete_router.side_effect = neutron_exception.Conflict - resp = self.execute('DeleteVpc', {'VpcId': fakes.ID_EC2_VPC_1}) - check_response(resp) - - @tools.screen_unexpected_exception_logs - def test_delete_vpc_rollback(self): - self.set_mock_db_items(fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1) - self.neutron.delete_router.side_effect = Exception() - - self.assert_execution_error(self.ANY_EXECUTE_ERROR, 'DeleteVpc', - {'VpcId': fakes.ID_EC2_VPC_1}) - - self.db_api.restore_item.assert_any_call( - mock.ANY, 'vpc', fakes.DB_VPC_1) - self.db_api.restore_item.assert_any_call( - mock.ANY, 'rtb', fakes.DB_ROUTE_TABLE_1) - - def 
test_describe_vpcs(self): - self.neutron.list_routers.return_value = ( - {'routers': [fakes.OS_ROUTER_DEFAULT, - fakes.OS_ROUTER_1, fakes.OS_ROUTER_2]}) - self.set_mock_db_items(fakes.DB_VPC_DEFAULT, - fakes.DB_VPC_1, fakes.DB_VPC_2) - - resp = self.execute('DescribeVpcs', {}) - self.assertThat(resp['vpcSet'], - matchers.ListMatches([fakes.EC2_VPC_DEFAULT, - fakes.EC2_VPC_1, - fakes.EC2_VPC_2])) - - resp = self.execute('DescribeVpcs', - {'VpcId.1': fakes.ID_EC2_VPC_1}) - self.assertThat(resp['vpcSet'], - matchers.ListMatches([fakes.EC2_VPC_1])) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_VPC_1])) - - self.check_filtering( - 'DescribeVpcs', 'vpcSet', - [('cidr', fakes.CIDR_VPC_1), - ('dhcp-options-id', 'default'), - ('is-default', False), - ('state', 'available'), - ('vpc-id', fakes.ID_EC2_VPC_1)]) - self.check_tag_support( - 'DescribeVpcs', 'vpcSet', - fakes.ID_EC2_VPC_1, 'vpcId') - - def test_describe_vpcs_no_router(self): - self.neutron.list_routers.return_value = {'routers': []} - self.set_mock_db_items(fakes.DB_VPC_DEFAULT, fakes.DB_VPC_1) - - resp = self.execute('DescribeVpcs', {}) - - self.assertThat(resp['vpcSet'], - matchers.ListMatches([fakes.EC2_VPC_DEFAULT, - fakes.EC2_VPC_1])) - - @mock.patch('ec2api.api.vpc._check_and_create_default_vpc') - def test_describe_vpcs_no_default_vpc(self, check_and_create): - def mock_check_and_create(context): - self.set_mock_db_items(fakes.DB_VPC_DEFAULT) - check_and_create.side_effect = mock_check_and_create - - resp = self.execute('DescribeVpcs', {}) - self.assertEqual(resp['vpcSet'], [fakes.EC2_VPC_DEFAULT]) - - check_and_create.assert_called_once_with(mock.ANY) - - def test_describe_vpcs_with_default_vpc(self): - self.set_mock_db_items(fakes.DB_VPC_DEFAULT) - - resp = self.execute('DescribeVpcs', {}) - self.assertEqual(resp['vpcSet'], [fakes.EC2_VPC_DEFAULT]) - - self.db_api.add_item.assert_not_called() - - -class VpcPrivateTestCase(base.BaseTestCase): - - def setUp(self): - 
super(VpcPrivateTestCase, self).setUp() - self.context = base.create_context() - self.nova, self.nova_admin = self.mock_nova() - self.neutron = self.mock_neutron() - self.db_api = self.mock_db() - - @mock.patch('ec2api.api.route_table.create_route') - @mock.patch('ec2api.api.subnet.create_subnet') - @mock.patch('ec2api.api.internet_gateway.attach_internet_gateway') - @mock.patch('ec2api.api.internet_gateway.create_internet_gateway') - @mock.patch('ec2api.api.security_group._create_default_security_group') - @mock.patch('ec2api.api.route_table._create_route_table') - def test_create_vpc(self, create_route_table, - create_default_security_group, - create_internet_gateway, attach_internet_gateway, - create_subnet, create_route): - def _prepare_and_check(vpc=None, ec2_vpc=None, - route_table=None): - self.neutron.create_router.side_effect = ( - tools.get_neutron_create('router', vpc['os_id'])) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item({'vpc': vpc['id']})) - self.db_api.set_mock_items(vpc) - create_route_table.return_value = route_table - - resp = vpc_api._create_vpc(self.context, vpc['cidr_block'], - vpc['is_default']) - - # Check creation of vpc - self.neutron.create_router.assert_called_with({'router': {}}) - self.neutron.update_router.assert_called_once_with( - vpc['os_id'], - {'router': {'name': ec2_vpc['vpcId']}}) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'vpc', tools.purge_dict( - vpc, ('id', 'vpc_id', 'route_table_id'))) - self.db_api.update_item.assert_called_once_with( - mock.ANY, vpc) - - create_route_table.assert_called_once_with( - mock.ANY, vpc) - create_default_security_group.assert_called_once_with( - mock.ANY, vpc) - - _prepare_and_check(vpc=fakes.DB_VPC_1, ec2_vpc=fakes.EC2_VPC_1, - route_table=fakes.DB_ROUTE_TABLE_1) - - # checking that no default vpc related stuff is added - create_internet_gateway.assert_not_called() - attach_internet_gateway.assert_not_called() - create_subnet.assert_not_called() - 
create_route.assert_not_called() - - self.neutron.reset_mock() - self.db_api.reset_mock() - create_route_table.reset_mock() - create_default_security_group.reset_mock() - - # Creation of default vpc - create_route_table.return_value = fakes.DB_ROUTE_TABLE_DEFAULT - create_subnet.return_value = {'subnet': fakes.EC2_SUBNET_DEFAULT} - create_internet_gateway.return_value = {'internetGateway': - fakes.EC2_IGW_DEFAULT} - - _prepare_and_check(vpc=fakes.DB_VPC_DEFAULT, - ec2_vpc=fakes.EC2_VPC_DEFAULT, - route_table=fakes.DB_ROUTE_TABLE_DEFAULT) - - create_internet_gateway.assert_called_once_with(mock.ANY) - attach_internet_gateway.assert_called_once_with(mock.ANY, - fakes.ID_EC2_IGW_DEFAULT, - fakes.ID_EC2_VPC_DEFAULT) - create_subnet.assert_called_once_with(mock.ANY, - fakes.ID_EC2_VPC_DEFAULT, - fakes.CIDR_SUBNET_DEFAULT) - create_route.assert_called_once_with(mock.ANY, - fakes.ID_EC2_ROUTE_TABLE_DEFAULT, - '0.0.0.0/0', gateway_id=fakes.ID_EC2_IGW_DEFAULT) - - @mock.patch('ec2api.api.vpc._create_vpc') - def test_check_and_create_default_vpc(self, create_vpc): - self.configure(disable_ec2_classic=True) - vpc_api._check_and_create_default_vpc(self.context) - - create_vpc.assert_called_once_with(mock.ANY, fakes.CIDR_VPC_DEFAULT, - is_default=True) - - @tools.screen_logs('ec2api.api.vpc') - @mock.patch('ec2api.api.internet_gateway.detach_internet_gateway') - @mock.patch('ec2api.api.route_table.create_route') - @mock.patch('ec2api.api.subnet.create_subnet') - @mock.patch('ec2api.api.internet_gateway.attach_internet_gateway') - @mock.patch('ec2api.api.internet_gateway.create_internet_gateway') - @mock.patch('ec2api.api.security_group._create_default_security_group') - @mock.patch('ec2api.api.route_table._create_route_table') - def test_create_vpc_rollback(self, create_route_table, - create_default_security_group, - create_internet_gateway, - attach_internet_gateway, create_subnet, - create_route, detach_internet_gateway): - self.configure(disable_ec2_classic=True) - - 
self.neutron.create_router.side_effect = ( - tools.get_neutron_create('router', fakes.ID_OS_ROUTER_DEFAULT)) - - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item({'vpc': fakes.ID_EC2_VPC_DEFAULT})) - - DB_IGW_DEFAULT_DETACHED = ( - {'id': fakes.ID_EC2_IGW_DEFAULT, - 'os_id': None, - 'vpc_id': None}) - self.db_api.get_item_by_id.side_effect = ( - tools.get_db_api_get_item_by_id(fakes.DB_VPC_DEFAULT, - fakes.DB_SUBNET_DEFAULT, - fakes.DB_SECURITY_GROUP_DEFAULT, - DB_IGW_DEFAULT_DETACHED)) - create_route_table.return_value = fakes.DB_ROUTE_TABLE_DEFAULT - create_internet_gateway.return_value = {'internetGateway': - fakes.EC2_IGW_DEFAULT} - create_subnet.return_value = {'subnet': fakes.EC2_SUBNET_DEFAULT} - create_default_security_group.return_value = ( - fakes.ID_EC2_SECURITY_GROUP_DEFAULT) - - # exception during attaching internet gateway - create_route.side_effect = Exception() - - vpc_api._check_and_create_default_vpc(self.context) - - detach_internet_gateway.assert_any_call(mock.ANY, - fakes.ID_EC2_IGW_DEFAULT, - fakes.ID_EC2_VPC_DEFAULT) - self.db_api.delete_item.assert_any_call(mock.ANY, - fakes.ID_EC2_SUBNET_DEFAULT) - self.db_api.delete_item.assert_any_call(mock.ANY, - fakes.ID_EC2_IGW_DEFAULT) - self.neutron.delete_security_group.assert_any_call( - fakes.ID_OS_SECURITY_GROUP_DEFAULT) - self.db_api.delete_item.assert_any_call(mock.ANY, - fakes.ID_EC2_ROUTE_TABLE_DEFAULT) - self.db_api.delete_item.assert_any_call(mock.ANY, - fakes.ID_EC2_VPC_DEFAULT) - self.neutron.delete_router.assert_called_once_with( - fakes.ID_OS_ROUTER_DEFAULT) - - @mock.patch('ec2api.api.vpc._create_vpc') - def test_check_and_create_default_vpc_failed(self, create_vpc): - self.configure(disable_ec2_classic=True) - create_vpc.side_effect = Exception() - with fixtures.LoggerFixture( - format='[%(levelname)s] %(message)s') as log: - vpc_api._check_and_create_default_vpc(self.context) - self.assertTrue(log.output.startswith( - '[ERROR] Failed to create default vpc')) diff 
--git a/ec2api/tests/unit/test_vpn_connection.py b/ec2api/tests/unit/test_vpn_connection.py deleted file mode 100644 index 1363ce05..00000000 --- a/ec2api/tests/unit/test_vpn_connection.py +++ /dev/null @@ -1,832 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -from unittest import mock - -from neutronclient.common import exceptions as neutron_exception - -from ec2api.api import common -from ec2api.api import vpn_connection as vpn_connection_api -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class VpnConnectionTestCase(base.ApiTestCase): - - @mock.patch('ec2api.api.vpn_connection.describe_vpn_connections') - @mock.patch('ec2api.api.vpn_connection._reset_vpn_connections', - wraps=vpn_connection_api._reset_vpn_connections) - @mock.patch('random.choice') - def test_create_vpn_connection(self, random_choice, reset_vpn_connections, - describe_vpn_connections): - self.set_mock_db_items( - fakes.DB_VPN_GATEWAY_1, fakes.DB_VPN_GATEWAY_2, - fakes.DB_CUSTOMER_GATEWAY_1, fakes.DB_CUSTOMER_GATEWAY_2, - fakes.DB_VPC_1) - self.neutron.create_ikepolicy.side_effect = ( - tools.get_neutron_create('ikepolicy', fakes.ID_OS_IKEPOLICY_1)) - self.neutron.create_ipsecpolicy.side_effect = ( - tools.get_neutron_create('ipsecpolicy', fakes.ID_OS_IPSECPOLICY_1)) - self.db_api.add_item.side_effect = ( - 
tools.get_db_api_add_item(fakes.ID_EC2_VPN_CONNECTION_1)) - random_choice.side_effect = iter(fakes.PRE_SHARED_KEY_1) - describe_vpn_connections.return_value = { - 'vpnConnectionSet': [fakes.EC2_VPN_CONNECTION_1]} - - resp = self.execute( - 'CreateVpnConnection', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1, - 'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1, - 'Type': 'ipsec.1', - 'Options.StaticRoutesOnly': 'True'}) - self.assertThat( - resp, - matchers.DictMatches( - {'vpnConnection': fakes.EC2_VPN_CONNECTION_1})) - - self.neutron.create_ikepolicy.assert_called_once_with( - {'ikepolicy': tools.purge_dict(fakes.OS_IKEPOLICY_1, ('id',))}) - self.neutron.create_ipsecpolicy.assert_called_once_with( - {'ipsecpolicy': tools.purge_dict(fakes.OS_IPSECPOLICY_1, ('id',))}) - random_choice.assert_called_with(vpn_connection_api.SHARED_KEY_CHARS) - new_vpn_connection_1 = tools.update_dict( - fakes.DB_VPN_CONNECTION_1, {'cidrs': [], - 'os_ipsec_site_connections': {}}) - self.db_api.add_item.assert_called_once_with( - mock.ANY, 'vpn', - tools.purge_dict(new_vpn_connection_1, ('id', 'vpc_id', 'os_id'))) - self.neutron.update_ikepolicy.assert_called_once_with( - fakes.ID_OS_IKEPOLICY_1, - {'ikepolicy': {'name': fakes.ID_EC2_VPN_CONNECTION_1}}) - self.neutron.update_ipsecpolicy.assert_called_once_with( - fakes.ID_OS_IPSECPOLICY_1, - {'ipsecpolicy': {'name': fakes.ID_EC2_VPN_CONNECTION_1}}) - reset_vpn_connections.assert_called_once_with( - mock.ANY, self.neutron, mock.ANY, fakes.DB_VPN_GATEWAY_1, - vpn_connections=[new_vpn_connection_1]) - self.assertIsInstance(reset_vpn_connections.call_args[0][2], - common.OnCrashCleaner) - describe_vpn_connections.assert_called_once_with( - mock.ANY, vpn_connection_id=[fakes.ID_EC2_VPN_CONNECTION_1]) - - @mock.patch('ec2api.api.vpn_connection.describe_vpn_connections') - def test_create_vpn_connection_idempotent(self, describe_vpn_connections): - self.set_mock_db_items( - fakes.DB_VPN_GATEWAY_1, fakes.DB_CUSTOMER_GATEWAY_1, - 
fakes.DB_VPN_CONNECTION_1) - describe_vpn_connections.return_value = { - 'vpnConnectionSet': [fakes.EC2_VPN_CONNECTION_1]} - - resp = self.execute( - 'CreateVpnConnection', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1, - 'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1, - 'Type': 'ipsec.1', - 'Options.StaticRoutesOnly': 'True'}) - self.assertThat({'vpnConnection': fakes.EC2_VPN_CONNECTION_1}, - matchers.DictMatches(resp)) - self.assertFalse(self.neutron.create_ikepolicy.called) - self.assertFalse(self.neutron.create_ipsecpolicy.called) - self.assertFalse(self.db_api.add_item.called) - describe_vpn_connections.assert_called_once_with( - mock.ANY, vpn_connection_id=[fakes.ID_EC2_VPN_CONNECTION_1]) - - def test_create_vpn_connection_invalid_parameters(self): - self.assert_execution_error( - 'Unsupported', 'CreateVpnConnection', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1, - 'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1, - 'Type': 'ipsec.1', - 'Options.StaticRoutesOnly': 'False'}) - - self.assert_execution_error( - 'Unsupported', 'CreateVpnConnection', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1, - 'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1, - 'Type': 'ipsec.1'}) - - self.set_mock_db_items(fakes.DB_CUSTOMER_GATEWAY_1) - self.assert_execution_error( - 'InvalidVpnGatewayID.NotFound', 'CreateVpnConnection', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_2, - 'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1, - 'Type': 'ipsec.1', - 'Options.StaticRoutesOnly': 'True'}) - - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1) - self.assert_execution_error( - 'InvalidCustomerGatewayID.NotFound', 'CreateVpnConnection', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_2, - 'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1, - 'Type': 'ipsec.1', - 'Options.StaticRoutesOnly': 'True'}) - - self.set_mock_db_items( - fakes.DB_VPN_GATEWAY_2, fakes.DB_CUSTOMER_GATEWAY_1, - fakes.DB_VPN_CONNECTION_1) - self.assert_execution_error( - 
'InvalidCustomerGateway.DuplicateIpAddress', 'CreateVpnConnection', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_2, - 'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1, - 'Type': 'ipsec.1', - 'Options.StaticRoutesOnly': 'True'}) - - @tools.screen_unexpected_exception_logs - def test_create_vpn_connection_rollback(self): - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1, - fakes.DB_CUSTOMER_GATEWAY_1) - self.neutron.create_ikepolicy.side_effect = ( - tools.get_neutron_create('ikepolicy', fakes.ID_OS_IKEPOLICY_1)) - self.neutron.create_ipsecpolicy.side_effect = ( - tools.get_neutron_create('ipsecpolicy', fakes.ID_OS_IPSECPOLICY_1)) - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_VPN_CONNECTION_1)) - self.neutron.update_ikepolicy.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'CreateVpnConnection', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1, - 'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1, - 'Type': 'ipsec.1', - 'Options.StaticRoutesOnly': 'True'}) - - self.db_api.delete_item.assert_called_once_with( - mock.ANY, fakes.ID_EC2_VPN_CONNECTION_1) - self.neutron.delete_ipsecpolicy.assert_called_once_with( - fakes.ID_OS_IPSECPOLICY_1) - self.neutron.delete_ikepolicy.assert_called_once_with( - fakes.ID_OS_IKEPOLICY_1) - - @mock.patch('ec2api.api.vpn_connection._reset_vpn_connections', - wraps=vpn_connection_api._reset_vpn_connections) - def test_create_vpn_connection_route(self, reset_vpn_connections): - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_2, - fakes.DB_VPN_GATEWAY_2) - - resp = self.execute( - 'CreateVpnConnectionRoute', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_2, - 'DestinationCidrBlock': '192.168.123.0/24'}) - self.assertEqual({'return': True}, resp) - - vpn = copy.deepcopy(fakes.DB_VPN_CONNECTION_2) - vpn['cidrs'].append('192.168.123.0/24') - self.db_api.update_item.assert_called_once_with(mock.ANY, vpn) - reset_vpn_connections.assert_called_once_with( - mock.ANY, 
self.neutron, mock.ANY, fakes.DB_VPN_GATEWAY_2, - vpn_connections=[vpn]) - - def test_create_vpn_connection_route_idempotent(self): - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_2) - - resp = self.execute( - 'CreateVpnConnectionRoute', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_2, - 'DestinationCidrBlock': fakes.CIDR_VPN_2_PROPAGATED_1}) - self.assertEqual({'return': True}, resp) - self.assertFalse(self.db_api.update_item.called) - - def test_create_vpn_connection_route_invalid_parameters(self): - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidVpnConnectionID.NotFound', 'CreateVpnConnectionRoute', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_2, - 'DestinationCidrBlock': fakes.CIDR_VPN_2_PROPAGATED_1}) - - @tools.screen_unexpected_exception_logs - @mock.patch('ec2api.api.vpn_connection._reset_vpn_connections') - def test_create_vpn_connection_route_rollback(self, reset_vpn_connections): - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_2, - fakes.DB_VPN_GATEWAY_2) - reset_vpn_connections.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'CreateVpnConnectionRoute', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_2, - 'DestinationCidrBlock': '192.168.123.0/24'}) - self.db_api.update_item.assert_called_with( - mock.ANY, fakes.DB_VPN_CONNECTION_2) - - @mock.patch('ec2api.api.vpn_connection._reset_vpn_connections', - wraps=vpn_connection_api._reset_vpn_connections) - def test_delete_vpn_connection_route(self, reset_vpn_connections): - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_2, - fakes.DB_VPN_GATEWAY_2) - - resp = self.execute( - 'DeleteVpnConnectionRoute', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_2, - 'DestinationCidrBlock': fakes.CIDR_VPN_2_PROPAGATED_1}) - self.assertEqual({'return': True}, resp) - vpn = tools.update_dict(fakes.DB_VPN_CONNECTION_2, - {'cidrs': [fakes.CIDR_VPN_2_PROPAGATED_2]}) - self.db_api.update_item.assert_called_once_with(mock.ANY, vpn) - 
reset_vpn_connections.assert_called_once_with( - mock.ANY, self.neutron, mock.ANY, fakes.DB_VPN_GATEWAY_2, - vpn_connections=[vpn]) - - def test_delete_vpn_connection_route_invalid_parameters(self): - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidVpnConnectionID.NotFound', 'DeleteVpnConnectionRoute', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_2, - 'DestinationCidrBlock': fakes.CIDR_VPN_2_PROPAGATED_1}) - - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_2) - self.assert_execution_error( - 'InvalidRoute.NotFound', 'DeleteVpnConnectionRoute', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_2, - 'DestinationCidrBlock': '192.168.123.0/24'}) - - @tools.screen_unexpected_exception_logs - @mock.patch('ec2api.api.vpn_connection._reset_vpn_connections') - def test_delete_vpn_connection_route_rollback(self, reset_vpn_connections): - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_2, - fakes.DB_VPN_GATEWAY_2) - reset_vpn_connections.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DeleteVpnConnectionRoute', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_2, - 'DestinationCidrBlock': fakes.CIDR_VPN_2_PROPAGATED_1}) - self.assert_any_call(self.db_api.update_item, - mock.ANY, fakes.DB_VPN_CONNECTION_2) - - def test_delete_vpn_connection(self): - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_1) - resp = self.execute('DeleteVpnConnection', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_1}) - self.assertEqual({'return': True}, resp) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, fakes.ID_EC2_VPN_CONNECTION_1) - self.neutron.delete_ipsec_site_connection.assert_called_once_with( - fakes.ID_OS_IPSEC_SITE_CONNECTION_2) - self.neutron.delete_ipsecpolicy.assert_called_once_with( - fakes.ID_OS_IPSECPOLICY_1) - self.neutron.delete_ikepolicy.assert_called_once_with( - fakes.ID_OS_IKEPOLICY_1) - - def test_delete_vpn_connection_invalid_parameters(self): - self.set_mock_db_items() - 
self.assert_execution_error( - 'InvalidVpnConnectionID.NotFound', 'DeleteVpnConnection', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_1}) - - @tools.screen_unexpected_exception_logs - def test_delete_vpn_connection_rollback(self): - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_1) - self.neutron.delete_ikepolicy.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DeleteVpnConnection', - {'VpnConnectionId': fakes.ID_EC2_VPN_CONNECTION_1}) - - self.db_api.restore_item.assert_called_once_with( - mock.ANY, 'vpn', fakes.DB_VPN_CONNECTION_1) - self.assertFalse(self.neutron.create_ipsec_site_connection.called) - self.assertFalse(self.neutron.create_ipsecpolicy.called) - self.assertFalse(self.neutron.create_ikepolicy.called) - - def test_describe_vpn_connections(self): - self.set_mock_db_items( - fakes.DB_VPN_CONNECTION_1, fakes.DB_VPN_CONNECTION_2, - fakes.DB_CUSTOMER_GATEWAY_1, fakes.DB_CUSTOMER_GATEWAY_2, - fakes.DB_VPN_GATEWAY_1, fakes.DB_VPN_GATEWAY_2, - fakes.DB_VPC_1, fakes.DB_VPC_2) - self.neutron.list_ikepolicies.return_value = { - 'ikepolicies': [fakes.OS_IKEPOLICY_1, fakes.OS_IKEPOLICY_2]} - self.neutron.list_ipsecpolicies.return_value = { - 'ipsecpolicies': [fakes.OS_IPSECPOLICY_1, fakes.OS_IPSECPOLICY_2]} - self.neutron.list_ipsec_site_connections.return_value = { - 'ipsec_site_connections': []} - self.neutron.list_routers.return_value = { - 'routers': [fakes.OS_ROUTER_1, fakes.OS_ROUTER_2]} - - resp = self.execute('DescribeVpnConnections', {}) - vpns = [tools.update_dict( - vpn, {'customerGatewayConfiguration': 'DONTCARE'}) - for vpn in (fakes.EC2_VPN_CONNECTION_1, - fakes.EC2_VPN_CONNECTION_2)] - self.assertThat( - resp, - matchers.DictMatches( - {'vpnConnectionSet': vpns}, - orderless_lists=True)) - for vpn in (fakes.EC2_VPN_CONNECTION_1, fakes.EC2_VPN_CONNECTION_2): - config = next(v['customerGatewayConfiguration'] - for v in resp['vpnConnectionSet'] - if v['vpnConnectionId'] == vpn['vpnConnectionId']) - 
self.assertThat( - config.encode(), - matchers.XMLMatches( - vpn['customerGatewayConfiguration'].encode(), - orderless_sequence=True)) - self.assertTrue(config.startswith( - '')) - self.neutron.list_ikepolicies.assert_called_once_with( - tenant_id=fakes.ID_OS_PROJECT) - self.neutron.list_ipsecpolicies.assert_called_once_with( - tenant_id=fakes.ID_OS_PROJECT) - self.neutron.list_ipsec_site_connections.assert_called_once_with( - tenant_id=fakes.ID_OS_PROJECT) - self.neutron.list_routers.assert_called_once_with( - tenant_id=fakes.ID_OS_PROJECT) - - resp = self.execute( - 'DescribeVpnConnections', - {'VpnConnectionId.1': fakes.ID_EC2_VPN_CONNECTION_1}) - self.assertThat( - resp, - matchers.DictMatches( - {'vpnConnectionSet': [vpns[0]]}, - orderless_lists=True)) - - self.check_filtering( - 'DescribeVpnConnections', 'vpnConnectionSet', - [('customer-gateway-configuration', - '*' + fakes.PRE_SHARED_KEY_1 + '*'), - ('customer-gateway-id', fakes.ID_EC2_CUSTOMER_GATEWAY_1), - ('state', 'available'), - ('option.static-routes-only', True), - ('route.destination-cidr-block', fakes.CIDR_VPN_2_PROPAGATED_1), - ('type', 'ipsec.1'), - ('vpn-connection-id', fakes.ID_EC2_VPN_CONNECTION_1), - ('vpn-gateway-id', fakes.ID_EC2_VPN_GATEWAY_1)]) - - self.check_tag_support( - 'DescribeVpnConnections', 'vpnConnectionSet', - fakes.ID_EC2_VPN_CONNECTION_1, 'vpnConnectionId') - - def test_format_vpn_connection(self): - db_vpn_connection_1 = tools.update_dict(fakes.DB_VPN_CONNECTION_1, - {'cidrs': []}) - ec2_vpn_connection_1 = tools.patch_dict( - fakes.EC2_VPN_CONNECTION_1, - {'routes': [], 'vgwTelemetry': []}, - ('customerGatewayConfiguration',)) - formatted = vpn_connection_api._format_vpn_connection( - db_vpn_connection_1, - {fakes.ID_EC2_CUSTOMER_GATEWAY_1: fakes.DB_CUSTOMER_GATEWAY_1}, - {}, {}, {}, {}) - formatted.pop('customerGatewayConfiguration') - self.assertThat(ec2_vpn_connection_1, matchers.DictMatches(formatted)) - - def test_format_customer_config(self): - ikepolicy = { - 
'auth_algorithm': 'sha1-fake', - 'encryption_algorithm': '3des', - 'lifetime': {'value': 1111}, - 'pfs': 'group5', - 'phase1_negotiation_mode': 'main-fake', - } - ipsecpolicy = { - 'transform_protocol': 'ah-esp', - 'auth_algorithm': 'sha1-fake', - 'encryption_algorithm': 'aes-256', - 'lifetime': {'value': 2222}, - 'pfs': 'group14', - 'encapsulation_mode': 'transport', - } - ipsec_site_connection = { - 'peer_address': '1.2.3.4', - 'psk': 'password', - 'mtu': 1400, - } - conf = vpn_connection_api._format_customer_config( - fakes.DB_VPN_CONNECTION_1, - {fakes.ID_EC2_CUSTOMER_GATEWAY_1: fakes.DB_CUSTOMER_GATEWAY_1}, - {fakes.ID_OS_IKEPOLICY_1: ikepolicy}, - {fakes.ID_OS_IPSECPOLICY_1: ipsecpolicy}, - {fakes.ID_OS_IPSEC_SITE_CONNECTION_2: ipsec_site_connection}, - {fakes.ID_EC2_VPN_GATEWAY_1: '5.6.7.8'}) - - self.assertThat( - {'ipsec_tunnel': { - 'customer_gateway': { - 'tunnel_outside_address': {'ip_address': '1.2.3.4'}}, - 'vpn_gateway': { - 'tunnel_outside_address': {'ip_address': '5.6.7.8'}}, - 'ike': {'authentication_protocol': 'sha1-fake', - 'encryption_protocol': '3des', - 'lifetime': 1111, - 'perfect_forward_secrecy': 'group5', - 'mode': 'main-fake', - 'pre_shared_key': 'password'}, - 'ipsec': {'protocol': 'ah-esp', - 'authentication_protocol': 'sha1-fake', - 'encryption_protocol': 'aes-256', - 'lifetime': 2222, - 'perfect_forward_secrecy': 'group14', - 'mode': 'transport', - 'tcp_mss_adjustment': 1400 - 40}}}, - matchers.IsSubDictOf(conf)) - - def test_stop_vpn_connection(self): - # delete several connections - os_conn_ids = [fakes.random_os_id() for _x in range(3)] - fake_conn = { - 'os_ipsec_site_connections': { - fakes.random_ec2_id('subnet'): conn_id - for conn_id in os_conn_ids}} - vpn_connection_api._stop_vpn_connection(self.neutron, fake_conn) - self.assertEqual( - 3, self.neutron.delete_ipsec_site_connection.call_count) - for conn_id in os_conn_ids: - self.neutron.delete_ipsec_site_connection.assert_any_call(conn_id) - - # delete several connections 
with exception suppressing - self.neutron.reset_mock() - self.neutron.delete_ipsec_site_connection.side_effect = [ - None, neutron_exception.NotFound(), None] - vpn_connection_api._stop_vpn_connection(self.neutron, fake_conn) - self.assertEqual( - 3, self.neutron.delete_ipsec_site_connection.call_count) - - @mock.patch('ec2api.api.vpn_connection._stop_vpn_connection', - new_callable=tools.CopyingMock) - def test_stop_gateway_vpn_connections(self, stop_vpn_connection): - context = base.create_context() - cleaner = common.OnCrashCleaner() - vpn_connection_3 = tools.update_dict( - fakes.DB_VPN_CONNECTION_1, - {'id': fakes.random_ec2_id('vpn'), - 'os_ipsec_site_connections': {}}) - - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_1, vpn_connection_3, - fakes.DB_VPN_CONNECTION_2) - vpn_connection_api._stop_gateway_vpn_connections( - context, self.neutron, cleaner, fakes.DB_VPN_GATEWAY_1) - self.assertEqual(2, stop_vpn_connection.call_count) - stop_vpn_connection.assert_any_call( - self.neutron, fakes.DB_VPN_CONNECTION_1) - stop_vpn_connection.assert_any_call( - self.neutron, vpn_connection_3) - self.assertEqual(2, self.db_api.update_item.call_count) - self.db_api.update_item.assert_any_call( - mock.ANY, tools.update_dict(fakes.DB_VPN_CONNECTION_1, - {'os_ipsec_site_connections': {}})) - self.db_api.update_item.assert_any_call( - mock.ANY, vpn_connection_3) - - self.db_api.reset_mock() - self.neutron.reset_mock() - stop_vpn_connection.reset_mock() - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_1) - try: - with common.OnCrashCleaner() as cleaner: - vpn_connection_api._stop_gateway_vpn_connections( - context, self.neutron, cleaner, fakes.DB_VPN_GATEWAY_1) - raise Exception('fake-exception') - except Exception as ex: - if str(ex) != 'fake-exception': - raise - self.db_api.update_item.assert_called_with( - mock.ANY, fakes.DB_VPN_CONNECTION_1) - - @mock.patch('ec2api.api.vpn_connection._reset_vpn_connections') - def test_update_vpn_routes(self, reset_vpn_connections): - 
context = base.create_context() - cleaner = common.OnCrashCleaner() - - self.set_mock_db_items() - vpn_connection_api._update_vpn_routes( - context, self.neutron, cleaner, - fakes.DB_ROUTE_TABLE_1, [fakes.DB_SUBNET_1]) - self.assertFalse(reset_vpn_connections.called) - - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1) - vpn_connection_api._update_vpn_routes( - context, self.neutron, cleaner, - fakes.DB_ROUTE_TABLE_1, [fakes.DB_SUBNET_1]) - reset_vpn_connections.assert_called_once_with( - context, self.neutron, cleaner, fakes.DB_VPN_GATEWAY_1, - route_tables=[fakes.DB_ROUTE_TABLE_1], subnets=[fakes.DB_SUBNET_1]) - - @mock.patch('ec2api.api.vpn_connection._delete_subnet_vpn') - @mock.patch('ec2api.api.vpn_connection._set_subnet_vpn') - @mock.patch('ec2api.api.vpn_connection._get_route_table_vpn_cidrs', - wraps=vpn_connection_api._get_route_table_vpn_cidrs) - def test_reset_vpn_connections(self, get_route_table_vpn_cidrs, - set_subnet_vpn, delete_subnet_vpn): - context = base.create_context() - cleaner = common.OnCrashCleaner() - - vpn_gateway_3 = {'id': fakes.random_ec2_id('vpn'), - 'os_id': None, - 'vpc_id': None} - vpn_connection_api._reset_vpn_connections( - context, self.neutron, cleaner, vpn_gateway_3) - self.assertEqual(0, len(self.db_api.mock_calls)) - self.assertFalse(get_route_table_vpn_cidrs.called) - self.assertFalse(set_subnet_vpn.called) - self.assertFalse(delete_subnet_vpn.called) - - customer_gateway_3 = {'id': fakes.random_ec2_id('cgw')} - subnet_3 = {'id': fakes.random_ec2_id('subnet'), - 'vpc_id': fakes.ID_EC2_VPC_2} - vpn_connection_3 = {'id': fakes.random_ec2_id('vpn'), - 'vpn_gateway_id': fakes.ID_EC2_VPN_GATEWAY_1, - 'customer_gateway_id': customer_gateway_3['id'], - 'cidrs': []} - self.set_mock_db_items( - fakes.DB_VPC_1, fakes.DB_VPC_2, - fakes.DB_CUSTOMER_GATEWAY_1, fakes.DB_CUSTOMER_GATEWAY_2, - customer_gateway_3, - fakes.DB_SUBNET_1, fakes.DB_SUBNET_2, subnet_3, - fakes.DB_ROUTE_TABLE_1, fakes.DB_ROUTE_TABLE_2, - fakes.DB_ROUTE_TABLE_3, - 
fakes.DB_VPN_CONNECTION_1, fakes.DB_VPN_CONNECTION_2, - vpn_connection_3) - - # common case - vpn_connection_api._reset_vpn_connections( - context, self.neutron, cleaner, fakes.DB_VPN_GATEWAY_1) - self.assertEqual(2, set_subnet_vpn.call_count) - set_subnet_vpn.assert_any_call( - context, self.neutron, cleaner, fakes.DB_SUBNET_2, - fakes.DB_VPN_CONNECTION_1, fakes.DB_CUSTOMER_GATEWAY_1, - [fakes.CIDR_VPN_1_STATIC]) - set_subnet_vpn.assert_any_call( - context, self.neutron, cleaner, fakes.DB_SUBNET_2, - vpn_connection_3, customer_gateway_3, - [fakes.CIDR_VPN_1_STATIC]) - self.assertEqual(2, delete_subnet_vpn.call_count) - delete_subnet_vpn.assert_any_call( - context, self.neutron, cleaner, fakes.DB_SUBNET_1, - fakes.DB_VPN_CONNECTION_1) - delete_subnet_vpn.assert_any_call( - context, self.neutron, cleaner, fakes.DB_SUBNET_1, - vpn_connection_3) - self.assertEqual(2, get_route_table_vpn_cidrs.call_count) - get_route_table_vpn_cidrs.assert_any_call( - fakes.DB_ROUTE_TABLE_1, fakes.DB_VPN_GATEWAY_1, - [fakes.DB_VPN_CONNECTION_1, vpn_connection_3]) - get_route_table_vpn_cidrs.assert_any_call( - fakes.DB_ROUTE_TABLE_3, fakes.DB_VPN_GATEWAY_1, - [fakes.DB_VPN_CONNECTION_1, vpn_connection_3]) - - # reset for the vpn connection - set_subnet_vpn.reset_mock() - delete_subnet_vpn.reset_mock() - self.db_api.reset_mock() - get_route_table_vpn_cidrs.reset_mock() - vpn_connection_api._reset_vpn_connections( - context, self.neutron, cleaner, fakes.DB_VPN_GATEWAY_1, - vpn_connections=[fakes.DB_VPN_CONNECTION_1]) - self.assertEqual(1, set_subnet_vpn.call_count) - self.assertEqual(1, delete_subnet_vpn.call_count) - self.assertNotIn(mock.call(mock.ANY, 'vpn'), - self.db_api.get_items.mock_calls) - - # reset for the subnet list - set_subnet_vpn.reset_mock() - delete_subnet_vpn.reset_mock() - self.db_api.reset_mock() - get_route_table_vpn_cidrs.reset_mock() - vpn_connection_api._reset_vpn_connections( - context, self.neutron, cleaner, fakes.DB_VPN_GATEWAY_1, - subnets=[fakes.DB_SUBNET_1]) 
- self.assertFalse(set_subnet_vpn.called) - self.assertEqual(2, delete_subnet_vpn.call_count) - self.assertNotIn(mock.call(mock.ANY, 'subnets'), - self.db_api.get_items.mock_calls) - - # reset for the subnet list and the route table - set_subnet_vpn.reset_mock() - delete_subnet_vpn.reset_mock() - self.db_api.reset_mock() - get_route_table_vpn_cidrs.reset_mock() - vpn_connection_api._reset_vpn_connections( - context, self.neutron, cleaner, fakes.DB_VPN_GATEWAY_1, - subnets=[fakes.DB_SUBNET_2], route_tables=[fakes.DB_ROUTE_TABLE_3]) - self.assertEqual(2, set_subnet_vpn.call_count) - self.assertFalse(delete_subnet_vpn.called) - self.assertNotIn(mock.call(mock.ANY, 'subnets'), - self.db_api.get_items.mock_calls) - self.assertNotIn(mock.call(mock.ANY, 'rtb'), - self.db_api.get_items.mock_calls) - - def test_set_subnet_vpn(self): - context = base.create_context() - cleaner = common.OnCrashCleaner() - cidrs = [fakes.CIDR_VPN_1_STATIC, fakes.CIDR_VPN_1_PROPAGATED_1] - - # create ipsec site connection case - id_os_connection = fakes.random_os_id() - os_connection = { - 'vpnservice_id': fakes.ID_OS_VPNSERVICE_1, - 'ikepolicy_id': fakes.ID_OS_IKEPOLICY_1, - 'ipsecpolicy_id': fakes.ID_OS_IPSECPOLICY_1, - 'peer_address': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_1, - 'peer_cidrs': cidrs, - 'psk': fakes.PRE_SHARED_KEY_1, - 'name': (fakes.ID_EC2_VPN_CONNECTION_1 + '/' + - fakes.ID_EC2_SUBNET_1), - 'peer_id': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_1, - 'mtu': 1427, - 'initiator': 'response-only', - } - self.neutron.create_ipsec_site_connection.side_effect = ( - tools.get_neutron_create('ipsec_site_connection', - id_os_connection)) - vpn_connection_api._set_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_1, - copy.deepcopy(fakes.DB_VPN_CONNECTION_1), - fakes.DB_CUSTOMER_GATEWAY_1, cidrs) - - self.neutron.create_ipsec_site_connection.assert_called_once_with( - {'ipsec_site_connection': os_connection}) - vpn_connection_1 = copy.deepcopy(fakes.DB_VPN_CONNECTION_1) - 
(vpn_connection_1['os_ipsec_site_connections'] - [fakes.ID_EC2_SUBNET_1]) = id_os_connection - self.db_api.update_item.assert_called_once_with( - context, vpn_connection_1) - - # update ipsec site connection case - self.db_api.reset_mock() - self.neutron.reset_mock() - vpn_connection_api._set_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_2, - fakes.DB_VPN_CONNECTION_1, fakes.DB_CUSTOMER_GATEWAY_1, cidrs) - self.neutron.update_ipsec_site_connection.assert_called_once_with( - fakes.ID_OS_IPSEC_SITE_CONNECTION_2, - {'ipsec_site_connection': {'peer_cidrs': cidrs}}) - self.assertFalse(self.neutron.create_ipsec_site_connection.called) - self.assertFalse(self.db_api.update_item.called) - - # rollback creating of ipsec site connection case - self.db_api.reset_mock() - self.neutron.reset_mock() - try: - with common.OnCrashCleaner() as cleaner: - vpn_connection_api._set_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_1, - copy.deepcopy(fakes.DB_VPN_CONNECTION_1), - fakes.DB_CUSTOMER_GATEWAY_1, cidrs) - raise Exception('fake-exception') - except Exception as ex: - if str(ex) != 'fake-exception': - raise - self.neutron.delete_ipsec_site_connection.assert_called_once_with( - id_os_connection) - self.db_api.update_item.assert_called_with( - mock.ANY, fakes.DB_VPN_CONNECTION_1) - - # rollback updating of ipsec site connection case - self.db_api.reset_mock() - self.neutron.reset_mock() - try: - with common.OnCrashCleaner() as cleaner: - vpn_connection_api._set_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_2, - fakes.DB_VPN_CONNECTION_1, fakes.DB_CUSTOMER_GATEWAY_1, - cidrs) - raise Exception('fake-exception') - except Exception as ex: - if str(ex) != 'fake-exception': - raise - self.assertFalse(self.neutron.delete_ipsec_site_connection.called) - self.assertFalse(self.db_api.update_item.called) - - def test_delete_subnet_vpn(self): - context = base.create_context() - cleaner = common.OnCrashCleaner() - - # subnet is not connected to the 
vpn - vpn_connection_api._delete_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_1, - fakes.DB_VPN_CONNECTION_1) - self.assertFalse(self.db_api.update_item.called) - self.assertFalse(self.neutron.delete_ipsec_site_connection.called) - - # delete subnet vpn connection - vpn_connection_api._delete_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_2, - copy.deepcopy(fakes.DB_VPN_CONNECTION_1)) - self.db_api.update_item.assert_called_once_with( - mock.ANY, tools.update_dict(fakes.DB_VPN_CONNECTION_1, - {'os_ipsec_site_connections': {}})) - self.neutron.delete_ipsec_site_connection.assert_called_once_with( - fakes.ID_OS_IPSEC_SITE_CONNECTION_2) - - # delete subnet vpn connection, leave connections of other subnets - self.db_api.reset_mock() - self.neutron.reset_mock() - id_os_connection = fakes.random_os_id() - vpn_connection_1 = copy.deepcopy(fakes.DB_VPN_CONNECTION_1) - (vpn_connection_1['os_ipsec_site_connections'] - [fakes.ID_EC2_SUBNET_1]) = id_os_connection - vpn_connection_api._delete_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_1, - vpn_connection_1) - self.db_api.update_item.assert_called_once_with( - mock.ANY, fakes.DB_VPN_CONNECTION_1) - self.neutron.delete_ipsec_site_connection.assert_called_once_with( - id_os_connection) - - # rollback of deleting subnet vpn connection - self.db_api.reset_mock() - self.neutron.reset_mock() - try: - with common.OnCrashCleaner() as cleaner: - vpn_connection_api._delete_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_2, - copy.deepcopy(fakes.DB_VPN_CONNECTION_1)) - raise Exception('fake-exception') - except Exception as ex: - if str(ex) != 'fake-exception': - raise - self.db_api.update_item.assert_called_with( - mock.ANY, fakes.DB_VPN_CONNECTION_1) - self.assertFalse(self.neutron.create_ipsec_site_connection.called) - - def test_get_route_table_vpn_cidrs(self): - route_table_1 = copy.deepcopy(fakes.DB_ROUTE_TABLE_1) - vpn_connection_1 = tools.update_dict( - 
fakes.DB_VPN_CONNECTION_1, {'cidrs': []}) - vpn_connection_2 = tools.update_dict( - vpn_connection_1, {'id': fakes.ID_EC2_VPN_CONNECTION_2}) - - self.assertThat( - vpn_connection_api._get_route_table_vpn_cidrs( - route_table_1, fakes.DB_VPN_GATEWAY_1, []), - matchers.DictMatches({})) - - self.assertThat( - vpn_connection_api._get_route_table_vpn_cidrs( - route_table_1, fakes.DB_VPN_GATEWAY_1, - [vpn_connection_1, vpn_connection_2]), - matchers.DictMatches({})) - - route_table_1['propagating_gateways'] = [fakes.ID_EC2_VPN_GATEWAY_1, - fakes.ID_EC2_VPN_GATEWAY_2] - self.assertThat( - vpn_connection_api._get_route_table_vpn_cidrs( - route_table_1, fakes.DB_VPN_GATEWAY_1, - [vpn_connection_1, vpn_connection_2]), - matchers.DictMatches({})) - - vpn_connection_1['cidrs'] = ['cidr_1'] - self.assertThat( - vpn_connection_api._get_route_table_vpn_cidrs( - route_table_1, fakes.DB_VPN_GATEWAY_1, - [vpn_connection_1, vpn_connection_2]), - matchers.DictMatches({fakes.ID_EC2_VPN_CONNECTION_1: ['cidr_1']})) - - vpn_connection_2['cidrs'] = ['cidr_1', 'cidr_2'] - self.assertThat( - vpn_connection_api._get_route_table_vpn_cidrs( - route_table_1, fakes.DB_VPN_GATEWAY_1, - [vpn_connection_1, vpn_connection_2]), - matchers.DictMatches( - {fakes.ID_EC2_VPN_CONNECTION_1: ['cidr_1'], - fakes.ID_EC2_VPN_CONNECTION_2: ['cidr_1', 'cidr_2']}, - orderless_lists=True)) - - route_table_1['routes'] = [ - {'destination_cidr_block': 'fake_1', - 'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1}, - {'destination_cidr_block': 'fake_2', - 'gateway_id': None}, - {'destination_cidr_block': 'fake_3', - 'gateway_id': fakes.ID_EC2_IGW_1}, - {'destination_cidr_block': 'cidr_3', - 'gateway_id': fakes.ID_EC2_VPN_GATEWAY_1}, - {'destination_cidr_block': 'cidr_4', - 'gateway_id': fakes.ID_EC2_VPN_GATEWAY_1}, - {'destination_cidr_block': 'fake_4', - 'gateway_id': fakes.ID_EC2_VPN_GATEWAY_2}] - - self.assertThat( - vpn_connection_api._get_route_table_vpn_cidrs( - route_table_1, fakes.DB_VPN_GATEWAY_1, - 
[vpn_connection_1, vpn_connection_2]), - matchers.DictMatches( - {fakes.ID_EC2_VPN_CONNECTION_1: ['cidr_1', 'cidr_3', 'cidr_4'], - fakes.ID_EC2_VPN_CONNECTION_2: ['cidr_1', 'cidr_2', - 'cidr_3', 'cidr_4']}, - orderless_lists=True)) - - route_table_1['propagating_gateways'] = [fakes.ID_EC2_VPN_GATEWAY_2] - self.assertThat( - vpn_connection_api._get_route_table_vpn_cidrs( - route_table_1, fakes.DB_VPN_GATEWAY_1, - [vpn_connection_1, vpn_connection_2]), - matchers.DictMatches( - {fakes.ID_EC2_VPN_CONNECTION_1: ['cidr_3', 'cidr_4'], - fakes.ID_EC2_VPN_CONNECTION_2: ['cidr_3', 'cidr_4']}, - orderless_lists=True)) diff --git a/ec2api/tests/unit/test_vpn_gateway.py b/ec2api/tests/unit/test_vpn_gateway.py deleted file mode 100644 index 2ca57fb6..00000000 --- a/ec2api/tests/unit/test_vpn_gateway.py +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -from unittest import mock - -import fixtures -from neutronclient.common import exceptions as neutron_exception - -from ec2api.api import common -from ec2api.api import vpn_gateway as vpn_gateway_api -from ec2api.tests.unit import base -from ec2api.tests.unit import fakes -from ec2api.tests.unit import matchers -from ec2api.tests.unit import tools - - -class VpnGatewayTestCase(base.ApiTestCase): - - def setUp(self): - super(VpnGatewayTestCase, self).setUp() - self.DB_VPN_GATEWAY_2_ATTACHED = tools.update_dict( - fakes.DB_VPN_GATEWAY_2, {'vpc_id': fakes.ID_EC2_VPC_2}) - self.DB_VPN_GATEWAY_1_DETACHED = tools.update_dict( - fakes.DB_VPN_GATEWAY_1, {'vpc_id': None}) - self.DB_SUBNET_1_NO_VPN = tools.purge_dict( - fakes.DB_SUBNET_1, ('os_vpnservice_id',)) - - def test_create_vpn_gateway(self): - self.db_api.add_item.side_effect = ( - tools.get_db_api_add_item(fakes.ID_EC2_VPN_GATEWAY_2)) - - resp = self.execute('CreateVpnGateway', - {'Type': 'ipsec.1'}) - self.assertEqual({'vpnGateway': fakes.EC2_VPN_GATEWAY_2}, resp) - self.db_api.add_item.assert_called_once_with(mock.ANY, 'vgw', {}) - - def test_attach_vpn_gateway(self): - self.configure(external_network=fakes.NAME_OS_PUBLIC_NETWORK) - subnet_2 = tools.patch_dict(fakes.DB_SUBNET_2, - {'vpc_id': fakes.ID_EC2_VPC_2}, - ('os_vpnservice_id',)) - self.set_mock_db_items( - fakes.DB_VPN_GATEWAY_1, fakes.DB_VPN_GATEWAY_2, - fakes.DB_VPC_2, fakes.DB_IGW_1, fakes.DB_IGW_2, - fakes.DB_SUBNET_1, subnet_2) - subnet_2_updated = tools.update_dict( - subnet_2, {'os_vpnservice_id': fakes.ID_OS_VPNSERVICE_2}) - os_vpnservice_2 = tools.patch_dict(fakes.OS_VPNSERVICE_2, - {'router_id': fakes.ID_OS_ROUTER_2}, - ('id',)) - self.neutron.list_networks.return_value = ( - {'networks': [{'id': fakes.ID_OS_PUBLIC_NETWORK}]}) - self.neutron.create_vpnservice.side_effect = tools.get_neutron_create( - 'vpnservice', fakes.ID_OS_VPNSERVICE_2) - - def do_check(): - resp = self.execute('AttachVpnGateway', - {'VpcId': fakes.ID_EC2_VPC_2, 
- 'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - self.assertEqual({'attachment': {'state': 'attached', - 'vpcId': fakes.ID_EC2_VPC_2}}, - resp) - self.assertEqual(2, self.db_api.update_item.call_count) - self.db_api.update_item.assert_has_calls( - [mock.call(mock.ANY, self.DB_VPN_GATEWAY_2_ATTACHED), - mock.call(mock.ANY, subnet_2_updated)]) - self.neutron.create_vpnservice.assert_called_once_with( - {'vpnservice': os_vpnservice_2}) - - do_check() - self.neutron.add_gateway_router.assert_called_once_with( - fakes.ID_OS_ROUTER_2, - {'network_id': fakes.ID_OS_PUBLIC_NETWORK}) - self.neutron.list_networks.assert_called_once_with( - **{'router:external': True, - 'name': fakes.NAME_OS_PUBLIC_NETWORK}) - - # Internet gateway is already attached - self.db_api.reset_mock() - self.neutron.reset_mock() - igw_2 = tools.update_dict(fakes.DB_IGW_2, - {'vpc_id': fakes.ID_EC2_VPC_2}) - self.add_mock_db_items(igw_2) - - do_check() - self.neutron.add_gateway_router.assert_not_called() - - def test_attach_vpn_gateway_idempotent(self): - self.configure(external_network=fakes.NAME_OS_PUBLIC_NETWORK) - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1, fakes.DB_VPC_1) - - resp = self.execute('AttachVpnGateway', - {'VpcId': fakes.ID_EC2_VPC_1, - 'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - self.assertEqual({'attachment': {'state': 'attached', - 'vpcId': fakes.ID_EC2_VPC_1}}, - resp) - self.assertFalse(self.db_api.update_item.called) - self.assertFalse(self.neutron.add_gateway_router.called) - self.assertFalse(self.neutron.create_vpnservice.called) - - def test_attach_vpn_gateway_invalid_parameters(self): - def do_check(error_code): - self.assert_execution_error( - error_code, 'AttachVpnGateway', - {'VpcId': fakes.ID_EC2_VPC_2, - 'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - - self.assertFalse(self.db_api.update_item.called) - self.db_api.reset_mock() - - self.set_mock_db_items(fakes.DB_VPC_2) - do_check('InvalidVpnGatewayID.NotFound') - - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_2) - 
do_check('InvalidVpcID.NotFound') - - self.set_mock_db_items( - tools.update_dict(fakes.DB_VPN_GATEWAY_2, - {'vpc_id': fakes.ID_EC2_VPC_1}), - fakes.DB_VPC_2) - do_check('VpnGatewayAttachmentLimitExceeded') - - self.set_mock_db_items( - fakes.DB_VPN_GATEWAY_2, fakes.DB_VPC_2, - tools.update_dict(fakes.DB_VPN_GATEWAY_1, - {'vpc_id': fakes.ID_EC2_VPC_2})) - do_check('InvalidVpcState') - - @tools.screen_unexpected_exception_logs - def test_attach_vpn_gateway_rollback(self): - self.configure(external_network=fakes.NAME_OS_PUBLIC_NETWORK) - subnet_2 = tools.patch_dict(fakes.DB_SUBNET_2, - {'vpc_id': fakes.ID_EC2_VPC_2}, - ('os_vpnservice_id',)) - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_2, fakes.DB_VPC_2, - subnet_2) - self.neutron.list_networks.return_value = ( - {'networks': [{'id': fakes.ID_OS_PUBLIC_NETWORK}]}) - self.neutron.create_vpnservice.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'AttachVpnGateway', - {'VpcId': fakes.ID_EC2_VPC_2, - 'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - - self.db_api.update_item.assert_any_call( - mock.ANY, fakes.DB_VPN_GATEWAY_2) - self.neutron.remove_gateway_router.assert_called_once_with( - fakes.ID_OS_ROUTER_2) - - def test_detach_vpn_gateway(self): - self.set_mock_db_items( - fakes.DB_VPN_GATEWAY_1, fakes.DB_VPC_1, fakes.DB_VPN_CONNECTION_1, - fakes.DB_SUBNET_1, - tools.update_dict(fakes.DB_SUBNET_2, - {'vpc_id': fakes.ID_EC2_VPC_2})) - - def do_check(): - resp = self.execute( - 'DetachVpnGateway', - {'VpcId': fakes.ID_EC2_VPC_1, - 'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - self.assertEqual({'return': True}, resp) - self.assertEqual(3, self.db_api.update_item.call_count) - self.db_api.update_item.assert_has_calls( - [mock.call(mock.ANY, self.DB_VPN_GATEWAY_1_DETACHED), - mock.call(mock.ANY, - tools.update_dict( - fakes.DB_VPN_CONNECTION_1, - {'os_ipsec_site_connections': {}})), - mock.call(mock.ANY, self.DB_SUBNET_1_NO_VPN)]) - 
self.neutron.delete_vpnservice.assert_called_once_with( - fakes.ID_OS_VPNSERVICE_1) - self.neutron.delete_ipsec_site_connection.assert_called_once_with( - fakes.ID_OS_IPSEC_SITE_CONNECTION_2) - - do_check() - self.neutron.remove_gateway_router.assert_called_once_with( - fakes.ID_OS_ROUTER_1) - - # Internet gateway is still attached - self.db_api.reset_mock() - self.neutron.reset_mock() - self.add_mock_db_items(fakes.DB_IGW_1) - - do_check() - self.neutron.remove_gateway_router.assert_not_called() - - def test_detach_vpn_gateway_invalid_parameters(self): - def do_check(error_code): - self.assert_execution_error( - error_code, 'DetachVpnGateway', - {'VpcId': fakes.ID_EC2_VPC_1, - 'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - - self.assertEqual(0, self.neutron.remove_gateway_router.call_count) - self.assertEqual(0, self.db_api.update_item.call_count) - - self.neutron.reset_mock() - self.db_api.reset_mock() - - self.set_mock_db_items() - do_check('InvalidVpnGatewayID.NotFound') - - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_2) - do_check('InvalidVpnGatewayAttachment.NotFound') - - self.set_mock_db_items(self.DB_VPN_GATEWAY_2_ATTACHED) - do_check('InvalidVpnGatewayAttachment.NotFound') - - def test_detach_vpn_gateway_no_router(self): - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1, fakes.DB_VPC_1) - self.neutron.remove_gateway_router.side_effect = ( - neutron_exception.NotFound) - - resp = self.execute( - 'DetachVpnGateway', - {'VpcId': fakes.ID_EC2_VPC_1, - 'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - - self.assertEqual(True, resp['return']) - self.neutron.remove_gateway_router.assert_called_once_with( - fakes.ID_OS_ROUTER_1) - - @tools.screen_unexpected_exception_logs - def test_detach_vpn_gateway_rollback(self): - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1, fakes.DB_VPC_1, - fakes.DB_SUBNET_1) - self.neutron.remove_gateway_router.side_effect = Exception() - - self.assert_execution_error( - self.ANY_EXECUTE_ERROR, 'DetachVpnGateway', - {'VpcId': 
fakes.ID_EC2_VPC_1, - 'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - - self.db_api.update_item.assert_has_calls( - [mock.call(mock.ANY, fakes.DB_SUBNET_1), - mock.call(mock.ANY, fakes.DB_VPN_GATEWAY_1)]) - - def test_delete_vpn_gateway(self): - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_2) - - resp = self.execute( - 'DeleteVpnGateway', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - - self.assertEqual({'return': True}, resp) - self.db_api.delete_item.assert_called_once_with( - mock.ANY, fakes.ID_EC2_VPN_GATEWAY_2) - - def test_delete_vpn_gateway_invalid_parameters(self): - self.set_mock_db_items() - self.assert_execution_error( - 'InvalidVpnGatewayID.NotFound', 'DeleteVpnGateway', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1) - self.assert_execution_error( - 'IncorrectState', 'DeleteVpnGateway', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_1}) - - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_2, - fakes.DB_VPN_CONNECTION_2) - self.assert_execution_error( - 'IncorrectState', 'DeleteVpnGateway', - {'VpnGatewayId': fakes.ID_EC2_VPN_GATEWAY_2}) - - def test_describe_vpn_gateways(self): - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1, fakes.DB_VPN_GATEWAY_2) - - resp = self.execute('DescribeVpnGateways', {}) - self.assertThat(resp['vpnGatewaySet'], - matchers.ListMatches([fakes.EC2_VPN_GATEWAY_1, - fakes.EC2_VPN_GATEWAY_2])) - - resp = self.execute('DescribeVpnGateways', - {'VpnGatewayId.1': fakes.ID_EC2_VPN_GATEWAY_2}) - self.assertThat(resp['vpnGatewaySet'], - matchers.ListMatches([fakes.EC2_VPN_GATEWAY_2])) - self.db_api.get_items_by_ids.assert_called_once_with( - mock.ANY, set([fakes.ID_EC2_VPN_GATEWAY_2])) - - self.check_filtering( - 'DescribeVpnGateways', 'vpnGatewaySet', - [('attachment.state', 'attached'), - ('attachment.vpc-id', fakes.ID_EC2_VPC_1), - ('state', 'available'), - ('type', 'ipsec.1'), - ('vpn-gateway-id', fakes.ID_EC2_VPN_GATEWAY_2)]) - self.check_tag_support( - 'DescribeVpnGateways', 
'vpnGatewaySet', - fakes.ID_EC2_VPN_GATEWAY_2, 'vpnGatewayId') - - @mock.patch('ec2api.api.vpn_connection._reset_vpn_connections') - @mock.patch('ec2api.api.vpn_gateway._create_subnet_vpnservice') - def test_start_vpn_in_subnet(self, create_subnet_vpnservice, - reset_vpn_connection): - context = base.create_context() - cleaner = common.OnCrashCleaner() - mock_manager = mock.Mock() - mock_manager.attach_mock(create_subnet_vpnservice, - 'create_subnet_vpnservice') - mock_manager.attach_mock(reset_vpn_connection, 'reset_vpn_connection') - - self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1, fakes.DB_VPN_GATEWAY_2) - vpn_gateway_api._start_vpn_in_subnet( - context, self.neutron, cleaner, copy.deepcopy(fakes.DB_SUBNET_1), - fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1) - mock_manager.assert_has_calls([ - mock.call.create_subnet_vpnservice( - context, self.neutron, cleaner, - fakes.DB_SUBNET_1, fakes.DB_VPC_1), - mock.call.reset_vpn_connection( - context, self.neutron, cleaner, fakes.DB_VPN_GATEWAY_1, - subnets=[fakes.DB_SUBNET_1], - route_tables=[fakes.DB_ROUTE_TABLE_1])]) - - create_subnet_vpnservice.reset_mock() - reset_vpn_connection.reset_mock() - self.add_mock_db_items(self.DB_VPN_GATEWAY_1_DETACHED) - vpn_gateway_api._start_vpn_in_subnet( - context, self.neutron, cleaner, copy.deepcopy(fakes.DB_SUBNET_1), - fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1) - self.assertFalse(create_subnet_vpnservice.called) - self.assertFalse(reset_vpn_connection.called) - - @mock.patch('ec2api.api.vpn_connection._delete_subnet_vpn') - @mock.patch('ec2api.api.vpn_gateway._safe_delete_vpnservice') - def test_stop_vpn_in_subnet(self, delete_vpnservice, delete_subnet_vpn): - context = base.create_context() - cleaner = common.OnCrashCleaner() - mock_manager = mock.Mock() - mock_manager.attach_mock(delete_vpnservice, 'delete_vpnservice') - mock_manager.attach_mock(delete_subnet_vpn, 'delete_subnet_vpn') - - self.set_mock_db_items(fakes.DB_VPN_CONNECTION_1, - fakes.DB_VPN_CONNECTION_2) - 
vpn_gateway_api._stop_vpn_in_subnet( - context, self.neutron, cleaner, copy.deepcopy(fakes.DB_SUBNET_1)) - mock_manager.has_calls([ - mock.call.delete_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_1, - fakes.DB_VPN_CONNECTION_1), - mock.call.delete_subnet_vpn( - context, self.neutron, cleaner, fakes.DB_SUBNET_1, - fakes.DB_VPN_CONNECTION_2), - mock.call.delete_vpnservice( - self.neutron, fakes.ID_OS_VPNSERVICE_1, - fakes.ID_EC2_SUBNET_1)]) - - delete_subnet_vpn.reset_mock() - delete_vpnservice.reset_mock() - vpn_gateway_api._stop_vpn_in_subnet( - context, self.neutron, cleaner, self.DB_SUBNET_1_NO_VPN) - self.assertFalse(delete_subnet_vpn.called) - self.assertFalse(delete_vpnservice.called) - - def test_create_subnet_vpnservice(self): - self.neutron.create_vpnservice.side_effect = tools.get_neutron_create( - 'vpnservice', fakes.ID_OS_VPNSERVICE_1) - context = base.create_context() - cleaner = common.OnCrashCleaner() - - vpn_gateway_api._create_subnet_vpnservice( - context, self.neutron, cleaner, - copy.deepcopy(self.DB_SUBNET_1_NO_VPN), fakes.DB_VPC_1) - - self.neutron.create_vpnservice.assert_called_once_with( - {'vpnservice': tools.purge_dict(fakes.OS_VPNSERVICE_1, - ('id',))}) - self.db_api.update_item.assert_called_once_with( - mock.ANY, fakes.DB_SUBNET_1) - - try: - with common.OnCrashCleaner() as cleaner: - vpn_gateway_api._create_subnet_vpnservice( - context, self.neutron, cleaner, - copy.deepcopy(self.DB_SUBNET_1_NO_VPN), fakes.DB_VPC_1) - raise Exception('fake-exception') - except Exception as ex: - if str(ex) != 'fake-exception': - raise - self.db_api.update_item.assert_called_with( - mock.ANY, self.DB_SUBNET_1_NO_VPN) - self.neutron.delete_vpnservice.assert_called_once_with( - fakes.ID_OS_VPNSERVICE_1) - - @mock.patch('ec2api.api.vpn_gateway._safe_delete_vpnservice') - def test_delete_subnet_vpnservice(self, delete_vpnservice): - context = base.create_context() - cleaner = common.OnCrashCleaner() - - 
vpn_gateway_api._delete_subnet_vpnservice( - context, self.neutron, cleaner, copy.deepcopy(fakes.DB_SUBNET_1)) - - self.db_api.update_item.assert_called_once_with( - mock.ANY, self.DB_SUBNET_1_NO_VPN) - - try: - with common.OnCrashCleaner() as cleaner: - vpn_gateway_api._delete_subnet_vpnservice( - context, self.neutron, cleaner, - copy.deepcopy(fakes.DB_SUBNET_1)) - raise Exception('fake-exception') - except Exception as ex: - if str(ex) != 'fake-exception': - raise - self.db_api.update_item.assert_called_with( - mock.ANY, fakes.DB_SUBNET_1) - self.assertFalse(self.neutron.create_vpnservice.called) - - def test_safe_delete_vpnservice(self): - vpn_gateway_api._safe_delete_vpnservice( - self.neutron, fakes.ID_OS_VPNSERVICE_1, fakes.ID_EC2_SUBNET_1) - self.neutron.delete_vpnservice.assert_called_once_with( - fakes.ID_OS_VPNSERVICE_1) - - self.neutron.delete_vpnservice.side_effect = ( - neutron_exception.NotFound()) - with fixtures.FakeLogger() as log: - vpn_gateway_api._safe_delete_vpnservice( - self.neutron, fakes.ID_OS_VPNSERVICE_1, fakes.ID_EC2_SUBNET_1) - self.assertEqual(0, len(log.output)) - - self.neutron.delete_vpnservice.side_effect = ( - neutron_exception.Conflict()) - with fixtures.FakeLogger() as log: - vpn_gateway_api._safe_delete_vpnservice( - self.neutron, fakes.ID_OS_VPNSERVICE_1, fakes.ID_EC2_SUBNET_1) - self.assertIn(fakes.ID_EC2_SUBNET_1, log.output) - self.assertIn(fakes.ID_OS_VPNSERVICE_1, log.output) diff --git a/ec2api/tests/unit/tools.py b/ec2api/tests/unit/tools.py deleted file mode 100644 index 163b7019..00000000 --- a/ec2api/tests/unit/tools.py +++ /dev/null @@ -1,263 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import copy -import logging -import re -from unittest import mock - -import fixtures -from lxml import etree - -from ec2api.api import ec2utils - - -def update_dict(dict1, dict2): - """Get a copy of union of two dicts.""" - res = copy.deepcopy(dict1) - res.update(dict2) - return res - - -def purge_dict(dict1, trash_keys): - """Get a copy of dict, removed keys.""" - res = copy.deepcopy(dict1) - for key in trash_keys: - res.pop(key, None) - return res - - -def patch_dict(dict1, dict2, trash_iter): - """Get a copy of union of two dicts, removed keys.""" - res = update_dict(dict1, dict2) - res = purge_dict(res, trash_iter) - return res - - -def get_db_api_add_item(item_id_dict): - """Generate db_api.add_item mock function.""" - - def db_api_add_item(context, kind, data): - if isinstance(item_id_dict, dict): - item_id = item_id_dict[kind] - else: - item_id = item_id_dict - data = update_dict(data, {'id': item_id}) - data.setdefault('os_id') - data.setdefault('vpc_id') - return data - return db_api_add_item - - -def get_db_api_get_items(*items): - """Generate db_api.get_items mock function.""" - - def db_api_get_items(context, kind): - return [copy.deepcopy(item) - for item in items - if ec2utils.get_ec2_id_kind(item['id']) == kind] - return db_api_get_items - - -def get_db_api_get_item_by_id(*items): - """Generate db_api.get_item_by_id mock function.""" - - def db_api_get_item_by_id(context, item_id): - return next((copy.deepcopy(item) - for item in items - if item['id'] == item_id), - None) - return db_api_get_item_by_id - - -def 
get_db_api_get_items_by_ids(*items): - """Generate db_api.get_items_by_ids mock function.""" - - def db_api_get_items_by_ids(context, item_ids): - return [copy.deepcopy(item) - for item in items - if (item['id'] in item_ids)] - return db_api_get_items_by_ids - - -def get_db_api_get_items_ids(*items): - """Generate db_api.get_items_ids mock function.""" - - def db_api_get_items_ids(context, kind, item_ids=None, item_os_ids=None): - return [(item['id'], item['os_id']) - for item in items - if (ec2utils.get_ec2_id_kind(item['id']) == kind and - (not item_ids or item['id'] in item_ids) and - (not item_os_ids or item['os_id'] in item_os_ids))] - return db_api_get_items_ids - - -def get_neutron_create(kind, os_id, addon={}): - """Generate Neutron create an object mock function.""" - - def neutron_create(body): - body = copy.deepcopy(body) - body[kind].update(addon) - body[kind]['id'] = os_id - return body - return neutron_create - - -def get_by_1st_arg_getter(results_dict_by_id, notfound_exception=None): - """Generate mock function for getter by 1st argurment.""" - - def getter(obj_id): - try: - return copy.deepcopy(results_dict_by_id[obj_id]) - except KeyError: - if notfound_exception: - raise notfound_exception - else: - return None - return getter - - -def get_by_2nd_arg_getter(results_dict_by_id): - """Generate mock function for getter by 2nd argurment.""" - - def getter(_context, obj_id): - return copy.deepcopy(results_dict_by_id.get(obj_id)) - return getter - - -def _safe_copy_parameters(args, kwargs): - # NOTE(ft): deepcopy fails to copy a complicated mock like - # neutron client mock or OnCrashCleaner object - def _safe_copy(obj): - try: - return copy.deepcopy(obj) - except Exception: - return obj - - args = [_safe_copy(arg) - for arg in args] - kwargs = {key: _safe_copy(val) - for key, val in kwargs.items()} - return (args, kwargs) - - -class CopyingMock(mock.MagicMock): - """Mock class for calls with mutable arguments. 
- - See https://docs.python.org/3/library/unittest.mock-examples.html# - coping-with-mutable-arguments - """ - - def __call__(self, *args, **kwargs): - args, kwargs = _safe_copy_parameters(args, kwargs) - return super(CopyingMock, self).__call__(*args, **kwargs) - - -def deepcopy_call_args_saver(destination): - def side_effect(*args, **kwargs): - args, kwargs = _safe_copy_parameters(args, kwargs) - destination.append(mock.call(*args, **kwargs)) - return side_effect - - -_xml_scheme = re.compile(r'\sxmlns=".*"') - - -def parse_xml(xml_string): - xml_string = _xml_scheme.sub('', xml_string.decode("utf-8")) - xml = etree.fromstring(xml_string) - - def convert_node(node): - children = list(node) - if len(children): - if children[0].tag == 'item': - val = list(convert_node(child)[1] for child in children) - else: - val = dict(convert_node(child) for child in children) - elif node.tag.endswith('Set'): - val = [] - else: - # TODO(ft): do not use private function - val = (ec2utils._try_convert(node.text) - if node.text - else node.text) - return node.tag, val - - return dict([convert_node(xml)]) - - -class KeepingHandler(logging.Handler): - - def __init__(self): - super(KeepingHandler, self).__init__() - self._storage = [] - - def emit(self, record): - self._storage.append(record) - - def emit_records_to(self, handlers, record_filter=None): - for record in self._storage: - if not record_filter or record_filter.filter(record): - for handler in handlers: - if self != handler: - handler.emit(record) - - -class ScreeningFilter(logging.Filter): - - def __init__(self, name=None): - self._name = name - - def filter(self, record): - if self._name is not None and record.name == self._name: - return False - return True - - -class ScreeningLogger(fixtures.Fixture): - - def __init__(self, log_name=None): - super(ScreeningLogger, self).__init__() - self.handler = KeepingHandler() - if log_name: - self._filter = ScreeningFilter(name=log_name) - else: - self._filter = None - - def 
setUp(self): - super(ScreeningLogger, self).setUp() - self.useFixture(fixtures.LogHandler(self.handler)) - - def __exit__(self, exc_type, exc_val, exc_tb): - res = super(ScreeningLogger, self).__exit__(exc_type, exc_val, exc_tb) - handlers = logging.getLogger().handlers - if exc_type: - self.handler.emit_records_to(handlers) - elif self._filter: - self.handler.emit_records_to(handlers, self._filter) - return res - - -def screen_logs(log_name=None): - def decorator(func): - def wrapper(*args, **kwargs): - with ScreeningLogger(log_name): - return func(*args, **kwargs) - return wrapper - return decorator - - -screen_unexpected_exception_logs = screen_logs('ec2api.api') -screen_all_logs = screen_logs() diff --git a/ec2api/utils.py b/ec2api/utils.py deleted file mode 100644 index cef06390..00000000 --- a/ec2api/utils.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Utilities and helper functions.""" - -import hashlib -import hmac -from xml.sax import saxutils - -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) - - -def get_hash_str(base_str): - """returns string that represents hash of base_str (in hex format).""" - return hashlib.md5(base_str).hexdigest() - - -if hasattr(hmac, 'compare_digest'): - constant_time_compare = hmac.compare_digest -else: - def constant_time_compare(first, second): - """Returns True if both string inputs are equal, otherwise False. 
- - This function should take a constant amount of time regardless of - how many characters in the strings match. - - """ - if len(first) != len(second): - return False - result = 0 - for x, y in zip(first, second): - result |= ord(x) ^ ord(y) - return result == 0 - - -def xhtml_escape(value): - """Escapes a string so it is valid within XML or XHTML. - - """ - return saxutils.escape(value, {'"': '"', "'": '''}) diff --git a/ec2api/version.py b/ec2api/version.py deleted file mode 100644 index 0567fe22..00000000 --- a/ec2api/version.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pbr.version - -version_info = pbr.version.VersionInfo('ec2-api') diff --git a/ec2api/wsgi.py b/ec2api/wsgi.py deleted file mode 100644 index b4e5066f..00000000 --- a/ec2api/wsgi.py +++ /dev/null @@ -1,515 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Utility methods for working with WSGI servers.""" - -import os.path -import socket -import ssl -import sys - -import eventlet.wsgi -import greenlet -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service.service import ServiceBase -from oslo_utils import excutils -from paste import deploy -import routes.middleware -import webob.dec -import webob.exc - -from ec2api import exception -from ec2api.i18n import _ - -wsgi_opts = [ - cfg.StrOpt('api_paste_config', - default="api-paste.ini", - help='File name for the paste.deploy config for ec2api'), - cfg.StrOpt( - 'wsgi_log_format', - default='%(client_ip)s "%(request_line)s" status: %(status_code)s' - ' len: %(body_length)s time: %(wall_seconds).7f', - help='A python format string that is used as the template to ' - 'generate log lines. The following values can be formatted ' - 'into it: client_ip, date_time, request_line, status_code, ' - 'body_length, wall_seconds.'), - cfg.StrOpt('ssl_ca_file', - help="Path to the CA certificate file that should be used" - "to verify connecting clients."), - cfg.StrOpt('ssl_cert_file', - help="SSL certificate of API server"), - cfg.StrOpt('ssl_key_file', - help="SSL private key of API server"), - cfg.IntOpt('tcp_keepidle', - default=600, - help="Sets the value of TCP_KEEPIDLE in seconds for each " - "server socket. Not supported on OS X."), - cfg.IntOpt('wsgi_default_pool_size', - default=1000, - help="Size of the pool of greenthreads used by wsgi"), - cfg.IntOpt('max_header_line', - default=16384, - help="Maximum line size of message headers to be accepted. 
" - "max_header_line may need to be increased when using " - "large tokens (typically those generated by the " - "Keystone v3 API with big service catalogs)."), -] -CONF = cfg.CONF -CONF.register_opts(wsgi_opts) - -LOG = logging.getLogger(__name__) - - -class Server(ServiceBase): - """Server class to manage a WSGI server, serving a WSGI application.""" - - default_pool_size = CONF.wsgi_default_pool_size - - def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None, - protocol=eventlet.wsgi.HttpProtocol, backlog=128, - use_ssl=False, max_url_len=None): - """Initialize, but do not start, a WSGI server. - - :param name: Pretty name for logging. - :param app: The WSGI application to serve. - :param host: IP address to serve the application. - :param port: Port number to server the application. - :param pool_size: Maximum number of eventlets to spawn concurrently. - :param backlog: Maximum number of queued connections. - :param max_url_len: Maximum length of permitted URLs. - :returns: None - :raises: ec2api.exception.InvalidInput - """ - # Allow operators to customize http requests max header line size. - eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line - self.name = name - self.app = app - self._server = None - self._protocol = protocol - self.pool_size = pool_size or self.default_pool_size - self._pool = eventlet.GreenPool(self.pool_size) - self._logger = logging.getLogger("ec2api.wsgi.server") - self._use_ssl = use_ssl - self._max_url_len = max_url_len - - if backlog < 1: - raise exception.InvalidInput( - reason='The backlog must be more than 1') - - bind_addr = (host, port) - # TODO(dims): eventlet's green dns/socket module does not actually - # support IPv6 in getaddrinfo(). 
We need to get around this in the - # future or monitor upstream for a fix - try: - info = socket.getaddrinfo(bind_addr[0], - bind_addr[1], - socket.AF_UNSPEC, - socket.SOCK_STREAM)[0] - family = info[0] - bind_addr = info[-1] - except Exception: - family = socket.AF_INET - - try: - self._socket = eventlet.listen(bind_addr, family, backlog=backlog) - except EnvironmentError: - LOG.error("Could not bind to %(host)s:%(port)s", - {'host': host, 'port': port}) - raise - - (self.host, self.port) = self._socket.getsockname()[0:2] - LOG.info("%(name)s listening on %(host)s:%(port)s", - {'name': self.name, 'host': self.host, 'port': self.port}) - - def start(self): - """Start serving a WSGI application. - - :returns: None - """ - # The server socket object will be closed after server exits, - # but the underlying file descriptor will remain open, and will - # give bad file descriptor error. So duplicating the socket object, - # to keep file descriptor usable. - - dup_socket = self._socket.dup() - dup_socket.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, 1) - # sockets can hang around forever without keepalive - dup_socket.setsockopt(socket.SOL_SOCKET, - socket.SO_KEEPALIVE, 1) - - # This option isn't available in the OS X version of eventlet - if hasattr(socket, 'TCP_KEEPIDLE'): - dup_socket.setsockopt(socket.IPPROTO_TCP, - socket.TCP_KEEPIDLE, - CONF.tcp_keepidle) - - if self._use_ssl: - try: - ca_file = CONF.ssl_ca_file - cert_file = CONF.ssl_cert_file - key_file = CONF.ssl_key_file - - if ca_file and not os.path.exists(ca_file): - raise RuntimeError( - _("Unable to find ca_file : %s") % ca_file) - if cert_file and not os.path.exists(cert_file): - raise RuntimeError(_("Unable to find cert_file : %s") % - cert_file) - if key_file and not os.path.exists(key_file): - raise RuntimeError(_("Unable to find key_file : %s") % - key_file) - if self._use_ssl and (not cert_file or not key_file): - raise RuntimeError(_("When running server in SSL mode, " - "you must specify 
both a cert_file " - "and key_file option value in your " - "configuration file")) - ssl_kwargs = { - 'server_side': True, - 'certfile': cert_file, - 'keyfile': key_file, - 'cert_reqs': ssl.CERT_NONE, - } - - if ca_file: - ssl_kwargs['ca_certs'] = ca_file - ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED - - dup_socket = eventlet.wrap_ssl(dup_socket, - **ssl_kwargs) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to start %(name)s on %(host)s" - ":%(port)s with SSL support", - {'name': self.name, 'host': self.host, - 'port': self.port}) - - wsgi_kwargs = { - 'func': eventlet.wsgi.server, - 'sock': dup_socket, - 'site': self.app, - 'protocol': self._protocol, - 'custom_pool': self._pool, - 'log': self._logger, - 'log_format': CONF.wsgi_log_format, - 'debug': False, - } - - if self._max_url_len: - wsgi_kwargs['url_length_limit'] = self._max_url_len - - self._server = eventlet.spawn(**wsgi_kwargs) - - def reset(self): - """Reset server greenpool size to default. - - :returns: None - - """ - self._pool.resize(self.pool_size) - - def stop(self): - """Stop this server. - - This is not a very nice action, as currently the method by which a - server is stopped is by killing its eventlet. - - :returns: None - - """ - LOG.info("Stopping WSGI server.") - - if self._server is not None: - # Resize pool to stop new requests from being processed - self._pool.resize(0) - self._server.kill() - - def wait(self): - """Block, until the server has stopped. - - Waits on the server's eventlet to finish, then returns. - - :returns: None - - """ - try: - if self._server is not None: - self._server.wait() - except greenlet.GreenletExit: - LOG.info("WSGI server has stopped.") - - -class Request(webob.Request): - pass - - -class Application(object): - """Base WSGI application wrapper. Subclasses need to implement __call__.""" - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. 
- - Any local configuration (that is, values under the [app:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [app:wadl] - latest_version = 1.3 - paste.app_factory = ec2api.api.fancy_api:Wadl.factory - - which would result in a call to the `Wadl` class as - - import ec2api.api.fancy_api - fancy_api.Wadl(latest_version='1.3') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. - - """ - return cls(**local_config) - - def __call__(self, environ, start_response): - r"""Subclasses will probably want to implement __call__ like this: - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - # Any of the following objects work as responses: - - # Option 1: simple string - res = 'message\n' - - # Option 2: a nicely formatted HTTP exception page - res = exc.HTTPForbidden(explanation='Nice try') - - # Option 3: a webob Response object (in case you need to play with - # headers, or you want to be treated like an iterable, or or or) - res = Response(); - res.app_iter = open('somefile') - - # Option 4: any wsgi app to be run next - res = self.application - - # Option 5: you can get a Response object for a wsgi app, too, to - # play with headers etc - res = req.get_response(self.application) - - # You can then just return your response... - return res - # ... or set req.response and return None. - req.response = res - - See the end of http://pythonpaste.org/webob/modules/dec.html - for more info. - - """ - raise NotImplementedError(_('You must implement __call__')) - - -class Middleware(Application): - """Base WSGI middleware. - - These classes require an application to be - initialized that will be called next. By default the middleware will - simply call its wrapped app, or you can override __call__ to customize its - behavior. 
- - """ - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. - - Any local configuration (that is, values under the [filter:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [filter:analytics] - redis_host = 127.0.0.1 - paste.filter_factory = ec2api.api.analytics:Analytics.factory - - which would result in a call to the `Analytics` class as - - import ec2api.api.analytics - analytics.Analytics(app_from_paste, redis_host='127.0.0.1') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. - - """ - def _factory(app): - return cls(app, **local_config) - return _factory - - def __init__(self, application): - self.application = application - - def process_request(self, req): - """Called on each request. - - If this returns None, the next application down the stack will be - executed. If it returns a response then that response will be returned - and execution will stop here. - - """ - return None - - def process_response(self, response): - """Do whatever you'd like to the response.""" - return response - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - response = self.process_request(req) - if response: - return response - response = req.get_response(self.application) - return self.process_response(response) - - -class Debug(Middleware): - """Helper class for debugging a WSGI application. - - Can be inserted into any WSGI application chain to get information - about the request and response. 
- - """ - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - print(('*' * 40) + ' REQUEST ENVIRON') - for key, value in req.environ.items(): - print(key, '=', value) - print() - resp = req.get_response(self.application) - - print(('*' * 40) + ' RESPONSE HEADERS') - for (key, value) in resp.headers.items(): - print(key, '=', value) - print() - - resp.app_iter = self.print_generator(resp.app_iter) - - return resp - - @staticmethod - def print_generator(app_iter): - """Iterator that prints the contents of a wrapper string.""" - print(('*' * 40) + ' BODY') - for part in app_iter: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print() - - -class Router(object): - """WSGI middleware that maps incoming requests to WSGI apps.""" - - def __init__(self, mapper): - """Create a router for the given routes.Mapper. - - Each route in `mapper` must specify a 'controller', which is a - WSGI app to call. You'll probably want to specify an 'action' as - well and have your controller be an object that can route - the request to the action-specific method. - - Examples: - mapper = routes.Mapper() - sc = ServerController() - - # Explicit mapping of one route to a controller+action - mapper.connect(None, '/svrlist', controller=sc, action='list') - - # Actions are all implicitly defined - mapper.resource('server', 'servers', controller=sc) - - # Pointing to an arbitrary WSGI app. You can specify the - # {path_info:.*} parameter so the target app can be handed just that - # section of the URL. - mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) - - """ - self.map = mapper - self._router = routes.middleware.RoutesMiddleware(self._dispatch, - self.map) - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - """Route the incoming request to a controller based on self.map. - - If no match, return a 404. 
- - """ - return self._router - - @staticmethod - @webob.dec.wsgify(RequestClass=Request) - def _dispatch(req): - """Dispatch the request to the appropriate controller. - - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. Either returns 404 - or the routed WSGI app's response. - - """ - match = req.environ['wsgiorg.routing_args'][1] - if not match: - return webob.exc.HTTPNotFound() - app = match['controller'] - return app - - -class Loader(object): - """Used to load WSGI applications from paste configurations.""" - - def __init__(self, config_path=None): - """Initialize the loader, and attempt to find the config. - - :param config_path: Full or relative path to the paste config. - :returns: None - - """ - self.config_path = None - - config_path = config_path or CONF.api_paste_config - if not os.path.isabs(config_path): - self.config_path = CONF.find_file(config_path) - elif os.path.exists(config_path): - self.config_path = config_path - - if not self.config_path: - raise exception.EC2APIConfigNotFound(path=config_path) - - def load_app(self, name): - """Return the paste URLMap wrapped WSGI application. - - :param name: Name of the application to load. - :returns: Paste URLMap object wrapping the requested application. 
- :raises: `ec2api.exception.EC2APIPasteAppNotFound` - - """ - try: - LOG.debug("Loading app %(name)s from %(path)s", - {'name': name, 'path': self.config_path}) - return deploy.loadapp("config:%s" % self.config_path, name=name) - except LookupError as err: - LOG.error(err) - raise exception.EC2APIPasteAppNotFound(name=name, - path=self.config_path) diff --git a/etc/ec2api/README-ec2api.conf.txt b/etc/ec2api/README-ec2api.conf.txt deleted file mode 100644 index 9fb50d07..00000000 --- a/etc/ec2api/README-ec2api.conf.txt +++ /dev/null @@ -1,4 +0,0 @@ -To generate the sample ec2api.conf file, run the following -command from the top level of the ec2api directory: - -tox -egenconfig diff --git a/etc/ec2api/api-paste.ini b/etc/ec2api/api-paste.ini deleted file mode 100644 index 010c94b9..00000000 --- a/etc/ec2api/api-paste.ini +++ /dev/null @@ -1,39 +0,0 @@ -####### -# EC2 # -####### - -[composite:ec2api] -use = egg:Paste#urlmap -/: ec2apicloud - -[composite:ec2apicloud] -use = call:ec2api.api.auth:pipeline_factory -keystone = ec2apifaultwrap logrequest ec2apikeystoneauth cloudrequest ec2apiexecutor - -[filter:ec2apifaultwrap] -paste.filter_factory = ec2api.api:FaultWrapper.factory - -[filter:logrequest] -paste.filter_factory = ec2api.api:RequestLogging.factory - -[filter:ec2apikeystoneauth] -paste.filter_factory = ec2api.api:EC2KeystoneAuth.factory - -[filter:cloudrequest] -paste.filter_factory = ec2api.api:Requestify.factory - -[app:ec2apiexecutor] -paste.app_factory = ec2api.api:Executor.factory - -############ -# Metadata # -############ -[composite:metadata] -use = egg:Paste#urlmap -/: meta - -[pipeline:meta] -pipeline = ec2apifaultwrap logrequest metaapp - -[app:metaapp] -paste.app_factory = ec2api.metadata:MetadataRequestHandler.factory diff --git a/etc/ec2api/ec2api-config-generator.conf b/etc/ec2api/ec2api-config-generator.conf deleted file mode 100644 index 64934424..00000000 --- a/etc/ec2api/ec2api-config-generator.conf +++ /dev/null @@ -1,13 +0,0 @@ 
-[DEFAULT] -output_file = etc/ec2api/ec2api.conf.sample -wrap_width = 79 -namespace = ec2api -namespace = keystoneauth1 -namespace = ec2api.api -namespace = ec2api.metadata -namespace = ec2api.s3 -namespace = oslo.log -namespace = oslo.service.service -namespace = oslo.cache -namespace = oslo.db -namespace = oslo.concurrency diff --git a/install.sh b/install.sh deleted file mode 100755 index ab20be44..00000000 --- a/install.sh +++ /dev/null @@ -1,369 +0,0 @@ -#!/bin/bash -e - -#Parameters to configure -SERVICE_USERNAME=ec2api -SERVICE_PASSWORD=ec2api -SERVICE_TENANT=service -# this domain name will be used for project and user -SERVICE_DOMAIN_NAME=Default -EC2API_PORT=8788 -CONNECTION="mysql://ec2api:ec2api@127.0.0.1/ec2api?charset=utf8" -LOG_DIR=/var/log/ec2api -CONF_DIR=/etc/ec2api -NOVA_CONF=/etc/nova/nova.conf -CONF_FILE=$CONF_DIR/ec2api.conf -APIPASTE_FILE=$CONF_DIR/api-paste.ini - -DATA_DIR=${DATA_DIR:-/var/lib/ec2api} -AUTH_CACHE_DIR=${AUTH_CACHE_DIR:-/var/cache/ec2api} - -#Check for environment -if [[ -z "$OS_AUTH_URL" || -z "$OS_USERNAME" || -z "$OS_PASSWORD" ]]; then - echo "Please set OS_AUTH_URL, OS_USERNAME, OS_PASSWORD" - exit 1 -fi -if [[ -z "$OS_TENANT_NAME" && -z "$OS_PROJECT_NAME" ]]; then - echo "Please set OS_TENANT_NAME or OS_PROJECT_NAME" - exit 1 -fi - - -#### utilities functions merged from devstack to check required parameter is not empty -# Prints line number and "message" in error format -# err $LINENO "message" -function err() { - local exitcode=$? 
- errXTRACE=$(set +o | grep xtrace) - set +o xtrace - local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; - $errXTRACE - return $exitcode -} -# Prints backtrace info -# filename:lineno:function -function backtrace { - local level=$1 - local deep=$((${#BASH_SOURCE[@]} - 1)) - echo "[Call Trace]" - while [ $level -le $deep ]; do - echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" - deep=$((deep - 1)) - done -} - - -# Prints line number and "message" then exits -# die $LINENO "message" -function die() { - local exitcode=$? - set +o xtrace - local line=$1; shift - if [ $exitcode == 0 ]; then - exitcode=1 - fi - backtrace 2 - err $line "$*" - exit $exitcode -} - - -# Checks an environment variable is not set or has length 0 OR if the -# exit code is non-zero and prints "message" and exits -# NOTE: env-var is the variable name without a '$' -# die_if_not_set $LINENO env-var "message" -function die_if_not_set() { - local exitcode=$? - FXTRACE=$(set +o | grep xtrace) - set +o xtrace - local line=$1; shift - local evar=$1; shift - if ! is_set $evar || [ $exitcode != 0 ]; then - die $line "$*" - fi - $FXTRACE -} - -# Test if the named environment variable is set and not zero length -# is_set env-var -function is_set() { - local var=\$"$1" - eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this -} - -####################################### - -get_data() { - local match_column=$(($1 + 1)) - local regex="$2" - local output_column=$(($3 + 1)) - shift 3 - - output=$("$@" | \ - awk -F'|' \ - "! /^\+/ && \$${match_column} ~ \"^ *${regex} *\$\" \ - { print \$${output_column} }") - - echo "$output" -} - -get_id () { - get_data 1 id 2 "$@" -} - -get_user() { - local username=$1 - - local user_id=$(openstack user show $username -f value -c id 2>/dev/null) - - if [ -n "$user_id" ]; then - echo "Found existing $username user" >&2 - echo $user_id - else - echo "Creating $username user..." 
>&2 - openstack user create -f value -c id \ - $username \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email $username@example.com - fi -} - -add_role() { - local user_id=$1 - local tenant=$2 - local role_id=$3 - local username=$4 - local domain=$5 - - if [[ -n $domain ]] - then - domain_args="--project-domain $domain --user-domain $domain" - fi - - # Gets user role id - existing_role=$(openstack role assignment list -f value -c User \ - --role $role_id \ - --user $user_id \ - --project $tenant \ - $domain_args) - if [ -n "$existing_role" ] - then - echo "User $username already has role $role_id" >&2 - return - fi - - # Adds role to user - openstack role add $role_id \ - --user $user_id \ - --project $tenant \ - $domain_args -} - - -# Determines if the given option is present in the INI file -# ini_has_option config-file section option -function ini_has_option() { - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sudo sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - [ -n "$line" ] -} - -# Set an option in an INI file -# iniset config-file section option value -function iniset() { - local file=$1 - local section=$2 - local option=$3 - local value=$4 - if ! sudo grep -q "^\[$section\]" "$file"; then - # Add section at the end - sudo bash -c "echo -e \"\n[$section]\" >>\"$file\"" - fi - if ! 
ini_has_option "$file" "$section" "$option"; then - # Add it - sudo sed -i -e "/^\[$section\]/ a\\ -$option = $value -" "$file" - else - # Replace it - sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file" - fi -} - -# Get an option from an INI file -# iniget config-file section option -function iniget() { - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - echo ${line#*=} -} - -# Copy an option from Nova INI file or from environment if it's set -function copynovaopt() { - local option_name=$1 - local option_group=$2 - local env_var - local option - env_var=${option_name^^} - if [ ${!env_var+x} ]; then - option=${!env_var} - elif ini_has_option "$NOVA_CONF" $option_group $option_name; then - option=$(iniget $NOVA_CONF $option_group $option_name) - else - return 0 - fi - iniset $CONF_FILE $option_group $option_name $option -} - -if [[ -n $(openstack catalog show network) ]]; then - VPC_SUPPORT="True" - DISABLE_EC2_CLASSIC="True" -else - VPC_SUPPORT="False" - DISABLE_EC2_CLASSIC="False" -fi -if [[ "$VPC_SUPPORT" == "True" && -z "$EXTERNAL_NETWORK" ]]; then - declare -a neutron_output - readarray -s 3 -t neutron_output < <(openstack network list --external) - if ((${#neutron_output[@]} < 2)); then - reason="No external network is declared in Neutron." - elif ((${#neutron_output[@]} > 2)); then - reason="More than one external networks are declared in Neutron." - else - EXTERNAL_NETWORK=$(echo $neutron_output | awk -F '|' '{ print $3 }') - fi - die_if_not_set $LINENO EXTERNAL_NETWORK "$reason. 
Please set EXTERNAL_NETWORK environment variable to the external network dedicated to EC2 elastic IP operations" -fi - -#create keystone user with admin and service privileges -ADMIN_ROLE=$(openstack role show admin -c id -f value) -die_if_not_set $LINENO ADMIN_ROLE "Fail to get ADMIN_ROLE by 'openstack role show' " -SERVICE_ROLE=$(openstack role show service -c id -f value) -die_if_not_set $LINENO ADMIN_ROLE "Fail to get SERVICE_ROLE by 'openstack role show' " -SERVICE_TENANT_ID=$(openstack project show service -c id -f value) -die_if_not_set $LINENO SERVICE_TENANT_ID "Fail to get service tenant 'openstack project show' " - -echo ADMIN_ROLE $ADMIN_ROLE -echo SERVICE_ROLE $SERVICE_ROLE -echo SERVICE_TENANT $SERVICE_TENANT - -SERVICE_USERID=$(get_user $SERVICE_USERNAME) -die_if_not_set $LINENO SERVICE_USERID "Fail to get user for $SERVICE_USERNAME" -echo SERVICE_USERID $SERVICE_USERID -SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default} -add_role $SERVICE_USERID $SERVICE_TENANT $ADMIN_ROLE $SERVICE_USERNAME -add_role $SERVICE_USERID $SERVICE_TENANT $SERVICE_ROLE $SERVICE_USERNAME $SERVICE_DOMAIN_NAME - -#create log dir -echo Creating log dir -sudo install -d $LOG_DIR --owner=$USER - -#copy conf files (do not override it) -echo Creating configs -sudo mkdir -p /etc/ec2api > /dev/null -if [ ! -s $CONF_FILE ]; then - sudo touch $CONF_FILE -fi -if [ ! 
-s $APIPASTE_FILE ]; then - sudo cp etc/ec2api/api-paste.ini $APIPASTE_FILE -fi - - -#update default config with some values -iniset $CONF_FILE DEFAULT ec2api_listen_port "$EC2API_PORT" -iniset $CONF_FILE DEFAULT ec2_port "$EC2API_PORT" -iniset $CONF_FILE DEFAULT api_paste_config $APIPASTE_FILE -iniset $CONF_FILE DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" -iniset $CONF_FILE DEFAULT log_dir "$LOG_DIR" -iniset $CONF_FILE DEFAULT verbose True -iniset $CONF_FILE DEFAULT keystone_ec2_tokens_url "$OS_AUTH_URL/v3/ec2tokens" -iniset $CONF_FILE database connection "$CONNECTION" -iniset $CONF_FILE DEFAULT disable_ec2_classic "$DISABLE_EC2_CLASSIC" -iniset $CONF_FILE DEFAULT external_network "$EXTERNAL_NETWORK" -iniset $CONF_FILE oslo_concurrency lock_path "$EC2API_STATE_PATH" -iniset $CONF_FILE DEFAULT state_path "$DATA_DIR" - -GROUP_AUTHTOKEN="keystone_authtoken" -iniset $CONF_FILE $GROUP_AUTHTOKEN signing_dir "$AUTH_CACHE_DIR" -iniset $CONF_FILE $GROUP_AUTHTOKEN www_authenticate_uri "$OS_AUTH_URL" -iniset $CONF_FILE $GROUP_AUTHTOKEN auth_url "$OS_AUTH_URL" -iniset $CONF_FILE $GROUP_AUTHTOKEN username $SERVICE_USERNAME -iniset $CONF_FILE $GROUP_AUTHTOKEN password $SERVICE_PASSWORD -iniset $CONF_FILE $GROUP_AUTHTOKEN project_name $SERVICE_TENANT -iniset $CONF_FILE $GROUP_AUTHTOKEN project_domain_name $SERVICE_DOMAIN_NAME -iniset $CONF_FILE $GROUP_AUTHTOKEN user_domain_name $SERVICE_DOMAIN_NAME -iniset $CONF_FILE $GROUP_AUTHTOKEN auth_type password - -GROUP_CACHE="cache" -iniset $CONF_FILE $GROUP_CACHE enabled True - -if [[ -f "$NOVA_CONF" ]]; then - # NOTE(ft): use swift instead internal s3 server if enabled - if [[ -n $(openstack catalog show object-store 2>/dev/null) ]] && - [[ -n $(openstack catalog show s3 2>/dev/null) ]]; then - s3_host="127.0.0.1" - if ini_has_option "$NOVA_CONF" DEFAULT "s3_host"; then - s3_host=$(iniget $NOVA_CONF DEFAULT $option_name) 
- fi - s3_port="3334" - if ini_has_option "$NOVA_CONF" DEFAULT "s3_port"; then - s3_port=$(iniget $NOVA_CONF DEFAULT $option_name) - fi - s3_proto="http" - if ini_has_option "$NOVA_CONF" DEFAULT "s3_use_ssl"; then - s3_use_ssl=$(iniget $NOVA_CONF DEFAULT $option_name) - s3_use_ssl=`echo $s3_use_ssl | awk '{print toupper($0)}'` - if [[ $s3_use_ssl == "TRUE" ]]; then - s3_proto="https" - fi - fi - iniset $CONF_FILE DEFAULT s3_url "$s3_proto://$s3_host:$s3_port" - - fi -fi - -#init cache dir -echo Creating signing dir -sudo mkdir -p $AUTH_CACHE_DIR -sudo chown $USER $AUTH_CACHE_DIR -sudo rm -f $AUTH_CACHE_DIR/* - -#init data dir -echo Creating data dir -sudo mkdir -p $DATA_DIR -sudo chown $USER $DATA_DIR -sudo rm -f $DATA_DIR/* - -#install it -echo Installing package -if [[ -z "$VIRTUAL_ENV" ]]; then - SUDO_PREFIX="sudo" - if ! command -v pip >/dev/null; then - sudo apt-get install python-pip - fi -fi -$SUDO_PREFIX pip install -e ./ -$SUDO_PREFIX rm -rf build ec2_api.egg-info - -#recreate database -echo Setuping database -PACKAGE_MANAGER_SELECTED=0 -while [ $PACKAGE_MANAGER_SELECTED -eq 0 ] -do - printf "Enter the package manager you use " - read PACKAGE_MANAGER - if [ $PACKAGE_MANAGER = "rpm" ] || [ $PACKAGE_MANAGER = "deb" ] ; then - PACKAGE_MANAGER_SELECTED=1 - else - echo "The package manager you entered \"${PACKAGE_MANAGER}\" is not in " - fi -done - -$SUDO_PREFIX tools/db/ec2api-db-setup $PACKAGE_MANAGER diff --git a/releasenotes/notes/drop-py-2-7-a4b96d486289a772.yaml b/releasenotes/notes/drop-py-2-7-a4b96d486289a772.yaml deleted file mode 100644 index 37fafebf..00000000 --- a/releasenotes/notes/drop-py-2-7-a4b96d486289a772.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Python 2.7 support has been dropped. Last release of ec2-api - to support python 2.7 is OpenStack Train. The minimum version of Python now - supported by ec2-api is Python 3.6. 
diff --git a/releasenotes/notes/use-volumev3-by-default-fa726fed293d94bb.yaml b/releasenotes/notes/use-volumev3-by-default-fa726fed293d94bb.yaml deleted file mode 100644 index ef798e70..00000000 --- a/releasenotes/notes/use-volumev3-by-default-fa726fed293d94bb.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Default value of the ``cinder_service_type`` parameter has been changed - from ``volumev2`` to ``volume3``, because volume v2 API was already - deprecated and v3 API should be used instead. diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index dc5718b6..00000000 --- a/requirements.txt +++ /dev/null @@ -1,37 +0,0 @@ -# Requirements lower bounds listed here are our best effort to keep them up to -# date but we do not test them so no guarantee of having them all correct. If -# you find any incorrect lower bounds, let us know or propose a fix. - -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -botocore>=1.9.7 # Apache-2.0 -cryptography>=2.1.4 # BSD/Apache-2.0 -eventlet>=0.20.0 # MIT -greenlet>=0.4.13 # MIT -httplib2>=0.10.3 # MIT -keystoneauth1>=3.14.0 # Apache-2.0 -lxml>=4.1.1 # BSD -oslo.cache>=1.29.0 # Apache-2.0 -oslo.config>=5.2.0 # Apache-2.0 -oslo.concurrency>=3.26.0 # Apache-2.0 -oslo.context>=2.20.0 # Apache-2.0 -oslo.db>=4.40.0 # Apache-2.0 -oslo.log>=3.37.0 # Apache-2.0 -oslo.serialization>=2.25.0 # Apache-2.0 -oslo.service>=1.30.0 # Apache-2.0 -oslo.utils>=3.36.0 # Apache-2.0 -Paste>=2.0.3 # MIT -PasteDeploy>=1.5.2 # MIT -pbr>=3.1.1 # Apache-2.0 -python-cinderclient>=3.5.0 # Apache-2.0 -python-glanceclient>=2.16.0 # Apache-2.0 -python-keystoneclient>=3.15.0 # Apache-2.0 -python-neutronclient>=6.7.0 # Apache-2.0 -python-novaclient>=10.1.0 # Apache-2.0 -python-openstackclient>=3.14.0 # Apache-2.0 -Routes>=2.4.1 # MIT -SQLAlchemy>=1.2.5 # MIT -sqlalchemy-migrate>=0.11.0 # Apache-2.0 -WebOb>=1.7.4 # MIT diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 17003674..00000000 --- a/setup.cfg +++ /dev/null @@ -1,51 +0,0 @@ -[metadata] -name = ec2-api -summary = OpenStack Ec2api Service -description_file = - README.rst -license = Apache License, Version 2.0 -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://launchpad.net/ec2-api -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.6 - Programming Language :: Python :: 3.7 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - -[files] -packages = - ec2api - -[entry_points] -oslo.config.opts = - ec2api = ec2api.opts:list_opts - ec2api.api = ec2api.api.opts:list_opts - keystoneauth1 = ec2api.opts:list_auth_opts - ec2api.metadata = 
ec2api.metadata.opts:list_opts - ec2api.s3 = ec2api.s3.opts:list_opts -console_scripts = - ec2-api=ec2api.cmd.api:main - ec2-api-manage=ec2api.cmd.manage:main - ec2-api-metadata=ec2api.cmd.api_metadata:main - ec2-api-s3=ec2api.cmd.api_s3:main - -[compile_catalog] -directory = ec2api/locale -domain = ec2api - -[update_catalog] -domain = ec2api -output_dir = ec2api/locale -input_file = ec2api/locale/ec2api.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -output_file = ec2api/locale/ec2api.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index cd35c3c3..00000000 --- a/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import setuptools - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 0d2e73c3..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
-hacking>=3.0.1,<3.1.0 # Apache-2.0 - -coverage>=4.5.1 # Apache-2.0 -fixtures>=3.0.0 # Apache-2.0/BSD -nose>=1.3.7 # LGPL -oslotest>=3.3.0 # Apache-2.0 -stestr>=2.0.0 # Apache-2.0 -pylint==1.4.5 # GPLv2 -python-subunit>=1.2.0 # Apache-2.0/BSD -testrepository>=0.0.20 # Apache-2.0/BSD -testtools>=2.3.0 # MIT diff --git a/tools/colorizer.py b/tools/colorizer.py deleted file mode 100755 index afd4da06..00000000 --- a/tools/colorizer.py +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2013, Nebula, Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Colorizer Code is borrowed from Twisted: -# Copyright (c) 2001-2010 Twisted Matrix Laboratories. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -"""Display a subunit stream through a colorized unittest test runner.""" - -import heapq -import sys -import unittest - -import subunit -import testtools - - -class _AnsiColorizer(object): - """ - A colorizer is an object that loosely wraps around a stream, allowing - callers to write text to the stream in a particular color. - - Colorizer classes must implement C{supported()} and C{write(text, color)}. - """ - _colors = dict(black=30, red=31, green=32, yellow=33, - blue=34, magenta=35, cyan=36, white=37) - - def __init__(self, stream): - self.stream = stream - - def supported(cls, stream=sys.stdout): - """ - A class method that returns True if the current platform supports - coloring terminal output using this method. Returns False otherwise. - """ - if not stream.isatty(): - return False # auto color only on TTYs - try: - import curses - except ImportError: - return False - else: - try: - try: - return curses.tigetnum("colors") > 2 - except curses.error: - curses.setupterm() - return curses.tigetnum("colors") > 2 - except Exception: - # guess false in case of error - return False - supported = classmethod(supported) - - def write(self, text, color): - """ - Write the given text to the stream in the given color. - - @param text: Text to be written to the stream. - - @param color: A string label for a color. e.g. 'red', 'white'. 
- """ - color = self._colors[color] - self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) - - -class _Win32Colorizer(object): - """ - See _AnsiColorizer docstring. - """ - def __init__(self, stream): - import win32console - red, green, blue, bold = (win32console.FOREGROUND_RED, - win32console.FOREGROUND_GREEN, - win32console.FOREGROUND_BLUE, - win32console.FOREGROUND_INTENSITY) - self.stream = stream - self.screenBuffer = win32console.GetStdHandle( - win32console.STD_OUT_HANDLE) - self._colors = {'normal': red | green | blue, - 'red': red | bold, - 'green': green | bold, - 'blue': blue | bold, - 'yellow': red | green | bold, - 'magenta': red | blue | bold, - 'cyan': green | blue | bold, - 'white': red | green | blue | bold} - - def supported(cls, stream=sys.stdout): - try: - import win32console - screenBuffer = win32console.GetStdHandle( - win32console.STD_OUT_HANDLE) - except ImportError: - return False - import pywintypes - try: - screenBuffer.SetConsoleTextAttribute( - win32console.FOREGROUND_RED | - win32console.FOREGROUND_GREEN | - win32console.FOREGROUND_BLUE) - except pywintypes.error: - return False - else: - return True - supported = classmethod(supported) - - def write(self, text, color): - color = self._colors[color] - self.screenBuffer.SetConsoleTextAttribute(color) - self.stream.write(text) - self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) - - -class _NullColorizer(object): - """ - See _AnsiColorizer docstring. 
- """ - def __init__(self, stream): - self.stream = stream - - def supported(cls, stream=sys.stdout): - return True - supported = classmethod(supported) - - def write(self, text, color): - self.stream.write(text) - - -def get_elapsed_time_color(elapsed_time): - if elapsed_time > 1.0: - return 'red' - elif elapsed_time > 0.25: - return 'yellow' - else: - return 'green' - - -class EC2ApiTestResult(testtools.TestResult): - def __init__(self, stream, descriptions, verbosity): - super(EC2ApiTestResult, self).__init__() - self.stream = stream - self.showAll = verbosity > 1 - self.num_slow_tests = 10 - self.slow_tests = [] # this is a fixed-sized heap - self.colorizer = None - # NOTE(vish): reset stdout for the terminal check - stdout = sys.stdout - sys.stdout = sys.__stdout__ - for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: - if colorizer.supported(): - self.colorizer = colorizer(self.stream) - break - sys.stdout = stdout - self.start_time = None - self.last_time = {} - self.results = {} - self.last_written = None - - def _writeElapsedTime(self, elapsed): - color = get_elapsed_time_color(elapsed) - self.colorizer.write(" %.2f" % elapsed, color) - - def _addResult(self, test, *args): - try: - name = test.id() - except AttributeError: - name = 'Unknown.unknown' - test_class, test_name = name.rsplit('.', 1) - - elapsed = (self._now() - self.start_time).total_seconds() - item = (elapsed, test_class, test_name) - if len(self.slow_tests) >= self.num_slow_tests: - heapq.heappushpop(self.slow_tests, item) - else: - heapq.heappush(self.slow_tests, item) - - self.results.setdefault(test_class, []) - self.results[test_class].append((test_name, elapsed) + args) - self.last_time[test_class] = self._now() - self.writeTests() - - def _writeResult(self, test_name, elapsed, long_result, color, - short_result, success): - if self.showAll: - self.stream.write(' %s' % str(test_name).ljust(66)) - self.colorizer.write(long_result, color) - if success: - 
self._writeElapsedTime(elapsed) - self.stream.writeln() - else: - self.colorizer.write(short_result, color) - - def addSuccess(self, test): - super(EC2ApiTestResult, self).addSuccess(test) - self._addResult(test, 'OK', 'green', '.', True) - - def addFailure(self, test, err): - if test.id() == 'process-returncode': - return - super(EC2ApiTestResult, self).addFailure(test, err) - self._addResult(test, 'FAIL', 'red', 'F', False) - - def addError(self, test, err): - super(EC2ApiTestResult, self).addFailure(test, err) - self._addResult(test, 'ERROR', 'red', 'E', False) - - def addSkip(self, test, reason=None, details=None): - super(EC2ApiTestResult, self).addSkip(test, reason, details) - self._addResult(test, 'SKIP', 'blue', 'S', True) - - def startTest(self, test): - self.start_time = self._now() - super(EC2ApiTestResult, self).startTest(test) - - def writeTestCase(self, cls): - if not self.results.get(cls): - return - if cls != self.last_written: - self.colorizer.write(cls, 'white') - self.stream.writeln() - for result in self.results[cls]: - self._writeResult(*result) - del self.results[cls] - self.stream.flush() - self.last_written = cls - - def writeTests(self): - time = self.last_time.get(self.last_written, self._now()) - if not self.last_written or (self._now() - time).total_seconds() > 2.0: - diff = 3.0 - while diff > 2.0: - classes = self.results.keys() - oldest = min(classes, key=lambda x: self.last_time[x]) - diff = (self._now() - self.last_time[oldest]).total_seconds() - self.writeTestCase(oldest) - else: - self.writeTestCase(self.last_written) - - def done(self): - self.stopTestRun() - - def stopTestRun(self): - for cls in list(self.results.iterkeys()): - self.writeTestCase(cls) - self.stream.writeln() - self.writeSlowTests() - - def writeSlowTests(self): - # Pare out 'fast' tests - slow_tests = [item for item in self.slow_tests - if get_elapsed_time_color(item[0]) != 'green'] - if slow_tests: - slow_total_time = sum(item[0] for item in slow_tests) - slow = 
("Slowest %i tests took %.2f secs:" - % (len(slow_tests), slow_total_time)) - self.colorizer.write(slow, 'yellow') - self.stream.writeln() - last_cls = None - # sort by name - for elapsed, cls, name in sorted(slow_tests, - key=lambda x: x[1] + x[2]): - if cls != last_cls: - self.colorizer.write(cls, 'white') - self.stream.writeln() - last_cls = cls - self.stream.write(' %s' % str(name).ljust(68)) - self._writeElapsedTime(elapsed) - self.stream.writeln() - - def printErrors(self): - if self.showAll: - self.stream.writeln() - self.printErrorList('ERROR', self.errors) - self.printErrorList('FAIL', self.failures) - - def printErrorList(self, flavor, errors): - for test, err in errors: - self.colorizer.write("=" * 70, 'red') - self.stream.writeln() - self.colorizer.write(flavor, 'red') - self.stream.writeln(": %s" % test.id()) - self.colorizer.write("-" * 70, 'red') - self.stream.writeln() - self.stream.writeln("%s" % err) - - -test = subunit.ProtocolTestCase(sys.stdin, passthrough=None) - -runner = unittest.TextTestRunner(verbosity=2, resultclass=EC2ApiTestResult) - -if runner.run(test).wasSuccessful(): - exit_code = 0 -else: - exit_code = 1 -sys.exit(exit_code) diff --git a/tools/db/ec2api-db-setup b/tools/db/ec2api-db-setup deleted file mode 100755 index 22291cbb..00000000 --- a/tools/db/ec2api-db-setup +++ /dev/null @@ -1,318 +0,0 @@ -#!/bin/bash -e -# -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -# -# Print --help output and exit. -# -usage() { - -cat << EOF -Set up a local MySQL database for use with ec2api. -This script will create a 'ec2api' database that is accessible -only on localhost by user 'ec2api' with password 'ec2api'. - -Usage: ec2api-db-setup [options] -Options: - select a distro type (rpm or debian) - - --help | -h - Print usage information. - --password | -p - Specify the password for the 'ec2api' MySQL user that will - use to connect to the 'ec2api' MySQL database. By default, - the password 'ec2api' will be used. - --rootpw | -r - Specify the root MySQL password. If the script installs - the MySQL server, it will set the root password to this value - instead of prompting for a password. If the MySQL server is - already installed, this password will be used to connect to the - database instead of having to prompt for it. - --yes | -y - In cases where the script would normally ask for confirmation - before doing something, such as installing mysql-server, - just assume yes. This is useful if you want to run the script - non-interactively. -EOF - - exit 0 -} - -install_mysql_server() { - if [ -z "${ASSUME_YES}" ] ; then - $PACKAGE_INSTALL mysql-server - else - $PACKAGE_INSTALL -y mysql-server - fi -} - -start_mysql_server() { - $SERVICE_START -} - -check_mysql_credentials() { - echo "SELECT 1;" | mysql --protocol=TCP -u root --password=${MYSQL_ROOT_PW} > /dev/null - echo $? -} - -MYSQL_EC2API_PW_DEFAULT="ec2api" -MYSQL_EC2API_PW=${MYSQL_EC2API_PW_DEFAULT} -EC2API_CONFIG="/etc/ec2api/ec2api.conf" -ASSUME_YES="" -ELEVATE="" - -# Check for root privileges -if [[ $EUID -ne 0 ]] ; then - echo "This operation requires superuser privileges, using sudo:" - if sudo -l > /dev/null ; then - ELEVATE="sudo" - else - exit 1 - fi -fi - -case "$1" in - rpm) - echo "Installing on an RPM system." 
- PACKAGE_INSTALL="$ELEVATE yum install" - PACKAGE_STATUS="rpm -q" - SERVICE_MYSQLD="mysqld" - SERVICE_START="$ELEVATE service $SERVICE_MYSQLD start" - SERVICE_STATUS="service $SERVICE_MYSQLD status" - SERVICE_ENABLE="$ELEVATE chkconfig" - ;; - deb) - echo "Installing on a Debian system." - PACKAGE_INSTALL="$ELEVATE apt-get install" - PACKAGE_STATUS="dpkg-query -S" - SERVICE_MYSQLD="mysql" - SERVICE_START="$ELEVATE service $SERVICE_MYSQLD start" - SERVICE_STATUS="$ELEVATE service $SERVICE_MYSQLD status" - SERVICE_ENABLE="" - ;; - *) - usage - ;; -esac - -while [ $# -gt 0 ] -do - case "$1" in - -h|--help) - usage - ;; - -p|--password) - shift - MYSQL_EC2API_PW=${1} - ;; - -r|--rootpw) - shift - MYSQL_ROOT_PW=${1} - ;; - -y|--yes) - ASSUME_YES="yes" - ;; - *) - # ignore - ;; - esac - shift -done - - -# Make sure MySQL is installed. - -NEW_MYSQL_INSTALL=0 -if ! $PACKAGE_STATUS mysql-server && ! $PACKAGE_STATUS mariadb-server && ! $PACKAGE_STATUS mariadb-galera-server > /dev/null -then - if [ -z "${ASSUME_YES}" ] ; then - printf "mysql-server is not installed. Would you like to install it now? (y/n): " - read response - case "$response" in - y|Y) - ;; - n|N) - echo "mysql-server must be installed. Please install it before proceeding." - exit 0 - ;; - *) - echo "Invalid response." - exit 1 - esac - fi - - NEW_MYSQL_INSTALL=1 - install_mysql_server -fi - - -# Make sure mysqld is running. - -if ! $SERVICE_STATUS > /dev/null -then - if [ -z "${ASSUME_YES}" ] ; then - printf "$SERVICE_MYSQLD is not running. Would you like to start it now? (y/n): " - read response - case "$response" in - y|Y) - ;; - n|N) - echo "$SERVICE_MYSQLD must be running. Please start it before proceeding." - exit 0 - ;; - *) - echo "Invalid response." - exit 1 - esac - fi - - start_mysql_server - - # If we both installed and started, ensure it starts at boot - [ $NEW_MYSQL_INSTALL -eq 1 ] && $SERVICE_ENABLE $SERVICE_MYSQLD on -fi - - -# Get MySQL root access. 
- -if [ $NEW_MYSQL_INSTALL -eq 1 ] -then - if [ ! "${MYSQL_ROOT_PW+defined}" ] ; then - echo "Since this is a fresh installation of MySQL, please set a password for the 'root' mysql user." - - PW_MATCH=0 - while [ $PW_MATCH -eq 0 ] - do - printf "Enter new password for 'root' mysql user: " - read -s MYSQL_ROOT_PW - echo - printf "Enter new password again: " - read -s PW2 - echo - if [ "${MYSQL_ROOT_PW}" = "${PW2}" ] ; then - PW_MATCH=1 - else - echo "Passwords did not match." - fi - done - fi - - echo "UPDATE mysql.user SET password = password('${MYSQL_ROOT_PW}') WHERE user = 'root'; DELETE FROM mysql.user WHERE user = ''; flush privileges;" | mysql --protocol=TCP -u root - if ! [ $? -eq 0 ] ; then - echo "Failed to set password for 'root' MySQL user." - exit 1 - fi -elif [ ! "${MYSQL_ROOT_PW+defined}" ] ; then - PW_OK=0 - while ! [ $PW_OK -eq 1 ]; - do - printf "Please enter the password for the 'root' MySQL user: " - read -s MYSQL_ROOT_PW - echo - if [ $(check_mysql_credentials) -eq 0 ]; then - PW_OK=1 - fi - done -fi - - -# Sanity check MySQL credentials. - -MYSQL_ROOT_PW_ARG="" -if [ "${MYSQL_ROOT_PW+defined}" ] -then - MYSQL_ROOT_PW_ARG="--password=${MYSQL_ROOT_PW}" -fi -if ! [ $(check_mysql_credentials) -eq 0 ] -then - echo "Failed to connect to the MySQL server. Please check your root user credentials." - exit 1 -fi -echo "Verified connectivity to MySQL." - - -# Now create the db. - -echo "Creating 'ec2api' database." 
-MYSQL_VERSION=`echo "select version();" | mysql -u root ${MYSQL_ROOT_PW_ARG} | grep -oP -m1 "^\d+(?=\..*)"` -if [[ $MYSQL_VERSION -ge 8 ]]; then -cat << EOF | mysql --protocol=TCP -u root ${MYSQL_ROOT_PW_ARG} -DROP DATABASE IF EXISTS ec2api; -CREATE DATABASE IF NOT EXISTS ec2api DEFAULT CHARACTER SET utf8; -CREATE USER 'ec2api'@'%' IDENTIFIED BY '${MYSQL_EC2API_PW}'; -GRANT ALL ON ec2api.* TO 'ec2api'@'localhost'; -GRANT ALL ON ec2api.* TO 'ec2api'@'%'; -flush privileges; -EOF -else -cat << EOF | mysql --protocol=TCP -u root ${MYSQL_ROOT_PW_ARG} -DROP DATABASE IF EXISTS ec2api; -CREATE DATABASE IF NOT EXISTS ec2api DEFAULT CHARACTER SET utf8; -GRANT ALL ON ec2api.* TO 'ec2api'@'localhost' IDENTIFIED BY '${MYSQL_EC2API_PW}'; -GRANT ALL ON ec2api.* TO 'ec2api'@'%' IDENTIFIED BY '${MYSQL_EC2API_PW}'; -flush privileges; -EOF -fi - - -# Make sure ec2api configuration has the right MySQL password. - -if [ "${MYSQL_EC2API_PW}" != "${MYSQL_EC2API_PW_DEFAULT}" ] ; then - echo "Updating 'ec2api' database password in ${EC2API_CONFIG}" - sed -i -e "s/mysql:\/\/ec2api:\(.*\)@/mysql:\/\/ec2api:${MYSQL_EC2API_PW}@/" ${EC2API_CONFIG} -fi - -# override the logging config in ec2api.conf -log_conf=$(mktemp /tmp/ec2api-logging.XXXXXXXXXX.conf) -cat < $log_conf -[loggers] -keys=root - -[handlers] -keys=consoleHandler - -[formatters] -keys=simpleFormatter - -[logger_root] -level=INFO -handlers=consoleHandler - -[handler_consoleHandler] -class=StreamHandler -formatter=simpleFormatter -args=(sys.stdout,) - -[formatter_simpleFormatter] -format=%(name)s - %(levelname)s - %(message)s -EOF - -pip install mysqlclient - -ec2-api-manage --log-config=$log_conf db_sync -rm $log_conf - -# Do a final sanity check on the database. -echo "Run final sanity check." - -echo "SELECT * FROM migrate_version;" | mysql --protocol=TCP -u ec2api --password=${MYSQL_EC2API_PW} ec2api > /dev/null -if ! [ $? -eq 0 ] -then - echo "Final sanity check failed." - exit 1 -fi - -echo "Complete!" 
diff --git a/tools/db/import-nova-ec2-data.sql b/tools/db/import-nova-ec2-data.sql deleted file mode 100644 index e4517f03..00000000 --- a/tools/db/import-nova-ec2-data.sql +++ /dev/null @@ -1,33 +0,0 @@ -truncate table ec2api.items; -truncate table ec2api.tags; - -insert into ec2api.items (project_id, id, os_id, data) -select - i.owner, - concat(if(i.container_format in ("ari","aki"), i.container_format, "ami"), - "-", lpad(hex(m.id), 8, "0")), - m.uuid, - concat("{'is_public': ", if(i.is_public=1, "True", "False"), "}") -from nova.s3_images m join glance.images i on i.id=m.uuid and i.deleted=0; - -insert into ec2api.items (project_id, id, os_id, data) -select v.project_id, concat("vol-", lpad(hex(m.id), 8, "0")), m.uuid, "{}" -from nova.volume_id_mappings m join cinder.volumes v on v.id=m.uuid and v.deleted=0; - -insert into ec2api.items (project_id, id, os_id, data) -select s.project_id, concat("snap-", lpad(hex(m.id), 8, "0")), m.uuid, "{}" -from nova.snapshot_id_mappings m join cinder.snapshots s on s.id=m.uuid and s.deleted=0; - -insert into ec2api.items (project_id, id, os_id, data) -select i.project_id, concat("i-", lpad(hex(m.id), 8, "0")), m.uuid, - concat("{'reservation_id': '", i.reservation_id, "', 'launch_index': ", i.launch_index, - ifnull(concat(", 'client_token': '", ism.value, "'}"), "}")) -from nova.instance_id_mappings m join nova.instances i on i.uuid=m.uuid and i.deleted=0 - left outer join nova.instance_system_metadata ism - on ism.instance_uuid=i.uuid and ism.key="EC2_client_token" and ism.deleted=0; - -insert into ec2api.tags (project_id, item_id, `key`, value) -select i.project_id, concat("i-", lpad(hex(m.id), 8, "0")), im.key, im.value -from nova.instance_id_mappings m join nova.instances i on i.uuid=m.uuid and i.deleted=0 - join nova.instance_metadata im on im.instance_uuid=i.uuid and im.deleted=0; - diff --git a/tools/db/schema_diff.py b/tools/db/schema_diff.py deleted file mode 100755 index eefe564f..00000000 --- 
a/tools/db/schema_diff.py +++ /dev/null @@ -1,268 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility for diff'ing two versions of the DB schema. - -Each release cycle the plan is to compact all of the migrations from that -release into a single file. This is a manual and, unfortunately, error-prone -process. To ensure that the schema doesn't change, this tool can be used to -diff the compacted DB schema to the original, uncompacted form. 
- - -The schema versions are specified by providing a git ref (a branch name or -commit hash) and a SQLAlchemy-Migrate version number: -Run like: - - ./tools/db/schema_diff.py mysql master:latest my_branch:82 -""" -import datetime -import glob -import os -import subprocess -import sys - - -### Dump - - -def dump_db(db_driver, db_name, migration_version, dump_filename): - db_driver.create(db_name) - try: - migrate(db_driver, db_name, migration_version) - db_driver.dump(db_name, dump_filename) - finally: - db_driver.drop(db_name) - - -### Diff - - -def diff_files(filename1, filename2): - pipeline = ['diff -U 3 %(filename1)s %(filename2)s' % locals()] - - # Use colordiff if available - if subprocess.call(['which', 'colordiff']) == 0: - pipeline.append('colordiff') - - pipeline.append('less -R') - - cmd = ' | '.join(pipeline) - subprocess.check_call(cmd, shell=True) - - -### Database - - -class MySQL(object): - def create(self, name): - subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name]) - - def drop(self, name): - subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 'drop', name]) - - def dump(self, name, dump_filename): - subprocess.check_call( - 'mysqldump -u root %(name)s > %(dump_filename)s' % locals(), - shell=True) - - def url(self, name): - return 'mysql://root@localhost/%s' % name - - -class Postgres(object): - def create(self, name): - subprocess.check_call(['createdb', name]) - - def drop(self, name): - subprocess.check_call(['dropdb', name]) - - def dump(self, name, dump_filename): - subprocess.check_call( - 'pg_dump %(name)s > %(dump_filename)s' % locals(), - shell=True) - - def url(self, name): - return 'postgres://localhost/%s' % name - - -def _get_db_driver_class(db_type): - if db_type == "mysql": - return MySQL - elif db_type == "postgres": - return Postgres - else: - raise Exception(_("database %s not supported") % db_type) - - -### Migrate - - -MIGRATE_REPO = os.path.join(os.getcwd(), "ec2api/db/sqlalchemy/migrate_repo") - - -def 
migrate(db_driver, db_name, migration_version): - earliest_version = _migrate_get_earliest_version() - - # NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of - # migration numbers. - _migrate_cmd( - db_driver, db_name, 'version_control', str(earliest_version - 1)) - - upgrade_cmd = ['upgrade'] - if migration_version != 'latest': - upgrade_cmd.append(str(migration_version)) - - _migrate_cmd(db_driver, db_name, *upgrade_cmd) - - -def _migrate_cmd(db_driver, db_name, *cmd): - manage_py = os.path.join(MIGRATE_REPO, 'manage.py') - - args = ['python', manage_py] - args += cmd - args += ['--repository=%s' % MIGRATE_REPO, - '--url=%s' % db_driver.url(db_name)] - - subprocess.check_call(args) - - -def _migrate_get_earliest_version(): - versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py') - - versions = [] - for path in glob.iglob(versions_glob): - filename = os.path.basename(path) - prefix = filename.split('_', 1)[0] - try: - version = int(prefix) - except ValueError: - pass - versions.append(version) - - versions.sort() - return versions[0] - - -### Git - - -def git_current_branch_name(): - ref_name = git_symbolic_ref('HEAD', quiet=True) - current_branch_name = ref_name.replace('refs/heads/', '') - return current_branch_name - - -def git_symbolic_ref(ref, quiet=False): - args = ['git', 'symbolic-ref', ref] - if quiet: - args.append('-q') - proc = subprocess.Popen(args, stdout=subprocess.PIPE) - stdout, stderr = proc.communicate() - return stdout.strip() - - -def git_checkout(branch_name): - subprocess.check_call(['git', 'checkout', branch_name]) - - -def git_has_uncommited_changes(): - return subprocess.call(['git', 'diff', '--quiet', '--exit-code']) == 1 - - -### Command - - -def die(msg): - print >> sys.stderr, "ERROR: %s" % msg - sys.exit(1) - - -def usage(msg=None): - if msg: - print >> sys.stderr, "ERROR: %s" % msg - - prog = "schema_diff.py" - args = ["", "", - ""] - - print >> sys.stderr, "usage: %s %s" % (prog, ' '.join(args)) - 
sys.exit(1) - - -def parse_options(): - try: - db_type = sys.argv[1] - except IndexError: - usage("must specify DB type") - - try: - orig_branch, orig_version = sys.argv[2].split(':') - except IndexError: - usage('original branch and version required (e.g. master:82)') - - try: - new_branch, new_version = sys.argv[3].split(':') - except IndexError: - usage('new branch and version required (e.g. master:82)') - - return db_type, orig_branch, orig_version, new_branch, new_version - - -def main(): - timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S") - - ORIG_DB = 'orig_db_%s' % timestamp - NEW_DB = 'new_db_%s' % timestamp - - ORIG_DUMP = ORIG_DB + ".dump" - NEW_DUMP = NEW_DB + ".dump" - - options = parse_options() - db_type, orig_branch, orig_version, new_branch, new_version = options - - # Since we're going to be switching branches, ensure user doesn't have any - # uncommited changes - if git_has_uncommited_changes(): - die("You have uncommited changes. Please commit them before running " - "this command.") - - db_driver = _get_db_driver_class(db_type)() - - users_branch = git_current_branch_name() - git_checkout(orig_branch) - - try: - # Dump Original Schema - dump_db(db_driver, ORIG_DB, orig_version, ORIG_DUMP) - - # Dump New Schema - git_checkout(new_branch) - dump_db(db_driver, NEW_DB, new_version, NEW_DUMP) - - diff_files(ORIG_DUMP, NEW_DUMP) - finally: - git_checkout(users_branch) - - if os.path.exists(ORIG_DUMP): - os.unlink(ORIG_DUMP) - - if os.path.exists(NEW_DUMP): - os.unlink(NEW_DUMP) - - -if __name__ == "__main__": - main() diff --git a/tools/update-from-global-requirements.sh b/tools/update-from-global-requirements.sh deleted file mode 100755 index ff9ab1ae..00000000 --- a/tools/update-from-global-requirements.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -GLOBAL_REQS_PATH=${1:-$HOME/openstack/requirements/global-requirements.txt} - -function update() { - rm -f temp-requirements.txt - touch temp-requirements.txt - while read line ; do - local module=`echo $line | sed 's/\([.A-Za-z0-9\-]*\)[ >/dev/null ; then - echo "$newm" >> temp-requirements.txt - fi - done < $1 - mv temp-requirements.txt $1 -} - -echo "Update requirements" -update requirements.txt -echo "Update test-requirements" -update test-requirements.txt diff --git a/tox.ini b/tox.ini deleted file mode 100644 index ee486901..00000000 --- a/tox.ini +++ /dev/null @@ -1,65 +0,0 @@ -[tox] -minversion = 3.18.0 -envlist = pep8,py3,docs,api-ref -skipsdist = True -ignore_basepython_conflict = True - -[testenv] -usedevelop = True -setenv = - PYTHONDONTWRITEBYTECODE=1 -deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - stestr run {posargs} - stestr slowest - -[testenv:pep8] -commands = - flake8 {posargs} - -[testenv:genconfig] -commands = oslo-config-generator --config-file=etc/ec2api/ec2api-config-generator.conf - -[testenv:venv] -commands = {posargs} - -[testenv:docs] -# This environment is called from CI scripts to test and publish -# the API Ref and Docs to docs.openstack.org. 
-allowlist_externals = rm -deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r{toxinidir}/doc/requirements.txt -commands = - rm -rf doc/build - sphinx-build -W --keep-going -b html -d doc/build/doctrees doc/source doc/build/html - -[testenv:api-ref] -# This environment is called from CI scripts to test and publish -# the API Ref to docs.openstack.org. -allowlist_externals = rm -deps = {[testenv:docs]deps} -commands = - rm -rf api-ref/build - sphinx-build -W --keep-going -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html - -[flake8] -# E712 is ignored on purpose, since it is normal to use 'column == true' -# in sqlalchemy. -# TODO Hacking 0.6 checks to fix -# H102 Apache 2.0 license header not found -# W503 line break before binary operator -# W504 line break after binary operator -ignore = E121,E122,E123,E124,E126,E127,E128,E711,E712,H102,H303,H404,F403,F811,F841,W503,W504 -# H106: Don't put vim configuration in source files -# H203: Use assertIs(Not)None to check for None -enable-extensions=H106,H203 -exclude = .venv,.git,.tox,dist,envname,*lib/python*,*egg,build,tools -max-complexity=25 - -[flake8:local-plugins] -extension = - N537 = checks:no_translate_logs -paths = ./ec2api/hacking