Retire ec2-api: remove repo content
The ec2-api project is retiring:
https://review.opendev.org/c/openstack/governance/+/919394/1

This commit removes the content of this project repo.

Depends-On: https://review.opendev.org/c/openstack/project-config/+/919396/1
Change-Id: I671d27260e11ec0ae3488acf561bbdaa73a29a60
commit d61398b5d9
parent 32740781f6

.gitignore (vendored): 24 deletions
@@ -1,24 +0,0 @@
*.pyc
*~
etc/ec2api/ec2api.conf.sample
.project
.pydevproject
ec2_api.egg-info
.tox
.stestr
*.log
*.egg
*.swp
*.swo
build
dist
.testrepository
/functional_tests.conf*
/buckets
.venv
.coverage*
!.coveragerc
cover/
.idea
ec2api/tests/unit/test_cert.pem
.DS_Store

@@ -1,3 +0,0 @@
[DEFAULT]
test_path=./ec2api/tests/unit
top_dir=./

.zuul.yaml: 12 deletions
@@ -1,12 +0,0 @@
- project:
    queue: ec2-api
    templates:
      - check-requirements
      - openstack-python3-jobs
      - publish-openstack-docs-pti
    check:
      jobs:
        - ec2api-tempest-plugin-functional
    gate:
      jobs:
        - ec2api-tempest-plugin-functional

@@ -1,19 +0,0 @@
The source repository for this project can be found at:

   https://opendev.org/openstack/ec2-api

Pull requests submitted through GitHub are not monitored.

To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:

   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
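
In short, once Gerrit access is set up, changes are submitted with the
git-review tool rather than GitHub pull requests. A brief sketch, not a
substitute for the guide above::

   pip install git-review
   git review -s        # set up the Gerrit remote for this repository
   git checkout -b my-change
   # edit, commit (the commit hook adds a Change-Id footer), then:
   git review           # push the change to Gerrit for review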

Bugs should be filed on Launchpad:

   https://bugs.launchpad.net/ec2-api

For more specific information about contributing to this repository, see the
ec2-api contributor guide:

   https://docs.openstack.org/ec2-api/latest/contributor/contributing.html

HACKING.rst: 43 deletions
@@ -1,43 +0,0 @@
Ec2api Style Commandments
=========================

- Step 1: Read the OpenStack Style Commandments
  https://github.com/openstack-dev/hacking/blob/master/doc/source/index.rst
- Step 2: Read on

Ec2api Specific Commandments
----------------------------

General
-------
- Do not use locals(). Example::

    LOG.debug("volume %(vol_name)s: creating size %(vol_size)sG" %
              locals())  # BAD

    LOG.debug("volume %(vol_name)s: creating size %(vol_size)sG" %
              {'vol_name': vol_name,
               'vol_size': vol_size})  # OKAY

- Use 'raise' instead of 'raise e' to preserve original traceback or exception being reraised::

    except Exception as e:
        ...
        raise e  # BAD

    except Exception:
        ...
        raise  # OKAY



Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.

For more information on creating unit tests and utilizing the testing
infrastructure in OpenStack Ec2api, please read ec2api/testing/README.rst.
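
Unit tests are typically run with tox; the environment name and test selection
below are illustrative and may differ per branch::

    tox -e py3 ec2api.tests.unit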

LICENSE: 176 deletions
@@ -1,176 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

README.rst: 706 changes
@@ -1,700 +1,10 @@
=================
OpenStack EC2 API
=================

This project is no longer maintained.

.. image:: https://governance.openstack.org/tc/badges/ec2-api.svg
    :target: https://governance.openstack.org/tc/reference/tags/index.html

The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
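
For example, a minimal sequence to retrieve and inspect the pre-retirement
tree (assuming a fresh local clone) is::

    git clone https://opendev.org/openstack/ec2-api
    cd ec2-api
    git checkout HEAD^1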

.. Change things from this point on

Support of EC2 API for OpenStack.
This project provides a standalone EC2 API service which pursues two goals:

1. Implement VPC API
2. Create a standalone service for EC2 API support.

Installation
------------

For more detailed information, please see the `Installation Guide <https://docs.openstack.org/ec2-api/latest/install/index.html>`_.

Installation by install.sh
==========================

Run install.sh

The EC2 API service gets installed on port 8788 by default. It can be changed
in the install.sh script before the installation.

The services can afterwards be started as binaries::

    /usr/local/bin/ec2-api
    /usr/local/bin/ec2-api-metadata
    /usr/local/bin/ec2-api-s3

or set up as Linux services.
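
For a quick check, a service can also be run in the foreground; for example
(the config file path below is an assumption based on the default layout)::

    /usr/local/bin/ec2-api --config-file /etc/ec2api/ec2api.conf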

To configure OpenStack for the EC2 API metadata service, refer to the section
"EC2 metadata Configuration".

Installation on devstack
========================

In order to install ec2-api with devstack, add the following line to
local.conf or localrc::

    enable_plugin ec2-api https://opendev.org/openstack/ec2-api

Devstack installation with ec2-api and ec2api-tempest-plugin for development:

1. install packages: awscli, git, python3, python3-devel, ruby
2. clone the devstack repository::

    git clone https://opendev.org/openstack/devstack

3. grant all permissions for your user for the directory "/opt"
4. create the folder "/opt/stack/logs/"
5. clone the "ec2api-tempest-plugin" repository to the stack folder::

    git clone https://github.com/openstack/ec2api-tempest-plugin /opt/stack/ec2api-tempest-plugin

6. create local.conf::

    [[local|localrc]]
    ADMIN_PASSWORD=secret
    DATABASE_PASSWORD=$ADMIN_PASSWORD
    RABBIT_PASSWORD=$ADMIN_PASSWORD
    SERVICE_PASSWORD=$ADMIN_PASSWORD
    enable_plugin ec2-api https://opendev.org/openstack/ec2-api
    enable_plugin neutron-tempest-plugin https://github.com/openstack/neutron-tempest-plugin
    TEMPEST_PLUGINS='/opt/stack/ec2api-tempest-plugin'

7. go to the devstack folder and start the installation::

    cd ~/devstack/
    ./stack.sh

8. check the installed devstack::

    source ~/devstack/accrc/admin/admin
    tempest list-plugins
    ps -aux | grep "ec2"
    aws --endpoint-url http://<IP-ADDRESS> --region <REGION> --profile admin ec2 describe-images
    openstack catalog list
    openstack flavor list
    openstack image list
    sudo journalctl -u devstack@ec2-api.service

9. run the integration tests (ec2 tempest tests)::

    cd /opt/stack/tempest
    tox -eall -- ec2api_tempest_plugin --concurrency 1
    tox -eall ec2api_tempest_plugin.api.test_network_interfaces.NetworkInterfaceTest.test_create_max_network_interface

10. run the ec2-api unit tests::

     cd /opt/stack/ec2-api
     tox -epy36 ec2api.tests.unit.test_security_group.SecurityGroupTestCase.test_describe_security_groups_no_default_vpc

To configure OpenStack for the EC2 API metadata service, refer to the section
"EC2 metadata Configuration".

EC2 metadata Configuration
==========================

To configure OpenStack for the EC2 API metadata service:

For Nova-network, add::

    [DEFAULT]
    metadata_port = 8789
    [neutron]
    service_metadata_proxy = True

to /etc/nova.conf,

then restart the nova-metadata (can be run as part of the nova-api service) and
nova-network services.

For Neutron, add::

    [DEFAULT]
    nova_metadata_port = 8789

to /etc/neutron/metadata_agent.ini for legacy neutron, or
to neutron_ovn_metadata_agent.ini for OVN,

then restart the neutron-metadata service.
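
The exact service names depend on the distribution and the deployment tool; on
a systemd-based installation this is typically something like (the unit name
below is an assumption, adjust it to your environment)::

    sudo systemctl restart neutron-metadata-agent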

The S3 server is intended only to support EC2 operations which require an S3
server (e.g. CreateImage) in OpenStack deployments without regular object
storage. It must not be used as a substitute for an all-purpose object storage
server. Do not start it if the deployment has its own object storage or uses a
public one (e.g. AWS S3).

Usage
-----

Download aws cli from Amazon.
Create a configuration file for aws cli in your home directory ~/.aws/config::

    [default]
    aws_access_key_id = 1b013f18d5ed47ae8ed0fbb8debc036b
    aws_secret_access_key = 9bbc6f270ffd4dfdbe0e896947f41df3
    region = us-east-1

Change the aws_access_key_id and aws_secret_access_key above to the values
appropriate for your cloud (they can be obtained with the "openstack ec2
credentials list" command).
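
For example, EC2-style credentials can be created and listed with the
openstack client (a brief sketch)::

    openstack ec2 credentials create
    openstack ec2 credentials list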

Run aws cli commands using the new EC2 API endpoint URL (it can be obtained
from the openstack cli, with the new port 8788) like this::

    aws --endpoint-url http://10.0.2.15:8788 ec2 describe-instances
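
The endpoint URL itself can be looked up in the service catalog, for example
(assuming the service is registered under the name "ec2")::

    openstack catalog show ec2
    openstack endpoint list --service ec2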

Supported Features and Limitations
----------------------------------

General:

* DryRun option is not supported.
* Some exceptions are not exactly the same as reported by AWS.

+----------+------------------------------------------+-----------------+----------------------------------------+
| AWS | Command | Functionality | Limitations |
| Component| | group | |
+==========+==========================================+=================+========================================+
| | **bold** - supported, normal - supported | | |
| | with limitations, *italic* - not supported | | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *AcceptVpcPeeringConnection* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | **AllocateAddress** | addresses | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *AllocateHosts* | dedicated hosts | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *AssignIpv6Addresses* | network | not supported |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | AssignPrivateIpAddresses | network | allowReassignment parameter |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | **AssociateAddress** | addresses | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **AssociateDhcpOptions** | DHCP options | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **AssociateRouteTable** | routes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *AssociateSubnetCidrBlock* | subnets | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *AssociateVpcCidrBlock* | VPC | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *AttachClassicLinkVpc* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **AttachInternetGateway** | internet | |
| | | gateways | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **AttachNetworkInterface** | network | |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, EBS | **AttachVolume** | volumes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **AttachVpnGateway** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | AuthorizeSecurityGroupEgress | security groups | EC2 classic way to pass cidr, protocol,|
| | | | sourceGroup, ports parameters |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | AuthorizeSecurityGroupIngress | security groups | EC2 classic way to pass cidr, protocol,|
| | | | sourceGroup, ports parameters |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *BundleInstance* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CancelBundleTask* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CancelConversionTask* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CancelExportTask* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CancelImportTask* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CancelReservedInstancesListing* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CancelSpotFleetRequests* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CancelSpotInstanceRequests* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ConfirmProductInstance* | product codes | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | *CopyImage* | image | not supported |
| | | provisioning | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | *CopySnapshot* | snapshots,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | CreateCustomerGateway | VPC gateways | BGP dynamic routing |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **CreateDhcpOptions** | DHCP options | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CreateEgressOnlyInternetGateway* | VPC gateways | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CreateFlowLogs* | infrastructural | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | CreateImage | images | blockDeviceMapping parameter |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CreateInstanceExportTask* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **CreateInternetGateway** | VPC gateways | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **CreateKeyPair** | key pairs | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CreateNatGateway* | NAT gateways | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *CreateNetworkAcl* | ACL | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *CreateNetworkAclEntry* | ACL | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **CreateNetworkInterface** | network | |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CreatePlacementGroup* | clusters | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CreateReservedInstancesListing* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | CreateRoute | routes | vpcPeeringConnection parameter |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **CreateRouteTable** | routes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | **CreateSecurityGroup** | security groups | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | **CreateSnapshot** | snapshots | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *CreateSpotDatafeedSubscription* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | CreateSubnet | subnets | availabilityZone parameter |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **CreateTags** | tags | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | CreateVolume | volumes | iops, encrypted, kmsKeyId parameters |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **CreateVpc** | VPC | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *CreateVpcEndpoint* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *CreateVpcPeeringConnection* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | CreateVpnConnection | VPN | BGP dynamic routing |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **CreateVpnConnectionRoute** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | CreateVpnGateway | VPN | BGP dynamic routing |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteCustomerGateway** | VPC gateways | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteDhcpOptions** | DHCP options | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DeleteEgressOnlyInternetGateway* | VPC gateways | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DeleteFlowLogs* | infrastructural | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteInternetGateway** | VPC gateways | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **DeleteKeyPair** | key pairs | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DeleteNatGateway* | NAT gateways | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DeleteNetworkAcl* | ACL | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DeleteNetworkAclEntry* | ACL | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteNetworkInterface** | network | |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | *DeletePlacementGroup* | clusters | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteRoute** | routes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteRouteTable** | routes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | **DeleteSecurityGroup** | security groups | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | **DeleteSnapshot** | snapshots | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DeleteSpotDatafeedSubscription* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteSubnet** | subnets | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **DeleteTags** | tags | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | **DeleteVolume** | volumes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteVpc** | VPC | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DeleteVpcEndpoints* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DeleteVpcPeeringConnection* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteVpnConnection** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteVpnConnectionRoute** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DeleteVpnGateway** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | **DeregisterImage** | images | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | DescribeAccountAttributes | infrastructural | vpc-max-security-groups-per-interface, |
| | | | max-elastic-ips, |
| | | | vpc-max-elastic-ips attributes |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | **DescribeAddresses** | addresses | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **DescribeAvailabilityZones** | availability | |
| | | zones | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeBundleTasks* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DescribeClassicLinkInstances* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeConversionTasks* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeCustomerGateways** | gateways | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeDhcpOptions** | DHCP options | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeEgressOnlyInternetGateways* | VPC gateways | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeExportTasks* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeFlowLogs* | infrastructural | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeHosts* | dedicated hosts | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeIdentityIdFormat* | resource IDs | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeIdFormat* | resource IDs | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | DescribeImageAttribute | images | productCodes, sriovNetSupport |
| | | | attributes |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | **DescribeImages** | images | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeImportImageTasks* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeImportSnapshotTasks* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | DescribeInstanceAttribute | instances | same limitations as for |
| | | | ModifyInstanceAttribute |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, | **DescribeInstances** | instances | |
| EBS, VPC | | | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeInstanceStatus* | monitoring | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeInternetGateways** | gateways | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **DescribeKeyPairs** | key pairs | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DescribeMovingAddresses* | infrastructural | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeNatGateways* | NAT gateways | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DescribeNetworkAcls* | ACL | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeNetworkInterfaceAttribute** | network | |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeNetworkInterfaces** | network | |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | *DescribePlacementGroups* | clusters | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DescribePrefixLists* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | DescribeRegions | availability | RegionName parameter |
| | | zones | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeReservedInstances* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeReservedInstancesListings* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeReservedInstancesModifications* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeReservedInstancesOfferings* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeRouteTables** | routes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeScheduledInstanceAvailability* | scheduled | not supported |
| | | instances | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeScheduledInstances* | scheduled | not supported |
| | | instances | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeSecurityGroupReferences* | security groups | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | DescribeSecurityGroups | security groups | cidr, protocol, port, sourceGroup |
| | | | parameters |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | *DescribeSnapshotAttribute* | snapshots | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | **DescribeSnapshots** | snapshots | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeSpotDatafeedSubscription* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeSpotFleetInstances* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeSpotFleetRequestHistory* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeSpotFleetRequests* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeSpotInstanceRequests* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeSpotPriceHistory* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeStaleSecurityGroups* | security groups | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeSubnets** | subnets | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **DescribeTags** | tags | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | *DescribeVolumeAttribute* | volumes | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | **DescribeVolumes** | volumes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeVolumeStatus* | monitoring | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DescribeVpcAttribute* | VPC | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DescribeVpcClassicLink* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DescribeVpcClassicLinkDnsSupport* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DescribeVpcEndpoints* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DescribeVpcEndpointServices* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DescribeVpcPeeringConnections* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeVpcs** | VPC | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeVpnConnections** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DescribeVpnGateways** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DetachClassicLinkVpc* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DetachInternetGateway** | VPC | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DetachNetworkInterface** | network | |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, EBS | DetachVolume | volumes | instance_id, device, force parameters |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DetachVpnGateway** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DisableVgwRoutePropagation** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *DisableVpcClassicLink* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DisableVpcClassicLinkDnsSupport* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | **DisassociateAddress** | addresses | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **DisassociateRouteTable** | routes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DisassociateSubnetCidrBlock* | subnets | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *DisassociateVpcCidrBlock* | VPC | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **EnableVgwRoutePropagation** | VPN | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | *EnableVolumeIO* | monitoring | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *EnableVpcClassicLink* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *EnableVpcClassicLinkDnsSupport* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **GetConsoleOutput** | instances | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *GetConsoleScreenshot* | instances | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **GetPasswordData** | instances | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ImportImage* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ImportInstance* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **ImportKeyPair** | keypairs | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ImportSnapshot* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ImportVolume* | tasks,s3 | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ModifyHosts* | dedicated hosts | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ModifyIdentityIdFormat* | resource IDs | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ModifyIdFormat* | resource IDs | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | ModifyImageAttribute | images | productCodes attribute |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | ModifyInstanceAttribute | instances | only disableApiTermination, |
| | | | sourceDestCheck, instanceType supported|
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ModifyInstancePlacement* | dedicated hosts | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **ModifyNetworkInterfaceAttribute** | network | |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ModifyReservedInstances* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | *ModifySnapshotAttribute* | snapshots | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ModifySpotFleetRequest* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *ModifySubnetAttribute* | subnets | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | *ModifyVolumeAttribute* | volumes | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *ModifyVpcAttribute* | VPC | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *ModifyVpcEndpoint* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ModifyVpcPeeringConnectionOptions* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *MonitorInstances* | monitoring | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *MoveAddressToVpc* | infrastructural | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *PurchaseReservedInstancesOffering* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *PurchaseScheduledInstances* | scheduled | not supported |
| | | instances | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | **RebootInstances** | instances | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | RegisterImage | images | virtualizationType, sriovNetSupport |
| | | | parameters |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *RejectVpcPeeringConnection* | cross-VPC | not supported |
| | | connectivity | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | **ReleaseAddress** | addresses | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ReleaseHosts* | dedicated hosts | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *ReplaceNetworkAclAssociation* | ACL | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *ReplaceNetworkAclEntry* | ACL | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **ReplaceRoute** | routes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **ReplaceRouteTableAssociation** | routes | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *ReportInstanceStatus* | monitoring | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *RequestSpotFleet* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| | *RequestSpotInstances* | market | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | **ResetImageAttribute** | images | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2 | ResetInstanceAttribute | instances | same limitations as for |
| | | | ModifyInstanceAttribute |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | **ResetNetworkInterfaceAttribute** | network | |
| | | interfaces | |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS | *ResetSnapshotAttribute* | snapshots | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC | *RestoreAddressToClassic* | infrastructural | not supported |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | RevokeSecurityGroupEgress | security groups | EC2 classic way to pass cidr, protocol,|
| | | | sourceGroup, ports parameters |
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | RevokeSecurityGroupIngress | security groups | EC2 classic way to pass cidr, protocol,|
|
||||
| | | | sourceGroup, ports parameters |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, | RunInstances | instances | placement, block_device_mapping partial|
|
||||
| VPC, EBS | | | support, monitoring, |
|
||||
| | | | iamInstanceProfile, ebsOptimized, |
|
||||
| | | | shutdownInitiatedInstanceBehavior |
|
||||
| | | | parameters |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *RunScheduledInstances* | scheduled | not supported |
|
||||
| | | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **StartInstances** | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **StopInstances** | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **TerminateInstances** | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *UnassignIpv6Addresses* | network | not supported |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **UnassignPrivateIpAddresses** | network | |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *UnmonitorInstances* | monitoring | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+

References
----------

Documentation:
    https://docs.openstack.org/ec2-api/latest/

Wiki:
    https://wiki.openstack.org/wiki/EC2API

Bugs:
    https://launchpad.net/ec2-api

Source:
    https://opendev.org/openstack/ec2-api

Blueprint:
    https://blueprints.launchpad.net/nova/+spec/ec2-api

Spec:
    https://review.opendev.org/#/c/147882/

For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.
@ -1,221 +0,0 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# nova documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import os
import sys


extensions = [
    'os_api_ref',
    'openstackdocstheme'
]


html_theme = 'openstackdocs'
html_theme_options = {
    "sidebar_mode": "toc",
}
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/ec2-api'
openstackdocs_auto_name = False
openstackdocs_bug_project = 'ec2-api'
openstackdocs_bug_tag = ''

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#
# source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'EC2 API Reference'
copyright = u'OpenStack Foundation'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#

# from ec2-api.version import version_info
# The full version, including alpha/beta/rc tags.
# release = version_info.release_string()
# The short X.Y version.
# version = version_info.version_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# -- Options for man page output ----------------------------------------------

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'


# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_use_modindex = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'ec2apidoc'


# -- Options for LaTeX output -------------------------------------------------

# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'Ec2api.tex', u'OpenStack EC2 API Documentation',
     u'OpenStack Foundation', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# Additional stuff for the LaTeX preamble.
# latex_preamble = ''

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_use_modindex = True
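
# A minimal local build of this reference might look like the following
# (tooling and paths are assumed here, not taken from this repository's
# job definitions):
#
#     pip install sphinx os-api-ref openstackdocstheme
#     sphinx-build -b html <source dir containing this conf.py> <output dir>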
@ -1,13 +0,0 @@
:tocdepth: 2

========
EC2 API
========

Provides a standalone EC2 API service.

Amazon EC2 API Reference can be found `here
<http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Welcome.html>`_.

.. include:: supported_features.inc
@ -1,503 +0,0 @@
.. -*- rst -*-

Supported features and limitations
----------------------------------

General:

* DryRun option is not supported.
* Some exceptions are not exactly the same as reported by AWS.

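As an illustration, any action shown in **bold** in the table below behaves
as a regular EC2 call and can be exercised with standard EC2 tooling; a
minimal sketch with the ``aws`` CLI (the endpoint URL and credentials are
deployment specific and not defined by this document)::

    aws --endpoint-url <EC2 endpoint URL> ec2 describe-images --owners self
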
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| AWS | Command | Functionality | Limitations |
|
||||
| Component| | group | |
|
||||
+==========+==========================================+=================+========================================+
|
||||
| | **bold** - supported, normal - supported | | |
|
||||
| | with limitations, *italic* -not supported| | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *AcceptVpcPeeringConnection* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | **AllocateAddress** | addresses | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *AllocateHosts* | dedicated hosts | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *AssignIpv6Addresses* | network | not supported |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | AssignPrivateIpAddresses | network | allowReassignment parameter |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | **AssociateAddress** | addresses | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **AssociateDhcpOptions** | DHCP options | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **AssociateRouteTable** | routes | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *AssociateSubnetCidrBlock* | subnets | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *AssociateVpcCidrBlock* | VPC | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *AttachClassicLinkVpc* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **AttachInternetGateway** | internet | |
|
||||
| | | gateways | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **AttachNetworkInterface** | network | |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, EBS | **AttachVolume** | volumes | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **AttachVpnGateway** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | AuthorizeSecurityGroupEgress | security groups | EC2 classic way to pass cidr, protocol,|
|
||||
| | | | sourceGroup, ports parameters |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | AuthorizeSecurityGroupIngress | security groups | EC2 classic way to pass cidr, protocol,|
|
||||
| | | | sourceGroup, ports parameters |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *BundleInstance* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CancelBundleTask* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CancelConversionTask* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CancelExportTask* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CancelImportTask* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CancelReservedInstancesListing* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CancelSpotFleetRequests* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CancelSpotInstanceRequests* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ConfirmProductInstance* | product codes | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | *CopyImage* | image | not supported |
|
||||
| | | provisioning | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | *CopySnapshot* | snapshots,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | CreateCustomerGateway | VPC gateways | BGP dynamic routing |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **CreateDhcpOptions** | DHCP options | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CreateEgressOnlyInternetGateway* | VPC gateways | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CreateFlowLogs* | infrastructural | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | CreateImage | images | blockDeviceMapping parameter |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CreateInstanceExportTask* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **CreateInternetGateway** | VPC gateways | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **CreateKeyPair** | key pairs | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CreateNatGateway* | NAT gateways | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *CreateNetworkAcl* | ACL | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *CreateNetworkAclEntry* | ACL | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **CreateNetworkInterface** | network | |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CreatePlacementGroup* | clusters | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CreateReservedInstancesListing* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | CreateRoute | routes | vpcPeeringConnection parameter |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **CreateRouteTable** | routes | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | **CreateSecurityGroup** | security groups | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | **CreateSnapshot** | snapshots | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *CreateSpotDatafeedSubscription* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | CreateSubnet | subnets | availabilityZone parameter |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **CreateTags** | tags | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | CreateVolume | volumes | iops, encrypted, kmsKeyId parameters |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **CreateVpc** | VPC | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *CreateVpcEndpoint* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *CreateVpcPeeringConnection* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | CreateVpnConnection | VPN | BGP dynamic routing |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **CreateVpnConnectionRoute** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | CreateVpnGateway | VPN | BGP dynamic routing |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteCustomerGateway** | VPC gateways | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteDhcpOptions** | DHCP options | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DeleteEgressOnlyInternetGateway* | VPC gateways | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DeleteFlowLogs* | infrastructural | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteInternetGateway** | VPC gateways | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **DeleteKeyPair** | key pairs | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DeleteNatGateway* | NAT gateways | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DeleteNetworkAcl* | ACL | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DeleteNetworkAclEntry* | ACL | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteNetworkInterface** | network | |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | *DeletePlacementGroup* | clusters | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteRoute** | routes | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteRouteTable** | routes | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | **DeleteSecurityGroup** | security groups | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | **DeleteSnapshot** | snapshots | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DeleteSpotDatafeedSubscription* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteSubnet** | subnets | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **DeleteTags** | tags | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | **DeleteVolume** | volumes | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteVpc** | VPC | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DeleteVpcEndpoints* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DeleteVpcPeeringConnection* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteVpnConnection** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteVpnConnectionRoute** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DeleteVpnGateway** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | **DeregisterImage** | images | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | DescribeAccountAttributes | infrastructural | vpc-max-security-groups-per-interface, |
|
||||
| | | | max-elastic-ips, |
|
||||
| | | | vpc-max-elastic-ips attributes |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | **DescribeAddresses** | addresses | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **DescribeAvailabilityZones** | availability | |
|
||||
| | | zones | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeBundleTasks* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DescribeClassicLinkInstances* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeConversionTasks* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeCustomerGateways** | gateways | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeDhcpOptions** | DHCP options | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeEgressOnlyInternetGateways* | VPC gateways | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeExportTasks* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeFlowLogs* | infrastructural | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeHosts* | dedicated hosts | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeIdentityIdFormat* | resource IDs | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeIdFormat* | resource IDs | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | DescribeImageAttribute | images | productCodes, sriovNetSupport |
|
||||
| | | | attributes |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | **DescribeImages** | images | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeImportImageTasks* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeImportSnapshotTasks* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | DescribeInstanceAttribute | instances | same limitations as for |
|
||||
| | | | ModifyInstanceAttribute |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, | **DescribeInstances** | instances | |
|
||||
| EBS, VPC | | | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeInstanceStatus* | monitoring | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeInternetGateways** | gateways | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **DescribeKeyPairs** | key pairs | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DescribeMovingAddresses* | infrastructural | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeNatGateways* | NAT gateways | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DescribeNetworkAcls* | ACL | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeNetworkInterfaceAttribute** | network | |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeNetworkInterfaces** | network | |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | *DescribePlacementGroups* | clusters | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DescribePrefixLists* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | DescribeRegions | availability | RegionName parameter |
|
||||
| | | zones | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeReservedInstances* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeReservedInstancesListings* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeReservedInstancesModifications* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeReservedInstancesOfferings* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeRouteTables** | routes | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeScheduledInstanceAvailability* | scheduled | not supported |
|
||||
| | | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeScheduledInstances* | scheduled | not supported |
|
||||
| | | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeSecurityGroupReferences* | security groups | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | DescribeSecurityGroups | security groups | cidr, protocol, port, sourceGroup |
|
||||
| | | | parameters |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | *DescribeSnapshotAttribute* | snapshots | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | **DescribeSnapshots** | snapshots | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeSpotDatafeedSubscription* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeSpotFleetInstances* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeSpotFleetRequestHistory* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeSpotFleetRequests* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeSpotInstanceRequests* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeSpotPriceHistory* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeStaleSecurityGroups* | security groups | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeSubnets** | subnets | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **DescribeTags** | tags | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | *DescribeVolumeAttribute* | volumes | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | **DescribeVolumes** | volumes | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeVolumeStatus* | monitoring | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DescribeVpcAttribute* | VPC | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DescribeVpcClassicLink* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DescribeVpcClassicLinkDnsSupport* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DescribeVpcEndpoints* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DescribeVpcEndpointServices* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DescribeVpcPeeringConnections* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeVpcs** | VPC | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeVpnConnections** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DescribeVpnGateways** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DetachClassicLinkVpc* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DetachInternetGateway** | VPC | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DetachNetworkInterface** | network | |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, EBS | DetachVolume | volumes | instance_id, device, force parameters |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DetachVpnGateway** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DisableVgwRoutePropagation** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *DisableVpcClassicLink* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DisableVpcClassicLinkDnsSupport* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | **DisassociateAddress** | addresses | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **DisassociateRouteTable** | routes | |
|
||||
| | *DisassociateSubnetCidrBlock* | subnets | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *DisassociateVpcCidrBlock* | VPC | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **EnableVgwRoutePropagation** | VPN | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | *EnableVolumeIO* | monitoring | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *EnableVpcClassicLink* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *EnableVpcClassicLinkDnsSupport* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **GetConsoleOutput** | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *GetConsoleScreenshot* | instances | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **GetPasswordData** | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ImportImage* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ImportInstance* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **ImportKeyPair** | keypairs | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ImportSnapshot* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ImportVolume* | tasks,s3 | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ModifyHosts* | dedicated hosts | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ModifyIdentityIdFormat* | resource IDs | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ModifyIdFormat* | resource IDs | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | ModifyImageAttribute | images | productCodes attribute |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | ModifyInstanceAttribute | instances | only disableApiTermination, |
|
||||
| | | | sourceDestCheck, instanceType supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ModifyInstancePlacement* | dedicated hosts | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | **ModifyNetworkInterfaceAttribute** | network | |
|
||||
| | | interfaces | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ModifyReservedInstances* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | *ModifySnapshotAttribute* | snapshots | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ModifySpotFleetRequest* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *ModifySubnetAttribute* | subnets | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | *ModifyVolumeAttribute* | volumes | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *ModifyVpcAttribute* | VPC | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *ModifyVpcEndpoint* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ModifyVpcPeeringConnectionOptions* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *MonitorInstances* | monitoring | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *MoveAddressToVpc* | infrastructural | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *PurchaseReservedInstancesOffering* | market | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *PurchaseScheduledInstances* | scheduled | not supported |
|
||||
| | | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2 | **RebootInstances** | instances | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EBS | RegisterImage | images | virtualizationType, sriovNetSupport |
|
||||
| | | | parameters |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *RejectVpcPeeringConnection* | cross-VPC | not supported |
|
||||
| | | connectivity | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| EC2, VPC | **ReleaseAddress** | addresses | |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| | *ReleaseHosts* | dedicated hosts | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *ReplaceNetworkAclAssociation* | ACL | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
|
||||
| VPC | *ReplaceNetworkAclEntry* | ACL | not supported |
|
||||
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC      | **ReplaceRoute**                         | routes          |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC      | **ReplaceRouteTableAssociation**         | routes          |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
|          | *ReportInstanceStatus*                   | monitoring      | not supported                          |
+----------+------------------------------------------+-----------------+----------------------------------------+
|          | *RequestSpotFleet*                       | market          | not supported                          |
+----------+------------------------------------------+-----------------+----------------------------------------+
|          | *RequestSpotInstances*                   | market          | not supported                          |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS      | **ResetImageAttribute**                  | images          |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2      | ResetInstanceAttribute                   | instances       | same limitations as for                |
|          |                                          |                 | ModifyInstanceAttribute                |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC      | **ResetNetworkInterfaceAttribute**       | network         |                                        |
|          |                                          | interfaces      |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EBS      | *ResetSnapshotAttribute*                 | snapshots       | not supported                          |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC      | *RestoreAddressToClassic*                | infrastructural | not supported                          |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | RevokeSecurityGroupEgress                | security groups | EC2 classic way to pass cidr, protocol,|
|          |                                          |                 | sourceGroup, ports parameters          |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2, VPC | RevokeSecurityGroupIngress               | security groups | EC2 classic way to pass cidr, protocol,|
|          |                                          |                 | sourceGroup, ports parameters          |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2,     | RunInstances                             | instances       | placement, block_device_mapping partial|
| VPC, EBS |                                          |                 | support, monitoring,                   |
|          |                                          |                 | iamInstanceProfile, ebsOptimized,      |
|          |                                          |                 | shutdownInitiatedInstanceBehavior      |
|          |                                          |                 | parameters                             |
+----------+------------------------------------------+-----------------+----------------------------------------+
|          | *RunScheduledInstances*                  | scheduled       | not supported                          |
|          |                                          | instances       |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2      | **StartInstances**                       | instances       |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2      | **StopInstances**                        | instances       |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
| EC2      | **TerminateInstances**                   | instances       |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
|          | *UnassignIpv6Addresses*                  | network         | not supported                          |
|          |                                          | interfaces      |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
| VPC      | **UnassignPrivateIpAddresses**           | network         |                                        |
|          |                                          | interfaces      |                                        |
+----------+------------------------------------------+-----------------+----------------------------------------+
|          | *UnmonitorInstances*                     | monitoring      | not supported                          |
+----------+------------------------------------------+-----------------+----------------------------------------+
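
As a quick illustration of how the fully supported calls above are exercised,
here is a hedged AWS CLI example against the service endpoint; the endpoint URL
and instance id are placeholders, not values from this repository::

    aws --endpoint-url http://<ec2-api-host>:8788/ ec2 start-instances --instance-ids <instance-id>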
|
@ -1,16 +0,0 @@
======================
Enabling in Devstack
======================

1. Download DevStack::

    git clone https://opendev.org/openstack/devstack
    cd devstack

2. Add this repo as an external repository::

    > cat local.conf
    [[local|localrc]]
    enable_plugin ec2-api https://opendev.org/openstack/ec2-api

3. run ``stack.sh``
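
Once the stack is up, one way to exercise the new endpoint is with EC2
credentials and the AWS CLI, roughly mirroring what the functional-test config
script in this plugin does; a hedged sketch where the profile name, region, and
endpoint address are placeholders::

    openstack ec2 credentials create
    aws configure set aws_access_key_id <access> --profile ec2api
    aws configure set aws_secret_access_key <secret> --profile ec2api
    aws --endpoint-url http://<host>:8788/ --region RegionOne --profile ec2api ec2 describe-images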
@ -1,306 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# create_config script for devstack plugin script
|
||||
# Build a config to run functional tests with or without tempest
|
||||
|
||||
set -o xtrace
|
||||
set +o errexit
|
||||
|
||||
TEST_CONFIG="$1"
|
||||
if [[ -z "$TEST_CONFIG" ]]; then
|
||||
die $LINENO "Please pass config name"
|
||||
fi
|
||||
sudo rm -f $EC2API_DIR/$TEST_CONFIG
|
||||
|
||||
REGULAR_IMAGE_URL="https://cloud-images.ubuntu.com/precise/current/precise-server-cloudimg-i386-disk1.img"
|
||||
REGULAR_IMAGE_FNAME="precise-server-cloudimg-i386-disk1.img"
|
||||
REGULAR_IMAGE_NAME="precise"
|
||||
|
||||
CIRROS_IMAGE_URL="http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
|
||||
CIRROS_IMAGE_FNAME="cirros-0.3.4-x86_64-disk.img"
|
||||
CIRROS_IMAGE_NAME="cirros"
|
||||
|
||||
MAX_FAIL=20
|
||||
FLAVOR_NAME="m1.ec2api"
|
||||
FLAVOR_NAME_ALT="m1.ec2api-alt"
|
||||
|
||||
if [[ -n "$TOP_DIR" ]]; then
|
||||
source $TOP_DIR/openrc admin admin
|
||||
unset OS_CLOUD
|
||||
#unset OS_AUTH_TYPE
|
||||
fi
|
||||
|
||||
openstack endpoint list
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
die $LINENO "OpenStack CLI doesn't work. Looks like credentials are absent."
|
||||
fi
|
||||
|
||||
EC2_URL=`openstack endpoint list --service ec2 --interface public --os-identity-api-version=3 -c URL -f value`
|
||||
S3_URL=`openstack endpoint list --service s3 --interface public --os-identity-api-version=3 -c URL -f value`
|
||||
|
||||
venv_dir="$(pwd)/.venv_awscli"
|
||||
virtualenv "$venv_dir"
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
die $LINENO "Can't setup virtual env."
|
||||
fi
|
||||
source "$venv_dir/bin/activate"
|
||||
pip install awscli
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
die $LINENO "Can't install awscli in virtual env."
|
||||
fi
|
||||
aws --version
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
die $LINENO "awscli doesn't work correctly."
|
||||
fi
|
||||
deactivate
|
||||
|
||||
project_id=`openstack project show $OS_PROJECT_NAME -c id -f value`
|
||||
openstack ec2 credentials create 1>&2
|
||||
line=`openstack ec2 credentials list | grep " $project_id "`
|
||||
read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $2 " " $4 }'`
|
||||
source "$venv_dir/bin/activate"
|
||||
aws configure set aws_access_key_id $ec2_access_key --profile admin
|
||||
aws configure set aws_secret_access_key $ec2_secret_key --profile admin
|
||||
deactivate
|
||||
AWS_PARAMS="--region $REGION_NAME --endpoint-url $EC2_URL"
|
||||
|
||||
neutron_item=$(openstack service list | grep neutron)
|
||||
|
||||
# prepare flavors
|
||||
openstack flavor create --public --id 16 --ram 512 --disk 1 --vcpus 1 $FLAVOR_NAME
|
||||
openstack flavor create --public --id 17 --ram 256 --disk 1 --vcpus 1 $FLAVOR_NAME_ALT
|
||||
|
||||
# prepare cirros image for register_image test. uploading it to S3.
|
||||
sudo rm /tmp/$CIRROS_IMAGE_FNAME
|
||||
wget -nv -P /tmp $CIRROS_IMAGE_URL &
|
||||
cirros_image_wget_pid=$!
|
||||
|
||||
# find simple image
|
||||
source "$venv_dir/bin/activate"
|
||||
image_id=`aws $AWS_PARAMS --profile admin ec2 describe-images --filters Name=image-type,Values=machine Name=name,Values=cirros* --query 'Images[0].ImageId' --output text`
|
||||
deactivate
|
||||
|
||||
if [[ "$image_id" == 'None' || -z "$image_id" ]]; then
|
||||
wait $cirros_image_wget_pid
|
||||
if [[ "$?" -eq "0" ]]; then
|
||||
openstack image create --disk-format raw --container-format bare --public --file /tmp/$CIRROS_IMAGE_FNAME $CIRROS_IMAGE_NAME
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
echo "Creation of openstack image failed."
|
||||
fi
|
||||
source "$venv_dir/bin/activate"
|
||||
image_id=`aws $AWS_PARAMS --profile admin ec2 describe-images --filters Name=image-type,Values=machine Name=name,Values=cirros* --query 'Images[0].ImageId' --output text`
|
||||
deactivate
|
||||
fi
|
||||
fi
|
||||
|
||||
# prepare ubuntu image
|
||||
if [[ $RUN_LONG_TESTS == "1" ]]; then
|
||||
sudo rm /tmp/$REGULAR_IMAGE_FNAME
|
||||
wget -nv -P /tmp $REGULAR_IMAGE_URL
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
echo "Downloading of precise image failed."
|
||||
exit 1
|
||||
fi
|
||||
openstack image create --disk-format raw --container-format bare --public --file /tmp/$REGULAR_IMAGE_FNAME $REGULAR_IMAGE_NAME
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
echo "Creation of precise image failed."
|
||||
exit 1
|
||||
fi
|
||||
# find this image
|
||||
source "$venv_dir/bin/activate"
|
||||
image_id_ubuntu=`aws $AWS_PARAMS --profile admin ec2 describe-images --filters Name=image-type,Values=machine Name=name,Values=$REGULAR_IMAGE_NAME --query 'Images[0].ImageId' --output text`
|
||||
deactivate
|
||||
fi
|
||||
|
||||
# create separate user/project
|
||||
project_name="project-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)"
|
||||
eval $(openstack project create -f shell -c id $project_name)
|
||||
project_id=$id
|
||||
[[ -n "$project_id" ]] || { echo "Can't create project"; exit 1; }
|
||||
user_name="user-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)"
|
||||
eval $(openstack user create "$user_name" --project "$project_id" --password "password" --email "$user_name@example.com" -f shell -c id)
|
||||
user_id=$id
|
||||
[[ -n "$user_id" ]] || { echo "Can't create user"; exit 1; }
|
||||
# add 'Member' role for swift access
|
||||
role_id=$(openstack role show Member -c id -f value)
|
||||
openstack role add --project $project_id --user $user_id $role_id
|
||||
# create network
|
||||
if [[ -n "$neutron_item" ]]; then
|
||||
net_id=$(openstack network create --project $project_id private | grep ' id ' | awk '{print $4}')
|
||||
[[ -n "$net_id" ]] || { echo "net-create failed"; exit 1; }
|
||||
subnet_id=$(openstack subnet create --project $project_id --ip-version 4 --gateway 10.0.0.1 --network $net_id --subnet-range 10.0.0.0/24 private_subnet | grep ' id ' | awk '{print $4}')
|
||||
[[ -n "$subnet_id" ]] || { echo "subnet-create failed"; exit 1; }
|
||||
router_id=$(openstack router create --project $project_id private_router | grep ' id ' | awk '{print $4}')
|
||||
[[ -n "$router_id" ]] || { echo "router-create failed"; exit 1; }
|
||||
sleep 2
|
||||
openstack router add subnet $router_id $subnet_id
|
||||
[[ "$?" -eq 0 ]] || { echo "router-interface-add failed"; exit 1; }
|
||||
public_net_id=$(openstack network list | awk '/public/{print $2}')
|
||||
[[ -n "$public_net_id" ]] || { echo "can't find public network"; exit 1; }
|
||||
openstack router set --external-gateway $public_net_id $router_id
|
||||
[[ "$?" -eq 0 ]] || { echo "router-gateway-set failed"; exit 1; }
|
||||
fi
|
||||
# populate credentials
|
||||
openstack ec2 credentials create --user $user_id --project $project_id 1>&2
|
||||
line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
|
||||
read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $2 " " $4 }'`
|
||||
source "$venv_dir/bin/activate"
|
||||
aws configure set aws_access_key_id $ec2_access_key --profile user
|
||||
aws configure set aws_secret_access_key $ec2_secret_key --profile user
|
||||
deactivate
|
||||
|
||||
env|sort
|
||||
auth="--os-project-name $project_name --os-username $user_name --os-password password"
|
||||
|
||||
# create EBS image
|
||||
volume_status() { openstack $auth volume show $1 | awk '/ status / {print $4}'; }
|
||||
instance_status() { openstack $auth server show $1 | awk '/ status / {print $4}'; }
|
||||
|
||||
openstack_image_id=$(openstack $auth image list --long | grep "cirros" | grep " bare " | head -1 | awk '{print $2}')
|
||||
if [[ -n "$openstack_image_id" ]]; then
|
||||
volume_name="vol-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)"
|
||||
volume_id=$(openstack $auth volume create --image $openstack_image_id --size 1 $volume_name | awk '/ id / {print $4}')
|
||||
[[ -n "$volume_id" ]] || { echo "can't create volume for EBS image creation"; exit 1; }
|
||||
fail=0
|
||||
while [[ true ]] ; do
|
||||
if ((fail >= MAX_FAIL)); then
|
||||
die $LINENO "Volume creation fails (timeout)"
|
||||
fi
|
||||
echo "attempt "$fail" of "$MAX_FAIL
|
||||
status=$(volume_status $volume_id)
|
||||
if [[ $status == "available" ]]; then
|
||||
break
|
||||
fi
|
||||
if [[ $status == "error" || -z "$status" ]]; then
|
||||
openstack $auth volume show $volume_id
|
||||
die $LINENO 'Volume creation error'
|
||||
fi
|
||||
sleep 10
|
||||
((++fail))
|
||||
done
|
||||
|
||||
instance_name="i-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)"
|
||||
instance_id=$(nova $auth boot \
|
||||
--flavor "$FLAVOR_NAME" \
|
||||
--nic net-id=$net_id \
|
||||
--block-device "device=/dev/vda,id=$volume_id,shutdown=remove,source=volume,dest=volume,bootindex=0" \
|
||||
"$instance_name" | awk '/ id / {print $4}')
|
||||
# TODO: find a way to run with delete-on-terminate, or set it after boot with the openstack client
|
||||
# instance_id=$(openstack $auth server create \
|
||||
# --flavor "$FLAVOR_NAME" \
|
||||
# --volume $volume_id \
|
||||
# --nic net-id=$net_id \
|
||||
# "$instance_name" | awk '/ id / {print $4}')
|
||||
[[ -n "$instance_id" ]] || { echo "can't boot EBS instance"; exit 1; }
|
||||
fail=0
|
||||
while [[ true ]] ; do
|
||||
if ((fail >= MAX_FAIL)); then
|
||||
die $LINENO "Instance active status wait timeout occurred"
|
||||
fi
|
||||
echo "attempt "$fail" of "$MAX_FAIL
|
||||
status=$(instance_status $instance_id)
|
||||
if [[ "$status" == "ACTIVE" ]]; then
|
||||
break
|
||||
fi
|
||||
if [[ "$status" == "ERROR" || -z "$status" ]]; then
|
||||
openstack $auth server show $instance_id
|
||||
die $LINENO 'Instance booting error'
|
||||
fi
|
||||
sleep 10
|
||||
((++fail))
|
||||
done
|
||||
|
||||
image_name="image-$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)"
|
||||
openstack $auth server image create --name $image_name --wait $instance_name
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
die $LINENO "Image creation from instance fails"
|
||||
fi
|
||||
source "$venv_dir/bin/activate"
|
||||
ebs_image_id=`aws $AWS_PARAMS --profile user ec2 describe-images --filters Name=image-type,Values=machine Name=name,Values=$image_name --query 'Images[0].ImageId' --output text`
|
||||
deactivate
|
||||
openstack $auth server delete $instance_id
|
||||
fi
|
||||
|
||||
timeout="600"
|
||||
run_long_tests="False"
|
||||
if [[ $RUN_LONG_TESTS == "1" ]]; then
|
||||
run_long_tests="True"
|
||||
fi
|
||||
|
||||
# right now nova-network is very unstable to run tests that want to ssh into instance
|
||||
run_ssh="False"
|
||||
if [[ -n "$neutron_item" ]]; then
|
||||
run_ssh="True"
|
||||
fi
|
||||
|
||||
wait $cirros_image_wget_pid
|
||||
if [[ "$?" -eq "0" && "$CA_CERT" && -e "$CA_CERT" ]]; then
|
||||
sudo apt-get -fy install ruby
|
||||
ID="$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8)"
|
||||
WORKING_DIR="/tmp/bi-$ID"
|
||||
mkdir -p $WORKING_DIR
|
||||
wget -t 2 -T 60 -q -P $WORKING_DIR http://s3.amazonaws.com/ec2-downloads/ec2-ami-tools.zip
|
||||
unzip -d $WORKING_DIR $WORKING_DIR/ec2-ami-tools.zip
|
||||
TOOLS_DIR="$WORKING_DIR/$(ls $WORKING_DIR | grep -Eo "ec2-ami-tools-[0-9\.]*")"
|
||||
|
||||
IMAGES_DIR="$WORKING_DIR/images"
|
||||
# IMPORTANT! bucket name should contain '.' - in this case ami-tools will not build s3 url with bucket name.
|
||||
AWS_AMI_BUCKET="tmp-bundle.$ID"
|
||||
|
||||
EC2_USER_ID=42424242424242 # ec2api does not use user id, but bundling requires it
|
||||
EC2_PRIVATE_KEY="$WORKING_DIR/private/pk.pem"
|
||||
EC2_CSR="$WORKING_DIR/cert.csr"
|
||||
EC2_CERT="$WORKING_DIR/cert.pem"
|
||||
|
||||
mkdir -p "$WORKING_DIR/private/"
|
||||
|
||||
# generate user certificate
|
||||
openssl genrsa -out "$EC2_PRIVATE_KEY" 2048
|
||||
openssl req -new -key "$EC2_PRIVATE_KEY" -subj "/C=RU/ST=Moscow/L=Moscow/O=Progmatic/CN=functional-tests" -out "$EC2_CSR"
|
||||
openssl x509 -req -in "$EC2_CSR" -CA "$CA_CERT" -CAkey "$CA_KEY" -CAcreateserial -out "$EC2_CERT" -days 365
|
||||
|
||||
mkdir -p "$IMAGES_DIR"
|
||||
$TOOLS_DIR/bin/ec2-bundle-image --cert $EC2_CERT --privatekey $EC2_PRIVATE_KEY --ec2cert $CA_CERT --image /tmp/$CIRROS_IMAGE_FNAME --prefix $CIRROS_IMAGE_FNAME --user $EC2_USER_ID --destination "$IMAGES_DIR" --arch x86_64
|
||||
if [[ "$?" -eq "0" ]]; then
|
||||
$TOOLS_DIR/bin/ec2-upload-bundle --url "$S3_URL" --access-key $ec2_access_key --secret-key $ec2_secret_key --bucket "$AWS_AMI_BUCKET" --manifest "$IMAGES_DIR/$CIRROS_IMAGE_FNAME.manifest.xml" --acl "public-read" --sigv 2
|
||||
if [[ "$?" -eq "0" ]]; then
|
||||
cirros_image_manifest="$AWS_AMI_BUCKET/$CIRROS_IMAGE_FNAME.manifest.xml"
|
||||
else
|
||||
warn $LINENO "Uploading of image $CIRROS_IMAGE_URL to S3 failed."
|
||||
fi
|
||||
else
|
||||
warn $LINENO "Bundling of image $CIRROS_IMAGE_URL failed."
|
||||
fi
|
||||
# next line is example how to register this image in the cloud
|
||||
#source "$venv_dir/bin/activate"
|
||||
#aws --endpoint-url $EC2_URL --region RegionOne --profile admin ec2 register-image --image-location "$AWS_AMI_BUCKET/$CIRROS_IMAGE_FNAME.manifest.xml" --name "$CIRROS_IMAGE_FNAME" --architecture x86_64
|
||||
#deactivate
|
||||
else
|
||||
warn $LINENO "Downloading of image $CIRROS_IMAGE_URL failed."
|
||||
fi
|
||||
|
||||
vpnaas_enabled='False'
|
||||
if openstack extension list | grep -q " vpnaas " ; then
|
||||
vpnaas_enabled='True'
|
||||
fi
|
||||
|
||||
sudo bash -c "cat > $EC2API_DIR/$TEST_CONFIG <<EOF
|
||||
[aws]
|
||||
ec2_url = $EC2_URL
|
||||
s3_url = $S3_URL
|
||||
aws_access = $ec2_access_key
|
||||
aws_secret = $ec2_secret_key
|
||||
image_id = $image_id
|
||||
image_id_ubuntu = $image_id_ubuntu
|
||||
ebs_image_id = $ebs_image_id
|
||||
build_timeout = $timeout
|
||||
run_long_tests = $run_long_tests
|
||||
instance_type = $FLAVOR_NAME
|
||||
instance_type_alt = $FLAVOR_NAME_ALT
|
||||
ami_image_location = $cirros_image_manifest
|
||||
run_ssh = $run_ssh
|
||||
vpnaas_enabled = $vpnaas_enabled
|
||||
ca_bundle = $OS_CACERT
|
||||
EOF"
|
||||
|
||||
sudo chown -f $STACK_USER $EC2API_DIR/$TEST_CONFIG
|
@ -1,2 +0,0 @@
# Enable VPNAAS service and set type of ipsec package
IPSEC_PACKAGE=strongswan
@ -1,316 +0,0 @@
|
||||
# lib/ec2-api
|
||||
|
||||
# Dependencies:
|
||||
# ``functions`` file
|
||||
# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
|
||||
|
||||
# ``stack.sh`` calls the entry points in this order:
|
||||
#
|
||||
# install_ec2api
|
||||
# configure_ec2api
|
||||
# start_ec2api
|
||||
# stop_ec2api
|
||||
|
||||
|
||||
env | sort
|
||||
|
||||
# Save trace setting
|
||||
XTRACE=$(set +o | grep xtrace)
|
||||
set -o xtrace
|
||||
|
||||
|
||||
# Defaults
|
||||
# --------
|
||||
|
||||
# Set up default directories
|
||||
EC2API_DIR=$DEST/ec2-api
|
||||
EC2API_CONF_DIR=${EC2API_CONF_DIR:-/etc/ec2api}
|
||||
EC2API_CONF_FILE=${EC2API_CONF_DIR}/ec2api.conf
|
||||
EC2API_DEBUG=${EC2API_DEBUG:-True}
|
||||
EC2API_STATE_PATH=${EC2API_STATE_PATH:=$DATA_DIR/ec2api}
|
||||
EC2API_AUTH_CACHE_DIR=${EC2API_AUTH_CACHE_DIR:-/var/cache/ec2api}
|
||||
|
||||
EC2API_SERVICE_PORT=${EC2API_SERVICE_PORT:-8788}
|
||||
EC2API_S3_SERVICE_PORT=${EC2API_S3_SERVICE_PORT:-3334}
|
||||
|
||||
SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
|
||||
if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
|
||||
SERVICE_PROTOCOL="https"
|
||||
fi
|
||||
|
||||
EC2API_RABBIT_VHOST=${EC2API_RABBIT_VHOST:-''}
|
||||
|
||||
EC2API_ADMIN_USER=${EC2API_ADMIN_USER:-ec2api}
|
||||
|
||||
EC2API_KEYSTONE_SIGNING_DIR=${EC2API_KEYSTONE_SIGNING_DIR:-/tmp/keystone-signing-ec2api}
|
||||
|
||||
# Support entry points installation of console scripts
|
||||
if [[ -d $EC2API_DIR/bin ]]; then
|
||||
EC2API_BIN_DIR=$EC2API_DIR/bin
|
||||
else
|
||||
EC2API_BIN_DIR=$(get_python_exec_prefix)
|
||||
fi
|
||||
|
||||
|
||||
function recreate_endpoint {
|
||||
local endpoint=$1
|
||||
local description=$2
|
||||
local port=$3
|
||||
local protocol=$4
|
||||
|
||||
# Remove nova's service/endpoint
|
||||
local endpoint_ids=$(openstack --os-identity-api-version 3 endpoint list \
|
||||
--service "$endpoint" --region "$REGION_NAME" -c ID -f value)
|
||||
if [[ -n "$endpoint_ids" ]]; then
|
||||
for endpoint_id in $endpoint_ids ; do
|
||||
openstack --os-identity-api-version 3 endpoint delete $endpoint_id
|
||||
done
|
||||
fi
|
||||
local service_id=$(openstack --os-identity-api-version 3 service list \
|
||||
-c "ID" -c "Name" \
|
||||
| grep " $endpoint " | get_field 1)
|
||||
if [[ -n "$service_id" ]]; then
|
||||
openstack --os-identity-api-version 3 service delete $service_id
|
||||
fi
|
||||
|
||||
local service_id=$(openstack service create \
|
||||
$endpoint \
|
||||
--name "$endpoint" \
|
||||
--description="$description" \
|
||||
-f value -c id)
|
||||
openstack --os-identity-api-version 3 endpoint create --region "$REGION_NAME" \
|
||||
$service_id public "$protocol://$SERVICE_HOST:$port/"
|
||||
openstack --os-identity-api-version 3 endpoint create --region "$REGION_NAME" \
|
||||
$service_id admin "$protocol://$SERVICE_HOST:$port/"
|
||||
openstack --os-identity-api-version 3 endpoint create --region "$REGION_NAME" \
|
||||
$service_id internal "$protocol://$SERVICE_HOST:$port/"
|
||||
}
|
||||
|
||||
|
||||
# create_ec2api_accounts() - Set up common required ec2api accounts
|
||||
#
|
||||
# Tenant User Roles
|
||||
# ------------------------------
|
||||
# service ec2api admin
|
||||
function create_ec2api_accounts() {
|
||||
if ! is_service_enabled key; then
|
||||
return
|
||||
fi
|
||||
|
||||
SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
|
||||
ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
|
||||
|
||||
EC2API_USER=$(openstack user create \
|
||||
$EC2API_ADMIN_USER \
|
||||
--password "$SERVICE_PASSWORD" \
|
||||
--project $SERVICE_TENANT \
|
||||
--email ec2api@example.com \
|
||||
| grep " id " | get_field 2)
|
||||
|
||||
openstack role add \
|
||||
$ADMIN_ROLE \
|
||||
--project $SERVICE_TENANT \
|
||||
--user $EC2API_USER
|
||||
|
||||
recreate_endpoint "ec2" "EC2 Compatibility Layer" $EC2API_SERVICE_PORT $SERVICE_PROTOCOL
|
||||
if ! is_service_enabled swift3; then
|
||||
recreate_endpoint "s3" "S3" $EC2API_S3_SERVICE_PORT "http"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
function mkdir_chown_stack {
|
||||
if [[ ! -d "$1" ]]; then
|
||||
sudo mkdir -p "$1"
|
||||
fi
|
||||
sudo chown $STACK_USER "$1"
|
||||
}
|
||||
|
||||
|
||||
function configure_ec2api_rpc_backend() {
|
||||
# Configure the rpc service.
|
||||
iniset_rpc_backend ec2api $EC2API_CONF_FILE DEFAULT
|
||||
|
||||
# TODO(ruhe): get rid of this ugly workaround.
|
||||
inicomment $EC2API_CONF_FILE DEFAULT rpc_backend
|
||||
|
||||
# Set non-default rabbit virtual host if required.
|
||||
if [[ -n "$EC2API_RABBIT_VHOST" ]]; then
|
||||
iniset $EC2API_CONF_FILE DEFAULT rabbit_virtual_host $EC2API_RABBIT_VHOST
|
||||
fi
|
||||
}
|
||||
|
||||
function configure_ec2api_networking {
|
||||
# Use keyword 'public' if ec2api external network was not set.
|
||||
# If it was set but the network does not exist, then the
# first available external network will be selected.
|
||||
local ext_net=${EC2API_EXTERNAL_NETWORK:-'public'}
|
||||
# Configure networking options for ec2api
|
||||
if [[ -n "$ext_net" ]]; then
|
||||
iniset $EC2API_CONF_FILE DEFAULT external_network $ext_net
|
||||
fi
|
||||
iniset $EC2API_CONF_FILE DEFAULT disable_ec2_classic True
|
||||
}
|
||||
|
||||
function create_x509_server_key() {
|
||||
export CA_KEY="$EC2API_STATE_PATH/private/ca_key.pem"
|
||||
export CA_CERT="$EC2API_STATE_PATH/cacert.pem"
|
||||
|
||||
mkdir -p "$EC2API_STATE_PATH/private/"
|
||||
|
||||
# generate root certificate
|
||||
openssl genrsa -out "$CA_KEY" 2048
|
||||
openssl req -x509 -new -key "$CA_KEY" -days 365 -out "$CA_CERT" -subj "/C=RU/ST=Moscow/L=Moscow/O=Progmatic/CN=ec2api-devstack"
|
||||
}
|
||||
|
||||
# Entry points
|
||||
# ------------
|
||||
|
||||
# configure_ec2api() - Set config files, create data dirs, etc
|
||||
function configure_ec2api {
|
||||
mkdir_chown_stack "$EC2API_CONF_DIR"
|
||||
|
||||
# Generate ec2api configuration file and configure common parameters.
|
||||
sudo rm -f $EC2API_CONF_FILE
|
||||
touch $EC2API_CONF_FILE
|
||||
cp $EC2API_DIR/etc/ec2api/api-paste.ini $EC2API_CONF_DIR
|
||||
|
||||
cleanup_ec2api
|
||||
|
||||
iniset $EC2API_CONF_FILE DEFAULT debug $EC2API_DEBUG
|
||||
iniset $EC2API_CONF_FILE DEFAULT use_syslog $SYSLOG
|
||||
iniset $EC2API_CONF_FILE DEFAULT state_path $EC2API_STATE_PATH
|
||||
|
||||
|
||||
# ec2api Api Configuration
|
||||
#-------------------------
|
||||
|
||||
configure_auth_token_middleware $EC2API_CONF_FILE $EC2API_ADMIN_USER $EC2API_AUTH_CACHE_DIR
|
||||
|
||||
iniset $EC2API_CONF_FILE DEFAULT ec2api_workers "$API_WORKERS"
|
||||
iniset $EC2API_CONF_FILE DEFAULT keystone_ec2_tokens_url "$KEYSTONE_SERVICE_URI_V3/ec2tokens"
|
||||
iniset $EC2API_CONF_FILE DEFAULT region_list "$REGION_NAME"
|
||||
|
||||
iniset $EC2API_CONF_FILE DEFAULT ec2api_listen_port "$EC2API_SERVICE_PORT"
|
||||
iniset $EC2API_CONF_FILE DEFAULT ec2_port "$EC2API_SERVICE_PORT"
|
||||
|
||||
local s3_port="$EC2API_S3_SERVICE_PORT"
|
||||
local s3_protocol="http"
|
||||
if is_service_enabled swift3; then
|
||||
s3_port="$S3_SERVICE_PORT"
|
||||
s3_protocol="$SWIFT_SERVICE_PROTOCOL"
|
||||
fi
|
||||
iniset $EC2API_CONF_FILE DEFAULT s3_url "$s3_protocol://$SERVICE_HOST:$s3_port"
|
||||
|
||||
configure_ec2api_rpc_backend
|
||||
|
||||
if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
|
||||
ensure_certificates EC2API
|
||||
|
||||
iniset $NOVA_CONF DEFAULT ssl_cert_file "$NOVA_SSL_CERT"
|
||||
iniset $NOVA_CONF DEFAULT ssl_key_file "$NOVA_SSL_KEY"
|
||||
iniset $NOVA_CONF DEFAULT ec2api_use_ssl "True"
|
||||
iniset $NOVA_CONF DEFAULT metadata_use_ssl "True"
|
||||
fi
|
||||
|
||||
iniset $EC2API_CONF_FILE oslo_concurrency lock_path $EC2API_STATE_PATH
|
||||
|
||||
# configure the database.
|
||||
iniset $EC2API_CONF_FILE database connection `database_connection_url ec2api`
|
||||
|
||||
configure_ec2api_networking
|
||||
|
||||
# metadata configuring
|
||||
iniset $EC2API_CONF_FILE DEFAULT metadata_workers "$API_WORKERS"
|
||||
if [[ ,${ENABLED_SERVICES} =~ ,"q-" ]]; then
|
||||
# with neutron (legacy and OVN)
|
||||
iniset $Q_META_CONF_FILE DEFAULT nova_metadata_port 8789
|
||||
iniset $OVN_META_CONF DEFAULT nova_metadata_port 8789
|
||||
else
|
||||
# with nova-network
|
||||
iniset $NOVA_CONF DEFAULT metadata_port 8789
|
||||
iniset $NOVA_CONF neutron service_metadata_proxy True
|
||||
fi
|
||||
iniset $EC2API_CONF_FILE cache enabled True
|
||||
|
||||
if create_x509_server_key; then
|
||||
iniset $EC2API_CONF_FILE DEFAULT x509_root_private_key "$CA_KEY"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# init_ec2api() - Initialize databases, etc.
|
||||
function init_ec2api() {
|
||||
# (re)create ec2api database
|
||||
recreate_database ec2api
|
||||
|
||||
$EC2API_BIN_DIR/ec2-api-manage --config-file $EC2API_CONF_FILE db_sync
|
||||
}
|
||||
|
||||
|
||||
# install_ec2api() - Collect source and prepare
|
||||
function install_ec2api() {
|
||||
# TODO(ruhe): use setup_develop once ec2api requirements match with global-requirement.txt
|
||||
# both functions (setup_develop and setup_package) are defined at:
|
||||
# https://opendev.org/openstack/devstack/src/branch/master/functions-common
|
||||
setup_package $EC2API_DIR -e
|
||||
}
|
||||
|
||||
|
||||
# start_ec2api() - Start running processes, including screen
|
||||
function start_ec2api() {
|
||||
run_process ec2-api "$EC2API_BIN_DIR/ec2-api --config-file $EC2API_CONF_DIR/ec2api.conf"
|
||||
run_process ec2-api-metadata "$EC2API_BIN_DIR/ec2-api-metadata --config-file $EC2API_CONF_DIR/ec2api.conf"
|
||||
run_process ec2-api-s3 "$EC2API_BIN_DIR/ec2-api-s3 --config-file $EC2API_CONF_DIR/ec2api.conf"
|
||||
}
|
||||
|
||||
|
||||
# stop_ec2api() - Stop running processes
|
||||
function stop_ec2api() {
|
||||
stop_process ec2-api
|
||||
stop_process ec2-api-metadata
|
||||
stop_process ec2-api-s3
|
||||
}
|
||||
|
||||
function cleanup_ec2api() {
|
||||
|
||||
# Cleanup keystone signing dir
|
||||
sudo rm -rf $EC2API_KEYSTONE_SIGNING_DIR
|
||||
}
|
||||
|
||||
function configure_functional_tests() {
|
||||
(source $EC2API_DIR/devstack/create_config "functional_tests.conf")
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
warn $LINENO "EC2 API tests config could not be created."
|
||||
elif is_service_enabled tempest; then
|
||||
cat "$EC2API_DIR/functional_tests.conf" >> $TEMPEST_CONFIG
|
||||
fi
|
||||
}
|
||||
|
||||
# main dispatcher
|
||||
if [[ "$1" == "stack" && "$2" == "install" ]]; then
|
||||
echo_summary "Installing ec2-api"
|
||||
install_ec2api
|
||||
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
|
||||
echo_summary "Configuring ec2-api"
|
||||
configure_ec2api
|
||||
create_ec2api_accounts
|
||||
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
|
||||
echo_summary "Initializing ec2-api"
|
||||
init_ec2api
|
||||
start_ec2api
|
||||
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
|
||||
configure_functional_tests
|
||||
fi
|
||||
|
||||
if [[ "$1" == "unstack" ]]; then
|
||||
stop_ec2api
|
||||
cleanup_ec2api
|
||||
fi
|
||||
|
||||
# Restore xtrace
|
||||
$XTRACE
|
||||
|
||||
# Local variables:
|
||||
# mode: shell-script
|
||||
# End:
|
@ -1,9 +0,0 @@
|
||||
# Devstack settings
|
||||
|
||||
# we have to add ec2-api to enabled services for screen_it to work
|
||||
enable_service ec2-api
|
||||
enable_service ec2-api-metadata
|
||||
enable_service ec2-api-s3
|
||||
|
||||
# Enable VPNAAS service and set type of ipsec package
|
||||
IPSEC_PACKAGE=strongswan
|
@ -1,7 +0,0 @@
|
||||
# The order of packages is significant, because pip processes them in the order
|
||||
# of appearance. Changing the order has an impact on the overall integration
|
||||
# process, which may cause wedges in the gate later.
|
||||
|
||||
sphinx>=2.0.0,!=2.1.0 # BSD
|
||||
openstackdocstheme>=2.2.1 # Apache-2.0
|
||||
os-api-ref>=1.5.0 # Apache-2.0
|
@ -1,82 +0,0 @@
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
|
||||
|
||||
sys.path.insert(0, ROOT)
|
||||
sys.path.insert(0, BASE_DIR)
|
||||
|
||||
# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
|
||||
os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
|
||||
|
||||
# -- General configuration ----------------------------------------------------
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.doctest',
|
||||
'sphinx.ext.todo',
|
||||
'sphinx.ext.viewcode',
|
||||
'openstackdocstheme']
|
||||
|
||||
# openstackdocstheme options
|
||||
openstackdocs_repo_name = 'openstack/ec2-api'
|
||||
openstackdocs_auto_name = False
|
||||
openstackdocs_bug_project = 'ec2-api'
|
||||
openstackdocs_bug_tag = ''
|
||||
|
||||
# autodoc generation is a bit aggressive and a nuisance when doing heavy
|
||||
# text edit cycles.
|
||||
# execute "export SPHINX_DEBUG=1" in your terminal to disable
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = 'EC2API Service'
|
||||
copyright = '2015, OpenStack Foundation'
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
add_module_names = True
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'native'
|
||||
|
||||
# -- Options for HTML output --------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. Major themes that come with
|
||||
# Sphinx are currently 'default' and 'sphinxdoc'.
|
||||
# html_theme_path = ["."]
|
||||
# html_theme = '_theme'
|
||||
# html_static_path = ['static']
|
||||
html_theme = 'openstackdocs'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
html_theme_options = {"sidebar_mode": "toc"}
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = '%sdoc' % project
|
||||
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass
|
||||
# [howto/manual]).
|
||||
latex_documents = [
|
||||
('index',
|
||||
'%s.tex' % project,
|
||||
'%s Documentation' % project,
|
||||
'OpenStack Foundation', 'manual'),
|
||||
]
|
@ -1,143 +0,0 @@
|
||||
admin_password disable
|
||||
admin_tenant_name disable
|
||||
admin_user disable
|
||||
api_paste_config common
|
||||
api_rate_limit disable
|
||||
bindir disable
|
||||
buckets_path disable
|
||||
cert_topic disable
|
||||
cinder_service_type clients
|
||||
debug disable
|
||||
default_flavor ec2
|
||||
default_log_levels disable
|
||||
disable_ec2_classic ec2
|
||||
ec2_host ec2
|
||||
ec2_path ec2
|
||||
ec2_port ec2
|
||||
ec2_private_dns_show_ip ec2
|
||||
ec2_scheme ec2
|
||||
ec2_timestamp_expiry clients
|
||||
ec2api_listen service
|
||||
ec2api_listen_port service
|
||||
ec2api_use_ssl service
|
||||
ec2api_workers service
|
||||
external_network ec2
|
||||
fatal_deprecations disable
|
||||
fatal_exception_format_errors disable
|
||||
image_decryption_dir s3
|
||||
instance_format disable
|
||||
instance_uuid_format disable
|
||||
internal_service_availability_zone ec2
|
||||
keystone_ec2_tokens_url clients
|
||||
keystone_url disable
|
||||
log_config_append disable
|
||||
log_date_format disable
|
||||
log_dir disable
|
||||
log_file disable
|
||||
logging_context_format_string disable
|
||||
logging_debug_format_suffix disable
|
||||
logging_default_format_string disable
|
||||
logging_exception_prefix disable
|
||||
logging_user_identity_format disable
|
||||
max_header_line common
|
||||
metadata_listen metadata
|
||||
metadata_listen_port metadata
|
||||
metadata_use_ssl metadata
|
||||
metadata_workers metadata
|
||||
my_ip ec2
|
||||
network_device_mtu ec2
|
||||
nova_service_type clients
|
||||
publish_errors disable
|
||||
pybasedir disable
|
||||
rate_limit_burst disable
|
||||
rate_limit_except_level disable
|
||||
rate_limit_interval disable
|
||||
region_list ec2
|
||||
s3_listen disable
|
||||
s3_listen_port disable
|
||||
s3_region s3
|
||||
s3_url s3
|
||||
service_down_time disable
|
||||
ssl_ca_file disable
|
||||
ssl_cert_file service
|
||||
ssl_insecure disable
|
||||
ssl_key_file service
|
||||
state_path disable
|
||||
syslog_log_facility disable
|
||||
tcp_keepidle common
|
||||
tempdir disable
|
||||
use_forwarded_for service
|
||||
use_journal disable
|
||||
use_stderr disable
|
||||
use_syslog disable
|
||||
watch_log_file disable
|
||||
wsgi_default_pool_size common
|
||||
wsgi_log_format common
|
||||
x509_root_private_key s3
|
||||
cache/backend disable
|
||||
cache/backend_argument disable
|
||||
cache/config_prefix disable
|
||||
cache/debug_cache_backend disable
|
||||
cache/enabled disable
|
||||
cache/expiration_time disable
|
||||
cache/memcache_dead_retry disable
|
||||
cache/memcache_pool_connection_get_timeout disable
|
||||
cache/memcache_pool_maxsize disable
|
||||
cache/memcache_pool_unused_timeout disable
|
||||
cache/memcache_servers disable
|
||||
cache/memcache_socket_timeout disable
|
||||
cache/proxies disable
|
||||
database/backend disable
|
||||
database/connection disable
|
||||
database/connection_debug disable
|
||||
database/connection_recycle_time disable
|
||||
database/connection_trace disable
|
||||
database/db_inc_retry_interval disable
|
||||
database/db_max_retries disable
|
||||
database/db_max_retry_interval disable
|
||||
database/db_retry_interval disable
|
||||
database/max_overflow disable
|
||||
database/max_pool_size disable
|
||||
database/max_retries disable
|
||||
database/min_pool_size disable
|
||||
database/mysql_enable_ndb disable
|
||||
database/mysql_sql_mode disable
|
||||
database/pool_timeout disable
|
||||
database/retry_interval disable
|
||||
database/slave_connection disable
|
||||
database/sqlite_synchronous disable
|
||||
database/use_db_reconnect disable
|
||||
database/use_tpool database
|
||||
keystone_authtoken/admin_password disable
|
||||
keystone_authtoken/admin_tenant_name disable
|
||||
keystone_authtoken/admin_token disable
|
||||
keystone_authtoken/admin_user disable
|
||||
keystone_authtoken/auth_admin_prefix disable
|
||||
keystone_authtoken/auth_host disable
|
||||
keystone_authtoken/auth_port disable
|
||||
keystone_authtoken/auth_protocol disable
|
||||
keystone_authtoken/auth_section disable
|
||||
keystone_authtoken/auth_type disable
|
||||
keystone_authtoken/www_authenticate_uri disable
|
||||
keystone_authtoken/auth_version disable
|
||||
keystone_authtoken/cache disable
|
||||
keystone_authtoken/cafile disable
|
||||
keystone_authtoken/certfile disable
|
||||
keystone_authtoken/delay_auth_decision disable
|
||||
keystone_authtoken/http_connect_timeout disable
|
||||
keystone_authtoken/http_request_max_retries disable
|
||||
keystone_authtoken/identity_uri disable
|
||||
keystone_authtoken/insecure disable
|
||||
keystone_authtoken/keyfile disable
|
||||
keystone_authtoken/timeout disable
|
||||
metadata/auth_ca_cert metadata
|
||||
metadata/cache_expiration metadata
|
||||
metadata/metadata_proxy_shared_secret metadata
|
||||
metadata/nova_client_cert metadata
|
||||
metadata/nova_client_priv_key metadata
|
||||
metadata/nova_metadata_insecure metadata
|
||||
metadata/nova_metadata_ip metadata
|
||||
metadata/nova_metadata_port metadata
|
||||
metadata/nova_metadata_protocol metadata
|
||||
oslo_concurrency/disable_process_locking disable
|
||||
oslo_concurrency/lock_path disable
|
@ -1,7 +0,0 @@
|
||||
clients OpenStack Clients
|
||||
common Common Service
|
||||
database additional Database Client
|
||||
ec2 EC2API
|
||||
metadata Metadata
|
||||
service EC2API Service
|
||||
s3 S3 Client
|
@ -1,18 +0,0 @@
|
||||
===============================
|
||||
EC2API configuration
|
||||
===============================
|
||||
|
||||
Configuration options
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The following options allow configuration of the features that EC2API supports.
|
||||
|
||||
|
||||
.. include:: ./tables/ec2api-clients.inc
|
||||
.. include:: ./tables/ec2api-database.inc
|
||||
.. include:: ./tables/ec2api-service.inc
|
||||
.. include:: ./tables/ec2api-ec2.inc
|
||||
.. include:: ./tables/ec2api-s3.inc
|
||||
.. include:: ./tables/ec2api-common.inc
|
||||
|
||||
|
@ -1,19 +0,0 @@
|
||||
.. _configuring:
|
||||
|
||||
===================
|
||||
Configuring EC2-API
|
||||
===================
|
||||
|
||||
To configure your EC2API installation, you must define configuration options in these files:
|
||||
|
||||
* ``ec2api.conf`` contains EC2API configuration options and resides in the ``/etc/ec2api`` directory.
|
||||
|
||||
* ``api-paste.ini`` defines EC2API limits and resides in the ``/etc/ec2api`` directory.
|
||||
|
||||
A list of config options based on different topics can be found below:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
api.rst
|
||||
metadata.rst
|
@ -1,16 +0,0 @@
|
||||
==============================
|
||||
EC2API Metadata configuration
|
||||
==============================
|
||||
|
||||
Configuration options
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To configure the Metadata Service for ec2api, the common EC2 API configuration_
sections (such as OpenStack Clients, Database Client, EC2API and Common)
must be configured as well.
|
||||
|
||||
.. _configuration: ./api.html
|
||||
|
||||
The additional configuration options for EC2 Metadata:
|
||||
|
||||
.. include:: ./tables/ec2api-metadata.inc
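
For example, when neutron's metadata agent is used, deployments typically point
it at the ec2-api metadata service (port 8789 by default), similar to what the
devstack plugin does. A hedged sketch, assuming the agent config lives at
``/etc/neutron/metadata_agent.ini``, that ``crudini`` is available, and that the
agent runs as the ``neutron-metadata-agent`` systemd unit:

.. code-block:: console

   $ sudo crudini --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_port 8789
   $ sudo systemctl restart neutron-metadata-agent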
|
@ -1,37 +0,0 @@
|
||||
..
|
||||
Warning: Do not edit this file. It is automatically generated from the
|
||||
software project's code and your changes will be overwritten.
|
||||
|
||||
The tool to generate this file lives in openstack-doc-tools repository.
|
||||
|
||||
Please make any changes needed in the code, then run the
|
||||
autogenerate-config-doc tool from the openstack-doc-tools repository, or
|
||||
ask for help on the documentation mailing list, IRC channel or meeting.
|
||||
|
||||
.. _ec2api-clients:
|
||||
|
||||
.. list-table:: Description of OpenStack Clients configuration options
|
||||
:header-rows: 1
|
||||
:class: config-ref-table
|
||||
|
||||
* - Configuration option = Default value
|
||||
- Description
|
||||
|
||||
* - **[DEFAULT]**
|
||||
-
|
||||
|
||||
* - ``cinder_service_type`` = ``volumev3``
|
||||
|
||||
- (String) Service type of Volume API, registered in Keystone catalog.
|
||||
|
||||
* - ``ec2_timestamp_expiry`` = ``300``
|
||||
|
||||
- (Integer) Time in seconds before ec2 timestamp expires
|
||||
|
||||
* - ``keystone_ec2_tokens_url`` = ``http://localhost:5000/v3/ec2tokens``
|
||||
|
||||
- (String) URL to authenticate token from ec2 request.
|
||||
|
||||
* - ``nova_service_type`` = ``compute``
|
||||
|
||||
- (String) Service type of Compute API, registered in Keystone catalog. Should be v2.1 with microversion support. If it is obsolete v2, a lot of useful EC2 compliant instance properties will be unavailable.
|
@ -1,41 +0,0 @@
|
||||
..
|
||||
Warning: Do not edit this file. It is automatically generated from the
|
||||
software project's code and your changes will be overwritten.
|
||||
|
||||
The tool to generate this file lives in openstack-doc-tools repository.
|
||||
|
||||
Please make any changes needed in the code, then run the
|
||||
autogenerate-config-doc tool from the openstack-doc-tools repository, or
|
||||
ask for help on the documentation mailing list, IRC channel or meeting.
|
||||
|
||||
.. _ec2api-common:
|
||||
|
||||
.. list-table:: Description of Common Service configuration options
|
||||
:header-rows: 1
|
||||
:class: config-ref-table
|
||||
|
||||
* - Configuration option = Default value
|
||||
- Description
|
||||
|
||||
* - **[DEFAULT]**
|
||||
-
|
||||
|
||||
* - ``api_paste_config`` = ``api-paste.ini``
|
||||
|
||||
- (String) File name for the paste.deploy config for ec2api
|
||||
|
||||
* - ``max_header_line`` = ``16384``
|
||||
|
||||
- (Integer) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
|
||||
|
||||
* - ``tcp_keepidle`` = ``600``
|
||||
|
||||
- (Integer) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X.
|
||||
|
||||
* - ``wsgi_default_pool_size`` = ``1000``
|
||||
|
||||
- (Integer) Size of the pool of greenthreads used by wsgi
|
||||
|
||||
* - ``wsgi_log_format`` = ``%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f``
|
||||
|
||||
- (String) A python format string that is used as the template to generate log lines. The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds.
|
@ -1,25 +0,0 @@
|
||||
..
|
||||
Warning: Do not edit this file. It is automatically generated from the
|
||||
software project's code and your changes will be overwritten.
|
||||
|
||||
The tool to generate this file lives in openstack-doc-tools repository.
|
||||
|
||||
Please make any changes needed in the code, then run the
|
||||
autogenerate-config-doc tool from the openstack-doc-tools repository, or
|
||||
ask for help on the documentation mailing list, IRC channel or meeting.
|
||||
|
||||
.. _ec2api-database:
|
||||
|
||||
.. list-table:: Description of additional Database Client configuration options
|
||||
:header-rows: 1
|
||||
:class: config-ref-table
|
||||
|
||||
* - Configuration option = Default value
|
||||
- Description
|
||||
|
||||
* - **[database]**
|
||||
-
|
||||
|
||||
* - ``use_tpool`` = ``False``
|
||||
|
||||
- (Boolean) Enable the experimental use of thread pooling for all DB API calls
|
@ -1,69 +0,0 @@
|
||||
..
|
||||
Warning: Do not edit this file. It is automatically generated from the
|
||||
software project's code and your changes will be overwritten.
|
||||
|
||||
The tool to generate this file lives in openstack-doc-tools repository.
|
||||
|
||||
Please make any changes needed in the code, then run the
|
||||
autogenerate-config-doc tool from the openstack-doc-tools repository, or
|
||||
ask for help on the documentation mailing list, IRC channel or meeting.
|
||||
|
||||
.. _ec2api-ec2:
|
||||
|
||||
.. list-table:: Description of EC2API configuration options
|
||||
:header-rows: 1
|
||||
:class: config-ref-table
|
||||
|
||||
* - Configuration option = Default value
|
||||
- Description
|
||||
|
||||
* - **[DEFAULT]**
|
||||
-
|
||||
|
||||
* - ``default_flavor`` = ``m1.small``
|
||||
|
||||
- (String) A flavor to use as a default instance type
|
||||
|
||||
* - ``disable_ec2_classic`` = ``None``
|
||||
|
||||
- (Boolean) True if server does not support EC2 Classic mode in favor of default VPC
|
||||
|
||||
* - ``ec2_host`` = ``$my_ip``
|
||||
|
||||
- (String) The IP address of the EC2 API server
|
||||
|
||||
* - ``ec2_path`` = ``/``
|
||||
|
||||
- (String) The path prefix used to call the ec2 API server
|
||||
|
||||
* - ``ec2_port`` = ``8788``
|
||||
|
||||
- (Integer) The port of the EC2 API server
|
||||
|
||||
* - ``ec2_private_dns_show_ip`` = ``False``
|
||||
|
||||
- (Boolean) Return the IP address as private dns hostname in describe instances
|
||||
|
||||
* - ``ec2_scheme`` = ``http``
|
||||
|
||||
- (String) The protocol to use when connecting to the EC2 API server (http, https)
|
||||
|
||||
* - ``external_network`` = ``None``
|
||||
|
||||
- (String) Name of the external network, which is used to connect VPCs to the Internet and to allocate Elastic IPs.
|
||||
|
||||
* - ``internal_service_availability_zone`` = ``internal``
|
||||
|
||||
- (String) The availability_zone to show internal services under
|
||||
|
||||
* - ``my_ip`` = ``10.0.0.1``
|
||||
|
||||
- (String) IP address of this host
|
||||
|
||||
* - ``network_device_mtu`` = ``1500``
|
||||
|
||||
- (Integer) MTU size to set by DHCP for instances. Corresponds with the network_device_mtu in ec2api.conf.
|
||||
|
||||
* - ``region_list`` =
|
||||
|
||||
- (List) List of region=fqdn pairs separated by commas
|
@ -1,76 +0,0 @@
|
||||
..
|
||||
Warning: Do not edit this file. It is automatically generated from the
|
||||
software project's code and your changes will be overwritten.
|
||||
|
||||
The tool to generate this file lives in openstack-doc-tools repository.
|
||||
|
||||
Please make any changes needed in the code, then run the
|
||||
autogenerate-config-doc tool from the openstack-doc-tools repository, or
|
||||
ask for help on the documentation mailing list, IRC channel or meeting.
|
||||
|
||||
.. _ec2api-metadata:
|
||||
|
||||
.. list-table:: Description of Metadata configuration options
|
||||
:header-rows: 1
|
||||
:class: config-ref-table
|
||||
|
||||
* - Configuration option = Default value
|
||||
- Description
|
||||
|
||||
* - **[DEFAULT]**
|
||||
-
|
||||
|
||||
* - ``metadata_listen`` = ``0.0.0.0``
|
||||
|
||||
- (String) The IP address on which the metadata API will listen.
|
||||
|
||||
* - ``metadata_listen_port`` = ``8789``
|
||||
|
||||
- (Integer) The port on which the metadata API will listen.
|
||||
|
||||
* - ``metadata_use_ssl`` = ``False``
|
||||
|
||||
- (Boolean) Enable ssl connections or not for EC2 API Metadata
|
||||
|
||||
* - ``metadata_workers`` = ``None``
|
||||
|
||||
- (Integer) Number of workers for metadata service. The default will be the number of CPUs available.
|
||||
|
||||
* - **[metadata]**
|
||||
-
|
||||
|
||||
* - ``auth_ca_cert`` = ``None``
|
||||
|
||||
- (String) Certificate Authority public key (CA cert) file for ssl
|
||||
|
||||
* - ``cache_expiration`` = ``15``
|
||||
|
||||
- (Integer) This option is the time (in seconds) to cache metadata. Increasing this setting should improve response times of the metadata API when under heavy load. Higher values may increase memory usage, and result in longer times for host metadata changes to take effect.
|
||||
|
||||
* - ``metadata_proxy_shared_secret`` =
|
||||
|
||||
- (String) Shared secret to sign instance-id request
|
||||
|
||||
* - ``nova_client_cert`` =
|
||||
|
||||
- (String) Client certificate for nova metadata api server.
|
||||
|
||||
* - ``nova_client_priv_key`` =
|
||||
|
||||
- (String) Private key of client certificate.
|
||||
|
||||
* - ``nova_metadata_insecure`` = ``False``
|
||||
|
||||
- (Boolean) Allow to perform insecure SSL (https) requests to nova metadata
|
||||
|
||||
* - ``nova_metadata_ip`` = ``127.0.0.1``
|
||||
|
||||
- (String) IP address used by Nova metadata server.
|
||||
|
||||
* - ``nova_metadata_port`` = ``8775``
|
||||
|
||||
- (Integer) TCP Port used by Nova metadata server.
|
||||
|
||||
* - ``nova_metadata_protocol`` = ``http``
|
||||
|
||||
- (String) Protocol to access nova metadata, http or https
|
@ -1,37 +0,0 @@
|
||||
..
|
||||
Warning: Do not edit this file. It is automatically generated from the
|
||||
software project's code and your changes will be overwritten.
|
||||
|
||||
The tool to generate this file lives in openstack-doc-tools repository.
|
||||
|
||||
Please make any changes needed in the code, then run the
|
||||
autogenerate-config-doc tool from the openstack-doc-tools repository, or
|
||||
ask for help on the documentation mailing list, IRC channel or meeting.
|
||||
|
||||
.. _ec2api-s3:
|
||||
|
||||
.. list-table:: Description of S3 Client configuration options
|
||||
:header-rows: 1
|
||||
:class: config-ref-table
|
||||
|
||||
* - Configuration option = Default value
|
||||
- Description
|
||||
|
||||
* - **[DEFAULT]**
|
||||
-
|
||||
|
||||
* - ``image_decryption_dir`` = ``/tmp``
|
||||
|
||||
- (String) Parent directory for tempdir used for image decryption
|
||||
|
||||
* - ``s3_region`` = ``RegionOne``
|
||||
|
||||
- (String) Region of S3 server
|
||||
|
||||
* - ``s3_url`` = ``http://$my_ip:3334``
|
||||
|
||||
- (String) URL to S3 server
|
||||
|
||||
* - ``x509_root_private_key`` = ``None``
|
||||
|
||||
- (String) Path to ca private key file
|
@ -1,49 +0,0 @@
|
||||
..
|
||||
Warning: Do not edit this file. It is automatically generated from the
|
||||
software project's code and your changes will be overwritten.
|
||||
|
||||
The tool to generate this file lives in openstack-doc-tools repository.
|
||||
|
||||
Please make any changes needed in the code, then run the
|
||||
autogenerate-config-doc tool from the openstack-doc-tools repository, or
|
||||
ask for help on the documentation mailing list, IRC channel or meeting.
|
||||
|
||||
.. _ec2api-service:
|
||||
|
||||
.. list-table:: Description of EC2API Service configuration options
|
||||
:header-rows: 1
|
||||
:class: config-ref-table
|
||||
|
||||
* - Configuration option = Default value
|
||||
- Description
|
||||
|
||||
* - **[DEFAULT]**
|
||||
-
|
||||
|
||||
* - ``ec2api_listen`` = ``0.0.0.0``
|
||||
|
||||
- (String) The IP address on which the EC2 API will listen.
|
||||
|
||||
* - ``ec2api_listen_port`` = ``8788``
|
||||
|
||||
- (Integer) The port on which the EC2 API will listen.
|
||||
|
||||
* - ``ec2api_use_ssl`` = ``False``
|
||||
|
||||
- (Boolean) Enable ssl connections or not for EC2 API
|
||||
|
||||
* - ``ec2api_workers`` = ``None``
|
||||
|
||||
- (Integer) Number of workers for EC2 API service. The default will be equal to the number of CPUs available.
|
||||
|
||||
* - ``ssl_cert_file`` = ``None``
|
||||
|
||||
- (String) SSL certificate of API server
|
||||
|
||||
* - ``ssl_key_file`` = ``None``
|
||||
|
||||
- (String) SSL private key of API server
|
||||
|
||||
* - ``use_forwarded_for`` = ``False``
|
||||
|
||||
- (Boolean) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.
|
@ -1,47 +0,0 @@
|
||||
============================
|
||||
So You Want to Contribute...
|
||||
============================
|
||||
For general information on contributing to OpenStack, please check out the
|
||||
`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
|
||||
It covers all the basics that are common to all OpenStack projects: the accounts
|
||||
you need, the basics of interacting with our Gerrit review system, how we
|
||||
communicate as a community, etc.
|
||||
The sections below cover the more project-specific information you need to get
started with ec2-api.
|
||||
|
||||
Communication
|
||||
~~~~~~~~~~~~~
|
||||
* IRC channel #openstack-ec2api at OFTC
|
||||
* Mailing list (prefix subjects with ``[ec2-api]`` for faster responses)
|
||||
http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
|
||||
|
||||
Contacting the Core Team
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Please refer to the `ec2-api Core Team
<https://review.opendev.org/admin/groups/243cf9ceaa59c417ffd4e421a88afa4d9a415dcb,members>`_ list for contacts.
|
||||
|
||||
New Feature Planning
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
ec2-api features are tracked on `Launchpad <https://bugs.launchpad.net/ec2-api>`_.
|
||||
|
||||
Task Tracking
|
||||
~~~~~~~~~~~~~
|
||||
We track our tasks in `Launchpad <https://bugs.launchpad.net/ec2-api>`_.
|
||||
If you're looking for some smaller, easier work item to pick up and get started
|
||||
on, search for the 'low-hanging-fruit' tag.
|
||||
|
||||
Reporting a Bug
|
||||
~~~~~~~~~~~~~~~
|
||||
You found an issue and want to make sure we are aware of it? You can do so on
|
||||
`Launchpad <https://bugs.launchpad.net/ec2-api>`_.
|
||||
|
||||
Getting Your Patch Merged
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
All changes proposed to the ec2-api project require a +2 vote from an
ec2-api core reviewer, who approves the patch by also giving a
``Workflow +1`` vote.
|
||||
|
||||
Project Team Lead Duties
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
All common PTL duties are enumerated in the `PTL guide
|
||||
<https://docs.openstack.org/project-team-guide/ptl.html>`_.
|
@ -1 +0,0 @@
|
||||
.. include:: ../../HACKING.rst
|
@ -1,69 +0,0 @@
|
||||
OpenStack EC2 API
|
||||
=====================
|
||||
|
||||
Support of EC2 API for OpenStack.
|
||||
This project provides a standalone EC2 API service which pursues two goals:
|
||||
|
||||
1. Implement VPC API which is now absent in nova's EC2 API
|
||||
|
||||
2. Create a standalone service for EC2 API support which accommodates
|
||||
not only the VPC API but the rest of the EC2 API currently present in nova as
|
||||
well.
|
||||
|
||||
It does not replace the existing nova EC2 API service in a deployment; it is
installed on a different port (8788 by default).
|
||||
|
||||
The ec2-api service consists of the following components:
|
||||
|
||||
``ec2-api`` service
|
||||
Accepts and responds to end user EC2 and VPC API calls.
|
||||
|
||||
``ec2-api-metadata`` service
|
||||
Provides the OpenStack Metadata API to servers. The metadata is used to
|
||||
configure the running servers.
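
For example, once ``ec2-api-metadata`` is wired into the deployment, an
instance can usually reach its metadata through the standard EC2 metadata
address; a hedged illustration, since availability depends on how the metadata
proxy is configured:

.. code-block:: console

   $ curl http://169.254.169.254/latest/meta-data/instance-id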
|
||||
|
||||
|
||||
Installing EC2API
|
||||
=================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
install/index
|
||||
|
||||
Configuring EC2API
|
||||
==================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
configuration/index
|
||||
|
||||
EC2API Reference
|
||||
==================
|
||||
|
||||
- `EC2-API Reference`_
|
||||
|
||||
.. _`EC2-API Reference`: https://docs.openstack.org/api-ref/ec2-api/
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
hacking
|
||||
|
||||
For Contributors
|
||||
================
|
||||
|
||||
* If you are a new contributor to ec2-api, please refer to :doc:`contributor/contributing`
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
|
||||
contributor/contributing
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`search`
|
@ -1,66 +0,0 @@
|
||||
.. _configuration:
|
||||
|
||||
To configure OpenStack for the EC2 API service, add the following to ``/etc/ec2api/ec2api.conf``:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[DEFAULT]
|
||||
external_network = public
|
||||
ec2_port = 8788
|
||||
ec2api_listen_port = 8788
|
||||
keystone_ec2_tokens_url = http://192.168.56.101/identity/v3/ec2tokens
|
||||
api_paste_config = /etc/ec2api/api-paste.ini
|
||||
disable_ec2_classic = True
|
||||
|
||||
.. [*] - ``external_network`` option specifies the name of the external network,
         which is used to connect VPCs to the Internet and to allocate Elastic
         IPs. It must be specified to allow access to VMs from outside of the
         cloud (see the lookup example after this list).
|
||||
|
||||
       - ``disable_ec2_classic`` option is not mandatory, but we strongly
         recommend specifying it. It turns off EC2 Classic mode and forces
         objects to be created inside VPCs.

         With ``disable_ec2_classic`` = True, any user of the cloud must have
         only one network (created with neutron directly and attached to a
         router to provide outside access for the VMs), which is used to launch
         ec2-classic instances.

         Keep in mind that an operator cannot change the
         ``disable_ec2_classic`` setting seamlessly.
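
A quick way to find the name to use for ``external_network`` is to list the
external networks; a hedged example, and the network name in your cloud may
differ from the devstack default shown:

.. code-block:: console

   $ openstack network list --external -c Name -f value
   public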
|
||||
|
||||
In the *[keystone_authtoken]* section, configure Identity service access.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[keystone_authtoken]
|
||||
project_domain_name = Default
|
||||
project_name = service
|
||||
user_domain_name = Default
|
||||
password = password
|
||||
username = ec2api
|
||||
auth_type = password
|
||||
|
||||
You also need to configure the database connection:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[database]
|
||||
connection = mysql+pymysql://root:password@127.0.0.1/ec2api?charset=utf8
|
||||
|
||||
and you need to configure oslo_concurrency lock_path:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[oslo_concurrency]
|
||||
lock_path = /path/to/oslo_concurrency_lock_dir
|
||||
|
||||
and the cache, if you want to use it:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[cache]
|
||||
enabled = True
|
||||
|
||||
You can look for other configuration options in the `Configuration Reference`_
|
||||
|
||||
.. _`Configuration Reference`: ../configuration/api.html
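
After the configuration files are in place, the database schema can be applied
with the ``ec2-api-manage`` utility; a hedged sketch based on the command the
devstack plugin runs, assuming the config file path shown above:

.. code-block:: console

   $ ec2-api-manage --config-file /etc/ec2api/ec2api.conf db_sync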
|
@ -1,24 +0,0 @@
|
||||
.. _credentials-creation:
|
||||
|
||||
#. Source the ``admin`` credentials to gain access to
|
||||
admin-only CLI commands:
|
||||
|
||||
#. To create the service credentials, complete these steps:
|
||||
|
||||
* Create the ``ec2api`` user:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ openstack user create --domain default --password-prompt ec2api
|
||||
|
||||
* Add the ``admin`` role to the ``ec2api`` user:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ openstack role add --project service --user ec2api admin
|
||||
|
||||
* Create the ec2api service entities:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ openstack service create --name ec2-api --description "ec2api" ec2api
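
You can then verify that the service entity was registered; a hedged check:

.. code-block:: console

   $ openstack service list | grep ec2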
|
@ -1,31 +0,0 @@
|
||||
.. _database-creation:
|
||||
|
||||
* Use the database access client to connect to the database
|
||||
server as the ``root`` user:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ mysql -u root -p
|
||||
|
||||
* Create the ``ec2api`` database:
|
||||
|
||||
.. code-block:: mysql
|
||||
|
||||
CREATE DATABASE ec2api;
|
||||
|
||||
* Grant proper access to the ``ec2api`` database:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
GRANT ALL PRIVILEGES ON ec2api.* TO 'ec2api'@'localhost' \
|
||||
IDENTIFIED BY 'EC2-API_DBPASS';
|
||||
GRANT ALL PRIVILEGES ON ec2api.* TO 'ec2api'@'%' \
|
||||
IDENTIFIED BY 'EC2-API_DBPASS';
|
||||
|
||||
Replace ``EC2-API_DBPASS`` with a suitable password.
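
Note that MySQL 8.0 and later no longer accept the ``GRANT ... IDENTIFIED BY``
form; a sketch for such versions is to create the user explicitly first:

.. code-block:: mysql

   CREATE USER 'ec2api'@'localhost' IDENTIFIED BY 'EC2-API_DBPASS';
   CREATE USER 'ec2api'@'%' IDENTIFIED BY 'EC2-API_DBPASS';
   GRANT ALL PRIVILEGES ON ec2api.* TO 'ec2api'@'localhost';
   GRANT ALL PRIVILEGES ON ec2api.* TO 'ec2api'@'%';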
|
||||
|
||||
* Exit the database access client.
|
||||
|
||||
.. code-block:: mysql
|
||||
|
||||
exit;
|
@ -1,13 +0,0 @@
|
||||
Create the ec2api service API endpoints:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ openstack endpoint create --region RegionOne ec2api \
|
||||
public http://controller:XXXX/
|
||||
$ openstack endpoint create --region RegionOne ec2api \
|
||||
admin http://controller:XXXX/
|
||||
$ openstack endpoint create --region RegionOne ec2api \
|
||||
internal http://controller:XXXX/
|
||||
|
||||
- where ``controller`` is the address of the host on which ec2-api is installed
- and ``XXXX`` is the port (8788 by default), for example:
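
With the default port and a controller host named ``controller`` (an
illustrative name), the commands would look like:

.. code-block:: console

   $ openstack endpoint create --region RegionOne ec2api \
       public http://controller:8788/
   $ openstack endpoint create --region RegionOne ec2api \
       admin http://controller:8788/
   $ openstack endpoint create --region RegionOne ec2api \
       internal http://controller:8788/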
|
@ -1,27 +0,0 @@
|
||||
=====================
|
||||
Installing EC2-API
|
||||
=====================
|
||||
|
||||
This section describes how to install and configure the ec2-api service on the
|
||||
controller node for Ubuntu (LTS).
|
||||
|
||||
It assumes that you already have a working OpenStack environment with
|
||||
at least the following components installed: Compute, Networking, Block Storage,
|
||||
Identity, Image.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
install-sh.rst
|
||||
install-manual.rst
|
||||
install-devstack.rst
|
||||
install-ubuntu.rst
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
verify.rst
|
||||
next-steps.rst
|
||||
|
||||
This chapter assumes a working setup of OpenStack following the
|
||||
`OpenStack Installation Tutorial <https://docs.openstack.org/latest/install/>`_.
|
@ -1,10 +0,0 @@
|
||||
.. _install-devstack:
|
||||
|
||||
Installation on DevStack
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To install ec2-api with DevStack, add the following line to your ``local.conf`` or ``localrc``:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
enable_plugin ec2-api https://opendev.org/openstack/ec2-api
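
For context, a minimal ``local.conf`` sketch containing this line might look
like the following (the passwords and other settings are placeholders; adjust
them for your environment):

.. code-block:: ini

   [[local|localrc]]
   ADMIN_PASSWORD=secret
   DATABASE_PASSWORD=$ADMIN_PASSWORD
   RABBIT_PASSWORD=$ADMIN_PASSWORD
   SERVICE_PASSWORD=$ADMIN_PASSWORD
   enable_plugin ec2-api https://opendev.org/openstack/ec2-api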
|
@ -1,47 +0,0 @@
|
||||
.. _install-manual:
|
||||
|
||||
Manual Installation
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Install and configure components
|
||||
--------------------------------
|
||||
|
||||
1. Install the packages in any way you prefer
|
||||
(**github+setup.py** / **pip** / **packages**)
|
||||
|
||||
2. Create the service credentials
|
||||
|
||||
.. include:: credentials-creation.rst
|
||||
|
||||
3. Create database
|
||||
|
||||
.. include:: database-creation.rst
|
||||
|
||||
There is a script that creates the 'ec2api' database, accessible
only on localhost by user 'ec2api' with password 'ec2api':
https://github.com/openstack/ec2-api/blob/master/tools/db/ec2api-db-setup
|
||||
|
||||
4. Create endpoints:
|
||||
|
||||
.. include:: endpoints-creation.rst
|
||||
|
||||
5. Create configuration files ``/etc/ec2api/api-paste.ini``
|
||||
(can be copied from
|
||||
https://github.com/openstack/ec2-api/blob/master/etc/ec2api/api-paste.ini)
|
||||
|
||||
and ``/etc/ec2api/ec2api.conf``
|
||||
|
||||
.. include:: configuration.rst
|
||||
|
||||
6. Configure metadata:
|
||||
|
||||
.. include:: metadata-configuration.rst
|
||||
|
||||
7. Start the services as binaries
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ /usr/local/bin/ec2-api
|
||||
$ /usr/local/bin/ec2-api-metadata
|
||||
|
||||
or set up as Linux services.
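
As an illustration only, a minimal systemd unit for the API service (assuming
the binary lives in ``/usr/local/bin`` and an ``ec2api`` system user exists;
the unit file itself is hypothetical) could look roughly like:

.. code-block:: ini

   # /etc/systemd/system/ec2-api.service (hypothetical example)
   [Unit]
   Description=OpenStack EC2 API service
   After=network-online.target

   [Service]
   User=ec2api
   ExecStart=/usr/local/bin/ec2-api
   Restart=on-failure

   [Install]
   WantedBy=multi-user.target

A similar unit can be written for ``/usr/local/bin/ec2-api-metadata``.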
|
@ -1,38 +0,0 @@
|
||||
.. _install-sh:
|
||||
|
||||
Installation by install.sh
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Install and configure components
|
||||
--------------------------------
|
||||
|
||||
Install the packages:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# apt-get update
|
||||
# git clone https://github.com/openstack/ec2-api.git
|
||||
# cd ec2-api
|
||||
|
||||
Run install.sh
|
||||
|
||||
The EC2 API service is installed on port 8788 by default. This can be changed
before the installation in the ``/etc/ec2api/ec2api.conf`` configuration file.

See :ref:`configuring`.
|
||||
|
||||
Afterwards, the services can be started as binaries:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ /usr/local/bin/ec2-api
|
||||
$ /usr/local/bin/ec2-api-metadata
|
||||
|
||||
or set up as Linux services.
|
||||
|
||||
.. include:: endpoints-creation.rst
|
||||
|
||||
Configuring OpenStack for EC2 API metadata service
|
||||
---------------------------------------------------
|
||||
|
||||
.. include:: metadata-configuration.rst
|
@ -1,19 +0,0 @@
|
||||
.. _install-ubuntu:
|
||||
|
||||
|
||||
Install and configure
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This section describes how to install and configure the ec2-api service on the
|
||||
controller node for Ubuntu (LTS).
|
||||
|
||||
It assumes that you already have a working OpenStack environment with
|
||||
at least the following components installed: Compute, Networking, Block Storage,
|
||||
Identity, Image.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
install-sh.rst
|
||||
install-manual.rst
|
||||
install-devstack.rst
|
@ -1,29 +0,0 @@
|
||||
The EC2 metadata service sits between nova-metadata and neutron-metadata,
so Neutron must be configured to send metadata requests to ec2-api-metadata
instead of to Nova.
|
||||
|
||||
To configure the EC2 API metadata service for Neutron, add:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[DEFAULT]
|
||||
nova_metadata_port = 8789
|
||||
|
||||
to ``/etc/neutron/metadata_agent.ini`` for legacy neutron or
to ``neutron_ovn_metadata_agent.ini`` for OVN,

and then restart the neutron metadata agent service.
|
||||
|
||||
If you want to obtain metadata via SSL, you need to configure neutron:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[DEFAULT]
|
||||
nova_metadata_protocol = https
|
||||
# in case of self-signed certs you may need to specify CA
|
||||
auth_ca_cert = /path/to/root/cert/if/self/signed
|
||||
# or skip certs checking
|
||||
nova_metadata_insecure = True
|
||||
|
||||
You will then be able to get EC2-API/Nova metadata from neutron via SSL.
Note that the metadata URL inside the server remains http://169.254.169.254.
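
As a quick sanity check (a sketch, run from inside a launched instance), the
EC2-style metadata should be reachable at that address:

.. code-block:: console

   $ curl http://169.254.169.254/latest/meta-data/instance-id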
|
@ -1,8 +0,0 @@
|
||||
.. _next-steps:
|
||||
|
||||
Next steps
|
||||
~~~~~~~~~~
|
||||
|
||||
Your OpenStack environment now includes the ec2-api service.
|
||||
|
||||
To add more services, see the additional documentation on installing OpenStack.
|
@ -1,52 +0,0 @@
|
||||
.. _verify:
|
||||
|
||||
Verify operation
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Verify operation of the ec2-api service.
|
||||
|
||||
.. note::
|
||||
|
||||
Perform these commands on the controller node.
|
||||
|
||||
#. Source the ``admin`` project credentials to gain access to
|
||||
admin-only CLI commands:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ . openrc admin admin
|
||||
|
||||
#. List service components to verify successful launch and registration
|
||||
of each process:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ openstack service list
|
||||
|
||||
|
||||
#. Install the AWS CLI.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# pip install awscli --upgrade --user
|
||||
|
||||
#. Create a configuration file for the AWS CLI in your home directory
   (``~/.aws/config``), either manually or with the "**aws configure**" command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
[default]
|
||||
aws_access_key_id = 1b013f18d5ed47ae8ed0fbb8debc036b
|
||||
aws_secret_access_key = 9bbc6f270ffd4dfdbe0e896947f41df3
|
||||
region = RegionOne
|
||||
|
||||
   Change the ``aws_access_key_id`` and ``aws_secret_access_key`` above to the
   values appropriate for your cloud (they can be obtained with the
   "**openstack ec2 credentials list**" command).
|
||||
|
||||
#. Run AWS CLI commands using the new EC2 API endpoint URL (it can be obtained
   from the keystone catalog; the port is 8788 by default), for example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
aws --endpoint-url http://10.0.2.15:8788 ec2 describe-images
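
   Other EC2 operations can be exercised the same way against the same endpoint
   (the IP address above is only the example's), for instance:

   .. code-block:: console

      aws --endpoint-url http://10.0.2.15:8788 ec2 describe-instances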
|
@ -1,26 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
:mod:`ec2api` -- Cloud IaaS Platform
|
||||
===================================
|
||||
|
||||
.. automodule:: ec2api
|
||||
:platform: Unix
|
||||
:synopsis: Infrastructure-as-a-Service Cloud platform.
|
||||
"""
|
||||
|
||||
import gettext
|
||||
|
||||
gettext.install('ec2api')
|
@ -1,398 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Starting point for routing EC2 requests.
|
||||
"""
|
||||
import hashlib
|
||||
import sys
|
||||
|
||||
import botocore
|
||||
from keystoneauth1 import session as keystone_session
|
||||
from keystoneclient import access as keystone_access
|
||||
from keystoneclient.auth.identity import access as keystone_identity_access
|
||||
from oslo_config import cfg
|
||||
from oslo_context import context as common_context
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import timeutils
|
||||
import requests
|
||||
import webob
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
from ec2api.api import apirequest
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api.api import faults
|
||||
from ec2api import clients
|
||||
from ec2api import context
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
from ec2api import wsgi
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
ec2_opts = [
|
||||
cfg.StrOpt('keystone_ec2_tokens_url',
|
||||
default='http://localhost:5000/v3/ec2tokens',
|
||||
help='URL to authenticate token from ec2 request.'),
|
||||
cfg.IntOpt('ec2_timestamp_expiry',
|
||||
default=300,
|
||||
help='Time in seconds before ec2 timestamp expires'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(ec2_opts)
|
||||
CONF.import_opt('use_forwarded_for', 'ec2api.api.auth')
|
||||
|
||||
|
||||
# Fault Wrapper around all EC2 requests #
|
||||
class FaultWrapper(wsgi.Middleware):
|
||||
|
||||
"""Calls the middleware stack, captures any exceptions into faults."""
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
try:
|
||||
return req.get_response(self.application)
|
||||
except Exception:
|
||||
LOG.exception("FaultWrapper catches error")
|
||||
return faults.Fault(webob.exc.HTTPInternalServerError())
|
||||
|
||||
|
||||
class RequestLogging(wsgi.Middleware):
|
||||
|
||||
"""Access-Log akin logging for all EC2 API requests."""
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
start = timeutils.utcnow()
|
||||
rv = req.get_response(self.application)
|
||||
self.log_request_completion(rv, req, start)
|
||||
return rv
|
||||
|
||||
def log_request_completion(self, response, request, start):
|
||||
apireq = request.environ.get('ec2.request', None)
|
||||
if apireq:
|
||||
action = apireq.action
|
||||
else:
|
||||
action = None
|
||||
ctxt = request.environ.get('ec2api.context', None)
|
||||
delta = timeutils.utcnow() - start
|
||||
seconds = delta.seconds
|
||||
microseconds = delta.microseconds
|
||||
LOG.info(
|
||||
"%s.%ss %s %s %s %s %s [%s] %s %s",
|
||||
seconds,
|
||||
microseconds,
|
||||
request.remote_addr,
|
||||
request.method,
|
||||
"%s%s" % (request.script_name, request.path_info),
|
||||
action,
|
||||
response.status_int,
|
||||
request.user_agent,
|
||||
request.content_type,
|
||||
response.content_type,
|
||||
context=ctxt)
|
||||
|
||||
|
||||
class EC2KeystoneAuth(wsgi.Middleware):
|
||||
|
||||
"""Authenticate an EC2 request with keystone and convert to context."""
|
||||
|
||||
def _get_signature(self, req):
|
||||
"""Extract the signature from the request.
|
||||
|
||||
This can be a get/post variable or for version 4 also in a header
|
||||
called 'Authorization'.
|
||||
- params['Signature'] == version 0,1,2,3
|
||||
- params['X-Amz-Signature'] == version 4
|
||||
- header 'Authorization' == version 4
|
||||
"""
|
||||
sig = req.params.get('Signature') or req.params.get('X-Amz-Signature')
|
||||
if sig is not None:
|
||||
return sig
|
||||
|
||||
if 'Authorization' not in req.headers:
|
||||
return None
|
||||
|
||||
auth_str = req.headers['Authorization']
|
||||
if not auth_str.startswith('AWS4-HMAC-SHA256'):
|
||||
return None
|
||||
|
||||
return auth_str.partition("Signature=")[2].split(',')[0]
|
||||
|
||||
def _get_access(self, req):
|
||||
"""Extract the access key identifier.
|
||||
|
||||
For version 0/1/2/3 this is passed as the AccessKeyId parameter, for
|
||||
version 4 it is either an X-Amz-Credential parameter or a Credential=
|
||||
field in the 'Authorization' header string.
|
||||
"""
|
||||
access = req.params.get('AWSAccessKeyId')
|
||||
if access is not None:
|
||||
return access
|
||||
|
||||
cred_param = req.params.get('X-Amz-Credential')
|
||||
if cred_param:
|
||||
access = cred_param.split("/")[0]
|
||||
if access is not None:
|
||||
return access
|
||||
|
||||
if 'Authorization' not in req.headers:
|
||||
return None
|
||||
auth_str = req.headers['Authorization']
|
||||
if not auth_str.startswith('AWS4-HMAC-SHA256'):
|
||||
return None
|
||||
cred_str = auth_str.partition("Credential=")[2].split(',')[0]
|
||||
return cred_str.split("/")[0]
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
request_id = common_context.generate_request_id()
|
||||
|
||||
# NOTE(alevine) We need to calculate the hash here because
|
||||
# subsequent access to request modifies the req.body so the hash
|
||||
# calculation will yield invalid results.
|
||||
body_hash = hashlib.sha256(req.body).hexdigest()
|
||||
|
||||
signature = self._get_signature(req)
|
||||
if not signature:
|
||||
msg = _("Signature not provided")
|
||||
return faults.ec2_error_response(request_id, "AuthFailure", msg,
|
||||
status=400)
|
||||
access = self._get_access(req)
|
||||
if not access:
|
||||
msg = _("Access key not provided")
|
||||
return faults.ec2_error_response(request_id, "AuthFailure", msg,
|
||||
status=400)
|
||||
|
||||
if 'X-Amz-Signature' in req.params or 'Authorization' in req.headers:
|
||||
params = {}
|
||||
else:
|
||||
# Make a copy of args for authentication and signature verification
|
||||
params = dict(req.params)
|
||||
# Not part of authentication args
|
||||
params.pop('Signature', None)
|
||||
|
||||
cred_dict = {
|
||||
'access': access,
|
||||
'signature': signature,
|
||||
'host': req.host,
|
||||
'verb': req.method,
|
||||
'path': req.path,
|
||||
'params': params,
|
||||
# python3 takes only keys for json from headers object
|
||||
'headers': {k: req.headers[k] for k in req.headers},
|
||||
'body_hash': body_hash
|
||||
}
|
||||
|
||||
token_url = CONF.keystone_ec2_tokens_url
|
||||
if "ec2" in token_url:
|
||||
creds = {'ec2Credentials': cred_dict}
|
||||
else:
|
||||
creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
|
||||
creds_json = jsonutils.dumps(creds)
|
||||
headers = {'Content-Type': 'application/json'}
|
||||
params = {'data': creds_json, 'headers': headers}
|
||||
clients.update_request_params_with_ssl(params)
|
||||
response = requests.request('POST', token_url, **params)
|
||||
status_code = response.status_code
|
||||
if status_code != 200:
|
||||
msg = response.reason
|
||||
return faults.ec2_error_response(request_id, "AuthFailure", msg,
|
||||
status=status_code)
|
||||
|
||||
try:
|
||||
auth_ref = keystone_access.AccessInfo.factory(resp=response,
|
||||
body=response.json())
|
||||
except (NotImplementedError, KeyError):
|
||||
LOG.exception("Keystone failure")
|
||||
msg = _("Failure communicating with keystone")
|
||||
return faults.ec2_error_response(request_id, "AuthFailure", msg,
|
||||
status=400)
|
||||
auth = keystone_identity_access.AccessInfoPlugin(auth_ref)
|
||||
params = {'auth': auth}
|
||||
clients.update_request_params_with_ssl(params)
|
||||
session = keystone_session.Session(**params)
|
||||
remote_address = req.remote_addr
|
||||
if CONF.use_forwarded_for:
|
||||
remote_address = req.headers.get('X-Forwarded-For',
|
||||
remote_address)
|
||||
|
||||
ctxt = context.RequestContext(auth_ref.user_id, auth_ref.project_id,
|
||||
request_id=request_id,
|
||||
user_name=auth_ref.username,
|
||||
project_name=auth_ref.project_name,
|
||||
remote_address=remote_address,
|
||||
session=session,
|
||||
api_version=req.params.get('Version'))
|
||||
|
||||
req.environ['ec2api.context'] = ctxt
|
||||
|
||||
return self.application
|
||||
|
||||
|
||||
class Requestify(wsgi.Middleware):
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
|
||||
'SignatureVersion', 'Version', 'Timestamp']
|
||||
args = dict(req.params)
|
||||
try:
|
||||
expired = ec2utils.is_ec2_timestamp_expired(
|
||||
req.params,
|
||||
expires=CONF.ec2_timestamp_expiry)
|
||||
if expired:
|
||||
msg = _("Timestamp failed validation.")
|
||||
LOG.exception(msg)
|
||||
raise webob.exc.HTTPForbidden(explanation=msg)
|
||||
|
||||
# Raise KeyError if omitted
|
||||
action = req.params['Action']
|
||||
# Fix bug lp:720157 for older (version 1) clients
|
||||
version = req.params.get('SignatureVersion')
|
||||
if version and int(version) == 1:
|
||||
non_args.remove('SignatureMethod')
|
||||
if 'SignatureMethod' in args:
|
||||
args.pop('SignatureMethod')
|
||||
for non_arg in non_args:
|
||||
args.pop(non_arg, None)
|
||||
except KeyError:
|
||||
raise webob.exc.HTTPBadRequest()
|
||||
except exception.InvalidRequest as err:
|
||||
raise webob.exc.HTTPBadRequest(explanation=err.format_message())
|
||||
|
||||
LOG.debug('action: %s', action)
|
||||
for key, value in args.items():
|
||||
LOG.debug('arg: %(key)s\t\tval: %(value)s',
|
||||
{'key': key, 'value': value})
|
||||
|
||||
# Success!
|
||||
api_request = apirequest.APIRequest(
|
||||
action, req.params['Version'], args)
|
||||
req.environ['ec2.request'] = api_request
|
||||
return self.application
|
||||
|
||||
|
||||
def exception_to_ec2code(ex):
|
||||
"""Helper to extract EC2 error code from exception.
|
||||
|
||||
For other than EC2 exceptions (those without ec2_code attribute),
|
||||
use exception name.
|
||||
"""
|
||||
if hasattr(ex, 'ec2_code'):
|
||||
code = ex.ec2_code
|
||||
else:
|
||||
code = type(ex).__name__
|
||||
return code
|
||||
|
||||
|
||||
def ec2_error_ex(ex, req, unexpected=False):
|
||||
"""Return an EC2 error response.
|
||||
|
||||
Return an EC2 error response based on passed exception and log
|
||||
the exception on an appropriate log level:
|
||||
|
||||
* DEBUG: expected errors
|
||||
* ERROR: unexpected errors
|
||||
|
||||
All expected errors are treated as client errors and 4xx HTTP
|
||||
status codes are always returned for them.
|
||||
|
||||
Unexpected 5xx errors may contain sensitive information,
|
||||
suppress their messages for security.
|
||||
"""
|
||||
code = exception_to_ec2code(ex)
|
||||
for status_name in ('code', 'status', 'status_code', 'http_status'):
|
||||
status = getattr(ex, status_name, None)
|
||||
if isinstance(status, int):
|
||||
break
|
||||
else:
|
||||
status = 500
|
||||
|
||||
if unexpected:
|
||||
log_fun = LOG.error
|
||||
log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
|
||||
exc_info = sys.exc_info()
|
||||
else:
|
||||
log_fun = LOG.debug
|
||||
log_msg = _("%(ex_name)s raised: %(ex_str)s")
|
||||
exc_info = None
|
||||
|
||||
context = req.environ['ec2api.context']
|
||||
request_id = context.request_id
|
||||
log_msg_args = {
|
||||
'ex_name': type(ex).__name__,
|
||||
'ex_str': ex
|
||||
}
|
||||
log_fun(log_msg % log_msg_args, context=context, exc_info=exc_info)
|
||||
|
||||
if unexpected and status >= 500:
|
||||
message = _('Unknown error occurred.')
|
||||
elif getattr(ex, 'message', None):
|
||||
message = str(ex.message)
|
||||
elif ex.args and any(arg for arg in ex.args):
|
||||
message = " ".join(map(str, ex.args))
|
||||
else:
|
||||
message = str(ex)
|
||||
if unexpected:
|
||||
# Log filtered environment for unexpected errors.
|
||||
env = req.environ.copy()
|
||||
for k in list(env.keys()):
|
||||
if not isinstance(env[k], str):
|
||||
env.pop(k)
|
||||
log_fun(_('Environment: %s') % jsonutils.dumps(env))
|
||||
return faults.ec2_error_response(request_id, code, message, status=status)
|
||||
|
||||
|
||||
class Executor(wsgi.Application):
|
||||
|
||||
"""Execute an EC2 API request.
|
||||
|
||||
Executes 'ec2.action', passing 'ec2api.context' and
|
||||
'ec2.action_args' (all variables in WSGI environ.) Returns an XML
|
||||
response, or a 400 upon failure.
|
||||
"""
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
context = req.environ['ec2api.context']
|
||||
api_request = req.environ['ec2.request']
|
||||
try:
|
||||
result = api_request.invoke(context)
|
||||
except botocore.exceptions.ClientError as ex:
|
||||
error = ex.response.get('Error', {})
|
||||
code = ex.response.get('Code', error.get('Code'))
|
||||
message = ex.response.get('Message', error.get('Message'))
|
||||
# the early versions of botocore didn't provide HTTPStatusCode
|
||||
# for 400 errors
|
||||
status = ex.response.get('ResponseMetadata', {}).get(
|
||||
'HTTPStatusCode', 400)
|
||||
if status < 400 or status > 499:
|
||||
LOG.exception("Exception from remote server")
|
||||
return faults.ec2_error_response(
|
||||
context.request_id, code, message, status=status)
|
||||
except Exception as ex:
|
||||
return ec2_error_ex(
|
||||
ex, req, unexpected=not isinstance(ex, exception.EC2Exception))
|
||||
else:
|
||||
resp = webob.Response()
|
||||
resp.status = 200
|
||||
resp.headers['Content-Type'] = 'text/xml'
|
||||
resp.body = bytes(result)
|
||||
|
||||
return resp
|
@ -1,459 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
try:
|
||||
from neutronclient.common import exceptions as neutron_exception
|
||||
except ImportError:
|
||||
pass  # clients will log absence of neutronclient in this case
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from ec2api.api import common
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api.api import internet_gateway as internet_gateway_api
|
||||
from ec2api import clients
|
||||
from ec2api.db import api as db_api
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
"""Address related API implementation
|
||||
"""
|
||||
|
||||
|
||||
Validator = common.Validator
|
||||
|
||||
|
||||
def get_address_engine():
|
||||
return AddressEngineNeutron()
|
||||
|
||||
|
||||
def allocate_address(context, domain=None):
|
||||
if domain and domain not in ['vpc', 'standard']:
|
||||
msg = _("Invalid value '%(domain)s' for domain.") % {'domain': domain}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
address, os_floating_ip = address_engine.allocate_address(context, domain)
|
||||
return _format_address(context, address, os_floating_ip)
|
||||
|
||||
|
||||
def associate_address(context, public_ip=None, instance_id=None,
|
||||
allocation_id=None, network_interface_id=None,
|
||||
private_ip_address=None, allow_reassociation=False):
|
||||
if not public_ip and not allocation_id:
|
||||
msg = _('Either public IP or allocation id must be specified')
|
||||
raise exception.MissingParameter(msg)
|
||||
if public_ip and allocation_id:
|
||||
msg = _('You may specify public IP or allocation id, '
|
||||
'but not both in the same call')
|
||||
raise exception.InvalidParameterCombination(msg)
|
||||
if not instance_id and not network_interface_id:
|
||||
msg = _('Either instance ID or network interface id must be specified')
|
||||
raise exception.MissingParameter(msg)
|
||||
associationId = address_engine.associate_address(
|
||||
context, public_ip, instance_id,
|
||||
allocation_id, network_interface_id,
|
||||
private_ip_address, allow_reassociation)
|
||||
if associationId:
|
||||
return {'return': True,
|
||||
'associationId': associationId}
|
||||
return {'return': True}
|
||||
|
||||
|
||||
def disassociate_address(context, public_ip=None, association_id=None):
|
||||
if not public_ip and not association_id:
|
||||
msg = _('Either public IP or association id must be specified')
|
||||
raise exception.MissingParameter(msg)
|
||||
if public_ip and association_id:
|
||||
msg = _('You may specify public IP or association id, '
|
||||
'but not both in the same call')
|
||||
raise exception.InvalidParameterCombination(msg)
|
||||
address_engine.disassociate_address(context, public_ip, association_id)
|
||||
return True
|
||||
|
||||
|
||||
def release_address(context, public_ip=None, allocation_id=None):
|
||||
if not public_ip and not allocation_id:
|
||||
msg = _('Either public IP or allocation id must be specified')
|
||||
raise exception.MissingParameter(msg)
|
||||
if public_ip and allocation_id:
|
||||
msg = _('You may specify public IP or allocation id, '
|
||||
'but not both in the same call')
|
||||
raise exception.InvalidParameterCombination(msg)
|
||||
|
||||
address_engine.release_address(context, public_ip, allocation_id)
|
||||
return True
|
||||
|
||||
|
||||
class AddressDescriber(common.UniversalDescriber):
|
||||
|
||||
KIND = 'eipalloc'
|
||||
FILTER_MAP = {'allocation-id': 'allocationId',
|
||||
'association-id': 'associationId',
|
||||
'domain': 'domain',
|
||||
'instance-id': 'instanceId',
|
||||
'network-interface-id': 'networkInterfaceId',
|
||||
'network-interface-owner-id': 'networkInterfaceOwnerId',
|
||||
'private-ip-address': 'privateIpAddress',
|
||||
'public-ip': 'publicIp'}
|
||||
|
||||
def __init__(self, os_ports, db_instances):
|
||||
self.os_ports = os_ports
|
||||
self.db_instances_dict = {i['os_id']: i for i in (db_instances or [])}
|
||||
|
||||
def format(self, item=None, os_item=None):
|
||||
return _format_address(self.context, item, os_item, self.os_ports,
|
||||
self.db_instances_dict)
|
||||
|
||||
def get_os_items(self):
|
||||
return address_engine.get_os_floating_ips(self.context)
|
||||
|
||||
def auto_update_db(self, item, os_item):
|
||||
item = super(AddressDescriber, self).auto_update_db(item, os_item)
|
||||
if (item and 'network_interface_id' in item and
|
||||
(not os_item.get('port_id') or
|
||||
os_item['fixed_ip_address'] != item['private_ip_address'])):
|
||||
_disassociate_address_item(self.context, item)
|
||||
return item
|
||||
|
||||
def get_name(self, os_item):
|
||||
return os_item['floating_ip_address']
|
||||
|
||||
|
||||
def describe_addresses(context, public_ip=None, allocation_id=None,
|
||||
filter=None):
|
||||
formatted_addresses = AddressDescriber(
|
||||
address_engine.get_os_ports(context),
|
||||
db_api.get_items(context, 'i')).describe(
|
||||
context, allocation_id, public_ip, filter)
|
||||
return {'addressesSet': formatted_addresses}
|
||||
|
||||
|
||||
def _format_address(context, address, os_floating_ip, os_ports=[],
|
||||
db_instances_dict=None):
|
||||
ec2_address = {'publicIp': os_floating_ip['floating_ip_address']}
|
||||
fixed_ip_address = os_floating_ip.get('fixed_ip_address')
|
||||
if fixed_ip_address:
|
||||
ec2_address['privateIpAddress'] = fixed_ip_address
|
||||
os_instance_id = _get_os_instance_id(context, os_floating_ip, os_ports)
|
||||
if os_instance_id:
|
||||
ec2_address['instanceId'] = (
|
||||
_get_instance_ec2_id_by_os_id(context, os_instance_id,
|
||||
db_instances_dict))
|
||||
if not address:
|
||||
ec2_address['domain'] = 'standard'
|
||||
else:
|
||||
ec2_address['domain'] = 'vpc'
|
||||
ec2_address['allocationId'] = address['id']
|
||||
if 'network_interface_id' in address:
|
||||
ec2_address.update({
|
||||
'associationId': ec2utils.change_ec2_id_kind(
|
||||
ec2_address['allocationId'], 'eipassoc'),
|
||||
'networkInterfaceId': address['network_interface_id'],
|
||||
'networkInterfaceOwnerId': context.project_id})
|
||||
return ec2_address
|
||||
|
||||
|
||||
def _get_instance_ec2_id_by_os_id(context, os_instance_id, db_instances_dict):
|
||||
db_item = ec2utils.get_db_item_by_os_id(context, 'i', os_instance_id,
|
||||
db_instances_dict)
|
||||
return db_item['id']
|
||||
|
||||
|
||||
def _is_address_valid(context, neutron, address):
|
||||
try:
|
||||
neutron.show_floatingip(address['os_id'])
|
||||
except neutron_exception.NotFound:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
def _associate_address_item(context, address, network_interface_id,
|
||||
private_ip_address):
|
||||
address['network_interface_id'] = network_interface_id
|
||||
address['private_ip_address'] = private_ip_address
|
||||
db_api.update_item(context, address)
|
||||
|
||||
|
||||
def _disassociate_address_item(context, address):
|
||||
address.pop('network_interface_id')
|
||||
address.pop('private_ip_address')
|
||||
db_api.update_item(context, address)
|
||||
|
||||
|
||||
def _get_os_instance_id(context, os_floating_ip, os_ports=[]):
|
||||
port_id = os_floating_ip.get('port_id')
|
||||
os_instance_id = None
|
||||
if port_id:
|
||||
port = next((port for port in os_ports
|
||||
if port['id'] == port_id), None)
|
||||
if port and port.get('device_owner').startswith('compute:'):
|
||||
os_instance_id = port.get('device_id')
|
||||
return os_instance_id
|
||||
|
||||
|
||||
class AddressEngineNeutron(object):
|
||||
|
||||
def allocate_address(self, context, domain=None):
|
||||
os_public_network = ec2utils.get_os_public_network(context)
|
||||
neutron = clients.neutron(context)
|
||||
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
os_floating_ip = {'floating_network_id': os_public_network['id']}
|
||||
try:
|
||||
os_floating_ip = neutron.create_floatingip(
|
||||
{'floatingip': os_floating_ip})
|
||||
except neutron_exception.OverQuotaClient:
|
||||
raise exception.AddressLimitExceeded()
|
||||
os_floating_ip = os_floating_ip['floatingip']
|
||||
if ((not domain or domain == 'standard') and
|
||||
not CONF.disable_ec2_classic):
|
||||
return None, os_floating_ip
|
||||
cleaner.addCleanup(neutron.delete_floatingip, os_floating_ip['id'])
|
||||
|
||||
address = {'os_id': os_floating_ip['id'],
|
||||
'public_ip': os_floating_ip['floating_ip_address']}
|
||||
address = db_api.add_item(context, 'eipalloc', address)
|
||||
return address, os_floating_ip
|
||||
|
||||
def release_address(self, context, public_ip, allocation_id):
|
||||
neutron = clients.neutron(context)
|
||||
if public_ip:
|
||||
# TODO(ft): implement search in DB layer
|
||||
address = next((addr for addr in
|
||||
db_api.get_items(context, 'eipalloc')
|
||||
if addr['public_ip'] == public_ip), None)
|
||||
if address and _is_address_valid(context, neutron, address):
|
||||
msg = _('You must specify an allocation id when releasing a '
|
||||
'VPC elastic IP address')
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
os_floating_ip = self.get_os_floating_ip_by_public_ip(context,
|
||||
public_ip)
|
||||
try:
|
||||
neutron.delete_floatingip(os_floating_ip['id'])
|
||||
except neutron_exception.NotFound:
|
||||
pass
|
||||
return
|
||||
|
||||
address = ec2utils.get_db_item(context, allocation_id)
|
||||
if not _is_address_valid(context, neutron, address):
|
||||
raise exception.InvalidAllocationIDNotFound(
|
||||
id=allocation_id)
|
||||
|
||||
if 'network_interface_id' in address:
|
||||
if CONF.disable_ec2_classic:
|
||||
network_interface_id = address['network_interface_id']
|
||||
network_interface = db_api.get_item_by_id(context,
|
||||
network_interface_id)
|
||||
default_vpc = ec2utils.check_and_create_default_vpc(context)
|
||||
if default_vpc:
|
||||
default_vpc_id = default_vpc['id']
|
||||
if (network_interface and
|
||||
network_interface['vpc_id'] == default_vpc_id):
|
||||
association_id = ec2utils.change_ec2_id_kind(address['id'],
|
||||
'eipassoc')
|
||||
self.disassociate_address(
|
||||
context, association_id=association_id)
|
||||
else:
|
||||
raise exception.InvalidIPAddressInUse(
|
||||
ip_address=address['public_ip'])
|
||||
else:
|
||||
raise exception.InvalidIPAddressInUse(
|
||||
ip_address=address['public_ip'])
|
||||
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
db_api.delete_item(context, address['id'])
|
||||
cleaner.addCleanup(db_api.restore_item, context,
|
||||
'eipalloc', address)
|
||||
try:
|
||||
neutron.delete_floatingip(address['os_id'])
|
||||
except neutron_exception.NotFound:
|
||||
pass
|
||||
|
||||
def associate_address(self, context, public_ip=None, instance_id=None,
|
||||
allocation_id=None, network_interface_id=None,
|
||||
private_ip_address=None, allow_reassociation=False):
|
||||
instance_network_interfaces = []
|
||||
if instance_id:
|
||||
# TODO(ft): implement search in DB layer
|
||||
for eni in db_api.get_items(context, 'eni'):
|
||||
if eni.get('instance_id') == instance_id:
|
||||
instance_network_interfaces.append(eni)
|
||||
|
||||
neutron = clients.neutron(context)
|
||||
|
||||
if public_ip:
|
||||
# TODO(ft): implement search in DB layer
|
||||
address = next((addr for addr in db_api.get_items(context,
|
||||
'eipalloc')
|
||||
if addr['public_ip'] == public_ip), None)
|
||||
|
||||
if not CONF.disable_ec2_classic:
|
||||
if instance_network_interfaces:
|
||||
msg = _('You must specify an allocation id when mapping '
|
||||
'an address to a VPC instance')
|
||||
raise exception.InvalidParameterCombination(msg)
|
||||
if address and _is_address_valid(context, neutron, address):
|
||||
msg = _(
|
||||
"The address '%(public_ip)s' does not belong to you.")
|
||||
raise exception.AuthFailure(msg % {'public_ip': public_ip})
|
||||
|
||||
os_instance_id = ec2utils.get_db_item(context,
|
||||
instance_id)['os_id']
|
||||
# NOTE(ft): check the public IP exists to raise AWS exception
|
||||
# otherwise
|
||||
self.get_os_floating_ip_by_public_ip(context, public_ip)
|
||||
nova = clients.nova(context)
|
||||
nova.servers.add_floating_ip(os_instance_id, public_ip)
|
||||
return None
|
||||
|
||||
if not address:
|
||||
msg = _("The address '%(public_ip)s' does not belong to you.")
|
||||
raise exception.AuthFailure(msg % {'public_ip': public_ip})
|
||||
allocation_id = address['id']
|
||||
|
||||
if instance_id:
|
||||
if not instance_network_interfaces:
|
||||
# NOTE(ft): check the instance exists
|
||||
ec2utils.get_db_item(context, instance_id)
|
||||
msg = _('You must specify an IP address when mapping '
|
||||
'to a non-VPC instance')
|
||||
raise exception.InvalidParameterCombination(msg)
|
||||
if len(instance_network_interfaces) > 1:
|
||||
raise exception.InvalidInstanceId(instance_id=instance_id)
|
||||
network_interface = instance_network_interfaces[0]
|
||||
else:
|
||||
network_interface = ec2utils.get_db_item(context,
|
||||
network_interface_id)
|
||||
if not private_ip_address:
|
||||
private_ip_address = network_interface['private_ip_address']
|
||||
|
||||
address = ec2utils.get_db_item(context, allocation_id)
|
||||
if not _is_address_valid(context, neutron, address):
|
||||
raise exception.InvalidAllocationIDNotFound(
|
||||
id=allocation_id)
|
||||
|
||||
if address.get('network_interface_id') == network_interface['id']:
|
||||
# NOTE(ft): idempotent call
|
||||
pass
|
||||
elif address.get('network_interface_id') and not allow_reassociation:
|
||||
msg = _('resource %(eipalloc_id)s is already associated with '
|
||||
'associate-id %(eipassoc_id)s')
|
||||
msg = msg % {'eipalloc_id': allocation_id,
|
||||
'eipassoc_id': ec2utils.change_ec2_id_kind(
|
||||
address['id'], 'eipassoc')}
|
||||
raise exception.ResourceAlreadyAssociated(msg)
|
||||
else:
|
||||
internet_gateways = (
|
||||
internet_gateway_api.describe_internet_gateways(
|
||||
context,
|
||||
filter=[{'name': 'attachment.vpc-id',
|
||||
'value': [network_interface['vpc_id']]}])
|
||||
['internetGatewaySet'])
|
||||
if len(internet_gateways) == 0:
|
||||
msg = _('Network %(vpc_id)s is not attached to any internet '
|
||||
'gateway') % {'vpc_id': network_interface['vpc_id']}
|
||||
raise exception.GatewayNotAttached(msg)
|
||||
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_associate_address_item(context, address,
|
||||
network_interface['id'],
|
||||
private_ip_address)
|
||||
cleaner.addCleanup(_disassociate_address_item, context,
|
||||
address)
|
||||
|
||||
os_floating_ip = {'port_id': network_interface['os_id'],
|
||||
'fixed_ip_address': private_ip_address}
|
||||
neutron.update_floatingip(address['os_id'],
|
||||
{'floatingip': os_floating_ip})
|
||||
# TODO(ft): generate unique association id for each act of association
|
||||
return ec2utils.change_ec2_id_kind(address['id'], 'eipassoc')
|
||||
|
||||
def disassociate_address(self, context, public_ip=None,
|
||||
association_id=None):
|
||||
neutron = clients.neutron(context)
|
||||
|
||||
if public_ip:
|
||||
# TODO(ft): implement search in DB layer
|
||||
address = next((addr for addr in db_api.get_items(context,
|
||||
'eipalloc')
|
||||
if addr['public_ip'] == public_ip), None)
|
||||
|
||||
if not CONF.disable_ec2_classic:
|
||||
if address and _is_address_valid(context, neutron, address):
|
||||
msg = _('You must specify an association id when '
|
||||
'unmapping an address from a VPC instance')
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
# NOTE(tikitavi): check the public IP exists to raise AWS
|
||||
# exception otherwise
|
||||
os_floating_ip = self.get_os_floating_ip_by_public_ip(
|
||||
context, public_ip)
|
||||
os_ports = self.get_os_ports(context)
|
||||
os_instance_id = _get_os_instance_id(context, os_floating_ip,
|
||||
os_ports)
|
||||
if os_instance_id:
|
||||
nova = clients.nova(context)
|
||||
nova.servers.remove_floating_ip(os_instance_id, public_ip)
|
||||
return None
|
||||
|
||||
if not address:
|
||||
msg = _("The address '%(public_ip)s' does not belong to you.")
|
||||
raise exception.AuthFailure(msg % {'public_ip': public_ip})
|
||||
if 'network_interface_id' not in address:
|
||||
msg = _('You must specify an association id when unmapping '
|
||||
'an address from a VPC instance')
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
association_id = ec2utils.change_ec2_id_kind(address['id'],
|
||||
'eipassoc')
|
||||
|
||||
address = db_api.get_item_by_id(
|
||||
context, ec2utils.change_ec2_id_kind(association_id, 'eipalloc'))
|
||||
if address is None or not _is_address_valid(context, neutron, address):
|
||||
raise exception.InvalidAssociationIDNotFound(
|
||||
id=association_id)
|
||||
if 'network_interface_id' in address:
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
network_interface_id = address['network_interface_id']
|
||||
private_ip_address = address['private_ip_address']
|
||||
_disassociate_address_item(context, address)
|
||||
cleaner.addCleanup(_associate_address_item, context, address,
|
||||
network_interface_id, private_ip_address)
|
||||
|
||||
neutron.update_floatingip(address['os_id'],
|
||||
{'floatingip': {'port_id': None}})
|
||||
|
||||
def get_os_floating_ips(self, context):
|
||||
neutron = clients.neutron(context)
|
||||
return neutron.list_floatingips(
|
||||
tenant_id=context.project_id)['floatingips']
|
||||
|
||||
def get_os_ports(self, context):
|
||||
neutron = clients.neutron(context)
|
||||
return neutron.list_ports(tenant_id=context.project_id)['ports']
|
||||
|
||||
def get_os_floating_ip_by_public_ip(self, context, public_ip):
|
||||
os_floating_ip = next((addr for addr in
|
||||
self.get_os_floating_ips(context)
|
||||
if addr['floating_ip_address'] == public_ip),
|
||||
None)
|
||||
if not os_floating_ip:
|
||||
msg = _("The address '%(public_ip)s' does not belong to you.")
|
||||
raise exception.AuthFailure(msg % {'public_ip': public_ip})
|
||||
return os_floating_ip
|
||||
|
||||
|
||||
address_engine = get_address_engine()
|
@ -1,101 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
APIRequest class
|
||||
"""
|
||||
|
||||
from lxml import etree
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from ec2api.api import cloud
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api import exception
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _underscore_to_camelcase(st):
|
||||
return ''.join([x[:1].upper() + x[1:] for x in st.split('_')])
|
||||
|
||||
|
||||
def _database_to_isoformat(datetimeobj):
|
||||
"""Return a xs:dateTime parsable string from datatime."""
|
||||
return datetimeobj.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'
|
||||
|
||||
|
||||
class APIRequest(object):
|
||||
|
||||
def __init__(self, action, version, args):
|
||||
self.action = action
|
||||
self.version = version
|
||||
self.args = args
|
||||
self.controller = cloud.VpcCloudController()
|
||||
|
||||
def invoke(self, context):
|
||||
try:
|
||||
method = getattr(self.controller,
|
||||
ec2utils.camelcase_to_underscore(self.action))
|
||||
except AttributeError:
|
||||
LOG.exception('Unsupported API request: action = %(action)s',
|
||||
{'action': self.action})
|
||||
raise exception.InvalidRequest()
|
||||
|
||||
args = ec2utils.dict_from_dotted_str(self.args.items())
|
||||
|
||||
def convert_dicts_to_lists(args):
|
||||
if not isinstance(args, dict):
|
||||
return args
|
||||
for key in args.keys():
|
||||
# NOTE(vish): Turn numeric dict keys into lists
|
||||
# NOTE(Alex): Turn "value"-only dict keys into values
|
||||
if isinstance(args[key], dict):
|
||||
if args[key] == {}:
|
||||
continue
|
||||
first_subkey = next(iter(args[key].keys()))
|
||||
if first_subkey.isdigit():
|
||||
s = args[key]
|
||||
args[key] = [convert_dicts_to_lists(s[k])
|
||||
for k in sorted(s)]
|
||||
elif (first_subkey == 'value' and
|
||||
len(args[key]) == 1):
|
||||
args[key] = args[key]['value']
|
||||
return args
|
||||
|
||||
args = convert_dicts_to_lists(args)
|
||||
result = method(context, **args)
|
||||
return self._render_response(result, context.request_id)
|
||||
|
||||
def _render_response(self, response_data, request_id):
|
||||
response_el = ec2utils.dict_to_xml(
|
||||
{'return': 'true'} if response_data is True else response_data,
|
||||
self.action + 'Response')
|
||||
response_el.attrib['xmlns'] = ('http://ec2.amazonaws.com/doc/%s/'
|
||||
% self.version)
|
||||
request_id_el = etree.Element('requestId')
|
||||
request_id_el.text = request_id
|
||||
response_el.insert(0, request_id_el)
|
||||
|
||||
response = etree.tostring(response_el, pretty_print=True)
|
||||
|
||||
# Don't write private key to log
|
||||
if self.action != "CreateKeyPair":
|
||||
LOG.debug(response)
|
||||
else:
|
||||
LOG.debug("CreateKeyPair: Return Private Key")
|
||||
|
||||
return response
|
@ -1,47 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Common Auth Middleware.
|
||||
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
|
||||
auth_opts = [
|
||||
cfg.BoolOpt('use_forwarded_for',
|
||||
default=False,
|
||||
help='Treat X-Forwarded-For as the canonical remote address. '
|
||||
'Only enable this if you have a sanitizing proxy.'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(auth_opts)
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def pipeline_factory(loader, global_conf, **local_conf):
|
||||
"""A paste pipeline replica that keys off of auth_strategy."""
|
||||
auth_strategy = "keystone"
|
||||
pipeline = local_conf[auth_strategy]
|
||||
pipeline = pipeline.split()
|
||||
filters = [loader.get_filter(n) for n in pipeline[:-1]]
|
||||
app = loader.get_app(pipeline[-1])
|
||||
filters.reverse()
|
||||
for fltr in filters:
|
||||
app = fltr(app)
|
||||
return app
|
@ -1,214 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import netutils
|
||||
|
||||
from ec2api.api import common
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api import clients
|
||||
from ec2api import exception
|
||||
|
||||
|
||||
availability_zone_opts = [
|
||||
cfg.StrOpt('internal_service_availability_zone',
|
||||
default='internal',
|
||||
help='The availability_zone to show internal services under'),
|
||||
cfg.StrOpt('my_ip',
|
||||
default=netutils.get_my_ipv4(),
|
||||
help='IP address of this host'),
|
||||
cfg.StrOpt('ec2_host',
|
||||
default='$my_ip',
|
||||
help='The IP address of the EC2 API server'),
|
||||
cfg.IntOpt('ec2_port',
|
||||
default=8788,
|
||||
help='The port of the EC2 API server'),
|
||||
cfg.StrOpt('ec2_scheme',
|
||||
default='http',
|
||||
help='The protocol to use when connecting to the EC2 API '
|
||||
'server (http, https)'),
|
||||
cfg.StrOpt('ec2_path',
|
||||
default='/',
|
||||
help='The path prefix used to call the ec2 API server'),
|
||||
cfg.ListOpt('region_list',
|
||||
default=[],
|
||||
help='List of region=fqdn pairs separated by commas'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(availability_zone_opts)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
"""Availability zones, regions, account attributes related API implementation
|
||||
"""
|
||||
|
||||
|
||||
Validator = common.Validator
|
||||
|
||||
|
||||
def get_account_attribute_engine():
|
||||
return AccountAttributeEngineNeutron()
|
||||
|
||||
|
||||
class AvailabilityZoneDescriber(common.UniversalDescriber):
|
||||
|
||||
KIND = 'az'
|
||||
FILTER_MAP = {'state': 'zoneState',
|
||||
'zone-name': 'zoneName'}
|
||||
|
||||
def format(self, item=None, os_item=None):
|
||||
return _format_availability_zone(os_item)
|
||||
|
||||
def get_db_items(self):
|
||||
return []
|
||||
|
||||
def get_os_items(self):
|
||||
nova = clients.nova(self.context)
|
||||
zones = nova.availability_zones.list(detailed=False)
|
||||
for zone in zones:
|
||||
if zone.zoneName == CONF.internal_service_availability_zone:
|
||||
zones.remove(zone)
|
||||
return zones
|
||||
|
||||
def get_name(self, os_item):
|
||||
return os_item.zoneName
|
||||
|
||||
def get_id(self, os_item):
|
||||
return ''
|
||||
|
||||
def auto_update_db(self, item, os_item):
|
||||
pass
|
||||
|
||||
|
||||
def describe_availability_zones(context, zone_name=None, filter=None):
|
||||
# NOTE(Alex): Openstack extension, AWS-incompatibility
|
||||
# Checking for 'verbose' in zone_name.
|
||||
if zone_name and 'verbose' in zone_name:
|
||||
return _describe_verbose(context)
|
||||
|
||||
formatted_availability_zones = AvailabilityZoneDescriber().describe(
|
||||
context, names=zone_name, filter=filter)
|
||||
return {'availabilityZoneInfo': formatted_availability_zones}
|
||||
|
||||
|
||||
def describe_regions(context, region_name=None, filter=None):
|
||||
# TODO(andrey-mp): collect regions from keystone catalog
|
||||
if CONF.region_list:
|
||||
regions = []
|
||||
for region in CONF.region_list:
|
||||
name, _sep, host = region.partition('=')
|
||||
if not host:
|
||||
host = CONF.ec2_host
|
||||
endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme,
|
||||
host,
|
||||
CONF.ec2_port,
|
||||
CONF.ec2_path)
|
||||
regions.append({'regionName': name,
|
||||
'regionEndpoint': endpoint})
|
||||
else:
|
||||
# NOTE(andrey-mp): RegionOne is a default region name that is used
|
||||
# in keystone, nova and some other projects
|
||||
regions = [{'regionName': 'RegionOne',
|
||||
'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme,
|
||||
CONF.ec2_host,
|
||||
CONF.ec2_port,
|
||||
CONF.ec2_path)}]
|
||||
return {'regionInfo': regions}
|
||||
|
||||
|
||||
def describe_account_attributes(context, attribute_name=None):
|
||||
def get_max_instances():
|
||||
nova = clients.nova(context)
|
||||
quotas = nova.quotas.get(context.project_id, context.user_id)
|
||||
return quotas.instances
|
||||
|
||||
attribute_getters = {
|
||||
'supported-platforms': (
|
||||
account_attribute_engine.get_supported_platforms),
|
||||
'default-vpc': functools.partial(
|
||||
account_attribute_engine.get_default_vpc, context),
|
||||
'max-instances': get_max_instances,
|
||||
}
|
||||
|
||||
formatted_attributes = []
|
||||
for attribute in (attribute_name or attribute_getters):
|
||||
if attribute not in attribute_getters:
|
||||
raise exception.InvalidParameter(name=attribute)
|
||||
formatted_attributes.append(
|
||||
_format_account_attribute(attribute,
|
||||
attribute_getters[attribute]()))
|
||||
return {'accountAttributeSet': formatted_attributes}
|
||||
|
||||
|
||||
def _format_availability_zone(zone):
|
||||
return {'zoneName': zone.zoneName,
|
||||
'zoneState': ('available'
|
||||
if zone.zoneState.get('available')
|
||||
else 'unavailable')
|
||||
}
|
||||
|
||||
|
||||
def _format_account_attribute(attribute, value):
|
||||
if not isinstance(value, list):
|
||||
value = [value]
|
||||
return {'attributeName': attribute,
|
||||
'attributeValueSet': [{'attributeValue': val} for val in value]}
|
||||
|
||||
|
||||
# NOTE(Alex): Openstack extension, AWS-incompatibility
|
||||
# The whole function and its result is incompatible with AWS.
|
||||
|
||||
def _describe_verbose(context):
|
||||
nova = clients.nova(context)
|
||||
availability_zones = nova.availability_zones.list()
|
||||
|
||||
formatted_availability_zones = []
|
||||
for availability_zone in availability_zones:
|
||||
formatted_availability_zones.append(
|
||||
_format_availability_zone(availability_zone))
|
||||
for host, services in availability_zone.hosts.items():
|
||||
formatted_availability_zones.append(
|
||||
{'zoneName': '|- %s' % host,
|
||||
'zoneState': ''})
|
||||
for service, values in services.items():
|
||||
active = ":-)" if values['active'] else "XXX"
|
||||
enabled = 'enabled' if values['available'] else 'disabled'
|
||||
formatted_availability_zones.append(
|
||||
{'zoneName': '| |- %s' % service,
|
||||
'zoneState': ('%s %s %s' % (enabled, active,
|
||||
values['updated_at']))})
|
||||
|
||||
return {'availabilityZoneInfo': formatted_availability_zones}
|
||||
|
||||
|
||||
class AccountAttributeEngineNeutron(object):
|
||||
|
||||
def get_supported_platforms(self):
|
||||
if CONF.disable_ec2_classic:
|
||||
return ['VPC']
|
||||
else:
|
||||
return ['EC2', 'VPC']
|
||||
|
||||
def get_default_vpc(self, context):
|
||||
if CONF.disable_ec2_classic:
|
||||
default_vpc = ec2utils.check_and_create_default_vpc(context)
|
||||
if default_vpc:
|
||||
return default_vpc['id']
|
||||
return 'none'
|
||||
|
||||
|
||||
account_attribute_engine = get_account_attribute_engine()
|
2115
ec2api/api/cloud.py
File diff suppressed because it is too large
@ -1,534 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
import collections
|
||||
import fnmatch
|
||||
import inspect
|
||||
import operator
|
||||
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api.api import validator
|
||||
from ec2api.db import api as db_api
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
|
||||
ec2_opts = [
|
||||
cfg.BoolOpt('disable_ec2_classic',
|
||||
help='True if server does not support EC2 Classic mode '
|
||||
'in favor of default VPC'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(ec2_opts)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OnCrashCleaner(object):
|
||||
|
||||
def __init__(self):
|
||||
self._cleanups = []
|
||||
self._suppress_exception = False
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
if exc_type is None:
|
||||
return
|
||||
self._run_cleanups(self._cleanups)
|
||||
return self._suppress_exception
|
||||
|
||||
def addCleanup(self, function, *args, **kwargs):
|
||||
self._cleanups.append((function, args, kwargs))
|
||||
|
||||
def approveChanges(self):
|
||||
del self._cleanups[:]
|
||||
self._suppress_exception = True
|
||||
|
||||
def _run_cleanups(self, cleanups):
|
||||
for function, args, kwargs in reversed(cleanups):
|
||||
try:
|
||||
function(*args, **kwargs)
|
||||
except Exception:
|
||||
if inspect.ismethod(function):
|
||||
cmodule = function.__self__.__class__.__module__
|
||||
cname = function.__self__.__class__.__name__
|
||||
name = '%s.%s.%s' % (cmodule, cname, function.__name__)
|
||||
elif inspect.isfunction(function):
|
||||
name = '%s.%s' % (function.__module__, function.__name__)
|
||||
else:
|
||||
name = '%s.%s' % (function.__class__.__module__,
|
||||
function.__class__.__name__)
|
||||
formatted_args = ''
|
||||
args_string = ', '.join([repr(arg) for arg in args])
|
||||
kwargs_string = ', '.join([
|
||||
'%s=%r' % (key, value) for key, value in kwargs.items()
|
||||
])
|
||||
if args_string:
|
||||
formatted_args = args_string
|
||||
if kwargs_string:
|
||||
if formatted_args:
|
||||
formatted_args += ', '
|
||||
formatted_args += kwargs_string
|
||||
LOG.warning(
|
||||
'Error cleaning up %(name)s(%(args)s)' %
|
||||
{'name': name, 'args': formatted_args},
|
||||
exc_info=True)
|
||||
pass
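
A minimal usage sketch of this cleaner (allocate_port, attach_port and release_port are hypothetical helpers, not part of this module): cleanups registered with addCleanup run in reverse order only if the block raises; approveChanges drops them and suppresses any later error in the block.

    with OnCrashCleaner() as cleaner:
        port = allocate_port()                   # hypothetical step 1
        cleaner.addCleanup(release_port, port)   # undone if a later step fails
        attach_port(port)                        # hypothetical step 2
        cleaner.approveChanges()                 # commit: drop the cleanups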
|
||||
|
||||
|
||||
class Validator(object):
|
||||
|
||||
def __init__(self, param_name="", action="", params=[]):
|
||||
self.param_name = param_name
|
||||
self.action = action
|
||||
self.params = params
|
||||
|
||||
def multi(self, items, validation_func):
|
||||
validator.validate_list(items, self.param_name)
|
||||
for item in items:
|
||||
validation_func(item)
|
||||
|
||||
def dummy(self, value):
|
||||
pass
|
||||
|
||||
def bool(self, value):
|
||||
validator.validate_bool(value, self.param_name)
|
||||
|
||||
def int(self, value):
|
||||
validator.validate_int(value, self.param_name)
|
||||
|
||||
def str(self, value):
|
||||
validator.validate_str(value, self.param_name)
|
||||
|
||||
def strs(self, values):
|
||||
self.multi(values, self.str)
|
||||
|
||||
def str64(self, value):
|
||||
validator.validate_str(value, self.param_name, 64)
|
||||
|
||||
def str255(self, value):
|
||||
validator.validate_str(value, self.param_name, 255)
|
||||
|
||||
def str255s(self, values):
|
||||
self.multi(values, self.str255)
|
||||
|
||||
def ip(self, ip):
|
||||
validator.validate_ipv4(ip, self.param_name)
|
||||
|
||||
def ips(self, ips):
|
||||
self.multi(ips, self.ip)
|
||||
|
||||
def cidr(self, cidr):
|
||||
validator.validate_cidr(cidr, self.param_name)
|
||||
|
||||
def subnet_cidr(self, cidr):
|
||||
validator.validate_subnet_cidr(cidr)
|
||||
|
||||
def vpc_cidr(self, cidr):
|
||||
validator.validate_vpc_cidr(cidr)
|
||||
|
||||
def filter(self, filter):
|
||||
validator.validate_filter(filter)
|
||||
|
||||
def key_value_dict_list(self, dict_list):
|
||||
validator.validate_key_value_dict_list(dict_list, self.param_name)
|
||||
|
||||
def ec2_id(self, id, prefices=None):
|
||||
validator.validate_ec2_id(id, self.param_name, prefices)
|
||||
|
||||
def ec2_ids(self, ids):
|
||||
self.multi(ids, self.ec2_id)
|
||||
|
||||
def i_id(self, id):
|
||||
self.ec2_id(id, ['i'])
|
||||
|
||||
def i_ids(self, ids):
|
||||
self.multi(ids, self.i_id)
|
||||
|
||||
def ami_id(self, id):
|
||||
self.ec2_id(id, ['ami'])
|
||||
|
||||
def aki_id(self, id):
|
||||
self.ec2_id(id, ['aki'])
|
||||
|
||||
def ari_id(self, id):
|
||||
self.ec2_id(id, ['ari'])
|
||||
|
||||
def amiariaki_id(self, id):
|
||||
self.ec2_id(id, ['ami', 'ari', 'aki'])
|
||||
|
||||
def amiariaki_ids(self, ids):
|
||||
self.multi(ids, self.amiariaki_id)
|
||||
|
||||
def sg_id(self, id):
|
||||
self.ec2_id(id, ['sg'])
|
||||
|
||||
def sg_ids(self, ids):
|
||||
self.multi(ids, self.sg_id)
|
||||
|
||||
def subnet_id(self, id):
|
||||
self.ec2_id(id, ['subnet'])
|
||||
|
||||
def subnet_ids(self, ids):
|
||||
self.multi(ids, self.subnet_id)
|
||||
|
||||
def igw_id(self, id):
|
||||
self.ec2_id(id, ['igw'])
|
||||
|
||||
def igw_ids(self, ids):
|
||||
self.multi(ids, self.igw_id)
|
||||
|
||||
def rtb_id(self, id):
|
||||
self.ec2_id(id, ['rtb'])
|
||||
|
||||
def rtb_ids(self, ids):
|
||||
self.multi(ids, self.rtb_id)
|
||||
|
||||
def eni_id(self, id):
|
||||
self.ec2_id(id, ['eni'])
|
||||
|
||||
def eni_ids(self, ids):
|
||||
self.multi(ids, self.eni_id)
|
||||
|
||||
def vpc_id(self, id):
|
||||
self.ec2_id(id, ['vpc'])
|
||||
|
||||
def vpc_ids(self, ids):
|
||||
self.multi(ids, self.vpc_id)
|
||||
|
||||
def eipalloc_id(self, id):
|
||||
self.ec2_id(id, ['eipalloc'])
|
||||
|
||||
def eipalloc_ids(self, ids):
|
||||
self.multi(ids, self.eipalloc_id)
|
||||
|
||||
def eipassoc_id(self, id):
|
||||
self.ec2_id(id, ['eipassoc'])
|
||||
|
||||
def rtbassoc_id(self, id):
|
||||
self.ec2_id(id, ['rtbassoc'])
|
||||
|
||||
def eni_attach_id(self, id):
|
||||
self.ec2_id(id, ['eni-attach'])
|
||||
|
||||
def snap_id(self, id):
|
||||
self.ec2_id(id, ['snap'])
|
||||
|
||||
def snap_ids(self, ids):
|
||||
self.multi(ids, self.snap_id)
|
||||
|
||||
def vol_id(self, id):
|
||||
self.ec2_id(id, ['vol'])
|
||||
|
||||
def vol_ids(self, ids):
|
||||
self.multi(ids, self.vol_id)
|
||||
|
||||
def dopt_id(self, id):
|
||||
self.ec2_id(id, ['dopt'])
|
||||
|
||||
def dopt_ids(self, ids):
|
||||
self.multi(ids, self.dopt_id)
|
||||
|
||||
def vgw_id(self, id):
|
||||
self.ec2_id(id, ['vgw'])
|
||||
|
||||
def vgw_ids(self, ids):
|
||||
self.multi(ids, self.vgw_id)
|
||||
|
||||
def cgw_id(self, id):
|
||||
self.ec2_id(id, ['cgw'])
|
||||
|
||||
def cgw_ids(self, ids):
|
||||
self.multi(ids, self.cgw_id)
|
||||
|
||||
def vpn_id(self, id):
|
||||
self.ec2_id(id, ['vpn'])
|
||||
|
||||
def vpn_ids(self, ids):
|
||||
self.multi(ids, self.vpn_id)
|
||||
|
||||
def security_group_str(self, value):
|
||||
validator.validate_security_group_str(value, self.param_name,
|
||||
self.params.get('vpc_id'))
|
||||
|
||||
def security_group_strs(self, values):
|
||||
self.multi(values, self.security_group_str)
|
||||
|
||||
def vpn_connection_type(self, value):
|
||||
validator.validate_vpn_connection_type(value)
|
||||
|
||||
|
||||
VPC_KINDS = ['vpc', 'igw', 'subnet', 'eni', 'dopt', 'eipalloc', 'rtb',
|
||||
'vgw', 'cgw', 'vpn']
|
||||
|
||||
|
||||
class UniversalDescriber(object):
|
||||
"""Abstract Describer class for various Describe implementations."""
|
||||
|
||||
KIND = ''
|
||||
SORT_KEY = ''
|
||||
FILTER_MAP = {}
|
||||
|
||||
def format(self, item=None, os_item=None):
|
||||
pass
|
||||
|
||||
def post_format(self, formatted_item, item):
|
||||
pass
|
||||
|
||||
def get_db_items(self):
|
||||
return ec2utils.get_db_items(self.context, self.KIND, self.ids)
|
||||
|
||||
def get_os_items(self):
|
||||
return []
|
||||
|
||||
def auto_update_db(self, item, os_item):
|
||||
if item is None and self.KIND not in VPC_KINDS:
|
||||
item = ec2utils.auto_create_db_item(self.context, self.KIND,
|
||||
self.get_id(os_item))
|
||||
LOG.info(
|
||||
'Item %(item)s was updated to %(os_item)s.',
|
||||
{'item': str(item), 'os_item': str(os_item)})
|
||||
return item
|
||||
|
||||
def get_id(self, os_item):
|
||||
return os_item['id'] if isinstance(os_item, dict) else os_item.id
|
||||
|
||||
def get_name(self, os_item):
|
||||
return os_item['name']
|
||||
|
||||
def delete_obsolete_item(self, item):
|
||||
LOG.info('Deleting obsolete item %(item)s', {'item': str(item)})
|
||||
db_api.delete_item(self.context, item['id'])
|
||||
|
||||
def is_filtering_value_found(self, filter_value, value):
|
||||
if fnmatch.fnmatch(str(value), str(filter_value)):
|
||||
return True
|
||||
|
||||
def filtered_out(self, item, filters):
|
||||
if filters is None:
|
||||
return False
|
||||
for filter in filters:
|
||||
filter_name = self.FILTER_MAP.get(filter['name'])
|
||||
if filter_name is None:
|
||||
raise exception.InvalidParameterValue(
|
||||
value=filter['name'], parameter='filter',
|
||||
reason='invalid filter')
|
||||
values = self.get_values_by_filter(filter_name, item)
|
||||
if not values:
|
||||
return True
|
||||
filter_values = filter['value']
|
||||
for filter_value in filter_values:
|
||||
if any(self.is_filtering_value_found(filter_value, value)
|
||||
for value in values):
|
||||
break
|
||||
else:
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_values_by_filter(self, filter_name, item):
|
||||
if isinstance(filter_name, list):
|
||||
values = []
|
||||
value_set = item.get(filter_name[0], [])
|
||||
for value in value_set:
|
||||
vals = self.get_values_by_filter(filter_name[1], value)
|
||||
if vals:
|
||||
values += vals
|
||||
else:
|
||||
if isinstance(filter_name, tuple):
|
||||
value = item.get(filter_name[0], {}).get(filter_name[1])
|
||||
else:
|
||||
value = item.get(filter_name)
|
||||
values = [value] if value is not None else []
|
||||
return values
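
To illustrate the three FILTER_MAP value shapes handled above (a plain key, a tuple for a nested dict, and a list for a set of sub-items), using a made-up formatted item and any UniversalDescriber subclass instance:

    item = {'vpcId': 'vpc-1',
            'attachment': {'state': 'attached'},
            'tagSet': [{'key': 'Name', 'value': 'db'}]}
    describer.get_values_by_filter('vpcId', item)                  # ['vpc-1']
    describer.get_values_by_filter(('attachment', 'state'), item)  # ['attached']
    describer.get_values_by_filter(['tagSet', 'key'], item)        # ['Name']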
|
||||
|
||||
def get_paged(self, formatted_items, max_results, next_token):
|
||||
self.next_token = None
|
||||
if not max_results and not next_token:
|
||||
return formatted_items
|
||||
|
||||
if max_results and max_results > 1000:
|
||||
max_results = 1000
|
||||
formatted_items = sorted(formatted_items,
|
||||
key=operator.itemgetter(self.SORT_KEY))
|
||||
|
||||
next_item = 0
|
||||
if next_token:
|
||||
next_item = int(base64.b64decode(next_token).decode())
|
||||
if next_item:
|
||||
formatted_items = formatted_items[next_item:]
|
||||
if max_results and max_results < len(formatted_items):
|
||||
self.next_token = base64.b64encode(
|
||||
str(next_item + max_results).encode())
|
||||
formatted_items = formatted_items[:max_results]
|
||||
|
||||
return formatted_items
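
The next_token returned to the client is simply the base64-encoded offset of the next item, as a quick sketch shows:

    import base64
    # first page: items[0:5] are returned and the token encodes offset 5
    next_token = base64.b64encode(str(0 + 5).encode())      # b'NQ=='
    # on the next request the offset is recovered and the list re-sliced
    next_item = int(base64.b64decode(next_token).decode())  # 5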
|
||||
|
||||
def is_selected_item(self, context, os_item_name, item):
|
||||
return (os_item_name in self.names or
|
||||
(item and item['id'] in self.ids))
|
||||
|
||||
def handle_unpaired_item(self, item):
|
||||
self.delete_obsolete_item(item)
|
||||
|
||||
def describe(self, context, ids=None, names=None, filter=None,
|
||||
max_results=None, next_token=None):
|
||||
if max_results and max_results < 5:
|
||||
msg = (_('Value ( %s ) for parameter maxResults is invalid. '
|
||||
'Expecting a value greater than 5.') % max_results)
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
self.context = context
|
||||
self.selective_describe = ids is not None or names is not None
|
||||
self.ids = set(ids or [])
|
||||
self.names = set(names or [])
|
||||
self.items = self.get_db_items()
|
||||
self.os_items = self.get_os_items()
|
||||
formatted_items = []
|
||||
|
||||
self.items_dict = {i['os_id']: i for i in (self.items or [])}
|
||||
paired_items_ids = set()
|
||||
for os_item in self.os_items:
|
||||
os_item_name = self.get_name(os_item)
|
||||
os_item_id = self.get_id(os_item)
|
||||
item = self.items_dict.get(os_item_id, None)
|
||||
if item:
|
||||
paired_items_ids.add(item['id'])
|
||||
# NOTE(Alex): Filter out items not requested in names or ids
|
||||
if (self.selective_describe and
|
||||
not self.is_selected_item(context, os_item_name, item)):
|
||||
continue
|
||||
# NOTE(Alex): Autoupdate DB for autoupdatable items
|
||||
item = self.auto_update_db(item, os_item)
|
||||
# NOTE(andrey-mp): save item id again
|
||||
# (if item has created by auto update)
|
||||
if item:
|
||||
paired_items_ids.add(item['id'])
|
||||
formatted_item = self.format(item, os_item)
|
||||
self.post_format(formatted_item, item)
|
||||
if os_item_name in self.names:
|
||||
self.names.remove(os_item_name)
|
||||
if item and item['id'] in self.ids:
|
||||
self.ids.remove(item['id'])
|
||||
if (formatted_item and
|
||||
not self.filtered_out(formatted_item, filter)):
|
||||
formatted_items.append(formatted_item)
|
||||
# NOTE(Alex): delete obsolete items
|
||||
for item in self.items:
|
||||
if item['id'] in paired_items_ids:
|
||||
continue
|
||||
formatted_item = self.handle_unpaired_item(item)
|
||||
if formatted_item:
|
||||
if not self.filtered_out(formatted_item, filter):
|
||||
formatted_items.append(formatted_item)
|
||||
if item['id'] in self.ids:
|
||||
self.ids.remove(item['id'])
|
||||
# NOTE(Alex): some requested items are not found
|
||||
if self.ids or self.names:
|
||||
params = {'id': next(iter(self.ids or self.names))}
|
||||
raise ec2utils.NOT_FOUND_EXCEPTION_MAP[self.KIND](**params)
|
||||
|
||||
return self.get_paged(formatted_items, max_results, next_token)
|
||||
|
||||
|
||||
class TaggableItemsDescriber(UniversalDescriber):
|
||||
|
||||
tags = None
|
||||
|
||||
def __init__(self):
|
||||
super(TaggableItemsDescriber, self).__init__()
|
||||
self.FILTER_MAP['tag-key'] = ['tagSet', 'key']
|
||||
self.FILTER_MAP['tag-value'] = ['tagSet', 'value']
|
||||
self.FILTER_MAP['tag'] = 'tagSet'
|
||||
|
||||
def get_tags(self):
|
||||
return db_api.get_tags(self.context, (self.KIND,), self.ids)
|
||||
|
||||
def post_format(self, formatted_item, item):
|
||||
if not item or not formatted_item:
|
||||
return
|
||||
|
||||
if self.tags is None:
|
||||
tags = collections.defaultdict(list)
|
||||
for tag in self.get_tags():
|
||||
tags[tag['item_id']].append(tag)
|
||||
self.tags = tags
|
||||
|
||||
formatted_tags = []
|
||||
for tag in self.tags[item['id']]:
|
||||
formatted_tags.append({'key': tag['key'],
|
||||
'value': tag['value']})
|
||||
if formatted_tags:
|
||||
# NOTE(ft): AWS returns tagSet element for all objects (there are
|
||||
# errors in AWS docs)
|
||||
formatted_item['tagSet'] = formatted_tags
|
||||
|
||||
def describe(self, context, ids=None, names=None, filter=None,
|
||||
max_results=None, next_token=None):
|
||||
if filter:
|
||||
for f in filter:
|
||||
if f['name'].startswith('tag:'):
|
||||
tag_key = f['name'].split(':')[1]
|
||||
tag_values = f['value']
|
||||
f['name'] = 'tag'
|
||||
f['value'] = [{'key': tag_key,
|
||||
'value': tag_values}]
|
||||
return super(TaggableItemsDescriber, self).describe(
|
||||
context, ids=ids, names=names, filter=filter,
|
||||
max_results=max_results, next_token=next_token)
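
For example, an incoming EC2 filter {'name': 'tag:Name', 'value': ['db', 'web']} is rewritten above, before calling the parent describe(), into the generic form that is_filtering_value_found below understands:

    {'name': 'tag', 'value': [{'key': 'Name', 'value': ['db', 'web']}]}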
|
||||
|
||||
def is_filtering_value_found(self, filter_value, value):
|
||||
if isinstance(filter_value, dict):
|
||||
for tag_pair in value:
|
||||
if (not isinstance(tag_pair, dict) or
|
||||
filter_value.get('key') != tag_pair.get('key')):
|
||||
continue
|
||||
for filter_dict_value in filter_value.get('value'):
|
||||
if super(TaggableItemsDescriber,
|
||||
self).is_filtering_value_found(
|
||||
filter_dict_value,
|
||||
tag_pair.get('value')):
|
||||
return True
|
||||
return False
|
||||
return super(TaggableItemsDescriber,
|
||||
self).is_filtering_value_found(filter_value, value)
|
||||
|
||||
|
||||
class NonOpenstackItemsDescriber(UniversalDescriber):
|
||||
"""Describer class for non-Openstack items Describe implementations."""
|
||||
|
||||
def describe(self, context, ids=None, names=None, filter=None,
|
||||
max_results=None, next_token=None):
|
||||
if max_results and max_results < 5:
|
||||
msg = (_('Value ( %s ) for parameter maxResults is invalid. '
|
||||
'Expecting a value greater than 5.') % max_results)
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
self.context = context
|
||||
self.ids = ids
|
||||
self.items = self.get_db_items()
|
||||
formatted_items = []
|
||||
|
||||
for item in self.items:
|
||||
formatted_item = self.format(item)
|
||||
self.post_format(formatted_item, item)
|
||||
if (formatted_item and
|
||||
not self.filtered_out(formatted_item, filter)):
|
||||
formatted_items.append(formatted_item)
|
||||
|
||||
return self.get_paged(formatted_items, max_results, next_token)
|
@ -1,88 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ec2api.api import common
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api.db import api as db_api
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
|
||||
"""Customer gateways related API implementation
|
||||
"""
|
||||
|
||||
|
||||
Validator = common.Validator
|
||||
|
||||
|
||||
DEFAULT_BGP_ASN = 65000
|
||||
|
||||
|
||||
def create_customer_gateway(context, type, bgp_asn=None,
                            ip_address=None, public_ip=None):
    if ip_address:
        ip_addr = ip_address
    elif public_ip:
        ip_addr = public_ip
    else:
        raise exception.Unsupported("GW without ip not supported")
|
||||
if bgp_asn and bgp_asn != DEFAULT_BGP_ASN:
|
||||
raise exception.Unsupported("BGP dynamic routing is unsupported")
|
||||
# testing output to get ec2 failures
|
||||
customer_gateway = next((cgw for cgw in db_api.get_items(context, 'cgw')
|
||||
if cgw['ip_address'] == ip_addr), None)
|
||||
if not customer_gateway:
|
||||
customer_gateway = db_api.add_item(context, 'cgw',
|
||||
{'ip_address': ip_addr})
|
||||
return {'customerGateway': _format_customer_gateway(customer_gateway)}
|
||||
|
||||
|
||||
def delete_customer_gateway(context, customer_gateway_id):
|
||||
customer_gateway = ec2utils.get_db_item(context, customer_gateway_id)
|
||||
vpn_connections = db_api.get_items(context, 'vpn')
|
||||
if any(vpn['customer_gateway_id'] == customer_gateway['id']
|
||||
for vpn in vpn_connections):
|
||||
raise exception.IncorrectState(
|
||||
reason=_('The customer gateway is in use.'))
|
||||
db_api.delete_item(context, customer_gateway['id'])
|
||||
return True
|
||||
|
||||
|
||||
def describe_customer_gateways(context, customer_gateway_id=None,
|
||||
filter=None):
|
||||
formatted_cgws = CustomerGatewayDescriber().describe(
|
||||
context, ids=customer_gateway_id, filter=filter)
|
||||
return {'customerGatewaySet': formatted_cgws}
|
||||
|
||||
|
||||
class CustomerGatewayDescriber(common.TaggableItemsDescriber,
|
||||
common.NonOpenstackItemsDescriber):
|
||||
|
||||
KIND = 'cgw'
|
||||
FILTER_MAP = {'bgp-asn': 'bgpAsn',
|
||||
'customer-gateway-id': 'customerGatewayId',
|
||||
'ip-address': 'ipAddress',
|
||||
'state': 'state',
|
||||
'type': 'type'}
|
||||
|
||||
def format(self, customer_gateway):
|
||||
return _format_customer_gateway(customer_gateway)
|
||||
|
||||
|
||||
def _format_customer_gateway(customer_gateway):
|
||||
return {'customerGatewayId': customer_gateway['id'],
|
||||
'ipAddress': customer_gateway['ip_address'],
|
||||
'state': 'available',
|
||||
'type': 'ipsec.1',
|
||||
'bgpAsn': DEFAULT_BGP_ASN}
|
@ -1,186 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import netaddr
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from ec2api.api import common
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api import clients
|
||||
from ec2api.db import api as db_api
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
ec2_opts = [
|
||||
cfg.IntOpt('network_device_mtu',
|
||||
default=1500,
|
||||
               help='MTU size to be set via DHCP for instances. Corresponds '
                    'to the network_device_mtu option in ec2api.conf.')
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(ec2_opts)
|
||||
|
||||
|
||||
"""DHCP options related API implementation
|
||||
"""
|
||||
|
||||
|
||||
class Validator(common.Validator):
|
||||
|
||||
def dopt_id_or_default(self, id):
|
||||
if id == 'default':
|
||||
return
|
||||
self.ec2_id(id, ['dopt'])
|
||||
|
||||
|
||||
DHCP_OPTIONS_MAP = {'domain-name-servers': 'dns-server',
|
||||
'domain-name': 'domain-name',
|
||||
'ntp-servers': 'ntp-server',
|
||||
'netbios-name-servers': 'netbios-ns',
|
||||
'netbios-node-type': 'netbios-nodetype'}
|
||||
|
||||
|
||||
def create_dhcp_options(context, dhcp_configuration):
|
||||
dhcp_options = {}
|
||||
for dhcp_option in dhcp_configuration:
|
||||
key = dhcp_option['key']
|
||||
values = dhcp_option['value']
|
||||
if key not in DHCP_OPTIONS_MAP:
|
||||
raise exception.InvalidParameterValue(
|
||||
value=values,
|
||||
parameter=key,
|
||||
reason='Unrecognized key is specified')
|
||||
        if not isinstance(values, list):
            raise exception.InvalidParameterValue(
                value=values,
                parameter=key,
                reason='List of values is expected')
        if key not in ['domain-name', 'netbios-node-type']:
            ips = []
            for ip in values:
                # netaddr raises on a malformed address instead of returning
                # a falsy value, so catch the error explicitly
                try:
                    netaddr.IPAddress(ip)
                except (netaddr.AddrFormatError, ValueError):
                    raise exception.InvalidParameterValue(
                        value=ip,
                        parameter=key,
                        reason='Invalid list of IPs is specified')
                ips.append(ip)
            dhcp_options[key] = ips
|
||||
else:
|
||||
dhcp_options[key] = values
|
||||
dhcp_options = db_api.add_item(context, 'dopt',
|
||||
{'dhcp_configuration': dhcp_options})
|
||||
return {'dhcpOptions': _format_dhcp_options(context, dhcp_options)}
|
||||
|
||||
|
||||
def delete_dhcp_options(context, dhcp_options_id):
|
||||
if not dhcp_options_id:
|
||||
raise exception.MissingParameter(
|
||||
_('DHCP options ID must be specified'))
|
||||
dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
|
||||
vpcs = db_api.get_items(context, 'vpc')
|
||||
for vpc in vpcs:
|
||||
if dhcp_options['id'] == vpc.get('dhcp_options_id'):
|
||||
raise exception.DependencyViolation(
|
||||
obj1_id=dhcp_options['id'],
|
||||
obj2_id=vpc['id'])
|
||||
db_api.delete_item(context, dhcp_options['id'])
|
||||
return True
|
||||
|
||||
|
||||
class DhcpOptionsDescriber(common.TaggableItemsDescriber,
|
||||
common.NonOpenstackItemsDescriber):
|
||||
|
||||
KIND = 'dopt'
|
||||
FILTER_MAP = {'dhcp_options_id': 'dhcpOptionsId',
|
||||
'key': ['dhcpConfigurationSet', 'key'],
|
||||
'value': ['dhcpConfigurationSet', ['valueSet', 'value']]}
|
||||
|
||||
def format(self, dhcp_options):
|
||||
return _format_dhcp_options(self.context, dhcp_options)
|
||||
|
||||
|
||||
def describe_dhcp_options(context, dhcp_options_id=None,
|
||||
filter=None):
|
||||
formatted_dhcp_options = DhcpOptionsDescriber().describe(
|
||||
context, ids=dhcp_options_id, filter=filter)
|
||||
return {'dhcpOptionsSet': formatted_dhcp_options}
|
||||
|
||||
|
||||
def associate_dhcp_options(context, dhcp_options_id, vpc_id):
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
rollback_dhcp_options_id = vpc.get('dhcp_options_id')
|
||||
if dhcp_options_id == 'default':
|
||||
dhcp_options_id = None
|
||||
dhcp_options = None
|
||||
else:
|
||||
dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
|
||||
dhcp_options_id = dhcp_options['id']
|
||||
neutron = clients.neutron(context)
|
||||
os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
|
||||
network_interfaces = db_api.get_items(context, 'eni')
|
||||
rollback_dhcp_options_object = (
|
||||
db_api.get_item_by_id(context, rollback_dhcp_options_id)
|
||||
if dhcp_options_id is not None else
|
||||
None)
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_associate_vpc_item(context, vpc, dhcp_options_id)
|
||||
cleaner.addCleanup(_associate_vpc_item, context, vpc,
|
||||
rollback_dhcp_options_id)
|
||||
for network_interface in network_interfaces:
|
||||
os_port = next((p for p in os_ports
|
||||
if p['id'] == network_interface['os_id']), None)
|
||||
if not os_port:
|
||||
continue
|
||||
_add_dhcp_opts_to_port(context, dhcp_options,
|
||||
network_interface, os_port, neutron)
|
||||
cleaner.addCleanup(_add_dhcp_opts_to_port, context,
|
||||
rollback_dhcp_options_object, network_interface,
|
||||
os_port, neutron)
|
||||
return True
|
||||
|
||||
|
||||
def _add_dhcp_opts_to_port(context, dhcp_options, network_interface, os_port,
|
||||
neutron=None):
|
||||
dhcp_opts = [{'opt_name': 'mtu',
|
||||
'opt_value': str(CONF.network_device_mtu)}]
|
||||
if dhcp_options is not None:
|
||||
for key, values in dhcp_options['dhcp_configuration'].items():
|
||||
strvalues = [str(v) for v in values]
|
||||
dhcp_opts.append({'opt_name': DHCP_OPTIONS_MAP[key],
|
||||
'opt_value': ','.join(strvalues)})
|
||||
if not neutron:
|
||||
neutron = clients.neutron(context)
|
||||
neutron.update_port(os_port['id'],
|
||||
{'port': {'extra_dhcp_opts': dhcp_opts}})
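
A worked example of this translation, assuming the default network_device_mtu of 1500 and a DHCP options set with a domain name and two DNS servers:

    # dhcp_options['dhcp_configuration'] ==
    #     {'domain-name': ['example.com'],
    #      'domain-name-servers': ['8.8.8.8', '8.8.4.4']}
    # results in the Neutron port update
    #     extra_dhcp_opts = [
    #         {'opt_name': 'mtu', 'opt_value': '1500'},
    #         {'opt_name': 'domain-name', 'opt_value': 'example.com'},
    #         {'opt_name': 'dns-server', 'opt_value': '8.8.8.8,8.8.4.4'}]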
|
||||
|
||||
|
||||
def _format_dhcp_options(context, dhcp_options):
|
||||
dhcp_configuration = []
|
||||
for key, values in dhcp_options['dhcp_configuration'].items():
|
||||
items = [{'value': v} for v in values]
|
||||
dhcp_configuration.append({'key': key, 'valueSet': items})
|
||||
return {'dhcpOptionsId': dhcp_options['id'],
|
||||
'dhcpConfigurationSet': dhcp_configuration}
|
||||
|
||||
|
||||
def _associate_vpc_item(context, vpc, dhcp_options_id):
|
||||
vpc['dhcp_options_id'] = dhcp_options_id
|
||||
db_api.update_item(context, vpc)
|
@ -1,560 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
import re
|
||||
|
||||
from glanceclient.common import exceptions as glance_exception
|
||||
from lxml import etree
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from ec2api import clients
|
||||
from ec2api.db import api as db_api
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
ec2_opts = [
|
||||
cfg.StrOpt('external_network',
|
||||
default=None,
|
||||
               help='Name of the external network, which is used to connect '
                    'VPCs to the Internet and to allocate Elastic IPs.'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(ec2_opts)
|
||||
|
||||
LEGACY_BDM_FIELDS = set(['device_name', 'delete_on_termination', 'snapshot_id',
|
||||
'volume_id', 'volume_size', 'no_device'])
|
||||
|
||||
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
|
||||
|
||||
|
||||
def camelcase_to_underscore(str):
|
||||
return _c2u.sub(r'_\1', str).lower().strip('_')
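
For example, the regex keeps runs of capitals such as 'DNS' together:

    camelcase_to_underscore('BlockDeviceMapping')  # 'block_device_mapping'
    camelcase_to_underscore('DNSName')             # 'dns_name'
    camelcase_to_underscore('ImageId')             # 'image_id'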
|
||||
|
||||
|
||||
def _try_convert(value):
|
||||
"""Return a non-string from a string or unicode, if possible.
|
||||
|
||||
    ============= =====================================================
    When value is returns
    ============= =====================================================
    zero-length   ''
    'None'        None
    'True'        True case insensitive
    'False'       False case insensitive
    '0', '-0'     0
    0xN, -0xN     int from hex (positive) (N is any number)
    0bN, -0bN     int from binary (positive) (N is any number)
    *             try conversion to int, float, complex, fallback value
    ============= =====================================================
|
||||
|
||||
"""
|
||||
def _negative_zero(value):
|
||||
epsilon = 1e-7
|
||||
return 0 if abs(value) < epsilon else value
|
||||
|
||||
if len(value) == 0:
|
||||
return ''
|
||||
if value == 'None':
|
||||
return None
|
||||
lowered_value = value.lower()
|
||||
if lowered_value == 'true':
|
||||
return True
|
||||
if lowered_value == 'false':
|
||||
return False
|
||||
for prefix, base in [('0x', 16), ('0b', 2), ('0', 8), ('', 10)]:
|
||||
try:
|
||||
if lowered_value.startswith((prefix, "-" + prefix)):
|
||||
return int(lowered_value, base)
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
return _negative_zero(float(value))
|
||||
except ValueError:
|
||||
return value
|
||||
|
||||
|
||||
def dict_from_dotted_str(items):
|
||||
"""parse multi dot-separated argument into dict.
|
||||
|
||||
EBS boot uses multi dot-separated arguments like
|
||||
BlockDeviceMapping.1.DeviceName=snap-id
|
||||
Convert the above into
|
||||
{'block_device_mapping': {'1': {'device_name': snap-id}}}
|
||||
"""
|
||||
args = {}
|
||||
for key, value in items:
|
||||
parts = key.split(".")
|
||||
key = str(camelcase_to_underscore(parts[0]))
|
||||
if isinstance(value, str):
|
||||
# NOTE(vish): Automatically convert strings back
|
||||
# into their respective values
|
||||
value = _try_convert(value)
|
||||
|
||||
if len(parts) > 1:
|
||||
d = args.get(key, {})
|
||||
args[key] = d
|
||||
for k in parts[1:-1]:
|
||||
k = camelcase_to_underscore(k)
|
||||
v = d.get(k, {})
|
||||
d[k] = v
|
||||
d = v
|
||||
d[camelcase_to_underscore(parts[-1])] = value
|
||||
else:
|
||||
args[key] = value
|
||||
|
||||
return args
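
A concrete call matching the docstring above (the query parameters are illustrative); note that _try_convert turns the numeric string into an int:

    params = [('InstanceId.1', 'i-001122'),
              ('BlockDeviceMapping.1.DeviceName', '/dev/vdb'),
              ('BlockDeviceMapping.1.Ebs.VolumeSize', '10')]
    dict_from_dotted_str(params)
    # {'instance_id': {'1': 'i-001122'},
    #  'block_device_mapping': {'1': {'device_name': '/dev/vdb',
    #                                 'ebs': {'volume_size': 10}}}}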
|
||||
|
||||
|
||||
def _render_dict(el, data):
|
||||
try:
|
||||
for key, val in data.items():
|
||||
sub_el = etree.SubElement(el, key)
|
||||
_render_data(sub_el, val)
|
||||
except Exception:
|
||||
LOG.debug(data)
|
||||
raise
|
||||
|
||||
|
||||
def _render_data(el, data):
|
||||
if isinstance(data, list):
|
||||
for item in data:
|
||||
sub_el = etree.SubElement(el, 'item')
|
||||
_render_data(sub_el, item)
|
||||
elif isinstance(data, dict):
|
||||
_render_dict(el, data)
|
||||
elif hasattr(data, '__dict__'):
|
||||
_render_dict(el, data.__dict__)
|
||||
elif isinstance(data, bool):
|
||||
el.text = str(data).lower()
|
||||
elif isinstance(data, datetime.datetime):
|
||||
el.text = _database_to_isoformat(data)
|
||||
elif isinstance(data, bytes):
|
||||
el.text = data.decode("utf-8")
|
||||
elif data is not None:
|
||||
el.text = str(data)
|
||||
|
||||
|
||||
def _database_to_isoformat(datetimeobj):
|
||||
"""Return a xs:dateTime parsable string from datatime."""
|
||||
return datetimeobj.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'
|
||||
|
||||
|
||||
def dict_to_xml(data_dict, root_tag):
|
||||
root = etree.Element(root_tag)
|
||||
_render_dict(root, data_dict)
|
||||
return root
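
For instance (the response tag name is illustrative; etree here is lxml.etree imported above):

    root = dict_to_xml({'return': True}, 'DeleteVpcResponse')
    etree.tostring(root)
    # b'<DeleteVpcResponse><return>true</return></DeleteVpcResponse>'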
|
||||
|
||||
|
||||
_ms_time_regex = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3,6}Z$')
|
||||
|
||||
|
||||
def is_ec2_timestamp_expired(request, expires=None):
|
||||
"""Checks the timestamp or expiry time included in an EC2 request
|
||||
|
||||
and returns true if the request is expired
|
||||
"""
|
||||
query_time = None
|
||||
timestamp = request.get('Timestamp')
|
||||
expiry_time = request.get('Expires')
|
||||
|
||||
def parse_strtime(strtime):
|
||||
if _ms_time_regex.match(strtime):
|
||||
# NOTE(MotoKen): time format for aws-sdk-java contains millisecond
|
||||
time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
|
||||
else:
|
||||
time_format = "%Y-%m-%dT%H:%M:%SZ"
|
||||
return timeutils.parse_strtime(strtime, time_format)
|
||||
|
||||
try:
|
||||
if timestamp and expiry_time:
|
||||
msg = _("Request must include either Timestamp or Expires,"
|
||||
" but cannot contain both")
|
||||
LOG.error(msg)
|
||||
raise exception.InvalidRequest(msg)
|
||||
elif expiry_time:
|
||||
query_time = parse_strtime(expiry_time)
|
||||
return timeutils.is_older_than(query_time, -1)
|
||||
elif timestamp:
|
||||
query_time = parse_strtime(timestamp)
|
||||
|
||||
# Check if the difference between the timestamp in the request
|
||||
# and the time on our servers is larger than 5 minutes, the
|
||||
# request is too old (or too new).
|
||||
if query_time and expires:
|
||||
return (timeutils.is_older_than(query_time, expires) or
|
||||
timeutils.is_newer_than(query_time, expires))
|
||||
return False
|
||||
except ValueError:
|
||||
LOG.exception("Timestamp is invalid: ")
|
||||
return True
|
||||
|
||||
|
||||
# NOTE(ft): extra functions to use in VPC-specific code or instead of
# malformed existing functions
|
||||
|
||||
|
||||
def get_ec2_id_kind(obj_id):
|
||||
return obj_id.split('-')[0]
|
||||
|
||||
|
||||
def change_ec2_id_kind(obj_id, new_kind):
|
||||
return '%(kind)s-%(id)s' % {'kind': new_kind,
|
||||
'id': obj_id.split('-')[-1]}
|
||||
|
||||
|
||||
NOT_FOUND_EXCEPTION_MAP = {
|
||||
'vpc': exception.InvalidVpcIDNotFound,
|
||||
'igw': exception.InvalidInternetGatewayIDNotFound,
|
||||
'subnet': exception.InvalidSubnetIDNotFound,
|
||||
'eni': exception.InvalidNetworkInterfaceIDNotFound,
|
||||
'dopt': exception.InvalidDhcpOptionsIDNotFound,
|
||||
'eipalloc': exception.InvalidAllocationIDNotFound,
|
||||
'sg': exception.InvalidGroupNotFound,
|
||||
'rtb': exception.InvalidRouteTableIDNotFound,
|
||||
'i': exception.InvalidInstanceIDNotFound,
|
||||
'kp': exception.InvalidKeypairNotFound,
|
||||
'az': exception.InvalidAvailabilityZoneNotFound,
|
||||
'vol': exception.InvalidVolumeNotFound,
|
||||
'snap': exception.InvalidSnapshotNotFound,
|
||||
'ami': exception.InvalidAMIIDNotFound,
|
||||
'aki': exception.InvalidAMIIDNotFound,
|
||||
'ari': exception.InvalidAMIIDNotFound,
|
||||
'vgw': exception.InvalidVpnGatewayIDNotFound,
|
||||
'cgw': exception.InvalidCustomerGatewayIDNotFound,
|
||||
'vpn': exception.InvalidVpnConnectionIDNotFound,
|
||||
}
|
||||
|
||||
|
||||
def get_db_item(context, ec2_id, expected_kind=None):
|
||||
"""Get an DB item, raise AWS compliant exception if it's not found.
|
||||
|
||||
Args:
|
||||
context (RequestContext): The request context.
|
||||
ec2_id (str): The ID of the requested item.
|
||||
expected_kind (str): The expected kind of the requested item.
|
||||
            Specify it if the kind of ec2_id needs to be validated.
|
||||
|
||||
Returns:
|
||||
The DB item.
|
||||
"""
|
||||
item = db_api.get_item_by_id(context, ec2_id)
|
||||
if (item is None or
|
||||
expected_kind and get_ec2_id_kind(ec2_id) != expected_kind):
|
||||
kind = expected_kind or get_ec2_id_kind(ec2_id)
|
||||
params = {'id': ec2_id}
|
||||
raise NOT_FOUND_EXCEPTION_MAP[kind](**params)
|
||||
return item
|
||||
|
||||
|
||||
def get_db_items(context, kind, ec2_ids):
|
||||
if not ec2_ids:
|
||||
return db_api.get_items(context, kind)
|
||||
|
||||
if not isinstance(ec2_ids, set):
|
||||
ec2_ids = set(ec2_ids)
|
||||
items = db_api.get_items_by_ids(context, ec2_ids)
|
||||
if len(items) < len(ec2_ids):
|
||||
missed_ids = ec2_ids - set((item['id'] for item in items))
|
||||
params = {'id': next(iter(missed_ids))}
|
||||
raise NOT_FOUND_EXCEPTION_MAP[kind](**params)
|
||||
return items
|
||||
|
||||
|
||||
_auto_create_db_item_extensions = {}
|
||||
|
||||
|
||||
def register_auto_create_db_item_extension(kind, extension):
|
||||
_auto_create_db_item_extensions[kind] = extension
|
||||
|
||||
|
||||
def auto_create_db_item(context, kind, os_id, **extension_kwargs):
|
||||
item = {'os_id': os_id}
|
||||
extension = _auto_create_db_item_extensions.get(kind)
|
||||
if extension:
|
||||
extension(context, item, **extension_kwargs)
|
||||
return db_api.add_item(context, kind, item)
|
||||
|
||||
|
||||
def get_db_item_by_os_id(context, kind, os_id, items_by_os_id=None,
|
||||
**extension_kwargs):
|
||||
"""Get DB item by OS id (create if it doesn't exist).
|
||||
|
||||
Args:
|
||||
context (RequestContext): The request context.
|
||||
kind (str): The kind of item.
|
||||
os_id (str): OS id of an object.
|
||||
items_by_os_id (dict of items): The dict of known DB items,
|
||||
OS id is used as a key.
|
||||
extension_kwargs (dict): Additional parameters passed to
|
||||
a registered extension at creating item.
|
||||
|
||||
Returns:
|
||||
A found or created item.
|
||||
|
||||
    Search for the item in the passed dict. If it's not found, create a new
    item and add it to the dict (if one is passed).
    If an extension is registered for the corresponding item kind, call it,
    passing extension_kwargs to it.
|
||||
"""
|
||||
if os_id is None:
|
||||
return None
|
||||
if items_by_os_id is not None:
|
||||
item = items_by_os_id.get(os_id)
|
||||
if item:
|
||||
return item
|
||||
else:
|
||||
item = next((i for i in db_api.get_items(context, kind)
|
||||
if i['os_id'] == os_id), None)
|
||||
if not item:
|
||||
item = auto_create_db_item(context, kind, os_id, **extension_kwargs)
|
||||
if items_by_os_id is not None:
|
||||
items_by_os_id[os_id] = item
|
||||
return item
|
||||
|
||||
|
||||
# TODO(Alex): The project_id passing mechanism can be potentially
|
||||
# reconsidered in future.
|
||||
def os_id_to_ec2_id(context, kind, os_id, items_by_os_id=None,
|
||||
ids_by_os_id=None, project_id=None):
|
||||
if os_id is None:
|
||||
return None
|
||||
if ids_by_os_id is not None:
|
||||
item_id = ids_by_os_id.get(os_id)
|
||||
if item_id:
|
||||
return item_id
|
||||
if items_by_os_id is not None:
|
||||
item = items_by_os_id.get(os_id)
|
||||
if item:
|
||||
return item['id']
|
||||
ids = db_api.get_items_ids(context, kind, item_os_ids=(os_id,))
|
||||
if len(ids):
|
||||
item_id, _os_id = ids[0]
|
||||
else:
|
||||
item_id = db_api.add_item_id(context, kind, os_id,
|
||||
project_id=project_id)
|
||||
if ids_by_os_id is not None:
|
||||
ids_by_os_id[os_id] = item_id
|
||||
return item_id
|
||||
|
||||
|
||||
def get_os_image(context, ec2_image_id):
|
||||
kind = get_ec2_id_kind(ec2_image_id)
|
||||
ids = db_api.get_items_ids(context, kind, item_ids=(ec2_image_id,))
|
||||
if not ids:
|
||||
raise exception.InvalidAMIIDNotFound(id=ec2_image_id)
|
||||
_id, os_id = ids[0]
|
||||
if not os_id:
|
||||
return None
|
||||
glance = clients.glance(context)
|
||||
try:
|
||||
return glance.images.get(os_id)
|
||||
except glance_exception.HTTPNotFound:
|
||||
raise exception.InvalidAMIIDNotFound(id=ec2_image_id)
|
||||
|
||||
|
||||
def deserialize_os_image_properties(os_image):
|
||||
def prepare_property(property_name):
|
||||
if property_name in os_image_dict:
|
||||
os_image_dict[property_name] = jsonutils.loads(
|
||||
os_image_dict[property_name])
|
||||
|
||||
os_image_dict = dict(os_image)
|
||||
prepare_property('mappings')
|
||||
prepare_property('block_device_mapping')
|
||||
return os_image_dict
|
||||
|
||||
|
||||
def create_virtual_bdm(device_name, virtual_name):
|
||||
bdm = {'device_name': device_name,
|
||||
'source_type': 'blank',
|
||||
'destination_type': 'local',
|
||||
'device_type': 'disk',
|
||||
'delete_on_termination': True,
|
||||
'boot_index': -1,
|
||||
'virtual_name': virtual_name}
|
||||
if virtual_name == 'swap':
|
||||
bdm['guest_format'] = 'swap'
|
||||
return bdm
|
||||
|
||||
|
||||
def get_os_image_mappings(os_image_properties):
|
||||
mappings = []
|
||||
names = set()
|
||||
# TODO(ft): validate device names for both virtual and block device
|
||||
# mappings
|
||||
|
||||
def is_virtual(virtual_name):
|
||||
return virtual_name == 'swap' or (virtual_name and
|
||||
_ephemeral.match(virtual_name))
|
||||
|
||||
# NOTE(ft): substitute mapping if the same device name is specified
|
||||
def add_mapping(mapping):
|
||||
device_name = block_device_strip_dev(mapping.get('device_name'))
|
||||
if device_name in names:
|
||||
for i, m in enumerate(mappings):
|
||||
if (device_name ==
|
||||
block_device_strip_dev(m.get('device_name'))):
|
||||
mappings[i] = mapping
|
||||
break
|
||||
else:
|
||||
if device_name:
|
||||
names.add(device_name)
|
||||
mappings.append(mapping)
|
||||
|
||||
    # TODO(ft): Since Juno, virtual device mappings take precedence over
    # block device mappings in boot logic. This function should do the same,
    # despite Nova EC2 behavior.
|
||||
|
||||
# NOTE(ft): Nova EC2 prepended device names for virtual device mappings.
|
||||
# But AWS doesn't do it.
|
||||
for vdm in os_image_properties.get('mappings', []):
|
||||
if is_virtual(vdm.get('virtual')):
|
||||
add_mapping(create_virtual_bdm(
|
||||
block_device_prepend_dev(vdm.get('device')), vdm['virtual']))
|
||||
|
||||
legacy_mapping = not os_image_properties.get('bdm_v2', False)
|
||||
for bdm in os_image_properties.get('block_device_mapping', []):
|
||||
if legacy_mapping:
|
||||
virtual_name = bdm.get('virtual_name')
|
||||
if is_virtual(virtual_name):
|
||||
new_bdm = create_virtual_bdm(bdm.get('device_name'),
|
||||
virtual_name)
|
||||
else:
|
||||
new_bdm = {key: val for key, val in bdm.items()
|
||||
if key in LEGACY_BDM_FIELDS}
|
||||
if bdm.get('snapshot_id'):
|
||||
new_bdm.update({'source_type': 'snapshot',
|
||||
'destination_type': 'volume'})
|
||||
elif bdm.get('volume_id'):
|
||||
new_bdm.update({'source_type': 'volume',
|
||||
'destination_type': 'volume'})
|
||||
bdm = new_bdm
|
||||
|
||||
bdm.setdefault('delete_on_termination', False)
|
||||
add_mapping(bdm)
|
||||
|
||||
return mappings
|
||||
|
||||
|
||||
def get_os_public_network(context):
|
||||
neutron = clients.neutron(context)
|
||||
search_opts = {'router:external': True, 'name': CONF.external_network}
|
||||
os_networks = neutron.list_networks(**search_opts)['networks']
|
||||
if len(os_networks) != 1:
|
||||
if CONF.external_network:
|
||||
if len(os_networks) == 0:
|
||||
msg = "No external network with name '%s' is found"
|
||||
else:
|
||||
msg = "More than one external network with name '%s' is found"
|
||||
LOG.error(msg, CONF.external_network)
|
||||
else:
|
||||
if len(os_networks) == 0:
|
||||
msg = 'No external network is found'
|
||||
else:
|
||||
msg = 'More than one external network is found'
|
||||
LOG.error(msg)
|
||||
raise exception.Unsupported(_('Feature is restricted by OS admin'))
|
||||
return os_networks[0]
|
||||
|
||||
|
||||
def get_attached_gateway(context, vpc_id, gateway_kind):
|
||||
# TODO(ft): move search by vpc_id to DB api
|
||||
return next((gw for gw in db_api.get_items(context, gateway_kind)
|
||||
if gw['vpc_id'] == vpc_id), None)
|
||||
|
||||
|
||||
_check_and_create_default_vpc = None
|
||||
|
||||
|
||||
def check_and_create_default_vpc(context):
|
||||
return _check_and_create_default_vpc(context)
|
||||
|
||||
|
||||
def set_check_and_create_default_vpc(check_and_create_default_vpc):
|
||||
global _check_and_create_default_vpc
|
||||
_check_and_create_default_vpc = check_and_create_default_vpc
|
||||
|
||||
|
||||
def get_default_vpc(context):
|
||||
default_vpc = check_and_create_default_vpc(context)
|
||||
if not default_vpc:
|
||||
raise exception.VPCIdNotSpecified()
|
||||
return default_vpc
|
||||
|
||||
|
||||
# NOTE(ft): following functions are copied from various parts of Nova
|
||||
|
||||
_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$')
|
||||
|
||||
_dev = re.compile(r'^/dev/')
|
||||
|
||||
|
||||
def block_device_strip_dev(device_name):
|
||||
"""remove leading '/dev/'."""
|
||||
return _dev.sub('', device_name) if device_name else device_name
|
||||
|
||||
|
||||
def block_device_prepend_dev(device_name):
|
||||
"""Make sure there is a leading '/dev/'."""
|
||||
return device_name and '/dev/' + block_device_strip_dev(device_name)
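
For example:

    block_device_strip_dev('/dev/vdb')    # 'vdb'
    block_device_strip_dev('vdb')         # 'vdb'
    block_device_prepend_dev('vdb')       # '/dev/vdb'
    block_device_prepend_dev(None)        # None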
|
||||
|
||||
|
||||
def block_device_properties_root_device_name(properties):
|
||||
"""get root device name from image meta data.
|
||||
|
||||
If it isn't specified, return None.
|
||||
"""
|
||||
if 'root_device_name' in properties:
|
||||
return properties.get('root_device_name')
|
||||
elif 'mappings' in properties:
|
||||
return next((bdm['device'] for bdm in properties['mappings']
|
||||
if bdm['virtual'] == 'root'), None)
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
|
||||
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
|
||||
|
||||
|
||||
def isotime(at=None, subsecond=False):
|
||||
"""Stringify time in ISO 8601 format."""
|
||||
|
||||
# Python provides a similar instance method for datetime.datetime objects
|
||||
# called isoformat(). The format of the strings generated by isoformat()
|
||||
# have a couple of problems:
|
||||
# 1) The strings generated by isotime are used in tokens and other public
|
||||
# APIs that we can't change without a deprecation period. The strings
|
||||
# generated by isoformat are not the same format, so we can't just
|
||||
# change to it.
|
||||
# 2) The strings generated by isoformat do not include the microseconds if
|
||||
# the value happens to be 0. This will likely show up as random failures
|
||||
# as parsers may be written to always expect microseconds, and it will
|
||||
# parse correctly most of the time.
|
||||
|
||||
if not at:
|
||||
at = timeutils.utcnow()
|
||||
st = at.strftime(_ISO8601_TIME_FORMAT
|
||||
if not subsecond
|
||||
else _ISO8601_TIME_FORMAT_SUBSECOND)
|
||||
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
|
||||
st += ('Z' if tz == 'UTC' else tz)
|
||||
return st
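
For example, for a naive datetime (treated as UTC):

    import datetime
    at = datetime.datetime(2014, 7, 1, 12, 30, 45, 123456)
    isotime(at)                   # '2014-07-01T12:30:45Z'
    isotime(at, subsecond=True)   # '2014-07-01T12:30:45.123456Z'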
|
@ -1,69 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_context import context as common_context
|
||||
from oslo_log import log as logging
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
import ec2api.api
|
||||
from ec2api import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def ec2_error_response(request_id, code, message, status=500):
|
||||
"""Helper to construct an EC2 compatible error response."""
|
||||
LOG.debug('EC2 error response: %(code)s: %(message)s',
|
||||
{'code': code, 'message': message})
|
||||
resp = webob.Response()
|
||||
resp.status = status
|
||||
resp.headers['Content-Type'] = 'text/xml'
|
||||
resp.body = (
|
||||
'<?xml version="1.0"?>\n'
|
||||
'<Response><Errors><Error><Code>%s</Code>'
|
||||
'<Message>%s</Message></Error></Errors>'
|
||||
'<RequestID>%s</RequestID></Response>' %
|
||||
(utils.xhtml_escape(code),
|
||||
utils.xhtml_escape(message),
|
||||
utils.xhtml_escape(request_id))).encode()
|
||||
return resp
|
||||
|
||||
|
||||
class Fault(webob.exc.HTTPException):
|
||||
|
||||
"""Captures exception and return REST Response."""
|
||||
|
||||
def __init__(self, exception):
|
||||
"""Create a response for the given webob.exc.exception."""
|
||||
self.wrapped_exc = exception
|
||||
|
||||
@webob.dec.wsgify
|
||||
def __call__(self, req):
|
||||
"""Generate a WSGI response based on the exception passed to ctor."""
|
||||
code = ec2api.api.exception_to_ec2code(self.wrapped_exc)
|
||||
status = self.wrapped_exc.status_int
|
||||
message = self.wrapped_exc.explanation
|
||||
|
||||
if status == 501:
|
||||
message = "The requested function is not supported"
|
||||
|
||||
if 'AWSAccessKeyId' not in req.params:
|
||||
raise webob.exc.HTTPBadRequest()
|
||||
|
||||
resp = ec2_error_response(common_context.generate_request_id(), code,
|
||||
message=message, status=status)
|
||||
return resp
|
ec2api/api/image.py — 1100 lines removed; diff suppressed because it is too large.
(A second large file diff was also suppressed.)
@ -1,147 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Cloud Controller: Implementation of EC2 REST API calls, which are
|
||||
dispatched to other nodes via AMQP RPC. State is via distributed
|
||||
datastore.
|
||||
"""
|
||||
|
||||
from neutronclient.common import exceptions as neutron_exception
|
||||
from oslo_log import log as logging
|
||||
|
||||
from ec2api.api import common
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api import clients
|
||||
from ec2api.db import api as db_api
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
"""Internet gateway related API implementation
|
||||
"""
|
||||
|
||||
|
||||
Validator = common.Validator
|
||||
|
||||
|
||||
def create_internet_gateway(context):
|
||||
igw = db_api.add_item(context, 'igw', {})
|
||||
return {'internetGateway': _format_internet_gateway(igw)}
|
||||
|
||||
|
||||
def attach_internet_gateway(context, internet_gateway_id, vpc_id):
|
||||
igw = ec2utils.get_db_item(context, internet_gateway_id)
|
||||
if igw.get('vpc_id'):
|
||||
msg_params = {'igw_id': igw['id'],
|
||||
'vpc_id': igw['vpc_id']}
|
||||
msg = _('resource %(igw_id)s is already attached to '
|
||||
'network %(vpc_id)s') % msg_params
|
||||
raise exception.ResourceAlreadyAssociated(msg)
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
if ec2utils.get_attached_gateway(context, vpc['id'], 'igw'):
|
||||
msg = _('Network %(vpc_id)s already has an internet gateway '
|
||||
'attached') % {'vpc_id': vpc['id']}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
external_network_id = None
|
||||
if not ec2utils.get_attached_gateway(context, vpc['id'], 'vgw'):
|
||||
external_network_id = ec2utils.get_os_public_network(context)['id']
|
||||
neutron = clients.neutron(context)
|
||||
|
||||
# TODO(ft): set attaching state into db
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_attach_internet_gateway_item(context, igw, vpc['id'])
|
||||
cleaner.addCleanup(_detach_internet_gateway_item, context, igw)
|
||||
if external_network_id:
|
||||
neutron.add_gateway_router(vpc['os_id'],
|
||||
{'network_id': external_network_id})
|
||||
return True
|
||||
|
||||
|
||||
def detach_internet_gateway(context, internet_gateway_id, vpc_id):
|
||||
igw = ec2utils.get_db_item(context, internet_gateway_id)
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
if igw.get('vpc_id') != vpc['id']:
|
||||
raise exception.GatewayNotAttached(gw_id=igw['id'],
|
||||
vpc_id=vpc['id'])
|
||||
|
||||
remove_os_gateway_router = (
|
||||
ec2utils.get_attached_gateway(context, vpc_id, 'vgw') is None)
|
||||
neutron = clients.neutron(context)
|
||||
# TODO(ft): set detaching state into db
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_detach_internet_gateway_item(context, igw)
|
||||
cleaner.addCleanup(_attach_internet_gateway_item,
|
||||
context, igw, vpc['id'])
|
||||
if remove_os_gateway_router:
|
||||
try:
|
||||
neutron.remove_gateway_router(vpc['os_id'])
|
||||
except neutron_exception.NotFound:
|
||||
pass
|
||||
return True
|
||||
|
||||
|
||||
def delete_internet_gateway(context, internet_gateway_id):
|
||||
igw = ec2utils.get_db_item(context, internet_gateway_id)
|
||||
if igw.get('vpc_id'):
|
||||
msg = _("The internetGateway '%(igw_id)s' has dependencies and "
|
||||
"cannot be deleted.") % {'igw_id': igw['id']}
|
||||
raise exception.DependencyViolation(msg)
|
||||
db_api.delete_item(context, igw['id'])
|
||||
return True
|
||||
|
||||
|
||||
class InternetGatewayDescriber(common.TaggableItemsDescriber,
|
||||
common.NonOpenstackItemsDescriber):
|
||||
|
||||
KIND = 'igw'
|
||||
FILTER_MAP = {'internet-gateway-id': 'internetGatewayId',
|
||||
'attachment.state': ['attachmentSet', 'state'],
|
||||
'attachment.vpc-id': ['attachmentSet', 'vpcId']}
|
||||
|
||||
def format(self, igw):
|
||||
return _format_internet_gateway(igw)
|
||||
|
||||
|
||||
def describe_internet_gateways(context, internet_gateway_id=None,
|
||||
filter=None):
|
||||
ec2utils.check_and_create_default_vpc(context)
|
||||
formatted_igws = InternetGatewayDescriber().describe(
|
||||
context, ids=internet_gateway_id, filter=filter)
|
||||
return {'internetGatewaySet': formatted_igws}
|
||||
|
||||
|
||||
def _format_internet_gateway(igw):
|
||||
ec2_igw = {'internetGatewayId': igw['id'],
|
||||
'attachmentSet': []}
|
||||
if igw.get('vpc_id'):
|
||||
# NOTE(ft): AWS actually returns 'available' state rather than
|
||||
# documented 'attached' one
|
||||
attachment_state = 'available'
|
||||
attachment = {'vpcId': igw['vpc_id'],
|
||||
'state': attachment_state}
|
||||
ec2_igw['attachmentSet'].append(attachment)
|
||||
return ec2_igw
|
||||
|
||||
|
||||
def _attach_internet_gateway_item(context, igw, vpc_id):
|
||||
igw['vpc_id'] = vpc_id
|
||||
db_api.update_item(context, igw)
|
||||
|
||||
|
||||
def _detach_internet_gateway_item(context, igw):
|
||||
igw['vpc_id'] = None
|
||||
db_api.update_item(context, igw)
|
@ -1,151 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
|
||||
from cryptography.hazmat import backends
|
||||
from cryptography.hazmat.primitives.asymmetric import rsa
|
||||
from cryptography.hazmat.primitives import serialization as crypt_serialization
|
||||
from novaclient import exceptions as nova_exception
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from ec2api.api import common
|
||||
from ec2api import clients
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
"""Keypair-object related API implementation
|
||||
"""
|
||||
|
||||
|
||||
Validator = common.Validator
|
||||
|
||||
|
||||
class KeyPairDescriber(common.UniversalDescriber):
|
||||
|
||||
KIND = 'kp'
|
||||
FILTER_MAP = {'fingerprint': 'keyFingerprint',
|
||||
'key-name': 'keyName'}
|
||||
|
||||
def format(self, _item, key_pair):
|
||||
return _format_key_pair(key_pair)
|
||||
|
||||
def get_db_items(self):
|
||||
return []
|
||||
|
||||
def get_os_items(self):
|
||||
# Original EC2 in nova filters out vpn keys for admin user.
|
||||
# We're not filtering out the vpn keys for now.
|
||||
# In order to implement this we'd have to configure vpn_key_suffix
|
||||
# in our config which we consider an overkill.
|
||||
# suffix = CONF.vpn_key_suffix
|
||||
# if context.is_admin or not key_pair['name'].endswith(suffix):
|
||||
nova = clients.nova(self.context)
|
||||
return nova.keypairs.list()
|
||||
|
||||
def auto_update_db(self, item, os_item):
|
||||
pass
|
||||
|
||||
def get_id(self, os_item):
|
||||
return ''
|
||||
|
||||
def get_name(self, key_pair):
|
||||
return key_pair.name
|
||||
|
||||
|
||||
def describe_key_pairs(context, key_name=None, filter=None):
|
||||
formatted_key_pairs = KeyPairDescriber().describe(context, names=key_name,
|
||||
filter=filter)
|
||||
return {'keySet': formatted_key_pairs}
|
||||
|
||||
|
||||
def _validate_name(name):
|
||||
if len(name) > 255:
|
||||
raise exception.InvalidParameterValue(
|
||||
value=name,
|
||||
parameter='KeyName',
|
||||
            reason='length exceeds maximum of 255')
|
||||
|
||||
|
||||
# We may wish to make the algorithm configurable. This would require API
|
||||
# changes.
|
||||
def _generate_key_pair():
|
||||
key = rsa.generate_private_key(
|
||||
backend=backends.default_backend(),
|
||||
public_exponent=65537,
|
||||
key_size=2048
|
||||
)
|
||||
private_key = key.private_bytes(
|
||||
crypt_serialization.Encoding.PEM,
|
||||
crypt_serialization.PrivateFormat.TraditionalOpenSSL,
|
||||
crypt_serialization.NoEncryption(),
|
||||
).decode()
|
||||
public_key = key.public_key().public_bytes(
|
||||
crypt_serialization.Encoding.OpenSSH,
|
||||
crypt_serialization.PublicFormat.OpenSSH,
|
||||
).decode()
|
||||
return private_key, public_key
|
||||
|
||||
|
||||
def create_key_pair(context, key_name):
|
||||
_validate_name(key_name)
|
||||
nova = clients.nova(context)
|
||||
private_key, public_key = _generate_key_pair()
|
||||
try:
|
||||
key_pair = nova.keypairs.create(key_name, public_key)
|
||||
except nova_exception.OverLimit:
|
||||
raise exception.ResourceLimitExceeded(resource='keypairs')
|
||||
except nova_exception.Conflict:
|
||||
raise exception.InvalidKeyPairDuplicate(key_name=key_name)
|
||||
formatted_key_pair = _format_key_pair(key_pair)
|
||||
formatted_key_pair['keyMaterial'] = private_key
|
||||
return formatted_key_pair
|
||||
|
||||
|
||||
def import_key_pair(context, key_name, public_key_material):
|
||||
_validate_name(key_name)
|
||||
if not public_key_material:
|
||||
raise exception.MissingParameter(
|
||||
_('The request must contain the parameter PublicKeyMaterial'))
|
||||
nova = clients.nova(context)
|
||||
public_key = base64.b64decode(public_key_material).decode("utf-8")
|
||||
try:
|
||||
key_pair = nova.keypairs.create(key_name, public_key)
|
||||
except nova_exception.OverLimit:
|
||||
raise exception.ResourceLimitExceeded(resource='keypairs')
|
||||
except nova_exception.Conflict:
|
||||
raise exception.InvalidKeyPairDuplicate(key_name=key_name)
|
||||
return _format_key_pair(key_pair)
|
||||
|
||||
|
||||
def delete_key_pair(context, key_name):
|
||||
nova = clients.nova(context)
|
||||
try:
|
||||
nova.keypairs.delete(key_name)
|
||||
except nova_exception.NotFound:
|
||||
# aws returns true even if the key doesn't exist
|
||||
pass
|
||||
return True
|
||||
|
||||
|
||||
def _format_key_pair(key_pair):
|
||||
return {'keyName': key_pair.name,
|
||||
'keyFingerprint': key_pair.fingerprint
|
||||
}
|
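The removed key_pair module above generates the RSA material locally and only
registers the public half with nova; the private key is handed back once as
keyMaterial. A minimal standalone sketch of that generation step is shown
below; it assumes only that the 'cryptography' package is installed, and the
helper name is hypothetical rather than part of the removed tree:

    # Sketch: generate a 2048-bit RSA key pair and serialize it the same way
    # the removed _generate_key_pair() did (PEM private key, OpenSSH public key).
    from cryptography.hazmat import backends
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    def generate_key_pair_sketch():
        key = rsa.generate_private_key(
            backend=backends.default_backend(),
            public_exponent=65537,
            key_size=2048)
        private_key = key.private_bytes(
            serialization.Encoding.PEM,
            serialization.PrivateFormat.TraditionalOpenSSL,
            serialization.NoEncryption()).decode()
        public_key = key.public_key().public_bytes(
            serialization.Encoding.OpenSSH,
            serialization.PublicFormat.OpenSSH).decode()
        return private_key, public_key

    if __name__ == '__main__':
        private_key, public_key = generate_key_pair_sketch()
        print(public_key)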
@ -1,586 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import collections
|
||||
|
||||
import netaddr
|
||||
from neutronclient.common import exceptions as neutron_exception
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from ec2api.api import address as address_api
|
||||
from ec2api.api import common
|
||||
from ec2api.api import dhcp_options
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api.api import security_group as security_group_api
|
||||
from ec2api import clients
|
||||
from ec2api.db import api as db_api
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
"""Network interface related API implementation
|
||||
"""
|
||||
|
||||
|
||||
Validator = common.Validator
|
||||
|
||||
|
||||
def create_network_interface(context, subnet_id,
|
||||
private_ip_address=None,
|
||||
private_ip_addresses=None,
|
||||
secondary_private_ip_address_count=None,
|
||||
description=None,
|
||||
security_group_id=None,
|
||||
client_token=None):
|
||||
|
||||
if client_token:
|
||||
result = describe_network_interfaces(context,
|
||||
filter=[{'name': 'client-token',
|
||||
'value': [client_token]}])
|
||||
if result['networkInterfaceSet']:
|
||||
if len(result['networkInterfaceSet']) > 1:
|
||||
LOG.error('describe_network_interfaces returns %s '
|
||||
'network_interfaces, but 1 is expected.',
|
||||
len(result['networkInterfaceSet']))
|
||||
LOG.error('Requested client token: %s', client_token)
|
||||
LOG.error('Result: %s', result)
|
||||
return result['networkInterfaceSet'][0]
|
||||
|
||||
subnet = ec2utils.get_db_item(context, subnet_id)
|
||||
if subnet is None:
|
||||
raise exception.InvalidSubnetIDNotFound(id=subnet_id)
|
||||
neutron = clients.neutron(context)
|
||||
os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
|
||||
# NOTE(Alex): Combine and check ip addresses. Neutron will accept
|
||||
# ip_address as a parameter for specified address and subnet_id for
|
||||
# address to auto-allocate.
|
||||
# TODO(Alex): Implement better diagnostics.
|
||||
subnet_ipnet = netaddr.IPNetwork(os_subnet['cidr'])
|
||||
if not private_ip_addresses:
|
||||
private_ip_addresses = []
|
||||
if private_ip_address is not None:
|
||||
private_ip_addresses.insert(0,
|
||||
{'private_ip_address': private_ip_address,
|
||||
'primary': True})
|
||||
primary_ip = None
|
||||
fixed_ips = []
|
||||
for ip in private_ip_addresses:
|
||||
ip_address = netaddr.IPAddress(ip['private_ip_address'])
|
||||
if ip_address not in subnet_ipnet:
|
||||
raise exception.InvalidParameterValue(
|
||||
value=str(ip_address),
|
||||
parameter='PrivateIpAddresses',
|
||||
reason='IP address is out of the subnet range')
|
||||
if ip.get('primary', False):
|
||||
if primary_ip is not None:
|
||||
raise exception.InvalidParameterValue(
|
||||
value=str(ip_address),
|
||||
parameter='PrivateIpAddresses',
|
||||
reason='More than one primary ip is supplied')
|
||||
else:
|
||||
primary_ip = str(ip_address)
|
||||
fixed_ips.insert(0, {'ip_address': primary_ip})
|
||||
else:
|
||||
fixed_ips.append({'ip_address': str(ip_address)})
|
||||
if not fixed_ips and not secondary_private_ip_address_count:
|
||||
secondary_private_ip_address_count = 1
|
||||
if secondary_private_ip_address_count is None:
|
||||
secondary_private_ip_address_count = 0
|
||||
if secondary_private_ip_address_count > 0:
|
||||
for _i in range(secondary_private_ip_address_count):
|
||||
fixed_ips.append({'subnet_id': os_subnet['id']})
|
||||
vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
|
||||
vpc_id = vpc['id']
|
||||
dhcp_options_id = vpc.get('dhcp_options_id', None)
|
||||
if not security_group_id:
|
||||
default_groups = security_group_api.describe_security_groups(
|
||||
context,
|
||||
filter=[{'name': 'vpc-id', 'value': [vpc_id]},
|
||||
{'name': 'group-name', 'value': ['default']}]
|
||||
)['securityGroupInfo']
|
||||
security_group_id = [default_group['groupId']
|
||||
for default_group in default_groups]
|
||||
security_groups = db_api.get_items_by_ids(context, security_group_id)
|
||||
if any(security_group['vpc_id'] != vpc['id']
|
||||
for security_group in security_groups):
|
||||
msg = _('You have specified two resources that belong to '
|
||||
'different networks.')
|
||||
raise exception.InvalidGroupNotFound(msg)
|
||||
os_groups = [security_group['os_id'] for security_group in security_groups]
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
os_port_body = {'port': {'network_id': os_subnet['network_id'],
|
||||
'security_groups': os_groups}}
|
||||
os_port_body['port']['fixed_ips'] = fixed_ips
|
||||
try:
|
||||
os_port = neutron.create_port(os_port_body)['port']
|
||||
except (neutron_exception.IpAddressGenerationFailureClient,
|
||||
neutron_exception.OverQuotaClient):
|
||||
raise exception.InsufficientFreeAddressesInSubnet()
|
||||
except (neutron_exception.IpAddressInUseClient,
|
||||
neutron_exception.BadRequest) as ex:
|
||||
# NOTE(ft): AWS returns InvalidIPAddress.InUse for a primary IP
|
||||
# address, but InvalidParameterValue for secondary one.
|
||||
# AWS returns PrivateIpAddressLimitExceeded, but Neutron does
|
||||
# general InvalidInput (converted to BadRequest) in the same case.
|
||||
msg = _('Specified network interface parameters are invalid. '
|
||||
'Reason: %(reason)s') % {'reason': ex.message}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
cleaner.addCleanup(neutron.delete_port, os_port['id'])
|
||||
if primary_ip is None:
|
||||
primary_ip = os_port['fixed_ips'][0]['ip_address']
|
||||
network_interface = db_api.add_item(context, 'eni',
|
||||
{'os_id': os_port['id'],
|
||||
'vpc_id': subnet['vpc_id'],
|
||||
'subnet_id': subnet['id'],
|
||||
'description': description,
|
||||
'private_ip_address': primary_ip})
|
||||
cleaner.addCleanup(db_api.delete_item,
|
||||
context, network_interface['id'])
|
||||
|
||||
network_interface_id = network_interface['id']
|
||||
neutron.update_port(os_port['id'],
|
||||
{'port': {'name': network_interface_id}})
|
||||
if dhcp_options_id:
|
||||
dhcp_options._add_dhcp_opts_to_port(
|
||||
context,
|
||||
db_api.get_item_by_id(context, dhcp_options_id),
|
||||
network_interface,
|
||||
os_port)
|
||||
security_groups = security_group_api._format_security_groups_ids_names(
|
||||
context)
|
||||
return {'networkInterface':
|
||||
_format_network_interface(context,
|
||||
network_interface,
|
||||
os_port,
|
||||
security_groups=security_groups)}
|
||||
|
||||
|
||||
def delete_network_interface(context, network_interface_id):
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
if 'instance_id' in network_interface:
|
||||
msg = _("Network interface '%(eni_id)s' is currently in use.")
|
||||
msg = msg % {'eni_id': network_interface_id}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
for address in db_api.get_items(context, 'eipalloc'):
|
||||
if address.get('network_interface_id') == network_interface['id']:
|
||||
address_api._disassociate_address_item(context, address)
|
||||
|
||||
neutron = clients.neutron(context)
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
db_api.delete_item(context, network_interface['id'])
|
||||
cleaner.addCleanup(db_api.restore_item, context, 'eni',
|
||||
network_interface)
|
||||
try:
|
||||
neutron.delete_port(network_interface['os_id'])
|
||||
except neutron_exception.PortNotFoundClient:
|
||||
pass
|
||||
return True
|
||||
|
||||
|
||||
class NetworkInterfaceDescriber(common.TaggableItemsDescriber):
|
||||
|
||||
KIND = 'eni'
|
||||
FILTER_MAP = {'addresses.private-ip-address': ['privateIpAddressesSet',
|
||||
'privateIpAddress'],
|
||||
'addresses.primary': ['privateIpAddressesSet', 'primary'],
|
||||
'addresses.association.public-ip': ['privateIpAddressesSet',
|
||||
('association',
|
||||
'publicIp')],
|
||||
'addresses.association.owner-id': ['privateIpAddressesSet',
|
||||
('association',
|
||||
'ipOwnerId')],
|
||||
'association.association-id': ('association',
|
||||
'associationId'),
|
||||
'association.allocation-id': ('association', 'allocationId'),
|
||||
'association.ip-owner-id': ('association', 'ipOwnerId'),
|
||||
'association.public-ip': ('association', 'publicIp'),
|
||||
'attachment.attachment-id': ('attachment', 'attachmentId'),
|
||||
'attachment.instance-id': ('attachment', 'instanceId'),
|
||||
'attachment.instance-owner-id': ('attachment',
|
||||
'instanceOwnerId'),
|
||||
'attachment.device-index': ('attachment', 'deviceIndex'),
|
||||
'attachment.status': ('attachment', 'status'),
|
||||
'attachment.attach.time': ('attachment', 'attachTime'),
|
||||
'attachment.delete-on-termination': ('attachment',
|
||||
'deleteOnTermination'),
|
||||
'client-token': 'clientToken',
|
||||
'description': 'description',
|
||||
'group-id': ['groupSet', 'groupId'],
|
||||
'group-name': ['groupSet', 'groupName'],
|
||||
'mac-address': 'macAddress',
|
||||
'network-interface-id': 'networkInterfaceId',
|
||||
'owner-id': 'ownerId',
|
||||
'private-ip-address': 'privateIpAddress',
|
||||
'requester-managed': 'requesterManaged',
|
||||
'source-dest-check': 'sourceDestCheck',
|
||||
'status': 'status',
|
||||
'vpc-id': 'vpcId',
|
||||
'subnet-id': 'subnetId'}
|
||||
|
||||
def format(self, network_interface, os_port):
|
||||
if not network_interface:
|
||||
return None
|
||||
return _format_network_interface(
|
||||
self.context, network_interface, os_port,
|
||||
self.ec2_addresses[network_interface['id']],
|
||||
self.security_groups)
|
||||
|
||||
def get_os_items(self):
|
||||
addresses = address_api.describe_addresses(self.context)
|
||||
self.ec2_addresses = collections.defaultdict(list)
|
||||
for address in addresses['addressesSet']:
|
||||
if 'networkInterfaceId' in address:
|
||||
self.ec2_addresses[
|
||||
address['networkInterfaceId']].append(address)
|
||||
self.security_groups = (
|
||||
security_group_api._format_security_groups_ids_names(self.context))
|
||||
neutron = clients.neutron(self.context)
|
||||
return neutron.list_ports(tenant_id=self.context.project_id)['ports']
|
||||
|
||||
def get_name(self, os_item):
|
||||
return ''
|
||||
|
||||
|
||||
def describe_network_interfaces(context, network_interface_id=None,
|
||||
filter=None):
|
||||
formatted_network_interfaces = NetworkInterfaceDescriber().describe(
|
||||
context, ids=network_interface_id, filter=filter)
|
||||
return {'networkInterfaceSet': formatted_network_interfaces}
|
||||
|
||||
|
||||
def assign_private_ip_addresses(context, network_interface_id,
|
||||
private_ip_address=None,
|
||||
secondary_private_ip_address_count=None,
|
||||
allow_reassignment=False):
|
||||
# TODO(Alex): allow_reassignment is not supported at the moment
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
subnet = db_api.get_item_by_id(context, network_interface['subnet_id'])
|
||||
neutron = clients.neutron(context)
|
||||
os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
|
||||
os_port = neutron.show_port(network_interface['os_id'])['port']
|
||||
subnet_ipnet = netaddr.IPNetwork(os_subnet['cidr'])
|
||||
fixed_ips = os_port['fixed_ips'] or []
|
||||
if private_ip_address is not None:
|
||||
for ip_address in private_ip_address:
|
||||
if netaddr.IPAddress(ip_address) not in subnet_ipnet:
|
||||
raise exception.InvalidParameterValue(
|
||||
value=str(ip_address),
|
||||
parameter='PrivateIpAddress',
|
||||
reason='IP address is out of the subnet range')
|
||||
fixed_ips.append({'ip_address': str(ip_address)})
|
||||
elif secondary_private_ip_address_count > 0:
|
||||
for _i in range(secondary_private_ip_address_count):
|
||||
fixed_ips.append({'subnet_id': os_subnet['id']})
|
||||
try:
|
||||
neutron.update_port(os_port['id'],
|
||||
{'port': {'fixed_ips': fixed_ips}})
|
||||
except neutron_exception.IpAddressGenerationFailureClient:
|
||||
raise exception.InsufficientFreeAddressesInSubnet()
|
||||
except neutron_exception.IpAddressInUseClient:
|
||||
msg = _('Some of %(addresses)s is assigned, but move is not '
|
||||
'allowed.') % {'addresses': private_ip_address}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
except neutron_exception.BadRequest as ex:
|
||||
        # NOTE(ft): AWS returns PrivateIpAddressLimitExceeded, but Neutron does
|
||||
# general InvalidInput (converted to BadRequest) in the same case.
|
||||
msg = _('Specified network interface parameters are invalid. '
|
||||
'Reason: %(reason)s') % {'reason': ex.message}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
return True
|
||||
|
||||
|
||||
def unassign_private_ip_addresses(context, network_interface_id,
|
||||
private_ip_address):
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
if network_interface['private_ip_address'] in private_ip_address:
|
||||
raise exception.InvalidParameterValue(
|
||||
value=str(network_interface['private_ip_address']),
|
||||
parameter='PrivateIpAddresses',
|
||||
reason='Primary IP address cannot be unassigned')
|
||||
neutron = clients.neutron(context)
|
||||
os_port = neutron.show_port(network_interface['os_id'])['port']
|
||||
fixed_ips = os_port['fixed_ips'] or []
|
||||
new_fixed_ips = [ip for ip in fixed_ips
|
||||
if ip['ip_address'] not in private_ip_address]
|
||||
if len(new_fixed_ips) + len(private_ip_address) != len(fixed_ips):
|
||||
msg = _('Some of the specified addresses are not assigned to '
|
||||
'interface %(id)s') % {'id': network_interface_id}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
os_port = neutron.update_port(os_port['id'],
|
||||
{'port': {'fixed_ips': new_fixed_ips}})
|
||||
return True
|
||||
|
||||
|
||||
def describe_network_interface_attribute(context, network_interface_id,
|
||||
attribute=None):
|
||||
if attribute is None:
|
||||
raise exception.InvalidParameterCombination(
|
||||
_('No attributes specified.'))
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
|
||||
def _format_attr_description(result):
|
||||
result['description'] = {
|
||||
'value': network_interface.get('description', '')}
|
||||
|
||||
def _format_attr_source_dest_check(result):
|
||||
result['sourceDestCheck'] = {
|
||||
'value': network_interface.get('source_dest_check', True)}
|
||||
|
||||
def _format_attr_group_set(result):
|
||||
ec2_network_interface = describe_network_interfaces(context,
|
||||
network_interface_id=[network_interface_id]
|
||||
)['networkInterfaceSet'][0]
|
||||
result['groupSet'] = ec2_network_interface['groupSet']
|
||||
|
||||
def _format_attr_attachment(result):
|
||||
ec2_network_interface = describe_network_interfaces(context,
|
||||
network_interface_id=[network_interface_id]
|
||||
)['networkInterfaceSet'][0]
|
||||
if 'attachment' in ec2_network_interface:
|
||||
result['attachment'] = ec2_network_interface['attachment']
|
||||
|
||||
attribute_formatter = {
|
||||
'description': _format_attr_description,
|
||||
'sourceDestCheck': _format_attr_source_dest_check,
|
||||
'groupSet': _format_attr_group_set,
|
||||
'attachment': _format_attr_attachment,
|
||||
}
|
||||
|
||||
fn = attribute_formatter.get(attribute)
|
||||
if fn is None:
|
||||
raise exception.InvalidParameterValue(value=attribute,
|
||||
parameter='attribute',
|
||||
reason='Unknown attribute.')
|
||||
|
||||
result = {'networkInterfaceId': network_interface['id']}
|
||||
fn(result)
|
||||
return result
|
||||
|
||||
|
||||
def modify_network_interface_attribute(context, network_interface_id,
|
||||
description=None,
|
||||
source_dest_check=None,
|
||||
security_group_id=None,
|
||||
attachment=None):
|
||||
params_count = (
|
||||
int(description is not None) +
|
||||
int(source_dest_check is not None) +
|
||||
int(security_group_id is not None) +
|
||||
int(attachment is not None))
|
||||
if params_count != 1:
|
||||
raise exception.InvalidParameterCombination(
|
||||
'Multiple attributes specified')
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
if description is not None:
|
||||
network_interface['description'] = description
|
||||
db_api.update_item(context, network_interface)
|
||||
neutron = clients.neutron(context)
|
||||
if security_group_id is not None:
|
||||
os_groups = [sg['os_id']
|
||||
for sg in ec2utils.get_db_items(context, 'sg',
|
||||
security_group_id)]
|
||||
neutron.update_port(network_interface['os_id'],
|
||||
{'port': {'security_groups': os_groups}})
|
||||
if source_dest_check is not None:
|
||||
allowed = [] if source_dest_check else [{'ip_address': '0.0.0.0/0'}]
|
||||
neutron.update_port(network_interface['os_id'],
|
||||
{'port': {'allowed_address_pairs': allowed}})
|
||||
network_interface['source_dest_check'] = source_dest_check
|
||||
db_api.update_item(context, network_interface)
|
||||
if attachment:
|
||||
attachment_id = attachment.get('attachment_id')
|
||||
delete_on_termination = attachment.get('delete_on_termination')
|
||||
if attachment_id is None or delete_on_termination is None:
|
||||
raise exception.MissingParameter(
|
||||
_('The request must contain the parameter attachment '
|
||||
'deleteOnTermination'))
|
||||
attachment_id_own = ec2utils.change_ec2_id_kind(
|
||||
network_interface['id'], 'eni-attach')
|
||||
if ('instance_id' not in network_interface
|
||||
or attachment_id_own != attachment_id):
|
||||
raise exception.InvalidAttachmentIDNotFound(id=attachment_id)
|
||||
network_interface['delete_on_termination'] = delete_on_termination
|
||||
db_api.update_item(context, network_interface)
|
||||
return True
|
||||
|
||||
|
||||
def reset_network_interface_attribute(context, network_interface_id,
|
||||
attribute):
|
||||
    # TODO(Alex) This is only a stub because it's not supported by
    # OpenStack. True will be returned for now in any case.
    # NOTE(Alex) There is a bug in the AWS doc about this method -
    # "sourceDestCheck" should be used instead of "SourceDestCheck".
    # The AWS CLI also doesn't work with it because it doesn't comply
    # with the API.
|
||||
if attribute == 'sourceDestCheck':
|
||||
return modify_network_interface_attribute(context,
|
||||
network_interface_id,
|
||||
source_dest_check=True)
|
||||
return True
|
||||
|
||||
|
||||
def attach_network_interface(context, network_interface_id,
|
||||
instance_id, device_index):
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
if 'instance_id' in network_interface:
|
||||
raise exception.InvalidParameterValue(
|
||||
_("Network interface '%(id)s' is currently in use.") %
|
||||
{'id': network_interface_id})
|
||||
os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
|
||||
# TODO(Alex) Check that the instance is not yet attached to another VPC
|
||||
# TODO(Alex) Check that the instance is "our", not created via nova
|
||||
# (which means that it doesn't belong to any VPC and can't be attached)
|
||||
if any(eni['device_index'] == device_index
|
||||
for eni in db_api.get_items(context, 'eni')
|
||||
if eni.get('instance_id') == instance_id):
|
||||
raise exception.InvalidParameterValue(
|
||||
_("Instance '%(id)s' already has an interface attached at "
|
||||
"device index '%(index)s'.") % {'id': instance_id,
|
||||
'index': device_index})
|
||||
neutron = clients.neutron(context)
|
||||
os_port = neutron.show_port(network_interface['os_id'])['port']
|
||||
nova = clients.nova(context)
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
# TODO(Alex) nova inserts compute:%availability_zone into device_owner
|
||||
# 'device_owner': 'compute:None'}})
|
||||
_attach_network_interface_item(context, network_interface,
|
||||
instance_id, device_index)
|
||||
cleaner.addCleanup(_detach_network_interface_item, context,
|
||||
network_interface)
|
||||
nova.servers.interface_attach(os_instance_id, os_port['id'],
|
||||
None, None)
|
||||
return {'attachmentId': ec2utils.change_ec2_id_kind(
|
||||
network_interface['id'], 'eni-attach')}
|
||||
|
||||
|
||||
def detach_network_interface(context, attachment_id, force=None):
|
||||
network_interface = db_api.get_item_by_id(
|
||||
context, ec2utils.change_ec2_id_kind(attachment_id, 'eni'))
|
||||
if not network_interface or 'instance_id' not in network_interface:
|
||||
raise exception.InvalidAttachmentIDNotFound(id=attachment_id)
|
||||
if network_interface['device_index'] == 0:
|
||||
raise exception.OperationNotPermitted(
|
||||
_('The network interface at device index 0 cannot be detached.'))
|
||||
neutron = clients.neutron(context)
|
||||
os_port = neutron.show_port(network_interface['os_id'])['port']
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
instance_id = network_interface['instance_id']
|
||||
device_index = network_interface['device_index']
|
||||
attach_time = network_interface['attach_time']
|
||||
delete_on_termination = network_interface['delete_on_termination']
|
||||
_detach_network_interface_item(context, network_interface)
|
||||
cleaner.addCleanup(_attach_network_interface_item,
|
||||
context, network_interface, instance_id,
|
||||
device_index, attach_time, delete_on_termination)
|
||||
neutron.update_port(os_port['id'],
|
||||
{'port': {'device_id': '',
|
||||
'device_owner': ''}})
|
||||
return True
|
||||
|
||||
|
||||
def _format_network_interface(context, network_interface, os_port,
|
||||
associated_ec2_addresses=[], security_groups={}):
|
||||
ec2_network_interface = {}
|
||||
ec2_network_interface['networkInterfaceId'] = network_interface['id']
|
||||
ec2_network_interface['subnetId'] = network_interface['subnet_id']
|
||||
ec2_network_interface['vpcId'] = network_interface['vpc_id']
|
||||
ec2_network_interface['description'] = network_interface['description']
|
||||
ec2_network_interface['sourceDestCheck'] = (
|
||||
network_interface.get('source_dest_check', True))
|
||||
ec2_network_interface['requesterManaged'] = (
|
||||
os_port.get('device_owner', '').startswith('network:'))
|
||||
ec2_network_interface['ownerId'] = context.project_id
|
||||
security_group_set = []
|
||||
for sg_id in os_port['security_groups']:
|
||||
if security_groups.get(sg_id):
|
||||
security_group_set.append(security_groups[sg_id])
|
||||
ec2_network_interface['groupSet'] = security_group_set
|
||||
if 'instance_id' in network_interface:
|
||||
ec2_network_interface['status'] = 'in-use'
|
||||
ec2_network_interface['attachment'] = {
|
||||
'attachmentId': ec2utils.change_ec2_id_kind(
|
||||
network_interface['id'], 'eni-attach'),
|
||||
'instanceId': network_interface['instance_id'],
|
||||
'deviceIndex': network_interface['device_index'],
|
||||
'status': 'attached',
|
||||
'deleteOnTermination': network_interface['delete_on_termination'],
|
||||
'attachTime': network_interface['attach_time'],
|
||||
'instanceOwnerId': context.project_id
|
||||
}
|
||||
else:
|
||||
ec2_network_interface['status'] = 'available'
|
||||
ec2_network_interface['macAddress'] = os_port['mac_address']
|
||||
if os_port['fixed_ips']:
|
||||
ipsSet = []
|
||||
for ip in os_port['fixed_ips']:
|
||||
primary = (
|
||||
network_interface.get('private_ip_address', '') ==
|
||||
ip['ip_address'])
|
||||
item = {'privateIpAddress': ip['ip_address'],
|
||||
'primary': primary}
|
||||
ec2_address = next(
|
||||
(addr for addr in associated_ec2_addresses
|
||||
if addr['privateIpAddress'] == ip['ip_address']),
|
||||
None)
|
||||
if ec2_address:
|
||||
item['association'] = {
|
||||
'associationId': ec2utils.change_ec2_id_kind(
|
||||
ec2_address['allocationId'], 'eipassoc'),
|
||||
'allocationId': ec2_address['allocationId'],
|
||||
'ipOwnerId': context.project_id,
|
||||
'publicDnsName': None,
|
||||
'publicIp': ec2_address['publicIp'],
|
||||
}
|
||||
if primary:
|
||||
ipsSet.insert(0, item)
|
||||
else:
|
||||
ipsSet.append(item)
|
||||
ec2_network_interface['privateIpAddressesSet'] = ipsSet
|
||||
primary_ip = ipsSet[0]
|
||||
ec2_network_interface['privateIpAddress'] = (
|
||||
primary_ip['privateIpAddress'])
|
||||
if 'association' in primary_ip:
|
||||
ec2_network_interface['association'] = primary_ip['association']
|
||||
# NOTE(ft): AWS returns empty tag set for a network interface
|
||||
# if no tag exists
|
||||
ec2_network_interface['tagSet'] = []
|
||||
return ec2_network_interface
|
||||
|
||||
|
||||
def _attach_network_interface_item(context, network_interface, instance_id,
|
||||
device_index, attach_time=None,
|
||||
delete_on_termination=False):
|
||||
if not attach_time:
|
||||
attach_time = ec2utils.isotime(None, True)
|
||||
network_interface.update({
|
||||
'instance_id': instance_id,
|
||||
'device_index': device_index,
|
||||
'attach_time': attach_time,
|
||||
'delete_on_termination': delete_on_termination})
|
||||
db_api.update_item(context, network_interface)
|
||||
|
||||
|
||||
def _detach_network_interface_item(context, network_interface):
|
||||
network_interface.pop('instance_id', None)
|
||||
network_interface.pop('device_index', None)
|
||||
network_interface.pop('attach_time', None)
|
||||
network_interface.pop('delete_on_termination', None)
|
||||
db_api.update_item(context, network_interface)
|
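The create_network_interface() code above relies on netaddr to check that
every requested private address actually falls inside the subnet CIDR before
the Neutron port is created, with the primary address pushed to the front of
the fixed-ips list. A small self-contained sketch of that check, using
hypothetical sample data and a plain ValueError in place of the EC2 exception
types:

    # Sketch: validate requested private IPs against a subnet CIDR and build
    # the fixed_ips list the way the removed code ordered it (primary first).
    import netaddr

    def build_fixed_ips_sketch(subnet_cidr, requested_ips):
        subnet_ipnet = netaddr.IPNetwork(subnet_cidr)
        fixed_ips = []
        for ip in requested_ips:
            ip_address = netaddr.IPAddress(ip['private_ip_address'])
            if ip_address not in subnet_ipnet:
                raise ValueError('%s is out of the subnet range' % ip_address)
            if ip.get('primary', False):
                fixed_ips.insert(0, {'ip_address': str(ip_address)})
            else:
                fixed_ips.append({'ip_address': str(ip_address)})
        return fixed_ips

    print(build_fixed_ips_sketch(
        '10.0.0.0/24',
        [{'private_ip_address': '10.0.0.10', 'primary': True},
         {'private_ip_address': '10.0.0.11'}]))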
@ -1,39 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
|
||||
# use this file except in compliance with the License. You may obtain a copy
|
||||
# of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import itertools
|
||||
|
||||
import ec2api.api
|
||||
import ec2api.api.auth
|
||||
import ec2api.api.availability_zone
|
||||
import ec2api.api.common
|
||||
import ec2api.api.dhcp_options
|
||||
import ec2api.api.ec2utils
|
||||
import ec2api.api.image
|
||||
import ec2api.api.instance
|
||||
|
||||
|
||||
def list_opts():
|
||||
return [
|
||||
('DEFAULT',
|
||||
itertools.chain(
|
||||
ec2api.api.ec2_opts,
|
||||
ec2api.api.auth.auth_opts,
|
||||
ec2api.api.availability_zone.availability_zone_opts,
|
||||
ec2api.api.common.ec2_opts,
|
||||
ec2api.api.dhcp_options.ec2_opts,
|
||||
ec2api.api.ec2utils.ec2_opts,
|
||||
ec2api.api.image.s3_opts,
|
||||
ec2api.api.image.rpcapi_opts,
|
||||
ec2api.api.instance.ec2_opts,
|
||||
)),
|
||||
]
|
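The opts module above only aggregates the option lists that other ec2api
modules define, so that configuration tooling such as oslo-config-generator
can discover them in one place. A minimal illustrative hook of the same shape,
with hypothetical option names:

    # Sketch: a list_opts() hook of the kind consumed by oslo-config-generator.
    import itertools

    from oslo_config import cfg

    example_opts = [
        cfg.StrOpt('example_host', default='localhost',
                   help='Hostname used by the hypothetical service.'),
        cfg.IntOpt('example_port', default=8788,
                   help='Port used by the hypothetical service.'),
    ]

    def list_opts():
        return [('DEFAULT', itertools.chain(example_opts))]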
@ -1,686 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import collections
|
||||
import copy
|
||||
|
||||
import netaddr
|
||||
from novaclient import exceptions as nova_exception
|
||||
|
||||
from ec2api.api import common
|
||||
from ec2api.api import ec2utils
|
||||
from ec2api.api import vpn_connection as vpn_connection_api
|
||||
from ec2api import clients
|
||||
from ec2api.db import api as db_api
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
|
||||
HOST_TARGET = 'host'
|
||||
VPN_TARGET = 'vpn'
|
||||
|
||||
|
||||
"""Route tables related API implementation
|
||||
"""
|
||||
|
||||
|
||||
class Validator(common.Validator):
|
||||
|
||||
def igw_or_vgw_id(self, id):
|
||||
self.ec2_id(id, ['igw', 'vgw'])
|
||||
|
||||
|
||||
def create_route_table(context, vpc_id):
|
||||
vpc = ec2utils.get_db_item(context, vpc_id)
|
||||
route_table = _create_route_table(context, vpc)
|
||||
return {'routeTable': _format_route_table(context, route_table,
|
||||
is_main=False)}
|
||||
|
||||
|
||||
def create_route(context, route_table_id, destination_cidr_block,
|
||||
gateway_id=None, instance_id=None,
|
||||
network_interface_id=None,
|
||||
vpc_peering_connection_id=None):
|
||||
return _set_route(context, route_table_id, destination_cidr_block,
|
||||
gateway_id, instance_id, network_interface_id,
|
||||
vpc_peering_connection_id, False)
|
||||
|
||||
|
||||
def replace_route(context, route_table_id, destination_cidr_block,
|
||||
gateway_id=None, instance_id=None,
|
||||
network_interface_id=None,
|
||||
vpc_peering_connection_id=None):
|
||||
return _set_route(context, route_table_id, destination_cidr_block,
|
||||
gateway_id, instance_id, network_interface_id,
|
||||
vpc_peering_connection_id, True)
|
||||
|
||||
|
||||
def delete_route(context, route_table_id, destination_cidr_block):
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
for route_index, route in enumerate(route_table['routes']):
|
||||
if route['destination_cidr_block'] != destination_cidr_block:
|
||||
continue
|
||||
if route.get('gateway_id', 0) is None:
|
||||
msg = _('cannot remove local route %(destination_cidr_block)s '
|
||||
'in route table %(route_table_id)s')
|
||||
msg = msg % {'route_table_id': route_table_id,
|
||||
'destination_cidr_block': destination_cidr_block}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
break
|
||||
else:
|
||||
raise exception.InvalidRouteNotFound(
|
||||
route_table_id=route_table_id,
|
||||
destination_cidr_block=destination_cidr_block)
|
||||
update_target = _get_route_target(route)
|
||||
if update_target == VPN_TARGET:
|
||||
vpn_gateway = db_api.get_item_by_id(context, route['gateway_id'])
|
||||
if (not vpn_gateway or
|
||||
vpn_gateway['vpc_id'] != route_table['vpc_id']):
|
||||
update_target = None
|
||||
rollback_route_table_state = copy.deepcopy(route_table)
|
||||
del route_table['routes'][route_index]
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
db_api.update_item(context, route_table)
|
||||
cleaner.addCleanup(db_api.update_item, context,
|
||||
rollback_route_table_state)
|
||||
|
||||
if update_target:
|
||||
_update_routes_in_associated_subnets(
|
||||
context, cleaner, route_table, update_target=update_target)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def enable_vgw_route_propagation(context, route_table_id, gateway_id):
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
# NOTE(ft): AWS returns GatewayNotAttached for all invalid cases of
|
||||
# gateway_id value
|
||||
vpn_gateway = ec2utils.get_db_item(context, gateway_id)
|
||||
if vpn_gateway['vpc_id'] != route_table['vpc_id']:
|
||||
raise exception.GatewayNotAttached(gw_id=vpn_gateway['id'],
|
||||
vpc_id=route_table['vpc_id'])
|
||||
if vpn_gateway['id'] in route_table.setdefault('propagating_gateways', []):
|
||||
return True
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_append_propagation_to_route_table_item(context, route_table,
|
||||
vpn_gateway['id'])
|
||||
cleaner.addCleanup(_remove_propagation_from_route_table_item,
|
||||
context, route_table, vpn_gateway['id'])
|
||||
|
||||
_update_routes_in_associated_subnets(context, cleaner, route_table,
|
||||
update_target=VPN_TARGET)
|
||||
return True
|
||||
|
||||
|
||||
def disable_vgw_route_propagation(context, route_table_id, gateway_id):
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
if gateway_id not in route_table.get('propagating_gateways', []):
|
||||
return True
|
||||
vpn_gateway = db_api.get_item_by_id(context, gateway_id)
|
||||
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_remove_propagation_from_route_table_item(context, route_table,
|
||||
gateway_id)
|
||||
cleaner.addCleanup(_append_propagation_to_route_table_item,
|
||||
context, route_table, gateway_id)
|
||||
|
||||
if vpn_gateway and vpn_gateway['vpc_id'] == route_table['vpc_id']:
|
||||
_update_routes_in_associated_subnets(context, cleaner, route_table,
|
||||
update_target=VPN_TARGET)
|
||||
return True
|
||||
|
||||
|
||||
def associate_route_table(context, route_table_id, subnet_id):
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
subnet = ec2utils.get_db_item(context, subnet_id)
|
||||
if route_table['vpc_id'] != subnet['vpc_id']:
|
||||
msg = _('Route table %(rtb_id)s and subnet %(subnet_id)s belong to '
|
||||
'different networks')
|
||||
msg = msg % {'rtb_id': route_table_id,
|
||||
'subnet_id': subnet_id}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
if 'route_table_id' in subnet:
|
||||
msg = _('The specified association for route table %(rtb_id)s '
|
||||
'conflicts with an existing association')
|
||||
msg = msg % {'rtb_id': route_table_id}
|
||||
raise exception.ResourceAlreadyAssociated(msg)
|
||||
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_associate_subnet_item(context, subnet, route_table['id'])
|
||||
cleaner.addCleanup(_disassociate_subnet_item, context, subnet)
|
||||
|
||||
_update_subnet_routes(context, cleaner, subnet, route_table)
|
||||
|
||||
return {'associationId': ec2utils.change_ec2_id_kind(subnet['id'],
|
||||
'rtbassoc')}
|
||||
|
||||
|
||||
def replace_route_table_association(context, association_id, route_table_id):
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
if route_table['vpc_id'] == ec2utils.change_ec2_id_kind(association_id,
|
||||
'vpc'):
|
||||
vpc = db_api.get_item_by_id(
|
||||
context, ec2utils.change_ec2_id_kind(association_id, 'vpc'))
|
||||
if vpc is None:
|
||||
raise exception.InvalidAssociationIDNotFound(id=association_id)
|
||||
|
||||
rollback_route_table_id = vpc['route_table_id']
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_associate_vpc_item(context, vpc, route_table['id'])
|
||||
cleaner.addCleanup(_associate_vpc_item, context, vpc,
|
||||
rollback_route_table_id)
|
||||
|
||||
_update_routes_in_associated_subnets(
|
||||
context, cleaner, route_table, default_associations_only=True)
|
||||
else:
|
||||
subnet = db_api.get_item_by_id(
|
||||
context, ec2utils.change_ec2_id_kind(association_id, 'subnet'))
|
||||
if subnet is None or 'route_table_id' not in subnet:
|
||||
raise exception.InvalidAssociationIDNotFound(id=association_id)
|
||||
if subnet['vpc_id'] != route_table['vpc_id']:
|
||||
msg = _('Route table association %(rtbassoc_id)s and route table '
|
||||
'%(rtb_id)s belong to different networks')
|
||||
msg = msg % {'rtbassoc_id': association_id,
|
||||
'rtb_id': route_table_id}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
rollback_route_table_id = subnet['route_table_id']
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_associate_subnet_item(context, subnet, route_table['id'])
|
||||
cleaner.addCleanup(_associate_subnet_item, context, subnet,
|
||||
rollback_route_table_id)
|
||||
|
||||
_update_subnet_routes(context, cleaner, subnet, route_table)
|
||||
|
||||
return {'newAssociationId': association_id}
|
||||
|
||||
|
||||
def disassociate_route_table(context, association_id):
|
||||
subnet = db_api.get_item_by_id(
|
||||
context, ec2utils.change_ec2_id_kind(association_id, 'subnet'))
|
||||
if not subnet:
|
||||
vpc = db_api.get_item_by_id(
|
||||
context, ec2utils.change_ec2_id_kind(association_id, 'vpc'))
|
||||
if vpc is None:
|
||||
raise exception.InvalidAssociationIDNotFound(id=association_id)
|
||||
msg = _('Cannot disassociate the main route table association '
|
||||
'%(rtbassoc_id)s') % {'rtbassoc_id': association_id}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
if 'route_table_id' not in subnet:
|
||||
raise exception.InvalidAssociationIDNotFound(id=association_id)
|
||||
|
||||
rollback_route_table_id = subnet['route_table_id']
|
||||
vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
|
||||
main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
_disassociate_subnet_item(context, subnet)
|
||||
cleaner.addCleanup(_associate_subnet_item, context, subnet,
|
||||
rollback_route_table_id)
|
||||
|
||||
_update_subnet_routes(context, cleaner, subnet, main_route_table)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def delete_route_table(context, route_table_id):
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
vpc = db_api.get_item_by_id(context, route_table['vpc_id'])
|
||||
_delete_route_table(context, route_table['id'], vpc)
|
||||
return True
|
||||
|
||||
|
||||
class RouteTableDescriber(common.TaggableItemsDescriber,
|
||||
common.NonOpenstackItemsDescriber):
|
||||
|
||||
KIND = 'rtb'
|
||||
FILTER_MAP = {'association.route-table-association-id': (
|
||||
['associationSet', 'routeTableAssociationId']),
|
||||
'association.route-table-id': ['associationSet',
|
||||
'routeTableId'],
|
||||
'association.subnet-id': ['associationSet', 'subnetId'],
|
||||
'association.main': ['associationSet', 'main'],
|
||||
'route-table-id': 'routeTableId',
|
||||
'route.destination-cidr-block': ['routeSet',
|
||||
'destinationCidrBlock'],
|
||||
'route.gateway-id': ['routeSet', 'gatewayId'],
|
||||
'route.instance-id': ['routeSet', 'instanceId'],
|
||||
'route.origin': ['routeSet', 'origin'],
|
||||
'route.state': ['routeSet', 'state'],
|
||||
'vpc-id': 'vpcId'}
|
||||
|
||||
def format(self, route_table):
|
||||
return _format_route_table(
|
||||
self.context, route_table,
|
||||
associated_subnet_ids=self.associations[route_table['id']],
|
||||
is_main=(self.vpcs[route_table['vpc_id']]['route_table_id'] ==
|
||||
route_table['id']),
|
||||
gateways=self.gateways,
|
||||
network_interfaces=self.network_interfaces,
|
||||
vpn_connections_by_gateway_id=self.vpn_connections_by_gateway_id)
|
||||
|
||||
def get_db_items(self):
|
||||
associations = collections.defaultdict(list)
|
||||
for subnet in db_api.get_items(self.context, 'subnet'):
|
||||
if 'route_table_id' in subnet:
|
||||
associations[subnet['route_table_id']].append(subnet['id'])
|
||||
self.associations = associations
|
||||
vpcs = db_api.get_items(self.context, 'vpc')
|
||||
self.vpcs = {vpc['id']: vpc for vpc in vpcs}
|
||||
gateways = (db_api.get_items(self.context, 'igw') +
|
||||
db_api.get_items(self.context, 'vgw'))
|
||||
self.gateways = {gw['id']: gw for gw in gateways}
|
||||
# TODO(ft): scan route tables to get only used instances and
|
||||
# network interfaces to reduce DB and Nova throughput
|
||||
network_interfaces = db_api.get_items(self.context, 'eni')
|
||||
self.network_interfaces = {eni['id']: eni
|
||||
for eni in network_interfaces}
|
||||
vpn_connections = db_api.get_items(self.context, 'vpn')
|
||||
vpns_by_gateway_id = {}
|
||||
for vpn in vpn_connections:
|
||||
vpns = vpns_by_gateway_id.setdefault(vpn['vpn_gateway_id'], [])
|
||||
vpns.append(vpn)
|
||||
self.vpn_connections_by_gateway_id = vpns_by_gateway_id
|
||||
return super(RouteTableDescriber, self).get_db_items()
|
||||
|
||||
|
||||
def describe_route_tables(context, route_table_id=None, filter=None):
|
||||
ec2utils.check_and_create_default_vpc(context)
|
||||
formatted_route_tables = RouteTableDescriber().describe(
|
||||
context, ids=route_table_id, filter=filter)
|
||||
return {'routeTableSet': formatted_route_tables}
|
||||
|
||||
|
||||
def _create_route_table(context, vpc):
|
||||
route_table = {'vpc_id': vpc['id'],
|
||||
'routes': [{'destination_cidr_block': vpc['cidr_block'],
|
||||
'gateway_id': None}]}
|
||||
route_table = db_api.add_item(context, 'rtb', route_table)
|
||||
return route_table
|
||||
|
||||
|
||||
def _delete_route_table(context, route_table_id, vpc=None, cleaner=None):
|
||||
def get_associated_subnets():
|
||||
return [s for s in db_api.get_items(context, 'subnet')
|
||||
if s.get('route_table_id') == route_table_id]
|
||||
|
||||
if (vpc and route_table_id == vpc['route_table_id'] or
|
||||
len(get_associated_subnets()) > 0):
|
||||
msg = _("The routeTable '%(rtb_id)s' has dependencies and cannot "
|
||||
"be deleted.") % {'rtb_id': route_table_id}
|
||||
raise exception.DependencyViolation(msg)
|
||||
if cleaner:
|
||||
route_table = db_api.get_item_by_id(context, route_table_id)
|
||||
db_api.delete_item(context, route_table_id)
|
||||
if cleaner and route_table:
|
||||
cleaner.addCleanup(db_api.restore_item, context, 'rtb', route_table)
|
||||
|
||||
|
||||
def _set_route(context, route_table_id, destination_cidr_block,
|
||||
gateway_id, instance_id, network_interface_id,
|
||||
vpc_peering_connection_id, do_replace):
|
||||
route_table = ec2utils.get_db_item(context, route_table_id)
|
||||
vpc = db_api.get_item_by_id(context, route_table['vpc_id'])
|
||||
vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
|
||||
route_ipnet = netaddr.IPNetwork(destination_cidr_block)
|
||||
if route_ipnet in vpc_ipnet:
|
||||
msg = _('Cannot create a more specific route for '
|
||||
'%(destination_cidr_block)s than local route '
|
||||
'%(vpc_cidr_block)s in route table %(rtb_id)s')
|
||||
msg = msg % {'rtb_id': route_table_id,
|
||||
'destination_cidr_block': destination_cidr_block,
|
||||
'vpc_cidr_block': vpc['cidr_block']}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
|
||||
obj_param_count = len([p for p in (gateway_id, network_interface_id,
|
||||
instance_id, vpc_peering_connection_id)
|
||||
if p is not None])
|
||||
if obj_param_count != 1:
|
||||
msg = _('The request must contain exactly one of gatewayId, '
|
||||
'networkInterfaceId, vpcPeeringConnectionId or instanceId')
|
||||
if obj_param_count == 0:
|
||||
raise exception.MissingParameter(msg)
|
||||
else:
|
||||
raise exception.InvalidParameterCombination(msg)
|
||||
|
||||
    rollback_route_table_state = copy.deepcopy(route_table)
|
||||
if do_replace:
|
||||
route_index, old_route = next(
|
||||
((i, r) for i, r in enumerate(route_table['routes'])
|
||||
if r['destination_cidr_block'] == destination_cidr_block),
|
||||
(None, None))
|
||||
if route_index is None:
|
||||
msg = _("There is no route defined for "
|
||||
"'%(destination_cidr_block)s' in the route table. "
|
||||
"Use CreateRoute instead.")
|
||||
msg = msg % {'destination_cidr_block': destination_cidr_block}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
else:
|
||||
del route_table['routes'][route_index]
|
||||
|
||||
if gateway_id:
|
||||
gateway = ec2utils.get_db_item(context, gateway_id)
|
||||
if gateway.get('vpc_id') != route_table['vpc_id']:
|
||||
if ec2utils.get_ec2_id_kind(gateway_id) == 'vgw':
|
||||
raise exception.InvalidGatewayIDNotFound(id=gateway['id'])
|
||||
else: # igw
|
||||
raise exception.InvalidParameterValue(
|
||||
_('Route table %(rtb_id)s and network gateway %(igw_id)s '
|
||||
'belong to different networks') %
|
||||
{'rtb_id': route_table_id,
|
||||
'igw_id': gateway_id})
|
||||
route = {'gateway_id': gateway['id']}
|
||||
elif network_interface_id:
|
||||
network_interface = ec2utils.get_db_item(context, network_interface_id)
|
||||
if network_interface['vpc_id'] != route_table['vpc_id']:
|
||||
msg = _('Route table %(rtb_id)s and interface %(eni_id)s '
|
||||
'belong to different networks')
|
||||
msg = msg % {'rtb_id': route_table_id,
|
||||
'eni_id': network_interface_id}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
route = {'network_interface_id': network_interface['id']}
|
||||
elif instance_id:
|
||||
# TODO(ft): implement search in DB layer
|
||||
network_interfaces = [eni for eni in db_api.get_items(context, 'eni')
|
||||
if eni.get('instance_id') == instance_id]
|
||||
if len(network_interfaces) == 0:
|
||||
msg = _("Invalid value '%(i_id)s' for instance ID. "
|
||||
"Instance is not in a VPC.")
|
||||
msg = msg % {'i_id': instance_id}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
elif len(network_interfaces) > 1:
|
||||
raise exception.InvalidInstanceId(instance_id=instance_id)
|
||||
network_interface = network_interfaces[0]
|
||||
if network_interface['vpc_id'] != route_table['vpc_id']:
|
||||
msg = _('Route table %(rtb_id)s and interface %(eni_id)s '
|
||||
'belong to different networks')
|
||||
msg = msg % {'rtb_id': route_table_id,
|
||||
'eni_id': network_interface['id']}
|
||||
raise exception.InvalidParameterValue(msg)
|
||||
route = {'network_interface_id': network_interface['id']}
|
||||
else:
|
||||
raise exception.InvalidRequest('Parameter VpcPeeringConnectionId is '
|
||||
'not supported by this implementation')
|
||||
route['destination_cidr_block'] = destination_cidr_block
|
||||
update_target = _get_route_target(route)
|
||||
|
||||
if do_replace:
|
||||
idempotent_call = False
|
||||
old_target = _get_route_target(old_route)
|
||||
if old_target != update_target:
|
||||
update_target = None
|
||||
else:
|
||||
old_route = next((r for r in route_table['routes']
|
||||
if r['destination_cidr_block'] ==
|
||||
destination_cidr_block), None)
|
||||
idempotent_call = old_route == route
|
||||
if old_route and not idempotent_call:
|
||||
raise exception.RouteAlreadyExists(
|
||||
destination_cidr_block=destination_cidr_block)
|
||||
|
||||
if not idempotent_call:
|
||||
route_table['routes'].append(route)
|
||||
|
||||
with common.OnCrashCleaner() as cleaner:
|
||||
db_api.update_item(context, route_table)
|
||||
cleaner.addCleanup(db_api.update_item, context,
|
||||
                           rollback_route_table_state)
|
||||
_update_routes_in_associated_subnets(context, cleaner, route_table,
|
||||
update_target=update_target)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def _format_route_table(context, route_table, is_main=False,
|
||||
associated_subnet_ids=[],
|
||||
gateways={},
|
||||
network_interfaces={},
|
||||
vpn_connections_by_gateway_id={}):
|
||||
vpc_id = route_table['vpc_id']
|
||||
ec2_route_table = {
|
||||
'routeTableId': route_table['id'],
|
||||
'vpcId': vpc_id,
|
||||
'routeSet': [],
|
||||
'propagatingVgwSet': [
|
||||
{'gatewayId': vgw_id}
|
||||
for vgw_id in route_table.get('propagating_gateways', [])],
|
||||
# NOTE(ft): AWS returns empty tag set for a route table
|
||||
# if no tag exists
|
||||
'tagSet': [],
|
||||
}
|
||||
# TODO(ft): refactor to get Nova instances outside of this function
|
||||
nova = clients.nova(context)
|
||||
for route in route_table['routes']:
|
||||
origin = ('CreateRouteTable'
|
||||
if route.get('gateway_id', 0) is None else
|
||||
'CreateRoute')
|
||||
ec2_route = {'destinationCidrBlock': route['destination_cidr_block'],
|
||||
'origin': origin}
|
||||
if 'gateway_id' in route:
|
||||
gateway_id = route['gateway_id']
|
||||
if gateway_id is None:
|
||||
state = 'active'
|
||||
ec2_gateway_id = 'local'
|
||||
else:
|
||||
gateway = gateways.get(gateway_id)
|
||||
state = ('active'
|
||||
if gateway and gateway.get('vpc_id') == vpc_id else
|
||||
'blackhole')
|
||||
ec2_gateway_id = gateway_id
|
||||
ec2_route.update({'gatewayId': ec2_gateway_id,
|
||||
'state': state})
|
||||
else:
|
||||
network_interface_id = route['network_interface_id']
|
||||
network_interface = network_interfaces.get(network_interface_id)
|
||||
instance_id = (network_interface.get('instance_id')
|
||||
if network_interface else
|
||||
None)
|
||||
state = 'blackhole'
|
||||
if instance_id:
|
||||
instance = db_api.get_item_by_id(context, instance_id)
|
||||
if instance:
|
||||
try:
|
||||
os_instance = nova.servers.get(instance['os_id'])
|
||||
if os_instance and os_instance.status == 'ACTIVE':
|
||||
state = 'active'
|
||||
except nova_exception.NotFound:
|
||||
pass
|
||||
ec2_route.update({'instanceId': instance_id,
|
||||
'instanceOwnerId': context.project_id})
|
||||
ec2_route.update({'networkInterfaceId': network_interface_id,
|
||||
'state': state})
|
||||
ec2_route_table['routeSet'].append(ec2_route)
|
||||
|
||||
for vgw_id in route_table.get('propagating_gateways', []):
|
||||
vgw = gateways.get(vgw_id)
|
||||
if vgw and vgw_id in vpn_connections_by_gateway_id:
|
||||
cidrs = set()
|
||||
vpn_connections = vpn_connections_by_gateway_id[vgw_id]
|
||||
for vpn_connection in vpn_connections:
|
||||
cidrs.update(vpn_connection['cidrs'])
|
||||
state = 'active' if vgw['vpc_id'] == vpc_id else 'blackhole'
|
||||
for cidr in cidrs:
|
||||
ec2_route = {'gatewayId': vgw_id,
|
||||
'destinationCidrBlock': cidr,
|
||||
'state': state,
|
||||
'origin': 'EnableVgwRoutePropagation'}
|
||||
ec2_route_table['routeSet'].append(ec2_route)
|
||||
|
||||
associations = []
|
||||
if is_main:
|
||||
associations.append({
|
||||
'routeTableAssociationId': ec2utils.change_ec2_id_kind(vpc_id,
|
||||
'rtbassoc'),
|
||||
'routeTableId': route_table['id'],
|
||||
'main': True})
|
||||
for subnet_id in associated_subnet_ids:
|
||||
associations.append({
|
||||
'routeTableAssociationId': ec2utils.change_ec2_id_kind(subnet_id,
|
||||
'rtbassoc'),
|
||||
'routeTableId': route_table['id'],
|
||||
'subnetId': subnet_id,
|
||||
'main': False})
|
||||
if associations:
|
||||
ec2_route_table['associationSet'] = associations
|
||||
|
||||
return ec2_route_table
|
||||
|
||||
|
||||
def _update_routes_in_associated_subnets(context, cleaner, route_table,
|
||||
default_associations_only=None,
|
||||
update_target=None):
|
||||
if default_associations_only:
|
||||
appropriate_rtb_ids = (None,)
|
||||
else:
|
||||
vpc = db_api.get_item_by_id(context, route_table['vpc_id'])
|
||||
if vpc['route_table_id'] == route_table['id']:
|
||||
appropriate_rtb_ids = (route_table['id'], None)
|
||||
else:
|
||||
appropriate_rtb_ids = (route_table['id'],)
|
||||
neutron = clients.neutron(context)
|
||||
subnets = [subnet for subnet in db_api.get_items(context, 'subnet')
|
||||
if (subnet['vpc_id'] == route_table['vpc_id'] and
|
||||
subnet.get('route_table_id') in appropriate_rtb_ids)]
|
||||
# NOTE(ft): we need to update host routes for both host and vpn target
|
||||
# because vpn-related routes are present in host routes as well
|
||||
_update_host_routes(context, neutron, cleaner, route_table, subnets)
|
||||
if not update_target or update_target == VPN_TARGET:
|
||||
vpn_connection_api._update_vpn_routes(context, neutron, cleaner,
|
||||
route_table, subnets)
|
||||
|
||||
|
||||
def _update_subnet_routes(context, cleaner, subnet, route_table):
|
||||
neutron = clients.neutron(context)
|
||||
_update_host_routes(context, neutron, cleaner, route_table, [subnet])
|
||||
vpn_connection_api._update_vpn_routes(context, neutron, cleaner,
|
||||
route_table, [subnet])
|
||||
|
||||
|
||||
def _update_host_routes(context, neutron, cleaner, route_table, subnets):
|
||||
destinations = _get_active_route_destinations(context, route_table)
|
||||
for subnet in subnets:
|
||||
# TODO(ft): do list subnet w/ filters instead of show one by one
|
||||
    os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
    host_routes, gateway_ip = _get_subnet_host_routes_and_gateway_ip(
        context, route_table, os_subnet['cidr'], destinations)
    neutron.update_subnet(subnet['os_id'],
                          {'subnet': {'host_routes': host_routes,
                                      'gateway_ip': gateway_ip}})
    cleaner.addCleanup(
        neutron.update_subnet, subnet['os_id'],
        {'subnet': {'host_routes': os_subnet['host_routes'],
                    'gateway_ip': os_subnet['gateway_ip']}})


def _get_active_route_destinations(context, route_table):
    vpn_connections = {vpn['vpn_gateway_id']: vpn
                       for vpn in db_api.get_items(context, 'vpn')}
    dst_ids = [route[id_key]
               for route in route_table['routes']
               for id_key in ('gateway_id', 'network_interface_id')
               if route.get(id_key) is not None]
    dst_ids.extend(route_table.get('propagating_gateways', []))
    destinations = {item['id']: item
                    for item in db_api.get_items_by_ids(context, dst_ids)
                    if (item['vpc_id'] == route_table['vpc_id'] and
                        (ec2utils.get_ec2_id_kind(item['id']) != 'vgw' or
                         item['id'] in vpn_connections))}
    for vpn in vpn_connections.values():
        if vpn['vpn_gateway_id'] in destinations:
            destinations[vpn['vpn_gateway_id']]['vpn_connection'] = vpn
    return destinations


def _get_subnet_host_routes_and_gateway_ip(context, route_table, cidr_block,
                                           destinations=None):
    if not destinations:
        destinations = _get_active_route_destinations(context, route_table)
    gateway_ip = str(netaddr.IPAddress(
        netaddr.IPNetwork(cidr_block).first + 1))

    def get_nexthop(route):
        if 'gateway_id' in route:
            gateway_id = route['gateway_id']
            if gateway_id and gateway_id not in destinations:
                return '127.0.0.1'
            return gateway_ip
        network_interface = destinations.get(route['network_interface_id'])
        if not network_interface:
            return '127.0.0.1'
        return network_interface['private_ip_address']

    host_routes = []
    subnet_gateway_is_used = False
    for route in route_table['routes']:
        nexthop = get_nexthop(route)
        cidr = route['destination_cidr_block']
        if cidr == '0.0.0.0/0':
            if nexthop == '127.0.0.1':
                continue
            elif nexthop == gateway_ip:
                subnet_gateway_is_used = True
        host_routes.append({'destination': cidr,
                            'nexthop': nexthop})
    host_routes.extend(
        {'destination': cidr,
         'nexthop': gateway_ip}
        for vgw_id in route_table.get('propagating_gateways', [])
        for cidr in (destinations.get(vgw_id, {}).get('vpn_connection', {}).
                     get('cidrs', [])))

    if not subnet_gateway_is_used:
        # NOTE(andrey-mp): add route to metadata server
        host_routes.append(
            {'destination': '169.254.169.254/32',
             'nexthop': gateway_ip})
        # NOTE(ft): gateway_ip is set to None to allow correct handling
        # of 0.0.0.0/0 route by Neutron.
        gateway_ip = None
    return host_routes, gateway_ip


def _get_route_target(route):
    if ec2utils.get_ec2_id_kind(route.get('gateway_id') or '') == 'vgw':
        return VPN_TARGET
    else:
        return HOST_TARGET


def _associate_subnet_item(context, subnet, route_table_id):
    subnet['route_table_id'] = route_table_id
    db_api.update_item(context, subnet)


def _disassociate_subnet_item(context, subnet):
    subnet.pop('route_table_id')
    db_api.update_item(context, subnet)


def _associate_vpc_item(context, vpc, route_table_id):
    vpc['route_table_id'] = route_table_id
    db_api.update_item(context, vpc)


def _append_propagation_to_route_table_item(context, route_table, gateway_id):
    vgws = route_table.setdefault('propagating_gateways', [])
    vgws.append(gateway_id)
    db_api.update_item(context, route_table)


def _remove_propagation_from_route_table_item(context, route_table,
                                              gateway_id):
    vgws = route_table['propagating_gateways']
    vgws.remove(gateway_id)
    if not vgws:
        del route_table['propagating_gateways']
    db_api.update_item(context, route_table)
@ -1,589 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import copy

try:
    from neutronclient.common import exceptions as neutron_exception
except ImportError:
    pass  # clients will log absense of neutronclient in this case
from oslo_config import cfg
from oslo_log import log as logging

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.api import validator
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


"""Security Groups related API implementation
"""

Validator = common.Validator


SECURITY_GROUP_MAP = {'domain-name-servers': 'dns-servers',
                      'domain-name': 'domain-name',
                      'ntp-servers': 'ntp-server',
                      'netbios-name-servers': 'netbios-ns',
                      'netbios-node-type': 'netbios-nodetype'}

DEFAULT_GROUP_NAME = 'default'


def get_security_group_engine():
    return SecurityGroupEngineNeutron()


def create_security_group(context, group_name, group_description,
                          vpc_id=None):
    if group_name == DEFAULT_GROUP_NAME:
        if vpc_id:
            raise exception.InvalidParameterValue(
                _('Cannot use reserved security group name: %s')
                % DEFAULT_GROUP_NAME)
        else:
            raise exception.InvalidGroupReserved(group_name=group_name)
    filter = [{'name': 'group-name',
               'value': [group_name]}]
    if not vpc_id and CONF.disable_ec2_classic:
        vpc_id = ec2utils.get_default_vpc(context)['id']
    if vpc_id and group_name != vpc_id:
        filter.append({'name': 'vpc-id',
                       'value': [vpc_id]})
    security_groups = describe_security_groups(
        context, filter=filter)['securityGroupInfo']
    if not vpc_id:
        # TODO(andrey-mp): remove it when fitering by None will be implemented
        security_groups = [sg for sg in security_groups
                           if sg.get('vpcId') is None]
    if security_groups:
        raise exception.InvalidGroupDuplicate(name=group_name)
    return _create_security_group(context, group_name, group_description,
                                  vpc_id)


def _create_security_group(context, group_name, group_description,
                           vpc_id=None, default=False):
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            secgroup_body = (
                {'security_group': {'name': group_name,
                                    'description': group_description}})
            os_security_group = neutron.create_security_group(
                secgroup_body)['security_group']
        except neutron_exception.OverQuotaClient:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(neutron.delete_security_group,
                           os_security_group['id'])
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        item = {'vpc_id': vpc_id, 'os_id': os_security_group['id']}
        if not default:
            security_group = db_api.add_item(context, 'sg', item)
        else:
            item['id'] = ec2utils.change_ec2_id_kind(vpc_id, 'sg')
            # NOTE(andrey-mp): try to add item with specific id
            # and catch exception if it exists
            security_group = db_api.restore_item(context, 'sg', item)
        return {'return': 'true',
                'groupId': security_group['id']}


def _create_default_security_group(context, vpc):
    # NOTE(Alex): OpenStack doesn't allow creation of another group
    # named 'default' hence vpc-id is used.
    try:
        sg_id = _create_security_group(context, vpc['id'],
                                       'Default VPC security group', vpc['id'],
                                       default=True)['groupId']
    except (exception.EC2DBDuplicateEntry, exception.InvalidVpcIDNotFound):
        # NOTE(andrey-mp): when this thread tries to recreate default group
        # but another thread tries to delete vpc we should pass vpc not found
        LOG.exception('Failed to create default security group.')
        return None
    return sg_id


def delete_security_group(context, group_name=None, group_id=None,
                          delete_default=False):
    if group_name is None and group_id is None:
        raise exception.MissingParameter(param='group id or name')
    security_group_engine.delete_group(context, group_name, group_id,
                                       delete_default)
    return True


class SecurityGroupDescriber(common.TaggableItemsDescriber):

    KIND = 'sg'
    FILTER_MAP = {'description': 'groupDescription',
                  'group-id': 'groupId',
                  'group-name': 'groupName',
                  'ip-permission.cidr': ['ipPermissions',
                                         ['ipRanges', 'cidrIp']],
                  'ip-permission.from-port': ['ipPermissions', 'fromPort'],
                  'ip-permission.group-id': ['ipPermissions',
                                             ['groups', 'groupId']],
                  'ip-permission.group-name': ['ipPermissions',
                                               ['groups', 'groupName']],
                  'ip-permission.protocol': ['ipPermissions', 'ipProtocol'],
                  'ip-permission.to-port': ['ipPermissions', 'toPort'],
                  'ip-permission.user-id': ['ipPermissions',
                                            ['groups', 'userId']],
                  'owner-id': 'ownerId',
                  'vpc-id': 'vpcId',
                  }

    def __init__(self, default_vpc_id):
        super(SecurityGroupDescriber, self).__init__()
        self.all_db_items = None
        self.default_vpc_id = default_vpc_id

    def format(self, item=None, os_item=None):
        return _format_security_group(item, os_item,
                                      self.all_db_items, self.os_items)

    def get_os_items(self):
        if self.all_db_items is None:
            self.all_db_items = db_api.get_items(self.context, 'sg')
        os_groups = security_group_engine.get_os_groups(self.context)
        if self.check_and_repair_default_groups(os_groups, self.all_db_items):
            self.all_db_items = db_api.get_items(self.context, 'sg')
            self.items = self.get_db_items()
            os_groups = security_group_engine.get_os_groups(self.context)
        for os_group in os_groups:
            os_group['name'] = _translate_group_name(self.context,
                                                     os_group,
                                                     self.all_db_items)
        return os_groups

    def check_and_repair_default_groups(self, os_groups, db_groups):
        vpcs = ec2utils.get_db_items(self.context, 'vpc', None)
        os_groups_dict = {g['name']: g['id'] for g in os_groups}
        db_groups_dict = {g['os_id']: g['vpc_id'] for g in db_groups}
        had_to_repair = False
        for vpc in vpcs:
            os_group = os_groups_dict.get(vpc['id'])
            if os_group:
                db_group = db_groups_dict.get(os_group)
                if db_group and db_group == vpc['id']:
                    continue
            result = _create_default_security_group(self.context, vpc)
            if result:
                had_to_repair = True
        return had_to_repair

    def is_selected_item(self, context, os_item_name, item):
        if item and item['id'] in self.ids:
            return True
        if os_item_name in self.names:
            if not CONF.disable_ec2_classic:
                return (not item or not item['vpc_id'])
            else:
                return (self.default_vpc_id and item and
                        item['vpc_id'] == self.default_vpc_id)
        return False


def describe_security_groups(context, group_name=None, group_id=None,
                             filter=None):
    default_vpc_id = None
    default_vpc = ec2utils.check_and_create_default_vpc(context)
    if default_vpc:
        default_vpc_id = default_vpc['id']
    formatted_security_groups = SecurityGroupDescriber(
        default_vpc_id).describe(context, group_id, group_name, filter)
    return {'securityGroupInfo': formatted_security_groups}


# TODO(Alex) cidr/ports/protocol/source_group should be possible
# to pass in root set of parameters, not in ip_permissions as now only
# supported, for authorize and revoke functions.
# The new parameters appeared only in the very recent version of AWS doc.
# API version 2014-06-15 didn't claim support of it.

def authorize_security_group_ingress(context, group_id=None,
                                     group_name=None, ip_permissions=None):
    if group_name and not group_id and CONF.disable_ec2_classic:
        sg = describe_security_groups(
            context,
            group_name=[group_name])['securityGroupInfo'][0]
        group_id = sg['groupId']
        group_name = None
    return _authorize_security_group(context, group_id, group_name,
                                     ip_permissions, 'ingress')


def authorize_security_group_egress(context, group_id, ip_permissions=None):
    security_group = ec2utils.get_db_item(context, group_id)
    if not security_group.get('vpc_id'):
        raise exception.InvalidParameterValue(message=_('Only Amazon VPC '
            'security groups may be used with this operation.'))
    return _authorize_security_group(context, group_id, None,
                                     ip_permissions, 'egress')


def _authorize_security_group(context, group_id, group_name,
                              ip_permissions, direction):
    rules_bodies = _build_rules(context, group_id, group_name,
                                ip_permissions, direction)
    for rule_body in rules_bodies:
        security_group_engine.authorize_security_group(context, rule_body)
    return True


def _validate_parameters(protocol, from_port, to_port):
    if (not isinstance(protocol, int) and
            protocol not in ['tcp', 'udp', 'icmp']):
        raise exception.InvalidParameterValue(
            _('Invalid value for IP protocol. Unknown protocol.'))
    if (not isinstance(from_port, int) or
            not isinstance(to_port, int)):
        raise exception.InvalidParameterValue(
            _('Integer values should be specified for ports'))
    if protocol in ['tcp', 'udp', 6, 17]:
        if from_port == -1 or to_port == -1:
            raise exception.InvalidParameterValue(
                _('Must specify both from and to ports with TCP/UDP.'))
        if from_port > to_port:
            raise exception.InvalidParameterValue(
                _('Invalid TCP/UDP port range.'))
        if from_port < 0 or from_port > 65535:
            raise exception.InvalidParameterValue(
                _('TCP/UDP from port is out of range.'))
        if to_port < 0 or to_port > 65535:
            raise exception.InvalidParameterValue(
                _('TCP/UDP to port is out of range.'))
    elif protocol in ['icmp', 1]:
        if from_port < -1 or from_port > 255:
            raise exception.InvalidParameterValue(
                _('ICMP type is out of range.'))
        if to_port < -1 or to_port > 255:
            raise exception.InvalidParameterValue(
                _('ICMP code is out of range.'))


def _build_rules(context, group_id, group_name, ip_permissions, direction):
    if group_name is None and group_id is None:
        raise exception.MissingParameter(param='group id or name')
    if ip_permissions is None:
        raise exception.MissingParameter(param='source group or cidr')
    os_security_group_id = security_group_engine.get_group_os_id(context,
                                                                 group_id,
                                                                 group_name)
    os_security_group_rule_bodies = []
    if ip_permissions is None:
        ip_permissions = []
    for rule in ip_permissions:
        os_security_group_rule_body = (
            {'security_group_id': os_security_group_id,
             'direction': direction,
             'ethertype': 'IPv4'})
        protocol = rule.get('ip_protocol', -1)
        from_port = rule.get('from_port', -1)
        to_port = rule.get('to_port', -1)
        _validate_parameters(protocol, from_port, to_port)
        if protocol != -1:
            os_security_group_rule_body['protocol'] = rule['ip_protocol']
        if from_port != -1:
            os_security_group_rule_body['port_range_min'] = rule['from_port']
        if to_port != -1:
            os_security_group_rule_body['port_range_max'] = rule['to_port']
        # NOTE(Dmitry_Eremeev): Neutron behaviour changed.
        # If rule with full port range is created (1 - 65535), then Neutron
        # creates rule without ports specified.
        # If a rule with full port range must be deleted, then Neutron cannot
        # find a rule with this range in order to delete it, but it can find
        # a rule which has not ports in its properties.
        if ((from_port == 1) and (to_port in [255, 65535])):
            for item in ['port_range_min', 'port_range_max']:
                del os_security_group_rule_body[item]
        # TODO(Alex) AWS protocol claims support of multiple groups and cidrs,
        # however, neutron doesn't support it at the moment.
        # It's possible in the future to convert list values incoming from
        # REST API into several neutron rules and squeeze them back into one
        # for describing.
        # For now only 1 value is supported for either.
        if rule.get('groups'):
            os_security_group_rule_body['remote_group_id'] = (
                security_group_engine.get_group_os_id(
                    context,
                    rule['groups'][0].get('group_id'),
                    rule['groups'][0].get('group_name')))
        elif rule.get('ip_ranges'):
            os_security_group_rule_body['remote_ip_prefix'] = (
                rule['ip_ranges'][0]['cidr_ip'])
            validator.validate_cidr_with_ipv6(
                os_security_group_rule_body['remote_ip_prefix'], 'cidr_ip')
        else:
            raise exception.MissingParameter(param='source group or cidr')
        os_security_group_rule_bodies.append(os_security_group_rule_body)
    return os_security_group_rule_bodies


def revoke_security_group_ingress(context, group_id=None,
                                  group_name=None, ip_permissions=None):
    return _revoke_security_group(context, group_id, group_name,
                                  ip_permissions, 'ingress')


def revoke_security_group_egress(context, group_id, ip_permissions=None):
    security_group = ec2utils.get_db_item(context, group_id)
    if not security_group.get('vpc_id'):
        raise exception.InvalidParameterValue(message=_('Only Amazon VPC '
            'security groups may be used with this operation.'))
    return _revoke_security_group(context, group_id, None,
                                  ip_permissions, 'egress')


def _are_identical_rules(rule1, rule2):

    def significant_values(rule):
        dict = {}
        for key, value in rule.items():
            if (value is not None and value != -1 and
                    value != '0.0.0.0/0' and
                    key not in ['id', 'tenant_id', 'security_group_id', 'tags',
                                'description', 'revision', 'revision_number',
                                'created_at', 'updated_at', 'project_id']):
                dict[key] = str(value)
        return dict

    r1 = significant_values(rule1)
    r2 = significant_values(rule2)
    return r1 == r2


def _revoke_security_group(context, group_id, group_name, ip_permissions,
                           direction):
    rules_bodies = _build_rules(context, group_id, group_name,
                                ip_permissions, direction)
    if not rules_bodies:
        return True
    os_rules = security_group_engine.get_os_group_rules(
        context, rules_bodies[0]['security_group_id'])

    os_rules_to_delete = []
    for rule_body in rules_bodies:
        for os_rule in os_rules:
            if _are_identical_rules(rule_body, os_rule):
                os_rules_to_delete.append(os_rule['id'])

    if len(os_rules_to_delete) != len(rules_bodies):
        security_group = ec2utils.get_db_item(context, group_id)
        if security_group.get('vpc_id'):
            raise exception.InvalidPermissionNotFound()
        return True
    for os_rule_id in os_rules_to_delete:
        security_group_engine.delete_os_group_rule(context, os_rule_id)
    return True


def _translate_group_name(context, os_group, db_groups):
    # NOTE(Alex): This function translates VPC default group names
    # from vpc id 'vpc-xxxxxxxx' format to 'default'. It's supposed
    # to be called right after getting security groups from OpenStack
    # in order to avoid problems with incoming 'default' name value
    # in all of the subsequent handling (filtering, using in parameters...)
    if os_group['name'].startswith('vpc-') and db_groups:
        db_group = next((g for g in db_groups
                         if g['os_id'] == os_group['id']), None)
        if db_group and db_group.get('vpc_id'):
            return DEFAULT_GROUP_NAME
    return os_group['name']


def _format_security_groups_ids_names(context):
    neutron = clients.neutron(context)
    os_security_groups = neutron.list_security_groups(
        tenant_id=context.project_id)['security_groups']
    security_groups = db_api.get_items(context, 'sg')
    ec2_security_groups = {}
    for os_security_group in os_security_groups:
        security_group = next((g for g in security_groups
                               if g['os_id'] == os_security_group['id']), None)
        if security_group is None:
            continue
        ec2_security_groups[os_security_group['id']] = (
            {'groupId': security_group['id'],
             'groupName': _translate_group_name(context,
                                                os_security_group,
                                                security_groups)})
    return ec2_security_groups


def _format_security_group(security_group, os_security_group,
                           security_groups, os_security_groups):
    ec2_security_group = {}
    ec2_security_group['groupId'] = security_group['id']
    if security_group.get('vpc_id'):
        ec2_security_group['vpcId'] = security_group['vpc_id']
    ec2_security_group['ownerId'] = os_security_group['tenant_id']
    ec2_security_group['groupName'] = os_security_group['name']
    ec2_security_group['groupDescription'] = os_security_group['description']
    ingress_permissions = []
    egress_permissions = []
    for os_rule in os_security_group.get('security_group_rules', []):
        # NOTE(Alex) We're skipping IPv6 rules because AWS doesn't support
        # them.
        if os_rule.get('ethertype', 'IPv4') == 'IPv6':
            continue
        # NOTE(Dmitry_Eremeev): Neutron behaviour changed.
        # If rule with full port range (except icmp protocol) is created
        # (1 - 65535), then Neutron creates rule without ports specified.
        # Ports passed for rule creation don't match ports in created rule.
        # That's why default values were changed to match full port
        # range (1 - 65535)
        if os_rule.get('protocol') in ["icmp", 1]:
            min_port = max_port = -1
        else:
            min_port = 1
            max_port = 65535
        ec2_rule = {'ipProtocol': -1 if os_rule['protocol'] is None
                    else os_rule['protocol'],
                    'fromPort': min_port if os_rule['port_range_min'] is None
                    else os_rule['port_range_min'],
                    'toPort': max_port if os_rule['port_range_max'] is None
                    else os_rule['port_range_max']}
        remote_group_id = os_rule['remote_group_id']
        if remote_group_id is not None:
            ec2_remote_group = {}
            db_remote_group = next((g for g in security_groups
                                    if g['os_id'] == remote_group_id), None)
            if db_remote_group is not None:
                ec2_remote_group['groupId'] = db_remote_group['id']
            else:
                # TODO(Alex) Log absence of remote_group
                pass
            os_remote_group = next((g for g in os_security_groups
                                    if g['id'] == remote_group_id), None)
            if os_remote_group is not None:
                ec2_remote_group['groupName'] = os_remote_group['name']
                ec2_remote_group['userId'] = os_remote_group['tenant_id']
            else:
                # TODO(Alex) Log absence of remote_group
                pass
            ec2_rule['groups'] = [ec2_remote_group]
        elif os_rule['remote_ip_prefix'] is not None:
            ec2_rule['ipRanges'] = [{'cidrIp': os_rule['remote_ip_prefix']}]
        if os_rule.get('direction') == 'egress':
            egress_permissions.append(ec2_rule)
        else:
            if security_group is None and os_rule['protocol'] is None:
                for protocol, min_port, max_port in (('icmp', -1, -1),
                                                     ('tcp', 1, 65535),
                                                     ('udp', 1, 65535)):
                    ec2_rule['ipProtocol'] = protocol
                    ec2_rule['fromPort'] = min_port
                    ec2_rule['toPort'] = max_port
                    ingress_permissions.append(copy.deepcopy(ec2_rule))
            else:
                ingress_permissions.append(ec2_rule)

    ec2_security_group['ipPermissions'] = ingress_permissions
    ec2_security_group['ipPermissionsEgress'] = egress_permissions
    return ec2_security_group


class SecurityGroupEngineNeutron(object):

    def delete_group(self, context, group_name=None, group_id=None,
                     delete_default=False):
        neutron = clients.neutron(context)
        if group_name:
            sg = describe_security_groups(
                context,
                group_name=[group_name])['securityGroupInfo'][0]
            group_id = sg['groupId']
            group_name = None

        security_group = ec2utils.get_db_item(context, group_id)
        try:
            if not delete_default:
                os_security_group = neutron.show_security_group(
                    security_group['os_id'])
                if (os_security_group and
                        os_security_group['security_group']['name'] ==
                        security_group['vpc_id']):
                    raise exception.CannotDelete()
            neutron.delete_security_group(security_group['os_id'])
        except neutron_exception.Conflict as ex:
            # TODO(Alex): Instance ID is unknown here, report exception message
            # in its place - looks readable.
            raise exception.DependencyViolation(
                obj1_id=group_id,
                obj2_id=ex.message)
        except neutron_exception.NeutronClientException as ex:
            # TODO(Alex): do log error
            # TODO(Alex): adjust caught exception classes to catch:
            # the port doesn't exist
            pass
        db_api.delete_item(context, group_id)

    def get_os_groups(self, context):
        neutron = clients.neutron(context)
        return neutron.list_security_groups(
            tenant_id=context.project_id)['security_groups']

    def authorize_security_group(self, context, rule_body):
        neutron = clients.neutron(context)
        try:
            os_security_group_rule = neutron.create_security_group_rule(
                {'security_group_rule': rule_body})['security_group_rule']
        except neutron_exception.OverQuotaClient:
            raise exception.RulesPerSecurityGroupLimitExceeded()
        except neutron_exception.Conflict as ex:
            raise exception.InvalidPermissionDuplicate()

    def get_os_group_rules(self, context, os_id):
        neutron = clients.neutron(context)
        os_security_group = (
            neutron.show_security_group(os_id)['security_group'])
        return os_security_group.get('security_group_rules')

    def delete_os_group_rule(self, context, os_id):
        neutron = clients.neutron(context)
        neutron.delete_security_group_rule(os_id)

    def get_group_os_id(self, context, group_id, group_name):
        if group_name and not group_id:
            os_group = self.get_os_group_by_name(context, group_name)
            return str(os_group['id'])
        return ec2utils.get_db_item(context, group_id, 'sg')['os_id']

    def get_os_group_by_name(self, context, group_name,
                             os_security_groups=None):
        if os_security_groups is None:
            neutron = clients.neutron(context)
            os_security_groups = (
                neutron.list_security_groups()['security_groups'])
        os_group = next((g for g in os_security_groups
                         if g['name'] == group_name), None)
        if os_group is None:
            raise exception.InvalidGroupNotFound(id=group_name)
        return os_group


security_group_engine = get_security_group_engine()
@ -1,155 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cinderclient import exceptions as cinder_exception

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


"""Snapshot related API implementation
"""


Validator = common.Validator


def create_snapshot(context, volume_id, description=None):
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    # NOTE(ft): Easy fix to allow snapshot creation in statuses other than
    # AVAILABLE without cinder modifications. Potential race condition
    # though. Seems arguably non-fatal.
    if os_volume.status not in ['available', 'in-use',
                                'attaching', 'detaching']:
        msg = (_("'%s' is not in a state where snapshots are allowed.") %
               volume_id)
        raise exception.IncorrectState(reason=msg)
    with common.OnCrashCleaner() as cleaner:
        os_snapshot = cinder.volume_snapshots.create(os_volume.id, True)
        cleaner.addCleanup(os_snapshot.delete)
        snapshot = db_api.add_item(context, 'snap', {'os_id': os_snapshot.id})
        cleaner.addCleanup(db_api.delete_item, context, snapshot['id'])
        os_snapshot.update(display_name=snapshot['id'],
                           display_description=description)
        # NOTE(andrey-mp): to re-read description in version dependent format
        os_snapshot.get()

    return _format_snapshot(context, snapshot, os_snapshot,
                            volume_id=volume_id)


def delete_snapshot(context, snapshot_id):
    snapshot = ec2utils.get_db_item(context, snapshot_id)
    cinder = clients.cinder(context)
    try:
        cinder.volume_snapshots.delete(snapshot['os_id'])
    except cinder_exception.NotFound:
        pass
    # NOTE(andrey-mp) Don't delete item from DB until it disappears from Cloud
    # It will be deleted by describer in the future
    return True


class SnapshotDescriber(common.TaggableItemsDescriber):

    KIND = 'snap'
    SORT_KEY = 'snapshotId'
    FILTER_MAP = {'description': 'description',
                  'owner-id': 'ownerId',
                  'progress': 'progress',
                  'snapshot-id': 'snapshotId',
                  'start-time': 'startTime',
                  'status': 'status',
                  'volume-id': 'volumeId',
                  'volume-size': 'volumeSize'}

    def format(self, snapshot, os_snapshot):
        return _format_snapshot(self.context, snapshot, os_snapshot,
                                self.volumes)

    def get_db_items(self):
        self.volumes = {vol['os_id']: vol
                        for vol in db_api.get_items(self.context, 'vol')}
        return super(SnapshotDescriber, self).get_db_items()

    def get_os_items(self):
        return clients.cinder(self.context).volume_snapshots.list()

    def get_name(self, os_item):
        return ''


def describe_snapshots(context, snapshot_id=None, owner=None,
                       restorable_by=None, filter=None,
                       max_results=None, next_token=None):
    if snapshot_id and max_results:
        msg = _('The parameter snapshotSet cannot be used with the parameter '
                'maxResults')
        raise exception.InvalidParameterCombination(msg)

    snapshot_describer = SnapshotDescriber()
    formatted_snapshots = snapshot_describer.describe(
        context, ids=snapshot_id, filter=filter,
        max_results=max_results, next_token=next_token)
    result = {'snapshotSet': formatted_snapshots}
    if snapshot_describer.next_token:
        result['nextToken'] = snapshot_describer.next_token
    return result


def _format_snapshot(context, snapshot, os_snapshot, volumes={},
                     volume_id=None):
    # NOTE(mikal): this is just a set of strings in cinder. If they
    # implement an enum, then we should move this code to use it. The
    # valid ec2 statuses are "pending", "completed", and "error".
    status_map = {'new': 'pending',
                  'creating': 'pending',
                  'available': 'completed',
                  'active': 'completed',
                  'deleting': 'pending',
                  'deleted': None,
                  'error': 'error'}

    mapped_status = status_map.get(os_snapshot.status, os_snapshot.status)
    if not mapped_status:
        return None

    if not volume_id and os_snapshot.volume_id:
        volume = ec2utils.get_db_item_by_os_id(
            context, 'vol', os_snapshot.volume_id, volumes)
        volume_id = volume['id']

    # NOTE(andrey-mp): ownerId and progress are empty in just created snapshot
    ownerId = os_snapshot.project_id
    if not ownerId:
        ownerId = context.project_id
    progress = os_snapshot.progress
    if not progress:
        progress = '0%'
    description = (getattr(os_snapshot, 'description', None) or
                   getattr(os_snapshot, 'display_description', None))
    return {'snapshotId': snapshot['id'],
            'volumeId': volume_id,
            'status': mapped_status,
            'startTime': os_snapshot.created_at,
            'progress': progress,
            'ownerId': ownerId,
            'volumeSize': os_snapshot.size,
            'description': description}
@ -1,209 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import netaddr
from neutronclient.common import exceptions as neutron_exception
from oslo_config import cfg
from oslo_log import log as logging

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.api import network_interface as network_interface_api
from ec2api.api import route_table as route_table_api
from ec2api.api import vpn_gateway as vpn_gateway_api
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


"""Subnet related API implementation
"""


Validator = common.Validator


def create_subnet(context, vpc_id, cidr_block,
                  availability_zone=None):
    vpc = ec2utils.get_db_item(context, vpc_id)
    vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
    subnet_ipnet = netaddr.IPNetwork(cidr_block)
    if subnet_ipnet not in vpc_ipnet:
        raise exception.InvalidSubnetRange(cidr_block=cidr_block)

    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    (host_routes,
     gateway_ip) = route_table_api._get_subnet_host_routes_and_gateway_ip(
        context, main_route_table, cidr_block)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        # NOTE(andrey-mp): set fake name to filter networks in instance api
        os_network_body = {'network': {'name': 'subnet-0'}}
        try:
            os_network = neutron.create_network(os_network_body)['network']
            cleaner.addCleanup(neutron.delete_network, os_network['id'])
            # NOTE(Alex): AWS takes 4 first addresses (.1 - .4) but for
            # OpenStack we decided not to support this as compatibility.
            os_subnet_body = {'subnet': {'network_id': os_network['id'],
                                         'ip_version': '4',
                                         'cidr': cidr_block,
                                         'host_routes': host_routes}}
            os_subnet = neutron.create_subnet(os_subnet_body)['subnet']
            cleaner.addCleanup(neutron.delete_subnet, os_subnet['id'])
        except neutron_exception.OverQuotaClient:
            raise exception.SubnetLimitExceeded()
        try:
            neutron.add_interface_router(vpc['os_id'],
                                         {'subnet_id': os_subnet['id']})
        except neutron_exception.BadRequest:
            raise exception.InvalidSubnetConflict(cidr_block=cidr_block)
        cleaner.addCleanup(neutron.remove_interface_router,
                           vpc['os_id'], {'subnet_id': os_subnet['id']})
        subnet = db_api.add_item(context, 'subnet',
                                 {'os_id': os_subnet['id'],
                                  'vpc_id': vpc['id']})
        cleaner.addCleanup(db_api.delete_item, context, subnet['id'])
        vpn_gateway_api._start_vpn_in_subnet(context, neutron, cleaner,
                                             subnet, vpc, main_route_table)
        neutron.update_network(os_network['id'],
                               {'network': {'name': subnet['id']}})
        # NOTE(ft): In some cases we need gateway_ip to be None (see
        # _get_subnet_host_routes_and_gateway_ip). It's not set during subnet
        # creation to allow automatic configuration of the default port by
        # which subnet is attached to the router.
        neutron.update_subnet(os_subnet['id'],
                              {'subnet': {'name': subnet['id'],
                                          'gateway_ip': gateway_ip}})
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    return {'subnet': _format_subnet(context, subnet, os_subnet,
                                     os_network, os_ports)}


def delete_subnet(context, subnet_id):
    subnet = ec2utils.get_db_item(context, subnet_id)
    vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
    network_interfaces = network_interface_api.describe_network_interfaces(
        context,
        filter=[{'name': 'subnet-id',
                 'value': [subnet_id]}])['networkInterfaceSet']
    if network_interfaces:
        msg = _("The subnet '%(subnet_id)s' has dependencies and "
                "cannot be deleted.") % {'subnet_id': subnet_id}
        raise exception.DependencyViolation(msg)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, subnet['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'subnet', subnet)
        vpn_gateway_api._stop_vpn_in_subnet(context, neutron, cleaner, subnet)
        try:
            neutron.remove_interface_router(vpc['os_id'],
                                            {'subnet_id': subnet['os_id']})
        except neutron_exception.NotFound:
            pass
        cleaner.addCleanup(neutron.add_interface_router,
                           vpc['os_id'],
                           {'subnet_id': subnet['os_id']})
        try:
            os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
        except neutron_exception.NotFound:
            pass
        else:
            try:
                neutron.delete_network(os_subnet['network_id'])
            except neutron_exception.NetworkInUseClient as ex:
                LOG.warning('Failed to delete network %(os_id)s during '
                            'deleting Subnet %(id)s. Reason: %(reason)s',
                            {'id': subnet['id'],
                             'os_id': os_subnet['network_id'],
                             'reason': ex.message})

    return True


class SubnetDescriber(common.TaggableItemsDescriber):

    KIND = 'subnet'
    FILTER_MAP = {'available-ip-address-count': 'availableIpAddressCount',
                  'cidr': 'cidrBlock',
                  'cidrBlock': 'cidrBlock',
                  'cidr-block': 'cidrBlock',
                  'subnet-id': 'subnetId',
                  'state': 'state',
                  'vpc-id': 'vpcId'}

    def format(self, subnet, os_subnet):
        if not subnet:
            return None
        os_network = next((n for n in self.os_networks
                           if n['id'] == os_subnet['network_id']),
                          None)
        if not os_network:
            self.delete_obsolete_item(subnet)
            return None
        return _format_subnet(self.context, subnet, os_subnet, os_network,
                              self.os_ports)

    def get_name(self, os_item):
        return ''

    def get_os_items(self):
        neutron = clients.neutron(self.context)
        self.os_networks = neutron.list_networks(
            tenant_id=self.context.project_id)['networks']
        self.os_ports = neutron.list_ports(
            tenant_id=self.context.project_id)['ports']
        return neutron.list_subnets(
            tenant_id=self.context.project_id)['subnets']


def describe_subnets(context, subnet_id=None, filter=None):
    ec2utils.check_and_create_default_vpc(context)
    formatted_subnets = SubnetDescriber().describe(context, ids=subnet_id,
                                                   filter=filter)
    return {'subnetSet': formatted_subnets}


def _format_subnet(context, subnet, os_subnet, os_network, os_ports):
    status_map = {'ACTIVE': 'available',
                  'BUILD': 'pending',
                  'DOWN': 'available',
                  'ERROR': 'available'}
    cidr_range = int(os_subnet['cidr'].split('/')[1])
    # NOTE(Alex) First and last IP addresses are system ones.
    ip_count = pow(2, 32 - cidr_range) - 2
    # TODO(Alex): Probably performance-killer. Will have to optimize.
    service_ports = ['network:dhcp', 'network:distributed']
    service_port_accounted = False
    for port in os_ports:
        for fixed_ip in port.get('fixed_ips', []):
            if fixed_ip['subnet_id'] == os_subnet['id']:
                ip_count -= 1
                if port['device_owner'] in service_ports:
                    service_port_accounted = True
    if not service_port_accounted:
        ip_count -= 1
    return {
        'subnetId': subnet['id'],
        'state': status_map.get(os_network['status'], 'available'),
        'vpcId': subnet['vpc_id'],
        'cidrBlock': os_subnet['cidr'],
        'defaultForAz': 'false',
        'mapPublicIpOnLaunch': 'false',
        'availableIpAddressCount': ip_count
    }
@ -1,126 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


"""Tag related API implementation
"""


Validator = common.Validator


RESOURCE_TYPES = {
    'dopt': 'dhcp-options',
    'ami': 'image',
    'aki': 'image',
    'ari': 'image',
    'cgw': 'customer-gateway',
    'i': 'instance',
    'igw': 'internet-gateway',
    'eni': 'network-interface',
    'rtb': 'route-table',
    'snap': 'snapshot',
    'subnet': 'subnet',
    'sg': 'security-group',
    'vgw': 'vpn-gateway',
    'vol': 'volume',
    'vpc': 'vpc',
    'vpn': 'vpn-connection',
}


def create_tags(context, resource_id, tag):
    reason = None
    for tag_pair in tag:
        if not tag_pair.get('key'):
            reason = _('Not empty key must be present')
        elif len(tag_pair['key']) > 127:
            reason = _('Tag key exceeds the maximum length of 127 characters')
        elif tag_pair['key'].startswith('aws:'):
            reason = _("Tag keys starting with 'aws:' are reserved for "
                       "internal use")
        elif 'value' not in tag_pair:
            reason = _('Value must be present')
        elif len(tag_pair['value']) > 255:
            reason = _('Tag value exceeds the maximum length of 255 '
                       'characters')
        if reason:
            raise exception.InvalidParameterValue(
                parameter='Tag', value=str(tag_pair), reason=reason)

    for item_id in resource_id:
        kind = ec2utils.get_ec2_id_kind(item_id)
        if kind not in RESOURCE_TYPES:
            raise exception.InvalidID(id=item_id)
        # NOTE(ft): check items exist (excluding images because AWS allows to
        # create a tag with any image id)
        if kind not in ('ami', 'ari', 'aki'):
            ec2utils.get_db_item(context, item_id)

    tags = [dict(item_id=item_id,
                 key=tag_pair['key'],
                 value=tag_pair['value'])
            for item_id in resource_id
            for tag_pair in tag]

    db_api.add_tags(context, tags)
    return True


def delete_tags(context, resource_id, tag=None):
    db_api.delete_tags(context, resource_id, tag)
    return True


class TagDescriber(common.NonOpenstackItemsDescriber):

    SORT_KEY = 'key'
    FILTER_MAP = {'key': 'key',
                  'tag-key': 'key',
                  'resource-id': 'resourceId',
                  'resource-type': 'resourceType',
                  'value': 'value',
                  'tag-value': 'value'}

    def get_db_items(self):
        return db_api.get_tags(self.context)

    def format(self, item):
        return _format_tag(item)


def describe_tags(context, filter=None, max_results=None, next_token=None):
    tag_describer = TagDescriber()
    formatted_tags = tag_describer.describe(
        context, filter=filter, max_results=max_results, next_token=next_token)
    result = {'tagSet': formatted_tags}
    if tag_describer.next_token:
        result['nextToken'] = tag_describer.next_token
    return result


def _format_tag(tag):
    kind = ec2utils.get_ec2_id_kind(tag['item_id'])
    return {
        'resourceType': RESOURCE_TYPES.get(kind, kind),
        'resourceId': tag['item_id'],
        'key': tag['key'],
        'value': tag['value'],
    }
@ -1,230 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

import netaddr
from oslo_log import log as logging

from ec2api import exception
from ec2api.i18n import _


LOG = logging.getLogger(__name__)


def validate_str(val, parameter_name, max_length=None):
    if (isinstance(val, str) and
            (max_length is None or max_length and len(val) <= max_length)):
        return True
    raise exception.ValidationError(
        reason=_("%s should not be greater "
                 "than 255 characters.") % parameter_name)


def validate_bool(val, parameter_name):
    if isinstance(val, bool):
        return True
    raise exception.ValidationError(
        reason=_("Expected a boolean value for parameter %s") % parameter_name)


def validate_int(val, parameter_name):
    if isinstance(val, int):
        return True
    raise exception.ValidationError(
        reason=(_("Expected an integer value for parameter %s") %
                parameter_name))


def validate_list(items, parameter_name):
    if not isinstance(items, list):
        raise exception.InvalidParameterValue(
            value=items,
            parameter=parameter_name,
            reason='Expected a list here')


def _is_valid_cidr(address):
    """Check if address is valid

    The provided address can be a IPv6 or a IPv4
    CIDR address.
    """
    try:
        # Validate the correct CIDR Address
        netaddr.IPNetwork(address)
    except netaddr.core.AddrFormatError:
        return False
    except UnboundLocalError:
        # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in
        # https://github.com/drkjam/netaddr/issues/2)
        return False

    # Prior validation partially verify /xx part
    # Verify it here
    ip_segment = address.split('/')

    if (len(ip_segment) <= 1 or
            ip_segment[1] == ''):
        return False

    return True


def validate_cidr_with_ipv6(cidr, parameter_name, **kwargs):
    invalid_format_exception = exception.InvalidParameterValue(
        value=cidr,
        parameter=parameter_name,
        reason='This is not a valid CIDR block.')
    if not _is_valid_cidr(cidr):
        raise invalid_format_exception
    return True


_cidr_re = re.compile(r"^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$")


def validate_cidr(cidr, parameter_name):
    invalid_format_exception = exception.InvalidParameterValue(
        value=cidr,
        parameter=parameter_name,
        reason='This is not a valid CIDR block.')
    if not _cidr_re.match(cidr):
        raise invalid_format_exception
    address, size = cidr.split("/")
    octets = address.split(".")
    if any(int(octet) > 255 for octet in octets):
        raise invalid_format_exception
    size = int(size)
    if size > 32:
        raise invalid_format_exception
    return True


def _validate_cidr_block(cidr):
    validate_cidr(cidr, 'cidrBlock')
    size = int(cidr.split("/")[-1])
    return size >= 16 and size <= 28


def validate_vpc_cidr(cidr):
    if not _validate_cidr_block(cidr):
        raise exception.InvalidVpcRange(cidr_block=cidr)


def validate_subnet_cidr(cidr):
    if not _validate_cidr_block(cidr):
        raise exception.InvalidSubnetRange(cidr_block=cidr)


# NOTE(Alex) Unfortunately Amazon returns various kinds of error for invalid
# IDs (...ID.Malformed, ...Id.Malformed, ...ID.NotFound, InvalidParameterValue)
# So we decided here to commonize invalid IDs to InvalidParameterValue error.

def validate_ec2_id(val, parameter_name, prefices):
    try:
        prefix, value = val.rsplit('-', 1)
        int(value, 16)
        if not prefices or prefix in prefices:
            return True
    except Exception:
        pass

    if not prefices:
        reason = _('Invalid EC2 id was specified.')
    else:
        reason = _('Expected: %(prefix)s-...') % {'prefix': prefices[0]}
    raise exception.InvalidParameterValue(
        value=val, parameter=parameter_name, reason=reason)


def validate_ec2_association_id(id, parameter_name, action):
    if action == 'DisassociateAddress':
        return validate_ec2_id(['eipassoc'])(id, parameter_name)
    else:
        return validate_ec2_id(['rtbassoc'])(id, parameter_name)


def validate_ipv4(address, parameter_name):
    """Verify that address represents a valid IPv4 address."""
    try:
        if netaddr.valid_ipv4(address):
            return True
    except Exception:
        pass
    raise exception.InvalidParameterValue(
        value=address, parameter=parameter_name,
        reason=_('Not a valid IP address'))


def validate_enum(value, allowed_values, parameter_name, allow_empty=False):
    if value is None and allow_empty or value in allowed_values:
        return True
    raise exception.InvalidParameterValue(
        value=value, parameter=parameter_name,
        reason=_('Invalid parameter value specified'))


def validate_filter(filters):
    for filter in filters:
        if (not filter.get('name') or not filter.get('value') or
                not isinstance(filter['value'], list)):
            raise exception.InvalidFilter()
    return True


def validate_key_value_dict_list(dict_list, parameter_name):
    for dict in dict_list:
        if not dict.get('key') or dict.get('value') is None:
            raise exception.InvalidParameterValue(
                value=dict, parameter=parameter_name,
                reason=_('Expected list of key value dictionaries'))
    return True


def validate_security_group_str(value, parameter_name, vpc_id=None):
    # NOTE(Alex) Amazon accepts any ASCII for EC2 classic;
    # for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*
    if vpc_id:
        allowed = r'^[a-zA-Z0-9\._\-:/\(\)#,@\[\]\+=&;\{\}!\$\*\ ]+$'
    else:
        allowed = r'^[\x20-\x7E]+$'
    msg = ''
    try:
        val = value.strip()
    except AttributeError:
        msg = (_("Security group %s is not a string or unicode") %
               parameter_name)
    if not val:
        msg = _("Security group %s cannot be empty.") % parameter_name
    elif not re.match(allowed, val):
        msg = (_("Specified value for parameter Group%(property)s is "
                 "invalid. Content limited to '%(allowed)s'.") %
               {'allowed': 'allowed',
                'property': parameter_name})
    elif len(val) > 255:
        msg = _("Security group %s should not be greater "
                "than 255 characters.") % parameter_name
    if msg:
        raise exception.ValidationError(reason=msg)
    return True


def validate_vpn_connection_type(value):
    if value != 'ipsec.1':
        raise exception.InvalidParameterValue(
            value=type, parameter='type',
            reason=_('Invalid VPN connection type.'))
    return True
@ -1,252 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cinderclient import exceptions as cinder_exception
from novaclient import exceptions as nova_exception
from oslo_log import log as logging

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api import clients
from ec2api import context as ec2_context
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


LOG = logging.getLogger(__name__)


"""Volume related API implementation
"""


Validator = common.Validator


def create_volume(context, availability_zone=None, size=None,
                  snapshot_id=None, volume_type=None, iops=None,
                  encrypted=None, kms_key_id=None, client_token=None):

    if client_token:
        result = describe_volumes(context,
                                  filter=[{'name': 'client-token',
                                           'value': [client_token]}])
        if result['volumeSet']:
            if len(result['volumeSet']) > 1:
                LOG.error('describe_volumes returns %s '
                          'volumes, but 1 is expected.',
                          len(result['volumeSet']))
                LOG.error('Requested client token: %s', client_token)
                LOG.error('Result: %s', result)
            return result['volumeSet'][0]

    if snapshot_id is not None:
        snapshot = ec2utils.get_db_item(context, snapshot_id)
        os_snapshot_id = snapshot['os_id']
    else:
        os_snapshot_id = None

    cinder = clients.cinder(context)
    with common.OnCrashCleaner() as cleaner:
        os_volume = cinder.volumes.create(
            size, snapshot_id=os_snapshot_id, volume_type=volume_type,
            availability_zone=availability_zone)
        cleaner.addCleanup(os_volume.delete)

        volume = db_api.add_item(context, 'vol', {'os_id': os_volume.id})
        cleaner.addCleanup(db_api.delete_item, context, volume['id'])
        os_volume.update(display_name=volume['id'])

    return _format_volume(context, volume, os_volume, snapshot_id=snapshot_id)


def attach_volume(context, volume_id, instance_id, device):
    volume = ec2utils.get_db_item(context, volume_id)
    instance = ec2utils.get_db_item(context, instance_id)

    nova = clients.nova(context)
    try:
        nova.volumes.create_server_volume(instance['os_id'], volume['os_id'],
                                          device)
    except (nova_exception.Conflict, nova_exception.BadRequest):
        # TODO(andrey-mp): raise correct errors for different cases
        LOG.exception('Attach has failed.')
        raise exception.UnsupportedOperation()
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    attachment = _format_attachment(context, volume, os_volume,
                                    instance_id=instance_id)
    # NOTE(andrey-mp): nova sets deleteOnTermination=False for attached volume
    attachment['deleteOnTermination'] = False
    return attachment


def detach_volume(context, volume_id, instance_id=None, device=None,
                  force=None):
    volume = ec2utils.get_db_item(context, volume_id)

    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    os_instance_id = next(iter(os_volume.attachments), {}).get('server_id')
    if not os_instance_id:
        # TODO(ft): Change the message with the real AWS message
        reason = _('Volume %(vol_id)s is not attached to anything')
        raise exception.IncorrectState(reason=reason % {'vol_id': volume_id})

    nova = clients.nova(context)
    nova.volumes.delete_server_volume(os_instance_id, os_volume.id)
    os_volume.get()
    instance_id = next((i['id'] for i in db_api.get_items(context, 'i')
                        if i['os_id'] == os_instance_id), None)
    return _format_attachment(context, volume, os_volume,
                              instance_id=instance_id)


def delete_volume(context, volume_id):
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    try:
        cinder.volumes.delete(volume['os_id'])
    except cinder_exception.BadRequest:
        # TODO(andrey-mp): raise correct errors for different cases
        raise exception.UnsupportedOperation()
    except cinder_exception.NotFound:
        pass
    # NOTE(andrey-mp) Don't delete item from DB until it disappears from Cloud
    # It will be deleted by describer in the future
    return True


class VolumeDescriber(common.TaggableItemsDescriber):

    KIND = 'vol'
    SORT_KEY = 'volumeId'
    FILTER_MAP = {
        'availability-zone': 'availabilityZone',
        'client-token': 'clientToken',
        'create-time': 'createTime',
        'encrypted': 'encrypted',
        'size': 'size',
        'snapshot-id': 'snapshotId',
        'status': 'status',
        'volume-id': 'volumeId',
        'volume-type': 'volumeType',
        'attachment.delete-on-termination':
            ['attachmentSet', 'deleteOnTermination'],
        'attachment.device': ['attachmentSet', 'device'],
        'attachment.instance-id': ['attachmentSet', 'instanceId'],
        'attachment.status': ['attachmentSet', 'status']}

    def format(self, volume, os_volume):
        return _format_volume(self.context, volume, os_volume,
                              self.instances, self.os_instances,
                              self.snapshots)

    def get_db_items(self):
        self.instances = {i['os_id']: i
                          for i in db_api.get_items(self.context, 'i')}
        self.snapshots = {s['os_id']: s
                          for s in db_api.get_items(self.context, 'snap')}
        return super(VolumeDescriber, self).get_db_items()

    def get_os_items(self):
        nova = clients.nova(ec2_context.get_os_admin_context())
        os_instances = nova.servers.list(
            search_opts={'all_tenants': True,
                         'project_id': self.context.project_id})
        self.os_instances = {i.id: i for i in os_instances}
        return clients.cinder(self.context).volumes.list()

    def get_name(self, os_item):
        return ''


def describe_volumes(context, volume_id=None, filter=None,
                     max_results=None, next_token=None):
    if volume_id and max_results:
        msg = _('The parameter volumeSet cannot be used with the parameter '
                'maxResults')
        raise exception.InvalidParameterCombination(msg)

    volume_describer = VolumeDescriber()
    formatted_volumes = volume_describer.describe(
        context, ids=volume_id, filter=filter,
        max_results=max_results, next_token=next_token)
    result = {'volumeSet': formatted_volumes}
    if volume_describer.next_token:
        result['nextToken'] = volume_describer.next_token
    return result


def _format_volume(context, volume, os_volume, instances={}, os_instances={},
                   snapshots={}, snapshot_id=None):
    valid_ec2_api_volume_status_map = {
        'reserved': 'in-use',
        'attaching': 'in-use',
        'detaching': 'in-use'}

    ec2_volume = {
        'volumeId': volume['id'],
        'status': valid_ec2_api_volume_status_map.get(os_volume.status,
                                                      os_volume.status),
        'size': os_volume.size,
        'availabilityZone': os_volume.availability_zone,
        'createTime': os_volume.created_at,
        'volumeType': os_volume.volume_type,
        'encrypted': os_volume.encrypted,
    }
    if ec2_volume['status'] == 'in-use':
        ec2_volume['attachmentSet'] = (
            [_format_attachment(context, volume, os_volume, instances,
                                os_instances)])
    else:
        ec2_volume['attachmentSet'] = {}
    if snapshot_id is None and os_volume.snapshot_id:
        snapshot = ec2utils.get_db_item_by_os_id(
            context, 'snap', os_volume.snapshot_id, snapshots)
        snapshot_id = snapshot['id']
    ec2_volume['snapshotId'] = snapshot_id

    return ec2_volume


def _format_attachment(context, volume, os_volume, instances={},
|
||||
os_instances={}, instance_id=None):
|
||||
os_attachment = next(iter(os_volume.attachments), {})
|
||||
os_instance_id = os_attachment.get('server_id')
|
||||
if not instance_id and os_instance_id:
|
||||
instance = ec2utils.get_db_item_by_os_id(
|
||||
context, 'i', os_instance_id, instances)
|
||||
instance_id = instance['id']
|
||||
status = os_volume.status
|
||||
if status == 'reserved':
|
||||
status = 'attaching'
|
||||
ec2_attachment = {
|
||||
'device': os_attachment.get('device'),
|
||||
'instanceId': instance_id,
|
||||
'status': (status
|
||||
if status in ('attaching', 'detaching') else
|
||||
'attached' if os_attachment else 'detached'),
|
||||
'volumeId': volume['id']}
|
||||
if os_instance_id in os_instances:
|
||||
os_instance = os_instances[os_instance_id]
|
||||
volumes_attached = getattr(os_instance,
|
||||
'os-extended-volumes:volumes_attached', [])
|
||||
volume_attached = next((va for va in volumes_attached
|
||||
if va['id'] == volume['os_id']), None)
|
||||
if volume_attached and 'delete_on_termination' in volume_attached:
|
||||
ec2_attachment['deleteOnTermination'] = (
|
||||
volume_attached['delete_on_termination'])
|
||||
return ec2_attachment
|
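A minimal sketch (not part of the original module, sample statuses assumed) of how the status map above collapses transient Cinder states into the EC2 'in-use' status while passing any other state through unchanged:

    # Sketch only: mirrors the valid_ec2_api_volume_status_map lookup in _format_volume().
    _STATUS_MAP = {'reserved': 'in-use', 'attaching': 'in-use', 'detaching': 'in-use'}

    def _ec2_status(cinder_status):
        # Unknown Cinder states fall through unchanged.
        return _STATUS_MAP.get(cinder_status, cinder_status)

    assert _ec2_status('attaching') == 'in-use'
    assert _ec2_status('available') == 'available'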
@ -1,200 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from neutronclient.common import exceptions as neutron_exception
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.api import internet_gateway as internet_gateway_api
from ec2api.api import route_table as route_table_api
from ec2api.api import security_group as security_group_api
from ec2api.api import subnet as subnet_api
from ec2api.api import vpn_gateway as vpn_gateway_api
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


CONF = cfg.CONF
LOG = logging.getLogger(__name__)

synchronized = lockutils.synchronized_with_prefix('ec2api-')


"""VPC-object related API implementation
"""


Validator = common.Validator

DEFAULT_VPC_CIDR_BLOCK = '172.31.0.0/16'
DEFAULT_SUBNET_CIDR_BLOCK = '172.31.0.0/20'


def create_vpc(context, cidr_block, instance_tenancy='default'):
    vpc = _create_vpc(context, cidr_block)
    return {'vpc': _format_vpc(vpc)}


def delete_vpc(context, vpc_id):
    vpc = ec2utils.get_db_item(context, vpc_id)
    subnets = subnet_api.describe_subnets(
        context,
        filter=[{'name': 'vpc-id', 'value': [vpc_id]}])['subnetSet']
    internet_gateways = internet_gateway_api.describe_internet_gateways(
        context,
        filter=[{'name': 'attachment.vpc-id',
                 'value': [vpc['id']]}])['internetGatewaySet']
    route_tables = route_table_api.describe_route_tables(
        context,
        filter=[{'name': 'vpc-id', 'value': [vpc['id']]}])['routeTableSet']
    security_groups = security_group_api.describe_security_groups(
        context,
        filter=[{'name': 'vpc-id',
                 'value': [vpc['id']]}])['securityGroupInfo']
    vpn_gateways = vpn_gateway_api.describe_vpn_gateways(
        context,
        filter=[{'name': 'attachment.vpc-id',
                 'value': [vpc['id']]}])['vpnGatewaySet']
    if (subnets or internet_gateways or len(route_tables) > 1 or
            len(security_groups) > 1 or vpn_gateways):
        msg = _("The vpc '%(vpc_id)s' has dependencies and "
                "cannot be deleted.")
        msg = msg % {'vpc_id': vpc['id']}
        raise exception.DependencyViolation(msg)

    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, vpc['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'vpc', vpc)
        route_table_api._delete_route_table(context, vpc['route_table_id'],
                                            cleaner=cleaner)
        if len(security_groups) > 0:
            security_group_api.delete_security_group(
                context, group_id=security_groups[0]['groupId'],
                delete_default=True)
        try:
            neutron.delete_router(vpc['os_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning('Failed to delete router %(os_id)s during deleting '
                        'VPC %(id)s. Reason: %(reason)s',
                        {'id': vpc['id'],
                         'os_id': vpc['os_id'],
                         'reason': ex.message})
        except neutron_exception.NotFound:
            pass

    return True


class VpcDescriber(common.TaggableItemsDescriber,
                   common.NonOpenstackItemsDescriber):

    KIND = 'vpc'
    FILTER_MAP = {'cidr': 'cidrBlock',
                  'dhcp-options-id': 'dhcpOptionsId',
                  'is-default': 'isDefault',
                  'state': 'state',
                  'vpc-id': 'vpcId'}

    def format(self, item=None, os_item=None):
        return _format_vpc(item)


def describe_vpcs(context, vpc_id=None, filter=None):
    _check_and_create_default_vpc(context)
    formatted_vpcs = VpcDescriber().describe(
        context, ids=vpc_id, filter=filter)
    return {'vpcSet': formatted_vpcs}


def _create_vpc(context, cidr_block, is_default=False):
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_router_body = {'router': {}}
        try:
            os_router = neutron.create_router(os_router_body)['router']
        except neutron_exception.OverQuotaClient:
            raise exception.VpcLimitExceeded()
        cleaner.addCleanup(neutron.delete_router, os_router['id'])
        vpc = db_api.add_item(context, 'vpc',
                              {'os_id': os_router['id'],
                               'cidr_block': cidr_block,
                               'is_default': is_default})
        cleaner.addCleanup(db_api.delete_item, context, vpc['id'])
        route_table = route_table_api._create_route_table(context, vpc)
        cleaner.addCleanup(route_table_api._delete_route_table,
                           context, route_table['id'])
        vpc['route_table_id'] = route_table['id']
        db_api.update_item(context, vpc)
        neutron.update_router(os_router['id'], {'router': {'name': vpc['id']}})
        sg_id = security_group_api._create_default_security_group(context, vpc)
        cleaner.addCleanup(security_group_api.delete_security_group, context,
                           group_id=sg_id, delete_default=True)
        if is_default:
            igw_id = internet_gateway_api.create_internet_gateway(
                context)['internetGateway']['internetGatewayId']
            cleaner.addCleanup(internet_gateway_api.delete_internet_gateway,
                               context, igw_id)
            internet_gateway_api.attach_internet_gateway(context, igw_id,
                                                         vpc['id'])
            cleaner.addCleanup(internet_gateway_api.detach_internet_gateway,
                               context, igw_id, vpc['id'])
            subnet = subnet_api.create_subnet(
                context, vpc['id'],
                DEFAULT_SUBNET_CIDR_BLOCK)['subnet']
            cleaner.addCleanup(subnet_api.delete_subnet, context,
                               subnet['subnetId'])
            route_table_api.create_route(context, route_table['id'],
                                         '0.0.0.0/0', gateway_id=igw_id)
    return vpc


def _check_and_create_default_vpc(context):
    if not CONF.disable_ec2_classic or context.is_os_admin:
        return

    lock_name = 'default-vpc-lock-{}-'.format(context.project_id)

    @synchronized(lock_name, external=True)
    def _check():
        for vpc in db_api.get_items(context, 'vpc'):
            if vpc.get('is_default'):
                return vpc
        try:
            default_vpc = _create_vpc(context, DEFAULT_VPC_CIDR_BLOCK,
                                      is_default=True)
            return default_vpc
        except Exception:
            LOG.exception('Failed to create default vpc')
        return None

    return _check()


ec2utils.set_check_and_create_default_vpc(_check_and_create_default_vpc)


def _format_vpc(vpc):
    return {'vpcId': vpc['id'],
            'state': "available",
            'cidrBlock': vpc['cidr_block'],
            'isDefault': vpc.get('is_default', False),
            'dhcpOptionsId': vpc.get('dhcp_options_id', 'default'),
            }
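A minimal sketch (not from the deleted file; netaddr is already a project dependency) showing that the default subnet CIDR used above nests inside the default VPC CIDR:

    import netaddr

    vpc_cidr = netaddr.IPNetwork('172.31.0.0/16')      # DEFAULT_VPC_CIDR_BLOCK
    subnet_cidr = netaddr.IPNetwork('172.31.0.0/20')   # DEFAULT_SUBNET_CIDR_BLOCK
    assert subnet_cidr in vpc_cidr                     # the /20 is a subset of the /16
    assert subnet_cidr.size == 4096                    # addresses in the default subnet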
@ -1,500 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import string

from lxml import etree
import netaddr
from neutronclient.common import exceptions as neutron_exception
from oslo_log import log as logging

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


LOG = logging.getLogger(__name__)


"""VPN connections related API implementation
"""


Validator = common.Validator


SHARED_KEY_CHARS = string.ascii_letters + '_.' + string.digits
AWS_MSS = 1387
MTU_MSS_DELTA = 40  # 20 byte IP and 20 byte TCP headers


def create_vpn_connection(context, customer_gateway_id, vpn_gateway_id,
                          type, options=None):
    if not options or options.get('static_routes_only') is not True:
        raise exception.Unsupported('BGP dynamic routing is unsupported')
    customer_gateway = ec2utils.get_db_item(context, customer_gateway_id)
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpn_connection = next(
        (vpn for vpn in db_api.get_items(context, 'vpn')
         if vpn['customer_gateway_id'] == customer_gateway_id),
        None)
    if vpn_connection:
        if vpn_connection['vpn_gateway_id'] == vpn_gateway_id:
            ec2_vpn_connections = describe_vpn_connections(
                context, vpn_connection_id=[vpn_connection['id']])
            return {
                'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]}
        else:
            raise exception.InvalidCustomerGatewayDuplicateIpAddress()
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_ikepolicy = {'ike_version': 'v1',
                        'auth_algorithm': 'sha1',
                        'encryption_algorithm': 'aes-128',
                        'pfs': 'group2',
                        'phase1_negotiation_mode': 'main',
                        'lifetime': {'units': 'seconds',
                                     'value': 28800}}
        os_ikepolicy = neutron.create_ikepolicy(
            {'ikepolicy': os_ikepolicy})['ikepolicy']
        cleaner.addCleanup(neutron.delete_ikepolicy, os_ikepolicy['id'])

        os_ipsecpolicy = {'transform_protocol': 'esp',
                          'auth_algorithm': 'sha1',
                          'encryption_algorithm': 'aes-128',
                          'pfs': 'group2',
                          'encapsulation_mode': 'tunnel',
                          'lifetime': {'units': 'seconds',
                                       'value': 3600}}
        os_ipsecpolicy = neutron.create_ipsecpolicy(
            {'ipsecpolicy': os_ipsecpolicy})['ipsecpolicy']
        cleaner.addCleanup(neutron.delete_ipsecpolicy, os_ipsecpolicy['id'])

        psk = ''.join(random.choice(SHARED_KEY_CHARS) for _x in range(32))
        vpn_connection = db_api.add_item(
            context, 'vpn',
            {'customer_gateway_id': customer_gateway['id'],
             'vpn_gateway_id': vpn_gateway['id'],
             'pre_shared_key': psk,
             'os_ikepolicy_id': os_ikepolicy['id'],
             'os_ipsecpolicy_id': os_ipsecpolicy['id'],
             'cidrs': [],
             'os_ipsec_site_connections': {}})
        cleaner.addCleanup(db_api.delete_item, context, vpn_connection['id'])

        neutron.update_ikepolicy(
            os_ikepolicy['id'], {'ikepolicy': {'name': vpn_connection['id']}})
        neutron.update_ipsecpolicy(
            os_ipsecpolicy['id'],
            {'ipsecpolicy': {'name': vpn_connection['id']}})

        _reset_vpn_connections(context, neutron, cleaner,
                               vpn_gateway, vpn_connections=[vpn_connection])

    ec2_vpn_connections = describe_vpn_connections(
        context, vpn_connection_id=[vpn_connection['id']])
    return {
        'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]}


def create_vpn_connection_route(context, vpn_connection_id,
                                destination_cidr_block):
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    if destination_cidr_block in vpn_connection['cidrs']:
        return True
    neutron = clients.neutron(context)
    vpn_gateway = db_api.get_item_by_id(context,
                                        vpn_connection['vpn_gateway_id'])
    with common.OnCrashCleaner() as cleaner:
        _add_cidr_to_vpn_connection_item(context, vpn_connection,
                                         destination_cidr_block)
        cleaner.addCleanup(_remove_cidr_from_vpn_connection_item,
                           context, vpn_connection, destination_cidr_block)

        _reset_vpn_connections(context, neutron, cleaner,
                               vpn_gateway, vpn_connections=[vpn_connection])

    return True


def delete_vpn_connection_route(context, vpn_connection_id,
                                destination_cidr_block):
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    if destination_cidr_block not in vpn_connection['cidrs']:
        raise exception.InvalidRouteNotFound(
            _('The specified route %(destination_cidr_block)s does not exist')
            % {'destination_cidr_block': destination_cidr_block})
    neutron = clients.neutron(context)
    vpn_gateway = db_api.get_item_by_id(context,
                                        vpn_connection['vpn_gateway_id'])
    with common.OnCrashCleaner() as cleaner:
        _remove_cidr_from_vpn_connection_item(context, vpn_connection,
                                              destination_cidr_block)
        cleaner.addCleanup(_add_cidr_to_vpn_connection_item,
                           context, vpn_connection, destination_cidr_block)

        _reset_vpn_connections(context, neutron, cleaner,
                               vpn_gateway, vpn_connections=[vpn_connection])

    return True


def delete_vpn_connection(context, vpn_connection_id):
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, vpn_connection['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'vpn', vpn_connection)
        neutron = clients.neutron(context)
        _stop_vpn_connection(neutron, vpn_connection)
        try:
            neutron.delete_ipsecpolicy(vpn_connection['os_ipsecpolicy_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning('Failed to delete ipsecpolicy %(os_id)s during '
                        'deleting VPN connection %(id)s. Reason: %(reason)s',
                        {'id': vpn_connection['id'],
                         'os_id': vpn_connection['os_ipsecpolicy_id'],
                         'reason': ex.message})
        except neutron_exception.NotFound:
            pass
        try:
            neutron.delete_ikepolicy(vpn_connection['os_ikepolicy_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning(
                'Failed to delete ikepolicy %(os_id)s during deleting '
                'VPN connection %(id)s. Reason: %(reason)s',
                {'id': vpn_connection['id'],
                 'os_id': vpn_connection['os_ikepolicy_id'],
                 'reason': ex.message})
        except neutron_exception.NotFound:
            pass
    return True


def describe_vpn_connections(context, vpn_connection_id=None, filter=None):
    formatted_vpn_connections = VpnConnectionDescriber().describe(
        context, ids=vpn_connection_id, filter=filter)
    return {'vpnConnectionSet': formatted_vpn_connections}


class VpnConnectionDescriber(common.TaggableItemsDescriber,
                             common.NonOpenstackItemsDescriber):

    KIND = 'vpn'
    FILTER_MAP = {'customer-gateway-configuration': (
                      'customerGatewayConfiguration'),
                  'customer-gateway-id': 'customerGatewayId',
                  'state': 'state',
                  'option.static-routes-only': ('options', 'staticRoutesOnly'),
                  'route.destination-cidr-block': ['routes',
                                                   'destinationCidrBlock'],
                  'type': 'type',
                  'vpn-connection-id': 'vpnConnectionId',
                  'vpn-gateway-id': 'vpnGatewayId'}

    def get_db_items(self):
        self.customer_gateways = {
            cgw['id']: cgw
            for cgw in db_api.get_items(self.context, 'cgw')}
        neutron = clients.neutron(self.context)
        self.os_ikepolicies = {
            ike['id']: ike
            for ike in neutron.list_ikepolicies(
                tenant_id=self.context.project_id)['ikepolicies']}
        self.os_ipsecpolicies = {
            ipsec['id']: ipsec
            for ipsec in neutron.list_ipsecpolicies(
                tenant_id=self.context.project_id)['ipsecpolicies']}
        self.os_ipsec_site_connections = {
            conn['id']: conn
            for conn in neutron.list_ipsec_site_connections(
                tenant_id=self.context.project_id)['ipsec_site_connections']}
        self.external_ips = _get_vpn_gateways_external_ips(
            self.context, neutron)
        return super(VpnConnectionDescriber, self).get_db_items()

    def format(self, vpn_connection):
        return _format_vpn_connection(
            vpn_connection, self.customer_gateways, self.os_ikepolicies,
            self.os_ipsecpolicies, self.os_ipsec_site_connections,
            self.external_ips)


def _format_vpn_connection(vpn_connection, customer_gateways, os_ikepolicies,
                           os_ipsecpolicies, os_ipsec_site_connections,
                           external_ips):
    config_dict = _format_customer_config(
        vpn_connection, customer_gateways, os_ikepolicies, os_ipsecpolicies,
        os_ipsec_site_connections, external_ips)
    config = ec2utils.dict_to_xml(config_dict, 'vpn_connection')
    config.attrib['id'] = vpn_connection['id']
    config_str = etree.tostring(config, xml_declaration=True, encoding='UTF-8',
                                pretty_print=True)
    return {'vpnConnectionId': vpn_connection['id'],
            'vpnGatewayId': vpn_connection['vpn_gateway_id'],
            'customerGatewayId': vpn_connection['customer_gateway_id'],
            'state': 'available',
            'type': 'ipsec.1',
            'routes': [{'destinationCidrBlock': cidr,
                        'state': 'available'}
                       for cidr in vpn_connection['cidrs']],
            'vgwTelemetry': [],
            'options': {'staticRoutesOnly': True},
            'customerGatewayConfiguration': config_str}


def _format_customer_config(vpn_connection, customer_gateways, os_ikepolicies,
                            os_ipsecpolicies, os_ipsec_site_connections,
                            external_ips):
    customer_gateway = customer_gateways[vpn_connection['customer_gateway_id']]
    os_connections_ids = vpn_connection['os_ipsec_site_connections'].values()
    if os_connections_ids:
        os_ipsec_site_connection = next(
            (os_ipsec_site_connections[conn_id]
             for conn_id in os_connections_ids
             if os_ipsec_site_connections.get(conn_id)),
            None)
    else:
        os_ipsec_site_connection = None

    # TODO(ft): figure out and add to the output tunnel internal addresses
    config_dict = {
        'customer_gateway_id': vpn_connection['customer_gateway_id'],
        'vpn_gateway_id': vpn_connection['vpn_gateway_id'],
        'vpn_connection_type': 'ipsec.1',
        'vpn_connection_attributes': 'NoBGPVPNConnection',
        'ipsec_tunnel': {
            'customer_gateway': {
                'tunnel_outside_address': {
                    'ip_address': (
                        os_ipsec_site_connection['peer_address']
                        if os_ipsec_site_connection else
                        customer_gateway['ip_address'])}},
            'vpn_gateway': {
                'tunnel_outside_address': {
                    'ip_address': external_ips.get(
                        vpn_connection['vpn_gateway_id'])}}},
    }
    os_ikepolicy = os_ikepolicies.get(vpn_connection['os_ikepolicy_id'])
    if os_ikepolicy:
        config_dict['ipsec_tunnel']['ike'] = {
            'authentication_protocol': os_ikepolicy['auth_algorithm'],
            'encryption_protocol': os_ikepolicy['encryption_algorithm'],
            'lifetime': os_ikepolicy['lifetime']['value'],
            'perfect_forward_secrecy': os_ikepolicy['pfs'],
            'mode': os_ikepolicy['phase1_negotiation_mode'],
            'pre_shared_key': (
                os_ipsec_site_connection['psk']
                if os_ipsec_site_connection else
                vpn_connection['pre_shared_key']),
        }
    os_ipsecpolicy = os_ipsecpolicies.get(vpn_connection['os_ipsecpolicy_id'])
    if os_ipsecpolicy:
        config_dict['ipsec_tunnel']['ipsec'] = {
            'protocol': os_ipsecpolicy['transform_protocol'],
            'authentication_protocol': os_ipsecpolicy['auth_algorithm'],
            'encryption_protocol': os_ipsecpolicy['encryption_algorithm'],
            'lifetime': os_ipsecpolicy['lifetime']['value'],
            'perfect_forward_secrecy': os_ipsecpolicy['pfs'],
            'mode': os_ipsecpolicy['encapsulation_mode'],
            'tcp_mss_adjustment': (
                os_ipsec_site_connection['mtu'] - MTU_MSS_DELTA
                if os_ipsec_site_connection else
                AWS_MSS),
        }
    return config_dict


def _stop_vpn_connection(neutron, vpn_connection):
    connection_ids = vpn_connection['os_ipsec_site_connections']
    for os_connection_id in connection_ids.values():
        try:
            neutron.delete_ipsec_site_connection(os_connection_id)
        except neutron_exception.NotFound:
            pass


def _stop_gateway_vpn_connections(context, neutron, cleaner, vpn_gateway):
    def undo_vpn_connection(context, vpn_connection, connections_ids):
        vpn_connection['os_ipsec_site_connections'] = connections_ids
        db_api.update_item(context, vpn_connection)

    for vpn_connection in db_api.get_items(context, 'vpn'):
        if vpn_connection['vpn_gateway_id'] == vpn_gateway['id']:
            _stop_vpn_connection(neutron, vpn_connection)

            connection_ids = vpn_connection['os_ipsec_site_connections']
            vpn_connection['os_ipsec_site_connections'] = {}
            db_api.update_item(context, vpn_connection)
            cleaner.addCleanup(undo_vpn_connection, context, vpn_connection,
                               connection_ids)


def _update_vpn_routes(context, neutron, cleaner, route_table, subnets):
    vpn_gateway = ec2utils.get_attached_gateway(
        context, route_table['vpc_id'], 'vgw')
    if not vpn_gateway:
        return
    _reset_vpn_connections(context, neutron, cleaner, vpn_gateway,
                           route_tables=[route_table], subnets=subnets)


def _reset_vpn_connections(context, neutron, cleaner, vpn_gateway,
                           subnets=None, route_tables=None,
                           vpn_connections=None):
    if not vpn_gateway['vpc_id']:
        return
    # TODO(ft): implement search filters in DB api
    vpn_connections = (vpn_connections or
                       [vpn for vpn in db_api.get_items(context, 'vpn')
                        if vpn['vpn_gateway_id'] == vpn_gateway['id']])
    if not vpn_connections:
        return
    subnets = (subnets or
               [subnet for subnet in db_api.get_items(context, 'subnet')
                if subnet['vpc_id'] == vpn_gateway['vpc_id']])
    if not subnets:
        return
    vpc = db_api.get_item_by_id(context, vpn_gateway['vpc_id'])
    customer_gateways = {cgw['id']: cgw
                         for cgw in db_api.get_items(context, 'cgw')}
    route_tables = route_tables or db_api.get_items(context, 'rtb')
    route_tables = {rtb['id']: rtb
                    for rtb in route_tables
                    if rtb['vpc_id'] == vpc['id']}
    route_tables_cidrs = {}
    for subnet in subnets:
        route_table_id = subnet.get('route_table_id', vpc['route_table_id'])
        if route_table_id not in route_tables_cidrs:
            route_tables_cidrs[route_table_id] = (
                _get_route_table_vpn_cidrs(route_tables[route_table_id],
                                           vpn_gateway, vpn_connections))
        cidrs = route_tables_cidrs[route_table_id]
        for vpn_conn in vpn_connections:
            if vpn_conn['id'] in cidrs:
                _set_subnet_vpn(
                    context, neutron, cleaner, subnet, vpn_conn,
                    customer_gateways[vpn_conn['customer_gateway_id']],
                    cidrs[vpn_conn['id']])
            else:
                _delete_subnet_vpn(context, neutron, cleaner, subnet, vpn_conn)


def _set_subnet_vpn(context, neutron, cleaner, subnet, vpn_connection,
                    customer_gateway, cidrs):
    subnets_connections = vpn_connection['os_ipsec_site_connections']
    os_connection_id = subnets_connections.get(subnet['id'])
    if os_connection_id:
        # TODO(ft): restore original peer_cidrs on crash
        neutron.update_ipsec_site_connection(
            os_connection_id,
            {'ipsec_site_connection': {'peer_cidrs': cidrs}})
    else:
        os_connection = {
            'vpnservice_id': subnet['os_vpnservice_id'],
            'ikepolicy_id': vpn_connection['os_ikepolicy_id'],
            'ipsecpolicy_id': vpn_connection['os_ipsecpolicy_id'],
            'peer_address': customer_gateway['ip_address'],
            'peer_cidrs': cidrs,
            'psk': vpn_connection['pre_shared_key'],
            'name': '%s/%s' % (vpn_connection['id'], subnet['id']),
            'peer_id': customer_gateway['ip_address'],
            'mtu': AWS_MSS + MTU_MSS_DELTA,
            'initiator': 'response-only',
        }
        os_connection = (neutron.create_ipsec_site_connection(
            {'ipsec_site_connection': os_connection})
            ['ipsec_site_connection'])
        cleaner.addCleanup(neutron.delete_ipsec_site_connection,
                           os_connection['id'])

        _add_subnet_connection_to_vpn_connection_item(
            context, vpn_connection, subnet['id'], os_connection['id'])
        cleaner.addCleanup(_remove_subnet_connection_from_vpn_connection_item,
                           context, vpn_connection, subnet['id'])


def _delete_subnet_vpn(context, neutron, cleaner, subnet, vpn_connection):
    subnets_connections = vpn_connection['os_ipsec_site_connections']
    os_connection_id = subnets_connections.get(subnet['id'])
    if not os_connection_id:
        return

    _remove_subnet_connection_from_vpn_connection_item(
        context, vpn_connection, subnet['id'])
    cleaner.addCleanup(_add_subnet_connection_to_vpn_connection_item,
                       context, vpn_connection, subnet['id'], os_connection_id)
    try:
        neutron.delete_ipsec_site_connection(os_connection_id)
    except neutron_exception.NotFound:
        pass


def _get_route_table_vpn_cidrs(route_table, vpn_gateway, vpn_connections):
    static_cidrs = [route['destination_cidr_block']
                    for route in route_table['routes']
                    if route.get('gateway_id') == vpn_gateway['id']]
    is_propagation_enabled = (
        vpn_gateway['id'] in route_table.get('propagating_gateways', []))
    vpn_cidrs = {}
    for vpn in vpn_connections:
        if is_propagation_enabled:
            cidrs = list(set(static_cidrs + vpn['cidrs']))
        else:
            cidrs = static_cidrs
        if cidrs:
            vpn_cidrs[vpn['id']] = cidrs
    return vpn_cidrs


def _get_vpn_gateways_external_ips(context, neutron):
    vpcs = {vpc['id']: vpc
            for vpc in db_api.get_items(context, 'vpc')}
    external_ips = {}
    routers = neutron.list_routers(
        tenant_id=context.project_id)['routers']
    for router in routers:
        info = router['external_gateway_info']
        if info:
            for ip in info['external_fixed_ips']:
                if netaddr.valid_ipv4(ip['ip_address']):
                    external_ips[router['id']] = ip['ip_address']
    return {vgw['id']: external_ips.get(vpcs[vgw['vpc_id']]['os_id'])
            for vgw in db_api.get_items(context, 'vgw')
            if vgw['vpc_id']}


def _add_cidr_to_vpn_connection_item(context, vpn_connection, cidr):
    vpn_connection['cidrs'].append(cidr)
    db_api.update_item(context, vpn_connection)


def _remove_cidr_from_vpn_connection_item(context, vpn_connection, cidr):
    vpn_connection['cidrs'].remove(cidr)
    db_api.update_item(context, vpn_connection)


def _add_subnet_connection_to_vpn_connection_item(context, vpn_connection,
                                                  subnet_id, os_connection_id):
    vpn_connection['os_ipsec_site_connections'][subnet_id] = os_connection_id
    db_api.update_item(context, vpn_connection)


def _remove_subnet_connection_from_vpn_connection_item(context, vpn_connection,
                                                       subnet_id):
    del vpn_connection['os_ipsec_site_connections'][subnet_id]
    db_api.update_item(context, vpn_connection)
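A minimal sketch (not part of the original module) of the MSS/MTU arithmetic used above when creating ipsec_site_connections and reporting tcp_mss_adjustment:

    AWS_MSS = 1387          # MSS value the module targets for VPN tunnels
    MTU_MSS_DELTA = 40      # 20-byte IP header + 20-byte TCP header

    mtu = AWS_MSS + MTU_MSS_DELTA          # 1427, passed as the Neutron connection MTU
    assert mtu == 1427
    assert mtu - MTU_MSS_DELTA == AWS_MSS  # reported back as tcp_mss_adjustment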
@ -1,225 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from neutronclient.common import exceptions as neutron_exception
from oslo_log import log as logging

from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.api import vpn_connection as vpn_connection_api
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _


LOG = logging.getLogger(__name__)


"""VPN gateways related API implementation
"""


Validator = common.Validator


def create_vpn_gateway(context, type, availability_zone=None):
    vpn_gateway = db_api.add_item(context, 'vgw', {})
    return {'vpnGateway': _format_vpn_gateway(vpn_gateway)}


def attach_vpn_gateway(context, vpc_id, vpn_gateway_id):
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpc = ec2utils.get_db_item(context, vpc_id)
    if vpn_gateway['vpc_id'] and vpn_gateway['vpc_id'] != vpc['id']:
        raise exception.VpnGatewayAttachmentLimitExceeded()
    attached_vgw = ec2utils.get_attached_gateway(context, vpc['id'], 'vgw')
    if attached_vgw and attached_vgw['id'] != vpn_gateway['id']:
        raise exception.InvalidVpcState(vpc_id=vpc['id'],
                                        vgw_id=attached_vgw['id'])

    subnets = [subnet for subnet in db_api.get_items(context, 'subnet')
               if subnet['vpc_id'] == vpc['id']]
    if not vpn_gateway['vpc_id']:
        external_network_id = None
        if not ec2utils.get_attached_gateway(context, vpc['id'], 'igw'):
            external_network_id = ec2utils.get_os_public_network(context)['id']
        neutron = clients.neutron(context)

        with common.OnCrashCleaner() as cleaner:
            _attach_vpn_gateway_item(context, vpn_gateway, vpc['id'])
            cleaner.addCleanup(_detach_vpn_gateway_item, context, vpn_gateway)

            if external_network_id:
                neutron.add_gateway_router(vpc['os_id'],
                                           {'network_id': external_network_id})
                cleaner.addCleanup(neutron.remove_gateway_router, vpc['os_id'])

            for subnet in subnets:
                _create_subnet_vpnservice(context, neutron, cleaner,
                                          subnet, vpc)
            vpn_connection_api._reset_vpn_connections(
                context, neutron, cleaner, vpn_gateway, subnets=subnets)

    return {'attachment': _format_attachment(vpn_gateway)}


def detach_vpn_gateway(context, vpc_id, vpn_gateway_id):
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    if vpn_gateway['vpc_id'] != vpc_id:
        raise exception.InvalidVpnGatewayAttachmentNotFound(
            vgw_id=vpn_gateway_id, vpc_id=vpc_id)

    vpc = db_api.get_item_by_id(context, vpc_id)
    neutron = clients.neutron(context)
    remove_os_gateway_router = (
        ec2utils.get_attached_gateway(context, vpc_id, 'igw') is None)
    subnets = [subnet for subnet in db_api.get_items(context, 'subnet')
               if subnet['vpc_id'] == vpc['id']]
    with common.OnCrashCleaner() as cleaner:
        _detach_vpn_gateway_item(context, vpn_gateway)
        cleaner.addCleanup(_attach_vpn_gateway_item, context, vpn_gateway,
                           vpc_id)
        vpn_connection_api._stop_gateway_vpn_connections(
            context, neutron, cleaner, vpn_gateway)
        for subnet in subnets:
            _delete_subnet_vpnservice(context, neutron, cleaner, subnet)

        if remove_os_gateway_router:
            try:
                neutron.remove_gateway_router(vpc['os_id'])
            except neutron_exception.NotFound:
                pass

    return True


def delete_vpn_gateway(context, vpn_gateway_id):
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpn_connections = db_api.get_items(context, 'vpn')
    if vpn_gateway['vpc_id'] or any(vpn['vpn_gateway_id'] == vpn_gateway['id']
                                    for vpn in vpn_connections):
        raise exception.IncorrectState(reason=_('The VPN gateway is in use.'))
    db_api.delete_item(context, vpn_gateway['id'])
    return True


def describe_vpn_gateways(context, vpn_gateway_id=None, filter=None):
    formatted_vgws = VpnGatewayDescriber().describe(
        context, ids=vpn_gateway_id, filter=filter)
    return {'vpnGatewaySet': formatted_vgws}


class VpnGatewayDescriber(common.TaggableItemsDescriber,
                          common.NonOpenstackItemsDescriber):

    KIND = 'vgw'
    FILTER_MAP = {'attachment.state': ['attachments', 'state'],
                  'attachment.vpc-id': ['attachments', 'vpcId'],
                  'state': 'state',
                  'type': 'type',
                  'vpn-gateway-id': 'vpnGatewayId'}

    def format(self, vpn_gateway):
        return _format_vpn_gateway(vpn_gateway)


def _format_vpn_gateway(vpn_gateway):
    ec2_vgw = {'vpnGatewayId': vpn_gateway['id'],
               'state': 'available',
               'type': 'ipsec.1',
               'attachments': []}
    if vpn_gateway['vpc_id']:
        ec2_vgw['attachments'].append(_format_attachment(vpn_gateway))
    return ec2_vgw


def _format_attachment(vpn_gateway):
    return {'state': 'attached',
            'vpcId': vpn_gateway['vpc_id']}


def _start_vpn_in_subnet(context, neutron, cleaner, subnet, vpc, route_table):
    vpn_gateway = ec2utils.get_attached_gateway(context, vpc['id'], 'vgw')
    if not vpn_gateway:
        return
    _create_subnet_vpnservice(context, neutron, cleaner, subnet, vpc)
    vpn_connection_api._reset_vpn_connections(context, neutron, cleaner,
                                              vpn_gateway, subnets=[subnet],
                                              route_tables=[route_table])


def _stop_vpn_in_subnet(context, neutron, cleaner, subnet):
    os_vpnservice_id = subnet.get('os_vpnservice_id')
    if not os_vpnservice_id:
        return
    for vpn in db_api.get_items(context, 'vpn'):
        vpn_connection_api._delete_subnet_vpn(context, neutron, cleaner,
                                              subnet, vpn)
    _safe_delete_vpnservice(neutron, os_vpnservice_id, subnet['id'])


def _create_subnet_vpnservice(context, neutron, cleaner, subnet, vpc):
    os_vpnservice = {'subnet_id': subnet['os_id'],
                     'router_id': vpc['os_id'],
                     'name': subnet['id']}
    os_vpnservice = neutron.create_vpnservice(
        {'vpnservice': os_vpnservice})['vpnservice']
    cleaner.addCleanup(neutron.delete_vpnservice, os_vpnservice['id'])

    _set_vpnservice_in_subnet_item(context, subnet, os_vpnservice['id'])
    cleaner.addCleanup(_clear_vpnservice_in_subnet_item,
                       context, subnet)


def _delete_subnet_vpnservice(context, neutron, cleaner, subnet):
    os_vpnservice_id = subnet['os_vpnservice_id']
    _clear_vpnservice_in_subnet_item(context, subnet)
    cleaner.addCleanup(_set_vpnservice_in_subnet_item,
                       context, subnet, os_vpnservice_id)
    _safe_delete_vpnservice(neutron, os_vpnservice_id, subnet['id'])


def _safe_delete_vpnservice(neutron, os_vpnservice_id, subnet_id):
    try:
        neutron.delete_vpnservice(os_vpnservice_id)
    except neutron_exception.NotFound:
        pass
    except neutron_exception.Conflict as ex:
        LOG.warning(
            'Failed to delete vpnservice %(os_id)s for subnet %(id)s. '
            'Reason: %(reason)s',
            {'id': subnet_id,
             'os_id': os_vpnservice_id,
             'reason': ex.message})


def _attach_vpn_gateway_item(context, vpn_gateway, vpc_id):
    vpn_gateway['vpc_id'] = vpc_id
    db_api.update_item(context, vpn_gateway)


def _detach_vpn_gateway_item(context, vpn_gateway):
    vpn_gateway['vpc_id'] = None
    db_api.update_item(context, vpn_gateway)


def _set_vpnservice_in_subnet_item(context, subnet, os_vpnservice_id):
    subnet['os_vpnservice_id'] = os_vpnservice_id
    db_api.update_item(context, subnet)


def _clear_vpnservice_in_subnet_item(context, subnet):
    del subnet['os_vpnservice_id']
    db_api.update_item(context, subnet)
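A minimal sketch (with assumed sample ids) of the EC2-style dicts _format_vpn_gateway() above produces for detached and attached gateways:

    detached = _format_vpn_gateway({'id': 'vgw-0001', 'vpc_id': None})
    assert detached['state'] == 'available' and detached['attachments'] == []

    attached = _format_vpn_gateway({'id': 'vgw-0001', 'vpc_id': 'vpc-0001'})
    assert attached['attachments'] == [{'state': 'attached', 'vpcId': 'vpc-0001'}]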
@ -1,163 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cinderclient import client as cinderclient
from glanceclient import client as glanceclient
from keystoneauth1 import loading as ks_loading
from keystoneclient import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from novaclient import api_versions as nova_api_versions
from novaclient import client as novaclient
from oslo_config import cfg
from oslo_log import log as logging

logger = logging.getLogger(__name__)

ec2_opts = [
    cfg.StrOpt('nova_service_type',
               default='compute',
               help='Service type of Compute API, registered in Keystone '
                    'catalog. Should be v2.1 with microversion support. '
                    'If it is obsolete v2, a lot of useful EC2 compliant '
                    'instance properties will be unavailable.'),
    cfg.StrOpt('cinder_service_type',
               default='volumev3',
               help='Service type of Volume API, registered in Keystone '
                    'catalog.'),
]

CONF = cfg.CONF
CONF.register_opts(ec2_opts)

GROUP_AUTHTOKEN = 'keystone_authtoken'
ks_loading.register_session_conf_options(CONF, GROUP_AUTHTOKEN)
ks_loading.register_auth_conf_options(CONF, GROUP_AUTHTOKEN)


# Nova API version with microversions support
REQUIRED_NOVA_API_VERSION = '2.1'
REQUIRED_NOVA_API_VERSION_ID = 'v%s' % REQUIRED_NOVA_API_VERSION
LEGACY_NOVA_API_VERSION = '2'
# Nova API's 2.3 microversion provides additional EC2 compliant instance
# properties
# Nova API's 2.10 microversion provides admin access to users keypairs,
# which allows metadata service to expose openssh part of an instance key
# Nova API's 2.32 microversion allows 'tag' field of bdm v2, which may be
# contained in image bdms, defined by users or autocreated with instance
# snapshot
REQUIRED_NOVA_API_MICROVERSION = '2.32'
_nova_api_version = None


def nova(context):
    global _nova_api_version
    if not _nova_api_version:
        _nova_api_version = _get_nova_api_version(context)
    clnt = novaclient.Client(_nova_api_version,
                             session=context.session,
                             service_type=CONF.nova_service_type)
    # NOTE(ft): workaround for LP #1494116 bug
    if not hasattr(clnt.client, 'last_request_id'):
        setattr(clnt.client, 'last_request_id', None)
    return clnt


def neutron(context):
    return neutronclient.Client(session=context.session,
                                service_type='network')


def glance(context):
    return glanceclient.Client(version='2', service_type='image',
                               session=context.session)


def cinder(context):
    url = context.session.get_endpoint(service_type=CONF.cinder_service_type)
    # TODO(jamielennox): This should be using proper version discovery from
    # the cinder service rather than just inspecting the URL for certain
    # string values.
    version = cinderclient.get_volume_api_from_url(url)
    return cinderclient.Client(version, session=context.session,
                               service_type=CONF.cinder_service_type)


def keystone(context):
    url = context.session.get_endpoint(service_type='identity')
    return keystoneclient.Client(auth_url=url,
                                 session=context.session)


def _get_nova_api_version(context):
    client = novaclient.Client(REQUIRED_NOVA_API_VERSION,
                               session=context.session,
                               service_type=CONF.nova_service_type)

    required = nova_api_versions.APIVersion(REQUIRED_NOVA_API_MICROVERSION)
    current = client.versions.get_current()
    if not current:
        logger.warning(
            'Could not check the Nova API version because no version '
            'was found in the Nova version list for url %(url)s of service '
            'type "%(service_type)s". '
            'Using the v%(required_api_version)s Nova API.',
            {'url': client.client.get_endpoint(),
             'service_type': CONF.nova_service_type,
             'required_api_version': REQUIRED_NOVA_API_MICROVERSION})
        return REQUIRED_NOVA_API_MICROVERSION
    if current.id != REQUIRED_NOVA_API_VERSION_ID:
        logger.warning(
            'The specified "%s" Nova service type does not support the v2.1 '
            'API. A lot of useful EC2 compliant instance properties '
            'will be unavailable.', CONF.nova_service_type)
        return LEGACY_NOVA_API_VERSION
    if (nova_api_versions.APIVersion(current.version) < required):
        logger.warning(
            'Nova supports v%(nova_api_version)s, '
            'but v%(required_api_version)s is required. '
            'A lot of useful EC2 compliant instance properties '
            'will be unavailable.',
            {'nova_api_version': current.version,
             'required_api_version': REQUIRED_NOVA_API_MICROVERSION})
        return current.version
    logger.info('The provided Nova API version is v%(nova_api_version)s; '
                'v%(required_api_version)s is used.',
                {'nova_api_version': current.version,
                 'required_api_version': (
                     REQUIRED_NOVA_API_MICROVERSION)})
    return REQUIRED_NOVA_API_MICROVERSION


_admin_session = None


def get_os_admin_session():
    """Create a context to interact with OpenStack as an administrator."""
    # NOTE(ft): this is a singleton because keystone's session looks thread
    # safe for both regular and token renewal requests
    global _admin_session
    if not _admin_session:
        auth_plugin = ks_loading.load_auth_from_conf_options(
            CONF, GROUP_AUTHTOKEN)
        _admin_session = ks_loading.load_session_from_conf_options(
            CONF, GROUP_AUTHTOKEN, auth=auth_plugin)

    return _admin_session


def update_request_params_with_ssl(params):
    verify = (CONF[GROUP_AUTHTOKEN].cafile or
              not CONF[GROUP_AUTHTOKEN].insecure)
    if verify is not True:
        params['verify'] = verify
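A minimal sketch (not part of the original module) of the 'verify' value computed by update_request_params_with_ssl() above: a CA bundle path, False for insecure mode, or True, in which case the request params are left untouched.

    def _verify_value(cafile, insecure):
        # Same expression as above: a configured CA file wins, otherwise invert 'insecure'.
        return cafile or not insecure

    assert _verify_value('/etc/ssl/ca.pem', False) == '/etc/ssl/ca.pem'
    assert _verify_value(None, True) is False   # insecure: disable verification
    assert _verify_value(None, False) is True   # default: params stay unchanged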
@ -1,17 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import eventlet

eventlet.monkey_patch(os=False)
@ -1,40 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
EC2api API Server
"""

import sys

from oslo_config import cfg
from oslo_log import log as logging

from ec2api import config
from ec2api import service

CONF = cfg.CONF


def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, 'ec2api')

    server = service.WSGIService('ec2api', max_url_len=16384)
    service.serve(server, workers=server.workers)
    service.wait()


if __name__ == '__main__':
    main()
@ -1,40 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
EC2api API Metadata Server
"""

import sys

from oslo_config import cfg
from oslo_log import log as logging

from ec2api import config
from ec2api import service

CONF = cfg.CONF


def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, "ec2api")

    server = service.WSGIService('metadata')
    service.serve(server, workers=server.workers)
    service.wait()


if __name__ == '__main__':
    main()
@ -1,39 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Daemon for ec2api objectstore. Supports S3 API."""

import sys

from oslo_log import log as logging

from ec2api import config
from ec2api.s3 import s3server
from ec2api import service


def main():
    config.parse_args(sys.argv)
    logging.setup(config.CONF, "ec2api")

    server = s3server.get_wsgi_server()
    service.serve(server)
    service.wait()


if __name__ == '__main__':
    main()
@ -1,69 +0,0 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""
CLI interface for EC2 API management.
"""

import sys

from oslo_config import cfg
from oslo_log import log

from ec2api import config
from ec2api.db import migration


CONF = cfg.CONF


def do_db_version():
    """Print the database's current migration level."""
    print(migration.db_version())


def do_db_sync():
    """Place a database under migration control and upgrade it,

    creating it if necessary.
    """
    migration.db_sync(CONF.command.version)


def add_command_parsers(subparsers):
    parser = subparsers.add_parser('db_version')
    parser.set_defaults(func=do_db_version)

    parser = subparsers.add_parser('db_sync')
    parser.set_defaults(func=do_db_sync)
    parser.add_argument('version', nargs='?')
    parser.add_argument('current_version', nargs='?')


command_opt = cfg.SubCommandOpt('command',
                                title='Commands',
                                help='Available commands',
                                handler=add_command_parsers)


def main():
    CONF.register_cli_opt(command_opt)
    config.parse_args(sys.argv)
    log.setup(CONF, "ec2api")

    try:
        CONF.command.func()
    except Exception as e:
        sys.exit("ERROR: %s" % e)
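Hypothetical usage of the management commands defined above; the console-script name is an assumption and may differ in the project's packaging:

    # ec2-api-manage db_sync      -> place the database under migration control and upgrade it
    # ec2-api-manage db_version   -> print the current migration level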
@ -1,44 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from oslo_db import options
from oslo_log import log

from ec2api import paths
from ec2api import version


CONF = cfg.CONF

_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ec2api.sqlite')

_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
                       'sqlalchemy=WARN', 'suds=INFO',
                       'iso8601=WARN',
                       'requests.packages.urllib3.connectionpool=WARN',
                       'urllib3.connectionpool=WARN', 'websocket=WARN',
                       'keystonemiddleware=WARN', 'routes.middleware=WARN',
                       'stevedore=WARN', 'keystoneclient.auth=WARN']


def parse_args(argv, default_config_files=None):
    log.set_defaults(default_log_levels=_DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION)

    cfg.CONF(argv[1:],
             project='ec2api',
             version=version.version_info.version_string(),
             default_config_files=default_config_files)
@ -1,144 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""RequestContext: context for requests that persist through all of ec2."""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_context import context
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from ec2api import clients
|
||||
from ec2api import exception
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RequestContext(context.RequestContext):
|
||||
"""Security context and request information.
|
||||
|
||||
Represents the user taking a given action within the system.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, user_id, project_id, request_id=None,
|
||||
is_admin=None, remote_address=None,
|
||||
auth_token=None, user_name=None, project_name=None,
|
||||
overwrite=True, service_catalog=None, api_version=None,
|
||||
is_os_admin=None, **kwargs):
|
||||
"""Parameters
|
||||
|
||||
:param overwrite: Set to False to ensure that the greenthread local
|
||||
copy of the index is not overwritten.
|
||||
|
||||
|
||||
:param kwargs: Extra arguments that might be present, but we ignore
|
||||
because they possibly came in from older rpc messages.
|
||||
"""
|
||||
user = kwargs.pop('user', None)
|
||||
tenant = kwargs.pop('tenant', None)
|
||||
super(RequestContext, self).__init__(
|
||||
auth_token=auth_token,
|
||||
user=user_id or user,
|
||||
project_id=project_id or tenant,
|
||||
is_admin=is_admin,
|
||||
request_id=request_id,
|
||||
resource_uuid=kwargs.pop('resource_uuid', None),
|
||||
overwrite=overwrite)
|
||||
# oslo_context's RequestContext.to_dict() generates this field, we can
|
||||
# safely ignore this as we don't use it.
|
||||
kwargs.pop('user_identity', None)
|
||||
self.session = kwargs.pop('session', None)
|
||||
if kwargs:
|
||||
LOG.warning('Arguments dropped when creating context: %s',
|
||||
str(kwargs))
|
||||
|
||||
self.user_id = user_id
|
||||
self.project_id = project_id
|
||||
self.remote_address = remote_address
|
||||
timestamp = timeutils.utcnow()
|
||||
if isinstance(timestamp, str):
|
||||
timestamp = timeutils.parse_strtime(timestamp)
|
||||
self.timestamp = timestamp
|
||||
|
||||
self.service_catalog = service_catalog
|
||||
if self.service_catalog is None:
|
||||
# if list is empty or none
|
||||
self.service_catalog = []
|
||||
|
||||
self.user_name = user_name
|
||||
self.project_name = project_name
|
||||
self.is_admin = is_admin
|
||||
# TODO(ft): call policy.check_is_admin if is_admin is None
|
||||
self.is_os_admin = is_os_admin
|
||||
self.api_version = api_version
|
||||
|
||||
def to_dict(self):
|
||||
values = super(RequestContext, self).to_dict()
|
||||
# FIXME(dims): defensive hasattr() checks need to be
|
||||
# removed once we figure out why we are seeing stack
|
||||
# traces
|
||||
values.update({
|
||||
'user_id': getattr(self, 'user_id', None),
|
||||
'project_id': getattr(self, 'project_id', None),
|
||||
'is_admin': getattr(self, 'is_admin', None),
|
||||
'remote_address': getattr(self, 'remote_address', None),
|
||||
'timestamp': self.timestamp.strftime(
|
||||
timeutils.PERFECT_TIME_FORMAT) if hasattr(
|
||||
self, 'timestamp') else None,
|
||||
'request_id': getattr(self, 'request_id', None),
|
||||
'quota_class': getattr(self, 'quota_class', None),
|
||||
'user_name': getattr(self, 'user_name', None),
|
||||
'service_catalog': getattr(self, 'service_catalog', None),
|
||||
'project_name': getattr(self, 'project_name', None),
|
||||
'is_os_admin': getattr(self, 'is_os_admin', None),
|
||||
'api_version': getattr(self, 'api_version', None),
|
||||
})
|
||||
return values
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, values):
|
||||
return cls(**values)
|
||||
|
||||
|
||||
def is_user_context(context):
|
||||
"""Indicates if the request context is a normal user."""
|
||||
if not context:
|
||||
return False
|
||||
if context.is_os_admin:
|
||||
return False
|
||||
if not context.user_id or not context.project_id:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def require_context(ctxt):
|
||||
"""Raise exception.AuthFailure()
|
||||
|
||||
if context is not a user or an admin context.
|
||||
"""
|
||||
if not ctxt.is_os_admin and not is_user_context(ctxt):
|
||||
raise exception.AuthFailure()
|
||||
|
||||
|
||||
def get_os_admin_context():
|
||||
"""Create a context to interact with OpenStack as an administrator."""
|
||||
admin_session = clients.get_os_admin_session()
|
||||
return RequestContext(
|
||||
None, None,
|
||||
session=admin_session,
|
||||
is_os_admin=True,
|
||||
overwrite=False)
|
@ -1,19 +0,0 @@
|
||||
# Copyright 2013 Cloudscaling Group, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
DB abstraction for EC2api
|
||||
"""
|
||||
|
||||
from ec2api.db.api import * # noqa: F401
|
132
ec2api/db/api.py
@ -1,132 +0,0 @@
|
||||
# Copyright 2013 Cloudscaling Group, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Defines interface for DB access.
|
||||
|
||||
Functions in this module are imported into the ec2api.db namespace. Call these
|
||||
functions from ec2api.db namespace, not the ec2api.db.api namespace.
|
||||
|
||||
**Related Flags**
|
||||
|
||||
:dbackend: string to lookup in the list of LazyPluggable backends.
|
||||
`sqlalchemy` is the only supported backend right now.
|
||||
|
||||
:connection: string specifying the sqlalchemy connection to use, like:
|
||||
`sqlite:///var/lib/ec2api/ec2api.sqlite`.
|
||||
|
||||
"""
|
||||
|
||||
from eventlet import tpool
|
||||
from oslo_config import cfg
|
||||
from oslo_db import api as db_api
|
||||
from oslo_log import log as logging
|
||||
|
||||
|
||||
tpool_opts = [
|
||||
cfg.BoolOpt('use_tpool',
|
||||
default=False,
|
||||
deprecated_name='dbapi_use_tpool',
|
||||
deprecated_group='DEFAULT',
|
||||
help='Enable the experimental use of thread pooling for '
|
||||
'all DB API calls'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(tpool_opts, 'database')
|
||||
|
||||
_BACKEND_MAPPING = {'sqlalchemy': 'ec2api.db.sqlalchemy.api'}
|
||||
|
||||
|
||||
class EC2DBAPI(object):
|
||||
"""ec2's DB API wrapper class.
|
||||
|
||||
This wraps the oslo DB API with an option to be able to use eventlet's
|
||||
thread pooling. Since the CONF variable may not be loaded at the time
|
||||
this class is instantiated, we must look at it on the first DB API call.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.__db_api = None
|
||||
|
||||
@property
|
||||
def _db_api(self):
|
||||
if not self.__db_api:
|
||||
ec2_db_api = db_api.DBAPI(CONF.database.backend,
|
||||
backend_mapping=_BACKEND_MAPPING)
|
||||
if CONF.database.use_tpool:
|
||||
self.__db_api = tpool.Proxy(ec2_db_api)
|
||||
else:
|
||||
self.__db_api = ec2_db_api
|
||||
return self.__db_api
|
||||
|
||||
def __getattr__(self, key):
|
||||
return getattr(self._db_api, key)
|
||||
|
||||
|
||||
IMPL = EC2DBAPI()
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def add_item(context, kind, data):
|
||||
return IMPL.add_item(context, kind, data)
|
||||
|
||||
|
||||
def add_item_id(context, kind, os_id, project_id=None):
|
||||
return IMPL.add_item_id(context, kind, os_id, project_id)
|
||||
|
||||
|
||||
def update_item(context, item):
|
||||
IMPL.update_item(context, item)
|
||||
|
||||
|
||||
def delete_item(context, item_id):
|
||||
IMPL.delete_item(context, item_id)
|
||||
|
||||
|
||||
def restore_item(context, kind, data):
|
||||
return IMPL.restore_item(context, kind, data)
|
||||
|
||||
|
||||
def get_items(context, kind):
|
||||
return IMPL.get_items(context, kind)
|
||||
|
||||
|
||||
def get_item_by_id(context, item_id):
|
||||
return IMPL.get_item_by_id(context, item_id)
|
||||
|
||||
|
||||
def get_items_by_ids(context, item_ids):
|
||||
return IMPL.get_items_by_ids(context, item_ids)
|
||||
|
||||
|
||||
def get_public_items(context, kind, item_ids=None):
|
||||
return IMPL.get_public_items(context, kind, item_ids)
|
||||
|
||||
|
||||
def get_items_ids(context, kind, item_ids=None, item_os_ids=None):
|
||||
return IMPL.get_items_ids(context, kind, item_ids=item_ids,
|
||||
item_os_ids=item_os_ids)
|
||||
|
||||
|
||||
def add_tags(context, tags):
|
||||
return IMPL.add_tags(context, tags)
|
||||
|
||||
|
||||
def delete_tags(context, item_ids, tag_pairs=None):
|
||||
return IMPL.delete_tags(context, item_ids, tag_pairs)
|
||||
|
||||
|
||||
def get_tags(context, kinds=None, item_ids=None):
|
||||
return IMPL.get_tags(context, kinds, item_ids)
|
@ -1,77 +0,0 @@
|
||||
# Copyright 2013 Cloudscaling Group, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Database setup and migration commands."""
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class LazyPluggable(object):
|
||||
"""A pluggable backend loaded lazily based on some value."""
|
||||
|
||||
def __init__(self, pivot, config_group=None, **backends):
|
||||
self.__backends = backends
|
||||
self.__pivot = pivot
|
||||
self.__backend = None
|
||||
self.__config_group = config_group
|
||||
|
||||
def __get_backend(self):
|
||||
if not self.__backend:
|
||||
if self.__config_group is None:
|
||||
backend_name = CONF[self.__pivot]
|
||||
else:
|
||||
backend_name = CONF[self.__config_group][self.__pivot]
|
||||
if backend_name not in self.__backends:
|
||||
msg = _('Invalid backend: %s') % backend_name
|
||||
raise exception.EC2Exception(msg)
|
||||
|
||||
backend = self.__backends[backend_name]
|
||||
if isinstance(backend, tuple):
|
||||
name = backend[0]
|
||||
fromlist = backend[1]
|
||||
else:
|
||||
name = backend
|
||||
fromlist = backend
|
||||
|
||||
self.__backend = __import__(name, None, None, fromlist)
|
||||
return self.__backend
|
||||
|
||||
def __getattr__(self, key):
|
||||
backend = self.__get_backend()
|
||||
return getattr(backend, key)
|
||||
|
||||
|
||||
IMPL = LazyPluggable('backend',
|
||||
config_group='database',
|
||||
sqlalchemy='ec2api.db.sqlalchemy.migration')
|
||||
|
||||
|
||||
def db_sync(version=None):
|
||||
"""Migrate the database to `version` or the most recent version."""
|
||||
return IMPL.db_sync(version=version)
|
||||
|
||||
|
||||
def db_version():
|
||||
"""Display the current database version."""
|
||||
return IMPL.db_version()
|
||||
|
||||
|
||||
def db_initial_version():
|
||||
"""The starting version for the database."""
|
||||
return IMPL.db_initial_version()
|
@ -1,337 +0,0 @@
|
||||
# Copyright 2013 Cloudscaling Group, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Implementation of SQLAlchemy backend."""
|
||||
|
||||
import copy
|
||||
import functools
|
||||
import random
|
||||
import sys
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_db import exception as db_exception
|
||||
from oslo_db.sqlalchemy import session as db_session
|
||||
from oslo_serialization import jsonutils
|
||||
from sqlalchemy import and_
|
||||
from sqlalchemy import or_
|
||||
from sqlalchemy.sql import bindparam
|
||||
|
||||
import ec2api.context
|
||||
from ec2api.db.sqlalchemy import models
|
||||
from ec2api import exception
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
_MASTER_FACADE = None
|
||||
|
||||
|
||||
def _create_facade_lazily():
|
||||
global _MASTER_FACADE
|
||||
|
||||
if _MASTER_FACADE is None:
|
||||
# FIXME(priteau): Remove autocommit=True (and ideally use of
|
||||
# LegacyEngineFacade) asap since it's not compatible with SQLAlchemy
|
||||
# 2.0.
|
||||
_MASTER_FACADE = db_session.EngineFacade.from_config(CONF,
|
||||
autocommit=True)
|
||||
return _MASTER_FACADE
|
||||
|
||||
|
||||
def get_engine():
|
||||
facade = _create_facade_lazily()
|
||||
return facade.get_engine()
|
||||
|
||||
|
||||
def get_session(**kwargs):
|
||||
facade = _create_facade_lazily()
|
||||
return facade.get_session(**kwargs)
|
||||
|
||||
|
||||
def get_backend():
|
||||
"""The backend is this module itself."""
|
||||
return sys.modules[__name__]
|
||||
|
||||
|
||||
def require_context(f):
|
||||
"""Decorator to require *any* user or admin context.
|
||||
|
||||
The first argument to the wrapped function must be the context.
|
||||
"""
|
||||
|
||||
@functools.wraps(f)
|
||||
def wrapper(*args, **kwargs):
|
||||
ec2api.context.require_context(args[0])
|
||||
return f(*args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
|
||||
def model_query(context, model, *args, **kwargs):
|
||||
"""Query helper that accounts for context's `read_deleted` field.
|
||||
|
||||
:param context: context to query under
|
||||
:param session: if present, the session to use
|
||||
"""
|
||||
session = kwargs.get('session') or get_session()
|
||||
|
||||
return session.query(model, *args)
|
||||
|
||||
|
||||
def _new_id(kind):
|
||||
obj_id = "%(kind)s-%(id)08x" % {"kind": kind,
|
||||
"id": random.randint(1, 0xffffffff)}
|
||||
return obj_id
|
||||
|
||||
|
||||
@require_context
|
||||
def add_item(context, kind, data):
|
||||
item_ref = models.Item()
|
||||
item_ref.update({
|
||||
"project_id": context.project_id,
|
||||
"id": _new_id(kind),
|
||||
})
|
||||
item_ref.update(_pack_item_data(data))
|
||||
try:
|
||||
item_ref.save()
|
||||
except db_exception.DBDuplicateEntry as ex:
|
||||
if (models.ITEMS_OS_ID_INDEX_NAME not in ex.columns and
|
||||
'os_id' not in ex.columns):
|
||||
raise
|
||||
item_ref = (model_query(context, models.Item).
|
||||
filter_by(os_id=data["os_id"]).
|
||||
filter(or_(models.Item.project_id == context.project_id,
|
||||
models.Item.project_id.is_(None))).
|
||||
filter(models.Item.id.like('%s-%%' % kind)).
|
||||
one())
|
||||
item_data = _unpack_item_data(item_ref)
|
||||
item_data.update(data)
|
||||
item_ref.update(_pack_item_data(item_data))
|
||||
item_ref.project_id = context.project_id
|
||||
item_ref.save()
|
||||
return _unpack_item_data(item_ref)
|
||||
|
||||
|
||||
@require_context
|
||||
def add_item_id(context, kind, os_id, project_id=None):
|
||||
item_ref = models.Item()
|
||||
item_ref.update({
|
||||
"id": _new_id(kind),
|
||||
"os_id": os_id,
|
||||
})
|
||||
if project_id:
|
||||
item_ref.project_id = project_id
|
||||
try:
|
||||
item_ref.save()
|
||||
except db_exception.DBDuplicateEntry as ex:
|
||||
if (models.ITEMS_OS_ID_INDEX_NAME not in ex.columns and
|
||||
ex.columns != ['os_id']):
|
||||
raise
|
||||
item_ref = (model_query(context, models.Item).
|
||||
filter_by(os_id=os_id).
|
||||
one())
|
||||
return item_ref.id
|
||||
|
||||
|
||||
@require_context
|
||||
def update_item(context, item):
|
||||
item_ref = (model_query(context, models.Item).
|
||||
filter_by(project_id=context.project_id,
|
||||
id=item['id']).
|
||||
one())
|
||||
if item_ref.os_id and item_ref.os_id != item['os_id']:
|
||||
raise exception.EC2DBInvalidOsIdUpdate(item_id=item['id'],
|
||||
old_os_id=item_ref.os_id,
|
||||
new_os_id=item['os_id'])
|
||||
item_ref.update(_pack_item_data(item))
|
||||
item_ref.save()
|
||||
return _unpack_item_data(item_ref)
|
||||
|
||||
|
||||
@require_context
|
||||
def delete_item(context, item_id):
|
||||
session = get_session()
|
||||
deleted_count = (model_query(context, models.Item, session=session).
|
||||
filter_by(project_id=context.project_id,
|
||||
id=item_id).
|
||||
delete(synchronize_session=False))
|
||||
if not deleted_count:
|
||||
return
|
||||
try:
|
||||
(model_query(context, models.Tag, session=session).
|
||||
filter_by(project_id=context.project_id,
|
||||
item_id=item_id).
|
||||
delete(synchronize_session=False))
|
||||
except Exception:
|
||||
# NOTE(ft): ignore all exceptions because DB integrity is insignificant
|
||||
# for tags
|
||||
pass
|
||||
|
||||
|
||||
@require_context
|
||||
def restore_item(context, kind, data):
|
||||
try:
|
||||
item_ref = models.Item()
|
||||
item_ref.update({
|
||||
"project_id": context.project_id,
|
||||
})
|
||||
item_ref.id = data['id']
|
||||
item_ref.update(_pack_item_data(data))
|
||||
item_ref.save()
|
||||
return _unpack_item_data(item_ref)
|
||||
except db_exception.DBDuplicateEntry:
|
||||
raise exception.EC2DBDuplicateEntry(id=data['id'])
|
||||
|
||||
|
||||
@require_context
|
||||
def get_items(context, kind):
|
||||
return [_unpack_item_data(item)
|
||||
for item in (model_query(context, models.Item).
|
||||
filter_by(project_id=context.project_id).
|
||||
filter(models.Item.id.like('%s-%%' % kind)).
|
||||
all())]
|
||||
|
||||
|
||||
@require_context
|
||||
def get_item_by_id(context, item_id):
|
||||
return (_unpack_item_data(model_query(context, models.Item).
|
||||
filter_by(project_id=context.project_id,
|
||||
id=item_id).
|
||||
first()))
|
||||
|
||||
|
||||
@require_context
|
||||
def get_items_by_ids(context, item_ids):
|
||||
if not item_ids:
|
||||
return []
|
||||
return [_unpack_item_data(item)
|
||||
for item in (model_query(context, models.Item).
|
||||
filter_by(project_id=context.project_id).
|
||||
filter(models.Item.id.in_(item_ids)).
|
||||
all())]
|
||||
|
||||
|
||||
@require_context
|
||||
def get_public_items(context, kind, item_ids=None):
|
||||
query = (model_query(context, models.Item).
|
||||
filter(models.Item.id.like('%s-%%' % kind)).
|
||||
filter(models.Item.data.like('%"is_public": True%')))
|
||||
if item_ids:
|
||||
query = query.filter(models.Item.id.in_(item_ids))
|
||||
return [_unpack_item_data(item)
|
||||
for item in query.all()]
|
||||
|
||||
|
||||
@require_context
|
||||
def get_items_ids(context, kind, item_ids=None, item_os_ids=None):
|
||||
query = (model_query(context, models.Item).
|
||||
filter(models.Item.id.like('%s-%%' % kind)))
|
||||
if item_ids:
|
||||
query = query.filter(models.Item.id.in_(item_ids))
|
||||
if item_os_ids:
|
||||
query = query.filter(models.Item.os_id.in_(item_os_ids))
|
||||
return [(item['id'], item['os_id'])
|
||||
for item in query.all()]
|
||||
|
||||
|
||||
@require_context
|
||||
def add_tags(context, tags):
|
||||
session = get_session()
|
||||
get_query = (model_query(context, models.Tag, session=session).
|
||||
filter_by(project_id=context.project_id,
|
||||
# NOTE(ft): item_id param name is reserved for
|
||||
# sqlalchemy internal use
|
||||
item_id=bindparam('tag_item_id'),
|
||||
key=bindparam('tag_key')))
|
||||
with session.begin():
|
||||
for tag in tags:
|
||||
tag_ref = models.Tag(project_id=context.project_id,
|
||||
item_id=tag['item_id'],
|
||||
key=tag['key'],
|
||||
value=tag['value'])
|
||||
try:
|
||||
with session.begin(nested=True):
|
||||
tag_ref.save(session)
|
||||
except db_exception.DBDuplicateEntry as ex:
|
||||
if ('PRIMARY' not in ex.columns and
|
||||
ex.columns != ['project_id', 'item_id', 'key']):
|
||||
raise
|
||||
(get_query.params(tag_item_id=tag['item_id'],
|
||||
tag_key=tag['key']).
|
||||
update({'value': tag['value']}))
|
||||
|
||||
|
||||
@require_context
|
||||
def delete_tags(context, item_ids, tag_pairs=None):
|
||||
if not item_ids:
|
||||
return
|
||||
|
||||
query = (model_query(context, models.Tag).
|
||||
filter_by(project_id=context.project_id).
|
||||
filter(models.Tag.item_id.in_(item_ids)))
|
||||
|
||||
if tag_pairs:
|
||||
tag_fltr = None
|
||||
for tag_pair in tag_pairs:
|
||||
pair_fltr = None
|
||||
for col in ('key', 'value'):
|
||||
if col in tag_pair:
|
||||
expr = getattr(models.Tag, col) == tag_pair[col]
|
||||
pair_fltr = (expr if pair_fltr is None else
|
||||
and_(pair_fltr, expr))
|
||||
if pair_fltr is not None:
|
||||
tag_fltr = (pair_fltr if tag_fltr is None else
|
||||
or_(tag_fltr, pair_fltr))
|
||||
if tag_fltr is not None:
|
||||
query = query.filter(tag_fltr)
|
||||
|
||||
query.delete(synchronize_session=False)
|
||||
|
||||
|
||||
@require_context
|
||||
def get_tags(context, kinds=None, item_ids=None):
|
||||
query = (model_query(context, models.Tag).
|
||||
filter_by(project_id=context.project_id))
|
||||
if kinds:
|
||||
fltr = None
|
||||
for kind in kinds:
|
||||
expr = models.Tag.item_id.like('%s-%%' % kind)
|
||||
fltr = expr if fltr is None else or_(fltr, expr)
|
||||
query = query.filter(fltr)
|
||||
if item_ids:
|
||||
query = query.filter(models.Tag.item_id.in_(item_ids))
|
||||
return [dict(item_id=tag.item_id,
|
||||
key=tag.key,
|
||||
value=tag.value)
|
||||
for tag in query.all()]
|
||||
|
||||
|
||||
def _pack_item_data(item_data):
|
||||
data = copy.deepcopy(item_data)
|
||||
data.pop("id", None)
|
||||
return {
|
||||
"os_id": data.pop("os_id", None),
|
||||
"vpc_id": data.pop("vpc_id", None),
|
||||
"data": jsonutils.dumps(data),
|
||||
}
|
||||
|
||||
|
||||
def _unpack_item_data(item_ref):
|
||||
if item_ref is None:
|
||||
return None
|
||||
data = item_ref.data
|
||||
data = jsonutils.loads(data) if data is not None else {}
|
||||
data["id"] = item_ref.id
|
||||
data["os_id"] = item_ref.os_id
|
||||
data["vpc_id"] = item_ref.vpc_id
|
||||
return data
|
@ -1,4 +0,0 @@
|
||||
This is a database migration repository.
|
||||
|
||||
More information at
|
||||
https://github.com/openstack/sqlalchemy-migrate
|
@ -1,19 +0,0 @@
|
||||
# Copyright 2013 Cloudscaling Group, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from migrate.versioning.shell import main
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main(debug='False', repository='.')
|
@ -1,20 +0,0 @@
|
||||
[db_settings]
|
||||
# Used to identify which repository this database is versioned under.
|
||||
# You can use the name of your project.
|
||||
repository_id=ec2api
|
||||
|
||||
# The name of the database table used to track the schema version.
|
||||
# This name shouldn't already be used by your project.
|
||||
# If this is changed once a database is under version control, you'll need to
|
||||
# change the table name in each database too.
|
||||
version_table=migrate_version
|
||||
|
||||
# When committing a change script, Migrate will attempt to generate the
|
||||
# sql for all supported databases; normally, if one of them fails - probably
|
||||
# because you don't have that database installed - it is ignored and the
|
||||
# commit continues, perhaps ending successfully.
|
||||
# Databases in this list MUST compile successfully during a commit, or the
|
||||
# entire commit will fail. List the databases your application will actually
|
||||
# be using to ensure your updates to that database work properly.
|
||||
# This must be a list; example: ['postgres','sqlite']
|
||||
required_dbs=[]
|
@ -1,59 +0,0 @@
|
||||
# Copyright 2013 Cloudscaling Group, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy import Column, MetaData
|
||||
from sqlalchemy import PrimaryKeyConstraint, String, Table, Text
|
||||
from sqlalchemy import UniqueConstraint
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
|
||||
meta = MetaData()
|
||||
meta.bind = migrate_engine
|
||||
|
||||
items = Table('items', meta,
|
||||
Column("id", String(length=30)),
|
||||
Column("project_id", String(length=64)),
|
||||
Column("vpc_id", String(length=12)),
|
||||
Column("os_id", String(length=36)),
|
||||
Column("data", Text()),
|
||||
PrimaryKeyConstraint('id'),
|
||||
UniqueConstraint('os_id', name='items_os_id_idx'),
|
||||
mysql_engine="InnoDB",
|
||||
mysql_charset="utf8"
|
||||
)
|
||||
items.create()
|
||||
|
||||
tags = Table('tags', meta,
|
||||
Column("project_id", String(length=64)),
|
||||
Column("item_id", String(length=30)),
|
||||
Column("key", String(length=127)),
|
||||
Column("value", String(length=255)),
|
||||
PrimaryKeyConstraint('project_id', 'item_id', 'key'),
|
||||
mysql_engine="InnoDB",
|
||||
mysql_charset="utf8"
|
||||
)
|
||||
tags.create()
|
||||
|
||||
if migrate_engine.name == "mysql":
|
||||
# In Folsom we explicitly converted migrate_version to UTF8.
|
||||
sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;"
|
||||
migrate_engine.execute(sql)
|
||||
# Set default DB charset to UTF8.
|
||||
sql = (" ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" %
|
||||
migrate_engine.url.database)
|
||||
migrate_engine.execute(sql)
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
|
||||
raise NotImplementedError("Downgrade from Juno is unsupported.")
|
@ -1,86 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import os
|
||||
|
||||
from migrate import exceptions as versioning_exceptions
|
||||
from migrate.versioning import api as versioning_api
|
||||
from migrate.versioning.repository import Repository
|
||||
import sqlalchemy
|
||||
|
||||
from ec2api.db.sqlalchemy import api as db_session
|
||||
from ec2api import exception
|
||||
from ec2api.i18n import _
|
||||
|
||||
INIT_VERSION = 0
|
||||
_REPOSITORY = None
|
||||
|
||||
get_engine = db_session.get_engine
|
||||
|
||||
|
||||
def db_sync(version=None):
|
||||
if version is not None:
|
||||
try:
|
||||
version = int(version)
|
||||
except ValueError:
|
||||
raise exception.EC2Exception(_("version should be an integer"))
|
||||
|
||||
current_version = db_version()
|
||||
repository = _find_migrate_repo()
|
||||
if version is None or version > current_version:
|
||||
return versioning_api.upgrade(get_engine(), repository, version)
|
||||
else:
|
||||
return versioning_api.downgrade(get_engine(), repository,
|
||||
version)
|
||||
|
||||
|
||||
def db_version():
|
||||
repository = _find_migrate_repo()
|
||||
try:
|
||||
return versioning_api.db_version(get_engine(), repository)
|
||||
except versioning_exceptions.DatabaseNotControlledError:
|
||||
meta = sqlalchemy.MetaData()
|
||||
engine = get_engine()
|
||||
meta.reflect(bind=engine)
|
||||
tables = meta.tables
|
||||
if len(tables) == 0:
|
||||
db_version_control(INIT_VERSION)
|
||||
return versioning_api.db_version(get_engine(), repository)
|
||||
else:
|
||||
# Some pre-Essex DB's may not be version controlled.
|
||||
# Require them to upgrade using Essex first.
|
||||
raise exception.EC2Exception(
|
||||
_("Upgrade DB using Essex release first."))
|
||||
|
||||
|
||||
def db_initial_version():
|
||||
return INIT_VERSION
|
||||
|
||||
|
||||
def db_version_control(version=None):
|
||||
repository = _find_migrate_repo()
|
||||
versioning_api.version_control(get_engine(), repository, version)
|
||||
return version
|
||||
|
||||
|
||||
def _find_migrate_repo():
|
||||
"""Get the path for the migrate repository."""
|
||||
global _REPOSITORY
|
||||
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
|
||||
'migrate_repo')
|
||||
assert os.path.exists(path)
|
||||
if _REPOSITORY is None:
|
||||
_REPOSITORY = Repository(path)
|
||||
return _REPOSITORY
|
@ -1,62 +0,0 @@
|
||||
# Copyright 2013 Cloudscaling Group, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
SQLAlchemy models for ec2api data.
|
||||
"""
|
||||
|
||||
from oslo_db.sqlalchemy import models
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy import Column, PrimaryKeyConstraint, String, Text
|
||||
from sqlalchemy import UniqueConstraint
|
||||
|
||||
BASE = declarative_base()
|
||||
|
||||
ITEMS_OS_ID_INDEX_NAME = 'items_os_id_idx'
|
||||
|
||||
|
||||
class EC2Base(models.ModelBase):
|
||||
metadata = None
|
||||
|
||||
def save(self, session=None):
|
||||
from ec2api.db.sqlalchemy import api
|
||||
|
||||
if session is None:
|
||||
session = api.get_session()
|
||||
|
||||
super(EC2Base, self).save(session=session)
|
||||
|
||||
|
||||
class Item(BASE, EC2Base):
|
||||
__tablename__ = 'items'
|
||||
__table_args__ = (
|
||||
PrimaryKeyConstraint('id'),
|
||||
UniqueConstraint('os_id', name=ITEMS_OS_ID_INDEX_NAME),
|
||||
)
|
||||
id = Column(String(length=30))
|
||||
project_id = Column(String(length=64))
|
||||
vpc_id = Column(String(length=12))
|
||||
os_id = Column(String(length=36))
|
||||
data = Column(Text())
|
||||
|
||||
|
||||
class Tag(BASE, EC2Base):
|
||||
__tablename__ = 'tags'
|
||||
__table_args__ = (
|
||||
PrimaryKeyConstraint('project_id', 'item_id', 'key'),
|
||||
)
|
||||
project_id = Column(String(length=64))
|
||||
item_id = Column(String(length=30))
|
||||
key = Column(String(length=127))
|
||||
value = Column(String(length=255))
|
@ -1,498 +0,0 @@
|
||||
# Copyright 2014
|
||||
# The Cloudscaling Group, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""ec2api base exception handling.
|
||||
|
||||
Includes decorator for re-raising ec2api-type exceptions.
|
||||
|
||||
SHOULD include dedicated exception logging.
|
||||
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from ec2api.i18n import _
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
exc_log_opts = [
|
||||
cfg.BoolOpt('fatal_exception_format_errors',
|
||||
default=False,
|
||||
help='Make exception message format errors fatal'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(exc_log_opts)
|
||||
|
||||
|
||||
class EC2APIException(Exception):
|
||||
"""Base EC2 API Exception
|
||||
|
||||
To correctly use this class, inherit from it and define
|
||||
a 'msg_fmt' property. That msg_fmt will get printf'd
|
||||
with the keyword arguments provided to the constructor.
|
||||
"""
|
||||
msg_fmt = _('An unknown exception occurred.')
|
||||
|
||||
def __init__(self, message=None, **kwargs):
|
||||
self.kwargs = kwargs
|
||||
|
||||
if not message:
|
||||
try:
|
||||
message = self.msg_fmt % kwargs
|
||||
except Exception as e:
|
||||
# kwargs doesn't match a variable in the message
|
||||
# log the issue and the kwargs
|
||||
LOG.exception('Exception in string format operation for '
|
||||
'%s exception', self.__class__.__name__)
|
||||
for name, value in kwargs.items():
|
||||
LOG.error('%s: %s' % (name, value))
|
||||
|
||||
if CONF.fatal_exception_format_errors:
|
||||
raise e
|
||||
else:
|
||||
# at least get the core message out if something happened
|
||||
message = self.msg_fmt
|
||||
elif not isinstance(message, str):
|
||||
LOG.error("Message '%(msg)s' for %(ex)s exception is not "
|
||||
"a string",
|
||||
{'msg': message, 'ex': self.__class__.__name__})
|
||||
if CONF.fatal_exception_format_errors:
|
||||
raise TypeError(_('Invalid exception message format'))
|
||||
else:
|
||||
message = self.msg_fmt
|
||||
|
||||
super(EC2APIException, self).__init__(message)
|
||||
|
||||
def format_message(self):
|
||||
# NOTE(mrodden): use the first argument to the python Exception object
|
||||
# which should be our full EC2APIException message, (see __init__)
|
||||
return self.args[0]
|
||||
|
||||
|
||||
# Internal ec2api exceptions
|
||||
|
||||
class EC2APIConfigNotFound(EC2APIException):
|
||||
msg_fmt = _("Could not find config at %(path)s")
|
||||
|
||||
|
||||
class EC2APIPasteAppNotFound(EC2APIException):
|
||||
msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
|
||||
|
||||
|
||||
class EC2KeystoneDiscoverFailure(EC2APIException):
|
||||
msg_fmt = _("Could not discover keystone versions.")
|
||||
|
||||
|
||||
class EC2DBInvalidOsIdUpdate(EC2APIException):
|
||||
msg_fmt = _('Invalid update of os_id of %(item_id)s item '
|
||||
'from %(old_os_id)s to %(new_os_id)s')
|
||||
|
||||
|
||||
class EC2DBDuplicateEntry(EC2APIException):
|
||||
msg_fmt = _('Entry %(id)s already exists in DB.')
|
||||
|
||||
|
||||
# Internal ec2api metadata exceptions
|
||||
|
||||
class EC2MetadataException(EC2APIException):
|
||||
pass
|
||||
|
||||
|
||||
class EC2MetadataNotFound(EC2MetadataException):
|
||||
pass
|
||||
|
||||
|
||||
class EC2MetadataInvalidAddress(EC2MetadataException):
|
||||
pass
|
||||
|
||||
|
||||
# Intermediate exception classes to organize AWS exception hierarchy
|
||||
|
||||
class EC2Exception(EC2APIException):
|
||||
"""Base EC2 compliant exception
|
||||
|
||||
To correctly use this class, inherit from it and define
|
||||
an 'ec2_code' property if a new class name doesn't coincide with the
|
||||
AWS Error Code.
|
||||
"""
|
||||
code = 400
|
||||
|
||||
|
||||
class EC2InvalidException(EC2Exception):
|
||||
pass
|
||||
|
||||
|
||||
class EC2IncorrectStateException(EC2Exception):
|
||||
pass
|
||||
|
||||
|
||||
class EC2DuplicateException(EC2InvalidException):
|
||||
pass
|
||||
|
||||
|
||||
class EC2InUseException(EC2InvalidException):
|
||||
pass
|
||||
|
||||
|
||||
class EC2NotFoundException(EC2InvalidException):
|
||||
pass
|
||||
|
||||
|
||||
class EC2OverlimitException(EC2Exception):
|
||||
pass
|
||||
|
||||
|
||||
# AWS compliant exceptions
|
||||
|
||||
class Unsupported(EC2Exception):
|
||||
msg_fmt = _("The specified request is unsupported. %(reason)s")
|
||||
|
||||
|
||||
class UnsupportedOperation(EC2Exception):
|
||||
msg_fmt = _('The specified request includes an unsupported operation.')
|
||||
|
||||
|
||||
class OperationNotPermitted(EC2Exception):
|
||||
msg_fmt = _('The specified operation is not allowed.')
|
||||
|
||||
|
||||
class InvalidRequest(EC2InvalidException):
|
||||
msg_fmt = _('The request received was invalid.')
|
||||
|
||||
|
||||
class InvalidAttribute(EC2InvalidException):
|
||||
msg_fmt = _("Attribute not supported: %(attr)s")
|
||||
|
||||
|
||||
class InvalidID(EC2InvalidException):
|
||||
msg_fmt = _("The ID '%(id)s' is not valid")
|
||||
|
||||
|
||||
class InvalidInput(EC2InvalidException):
|
||||
msg_fmt = _("Invalid input received: %(reason)s")
|
||||
|
||||
|
||||
class AuthFailure(EC2InvalidException):
|
||||
msg_fmt = _('Not authorized.')
|
||||
|
||||
|
||||
class ValidationError(EC2InvalidException):
|
||||
msg_fmt = _("The input fails to satisfy the constraints "
|
||||
"specified by an AWS service: '%(reason)s'")
|
||||
|
||||
|
||||
class MissingInput(EC2InvalidException):
|
||||
pass
|
||||
|
||||
|
||||
class MissingParameter(EC2InvalidException):
|
||||
msg_fmt = _("The required parameter '%(param)s' is missing")
|
||||
|
||||
|
||||
class InvalidParameter(EC2InvalidException):
|
||||
msg_fmt = _("The property '%(name)s' is not valid")
|
||||
|
||||
|
||||
class InvalidParameterValue(EC2InvalidException):
|
||||
msg_fmt = _("Value (%(value)s) for parameter %(parameter)s is invalid. "
|
||||
"%(reason)s")
|
||||
|
||||
|
||||
class InvalidFilter(EC2InvalidException):
|
||||
msg_fmt = _('The filter is invalid.')
|
||||
|
||||
|
||||
class InvalidParameterCombination(EC2InvalidException):
|
||||
msg_fmt = _('The combination of parameters is incorrect')
|
||||
|
||||
|
||||
class InvalidVpcRange(EC2InvalidException):
|
||||
ec2_code = 'InvalidVpc.Range'
|
||||
msg_fmt = _("The CIDR '%(cidr_block)s' is invalid.")
|
||||
|
||||
|
||||
class InvalidVpcState(EC2InvalidException):
|
||||
msg_fmt = _('VPC %(vpc_id)s is currently attached to '
|
||||
'the Virtual Private Gateway %(vgw_id)s')
|
||||
|
||||
|
||||
class InvalidSubnetRange(EC2InvalidException):
|
||||
ec2_code = 'InvalidSubnet.Range'
|
||||
msg_fmt = _("The CIDR '%(cidr_block)s' is invalid.")
|
||||
|
||||
|
||||
class InvalidSubnetConflict(EC2InvalidException):
|
||||
ec2_code = 'InvalidSubnet.Conflict'
|
||||
msg_fmt = _("The CIDR '%(cidr_block)s' conflicts with another subnet")
|
||||
|
||||
|
||||
class InvalidInstanceId(EC2InvalidException):
|
||||
ec2_code = 'InvalidInstanceID'
|
||||
msg_fmt = _("There are multiple interfaces attached to instance "
|
||||
"'%(instance_id)s'. Please specify an interface ID for "
|
||||
"the operation instead.")
|
||||
|
||||
|
||||
class InvalidSnapshotIDMalformed(EC2InvalidException):
|
||||
ec2_code = 'InvalidSnapshotID.Malformed'
|
||||
# TODO(ft): Change the message with the real AWS message
|
||||
msg_fmt = _('The snapshot %(id)s ID is not valid')
|
||||
|
||||
|
||||
class InvalidBlockDeviceMapping(EC2InvalidException):
|
||||
pass
|
||||
|
||||
|
||||
class IncorrectState(EC2IncorrectStateException):
|
||||
msg_fmt = _("The resource is in incorrect state for the request - reason: "
|
||||
"'%(reason)s'")
|
||||
|
||||
|
||||
class DependencyViolation(EC2IncorrectStateException):
|
||||
msg_fmt = _('Object %(obj1_id)s has dependent resource %(obj2_id)s')
|
||||
|
||||
|
||||
class CannotDelete(EC2IncorrectStateException):
|
||||
msg_fmt = _('Cannot delete the default VPC security group')
|
||||
|
||||
|
||||
class ResourceAlreadyAssociated(EC2IncorrectStateException):
|
||||
ec2_code = 'Resource.AlreadyAssociated'
|
||||
|
||||
|
||||
class GatewayNotAttached(EC2IncorrectStateException):
|
||||
ec2_code = 'Gateway.NotAttached'
|
||||
msg_fmt = _("resource %(gw_id)s is not attached to network %(vpc_id)s")
|
||||
|
||||
|
||||
class IncorrectInstanceState(EC2IncorrectStateException):
|
||||
msg_fmt = _("The instance '%(instance_id)s' is not in a state from which "
|
||||
"the requested operation can be performed.")
|
||||
|
||||
|
||||
class InvalidAMIIDUnavailable(EC2IncorrectStateException):
|
||||
ec2_code = 'InvalidAMIID.Unavailable'
|
||||
# TODO(ft): Change the message with the real AWS message
|
||||
msg_fmt = _("Image %(image_id)s is not active.")
|
||||
|
||||
|
||||
class InvalidNetworkInterfaceInUse(EC2InUseException):
|
||||
ec2_code = 'InvalidNetworkInterface.InUse'
|
||||
msg_fmt = _('Interface: %(interface_ids)s in use.')
|
||||
|
||||
|
||||
class InvalidIPAddressInUse(EC2InUseException):
|
||||
ec2_code = 'InvalidIPAddress.InUse'
|
||||
msg_fmt = _('Address %(ip_address)s is in use.')
|
||||
|
||||
|
||||
class InvalidKeyPairDuplicate(EC2DuplicateException):
|
||||
ec2_code = 'InvalidKeyPair.Duplicate'
|
||||
msg_fmt = _("Key pair '%(key_name)s' already exists.")
|
||||
|
||||
|
||||
class InvalidPermissionDuplicate(EC2DuplicateException):
|
||||
ec2_code = 'InvalidPermission.Duplicate'
|
||||
msg_fmt = _('The specified rule already exists for that security group.')
|
||||
|
||||
|
||||
class InvalidGroupDuplicate(EC2DuplicateException):
|
||||
ec2_code = 'InvalidGroup.Duplicate'
|
||||
msg_fmt = _("Security group '%(name)s' already exists.")
|
||||
|
||||
|
||||
class RouteAlreadyExists(EC2DuplicateException):
|
||||
msg_fmt = _('The route identified by %(destination_cidr_block)s '
|
||||
'already exists.')
|
||||
|
||||
|
||||
class InvalidCustomerGatewayDuplicateIpAddress(EC2DuplicateException):
|
||||
ec2_code = 'InvalidCustomerGateway.DuplicateIpAddress'
|
||||
msg_fmt = _('Conflict among chosen gateway IP addresses.')
|
||||
|
||||
|
||||
class InvalidVpcIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidVpcID.NotFound'
|
||||
msg_fmt = _("The vpc ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidInternetGatewayIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidInternetGatewayID.NotFound'
|
||||
msg_fmt = _("The internetGateway ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidSubnetIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidSubnetID.NotFound'
|
||||
msg_fmt = _("The subnet ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidNetworkInterfaceIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidNetworkInterfaceID.NotFound'
|
||||
msg_fmt = _("Network interface %(id)s could not "
|
||||
"be found.")
|
||||
|
||||
|
||||
class InvalidAttachmentIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidAttachmentID.NotFound'
|
||||
msg_fmt = _("Attachment %(id)s could not "
|
||||
"be found.")
|
||||
|
||||
|
||||
class InvalidInstanceIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidInstanceID.NotFound'
|
||||
msg_fmt = _("The instance ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidDhcpOptionsIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidDhcpOptionsID.NotFound'
|
||||
msg_fmt = _("The dhcp options ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidAddressNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidAddress.NotFound'
|
||||
msg_fmt = _('The specified elastic IP address %(ip)s cannot be found.')
|
||||
|
||||
|
||||
class InvalidAllocationIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidAllocationID.NotFound'
|
||||
msg_fmt = _("The allocation ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidAssociationIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidAssociationID.NotFound'
|
||||
msg_fmt = _("The association ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidSecurityGroupIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidSecurityGroupID.NotFound'
|
||||
msg_fmt = _("The securityGroup ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidGroupNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidGroup.NotFound'
|
||||
msg_fmt = _("The security group ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidPermissionNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidPermission.NotFound'
|
||||
msg_fmt = _('The specified permission does not exist')
|
||||
|
||||
|
||||
class InvalidRouteTableIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidRouteTableID.NotFound'
|
||||
msg_fmt = _("The routeTable ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidRouteNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidRoute.NotFound'
|
||||
msg_fmt = _('No route with destination-cidr-block '
|
||||
'%(destination_cidr_block)s in route table %(route_table_id)s')
|
||||
|
||||
|
||||
class InvalidAMIIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidAMIID.NotFound'
|
||||
msg_fmt = _("The image id '[%(id)s]' does not exist")
|
||||
|
||||
|
||||
class InvalidVolumeNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidVolume.NotFound'
|
||||
msg_fmt = _("The volume '%(id)s' does not exist.")
|
||||
|
||||
|
||||
class InvalidSnapshotNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidSnapshot.NotFound'
|
||||
msg_fmt = _("Snapshot %(id)s could not be found.")
|
||||
|
||||
|
||||
class InvalidKeypairNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidKeyPair.NotFound'
|
||||
msg_fmt = _("Keypair %(id)s is not found")
|
||||
|
||||
|
||||
class InvalidAvailabilityZoneNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidAvailabilityZone.NotFound'
|
||||
msg_fmt = _("Availability zone %(id)s not found")
|
||||
|
||||
|
||||
class InvalidGatewayIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidGatewayID.NotFound'
|
||||
msg_fmt = _("The gateway ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidVpnGatewayIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidVpnGatewayID.NotFound'
|
||||
msg_fmt = _("The vpnGateway ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidCustomerGatewayIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidCustomerGatewayID.NotFound'
|
||||
msg_fmt = _("The customerGateway ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidVpnConnectionIDNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidVpnConnectionID.NotFound'
|
||||
msg_fmt = _("The vpnConnection ID '%(id)s' does not exist")
|
||||
|
||||
|
||||
class InvalidVpnGatewayAttachmentNotFound(EC2NotFoundException):
|
||||
ec2_code = 'InvalidVpnGatewayAttachment.NotFound'
|
||||
msg_fmt = _("The attachment with vpn gateway ID '%(vgw_id)s' "
|
||||
"and vpc ID '%(vpc_id)s' does not exist")
|
||||
|
||||
|
||||
class ResourceLimitExceeded(EC2OverlimitException):
|
||||
msg_fmt = _('You have reached the limit of %(resource)s')
|
||||
|
||||
|
||||
class VpcLimitExceeded(EC2OverlimitException):
|
||||
msg_fmt = _('The maximum number of VPCs has been reached.')
|
||||
|
||||
|
||||
class SubnetLimitExceeded(EC2OverlimitException):
|
||||
msg_fmt = _('You have reached the limit on the number of subnets that you '
|
||||
'can create')
|
||||
|
||||
|
||||
class InsufficientFreeAddressesInSubnet(EC2OverlimitException):
|
||||
msg_fmt = _('The specified subnet does not have enough free addresses to '
|
||||
'satisfy the request.')
|
||||
|
||||
|
||||
class AddressLimitExceeded(EC2OverlimitException):
|
||||
msg_fmt = _('The maximum number of addresses has been reached.')
|
||||
|
||||
|
||||
class SecurityGroupLimitExceeded(EC2OverlimitException):
|
||||
msg_fmt = _('You have reached the limit of security groups')
|
||||
|
||||
|
||||
class RulesPerSecurityGroupLimitExceeded(EC2OverlimitException):
|
||||
msg_fmt = _("You've reached the limit on the number of rules that "
|
||||
"you can add to a security group.")
|
||||
|
||||
|
||||
class VpnGatewayAttachmentLimitExceeded(EC2OverlimitException):
|
||||
msg_fmt = _('The maximum number of virtual private gateway attachments '
|
||||
'has been reached.')
|
||||
|
||||
|
||||
class InvalidGroupReserved(EC2InvalidException):
|
||||
ec2_code = 'InvalidGroup.Reserved'
|
||||
msg_fmt = _("The security group '%(group_name)' is reserved.")
|
||||
|
||||
|
||||
class VPCIdNotSpecified(EC2InvalidException):
|
||||
msg_fmt = _("No default VPC for this user.")
|
@ -1,51 +0,0 @@
|
||||
# Copyright (c) 2017 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
|
||||
from hacking import core
|
||||
|
||||
|
||||
_all_log_levels = {'critical', 'error', 'exception', 'info',
|
||||
'warning', 'debug'}
|
||||
|
||||
# Since _Lx() have been removed, we just need to check _()
|
||||
_all_hints = {'_'}
|
||||
|
||||
_log_translation_hint = re.compile(
|
||||
r".*LOG\.(%(levels)s)\(\s*(%(hints)s)\(" % {
|
||||
'levels': '|'.join(_all_log_levels),
|
||||
'hints': '|'.join(_all_hints),
|
||||
})
|
||||
|
||||
|
||||
@core.flake8ext
|
||||
def no_translate_logs(logical_line, filename):
|
||||
"""N537 - Don't translate logs.
|
||||
|
||||
Check for 'LOG.*(_('
|
||||
|
||||
Translators don't provide translations for log messages, and operators
|
||||
asked not to translate them.
|
||||
|
||||
* This check assumes that 'LOG' is a logger.
|
||||
|
||||
:param logical_line: The logical line to check.
|
||||
:param filename: The file name where the logical line exists.
|
||||
:returns: None if the logical line passes the check, otherwise a tuple
|
||||
is yielded that contains the offending index in logical line and a
|
||||
message describing the check validation failure.
|
||||
"""
|
||||
if _log_translation_hint.match(logical_line):
|
||||
yield (0, "N537: Log messages should not be translated!")
|
@ -1,36 +0,0 @@
|
||||
# Copyright 2014 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""oslo.i18n integration module.
|
||||
|
||||
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
|
||||
|
||||
"""
|
||||
|
||||
import oslo_i18n
|
||||
|
||||
DOMAIN = 'ec2-api'
|
||||
|
||||
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
|
||||
|
||||
# The primary translation function using the well-known name "_"
|
||||
_ = _translators.primary
|
||||
|
||||
|
||||
def translate(value, user_locale):
|
||||
return oslo_i18n.translate(value, user_locale)
|
||||
|
||||
|
||||
def get_available_languages():
|
||||
return oslo_i18n.get_available_languages(DOMAIN)
|
@ -1,284 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import hashlib
import hmac
import posixpath

import httplib2
from oslo_cache import core as cache_core
from oslo_config import cfg
from oslo_log import log as logging
import urllib.parse as urlparse
import webob

from ec2api import context as ec2_context
from ec2api import exception
from ec2api.i18n import _
from ec2api.metadata import api
from ec2api import utils
from ec2api import wsgi

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_forwarded_for', 'ec2api.api.auth')

metadata_opts = [
    cfg.StrOpt('nova_metadata_ip',
               default='127.0.0.1',
               help=_("IP address used by Nova metadata server.")),
    cfg.IntOpt('nova_metadata_port',
               default=8775,
               help=_("TCP Port used by Nova metadata server.")),
    cfg.StrOpt('nova_metadata_protocol',
               default='http',
               choices=['http', 'https'],
               help=_("Protocol to access nova metadata, http or https")),
    cfg.BoolOpt('nova_metadata_insecure',
                default=False,
                help=_("Allow to perform insecure SSL (https) requests to "
                       "nova metadata")),
    cfg.StrOpt('auth_ca_cert',
               help=_("Certificate Authority public key (CA cert) "
                      "file for ssl")),
    cfg.StrOpt('nova_client_cert',
               default='',
               help=_("Client certificate for nova metadata api server.")),
    cfg.StrOpt('nova_client_priv_key',
               default='',
               help=_("Private key of client certificate.")),
    cfg.StrOpt('metadata_proxy_shared_secret',
               default='',
               help=_('Shared secret to sign instance-id request'),
               secret=True),
    cfg.IntOpt("cache_expiration",
               default=15,
               min=0,
               help=_('This option is the time (in seconds) to cache metadata. '
                      'Increasing this setting should improve response times of the '
                      'metadata API when under heavy load. Higher values may '
                      'increase memory usage, and result in longer times for host '
                      'metadata changes to take effect.'))
]

CONF.register_opts(metadata_opts, group='metadata')
cache_core.configure(CONF)


class MetadataRequestHandler(wsgi.Application):
    """Serve metadata."""

    def __init__(self):
        if not CONF.cache.enabled:
            LOG.warning("Metadata doesn't use cache. "
                        "Configure cache options to use cache.")
        self.cache_region = cache_core.create_region()
        cache_core.configure_cache_region(CONF, self.cache_region)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        LOG.debug('Request: %s', req)

        path = req.path_info
        if path == '' or path[0] != '/':
            path = '/' + path
        path = posixpath.normpath(path)
        path_tokens = path.split('/')[1:]
        if path_tokens[0] == 'ec2':
            path_tokens = path_tokens[1:]

        if path_tokens == ['']:
            resp = api.get_version_list()
            return self._add_response_data(req.response, resp)

        try:
            requester = self._get_requester(req)
            if path_tokens[0] == 'openstack':
                return self._proxy_request(req, requester)

            resp = self._get_metadata(path_tokens, requester)
            return self._add_response_data(req.response, resp)

        except exception.EC2MetadataNotFound:
            return webob.exc.HTTPNotFound()
        except Exception:
            LOG.exception("Unexpected error.")
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            return webob.exc.HTTPInternalServerError(
                explanation=str(msg))

    def _proxy_request(self, req, requester):
        headers = self._build_proxy_request_headers(requester)
        nova_ip_port = '%s:%s' % (CONF.metadata.nova_metadata_ip,
                                  CONF.metadata.nova_metadata_port)
        url = urlparse.urlunsplit((
            CONF.metadata.nova_metadata_protocol,
            nova_ip_port,
            req.path_info,
            req.query_string,
            ''))

        h = httplib2.Http(
            ca_certs=CONF.metadata.auth_ca_cert,
            disable_ssl_certificate_validation=(
                CONF.metadata.nova_metadata_insecure)
        )
        if (CONF.metadata.nova_client_cert and
                CONF.metadata.nova_client_priv_key):
            h.add_certificate(CONF.metadata.nova_client_priv_key,
                              CONF.metadata.nova_client_cert,
                              nova_ip_port)
        resp, content = h.request(url, method=req.method, headers=headers,
                                  body=req.body)

        if resp.status == 200:
            LOG.debug(str(resp))
            req.response.content_type = resp['content-type']
            req.response.body = content
            return req.response
        elif resp.status == 403:
            LOG.warning(
                'The remote metadata server responded with Forbidden. This '
                'response usually occurs when shared secrets do not match.'
            )
            return webob.exc.HTTPForbidden()
        elif resp.status == 400:
            return webob.exc.HTTPBadRequest()
        elif resp.status == 404:
            return webob.exc.HTTPNotFound()
        elif resp.status == 409:
            return webob.exc.HTTPConflict()
        elif resp.status == 500:
            msg = _(
                'Remote metadata server experienced an internal server error.'
            )
            LOG.warning(msg)
            return webob.exc.HTTPInternalServerError(
                explanation=str(msg))
        else:
            raise Exception(_('Unexpected response code: %s') % resp.status)

    def _build_proxy_request_headers(self, requester):
        signature = self._sign_instance_id(requester['os_instance_id'])
        return {
            'X-Forwarded-For': requester['private_ip'],
            'X-Instance-ID': requester['os_instance_id'],
            'X-Tenant-ID': requester['project_id'],
            'X-Instance-ID-Signature': signature,
        }

    def _sign_instance_id(self, instance_id):
        return hmac.new(
            CONF.metadata.metadata_proxy_shared_secret.encode("utf-8"),
            instance_id.encode(),
            hashlib.sha256).hexdigest()

    def _get_requester(self, req):
        if req.headers.get('X-Metadata-Provider'):
            provider_id, remote_ip = self._unpack_nsx_request(req)
            context = ec2_context.get_os_admin_context()
            os_instance_id, project_id = (
                api.get_os_instance_and_project_id_by_provider_id(
                    context, provider_id, remote_ip))
        else:
            os_instance_id, project_id, remote_ip = (
                self._unpack_neutron_request(req))
        return {'os_instance_id': os_instance_id,
                'project_id': project_id,
                'private_ip': remote_ip}

    def _unpack_neutron_request(self, req):
        os_instance_id = req.headers.get('X-Instance-ID')
        project_id = req.headers.get('X-Tenant-ID')
        signature = req.headers.get('X-Instance-ID-Signature')
        remote_ip = req.headers.get('X-Forwarded-For')

        if not remote_ip:
            raise exception.EC2MetadataInvalidAddress()

        if os_instance_id is None:
            msg = _('X-Instance-ID header is missing from request.')
        elif project_id is None:
            msg = _('X-Tenant-ID header is missing from request.')
        elif not isinstance(os_instance_id, str):
            msg = _('Multiple X-Instance-ID headers found within request.')
        elif not isinstance(project_id, str):
            msg = _('Multiple X-Tenant-ID headers found within request.')
        else:
            msg = None

        if msg:
            raise webob.exc.HTTPBadRequest(explanation=msg)

        self._validate_signature(signature, os_instance_id, remote_ip)
        return os_instance_id, project_id, remote_ip

    def _unpack_nsx_request(self, req):
        remote_address = req.headers.get('X-Forwarded-For')
        if remote_address is None:
            msg = _('X-Forwarded-For is missing from request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        provider_id = req.headers.get('X-Metadata-Provider')
        if provider_id is None:
            msg = _('X-Metadata-Provider is missing from request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        remote_ip = remote_address.split(',')[0]

        if CONF.metadata.metadata_proxy_shared_secret:
            signature = req.headers.get('X-Metadata-Provider-Signature')
            self._validate_signature(signature, provider_id, remote_ip)

        return provider_id, remote_ip

    def _validate_signature(self, signature, requester_id, requester_ip):
        expected_signature = hmac.new(
            CONF.metadata.metadata_proxy_shared_secret.encode("utf-8"),
            requester_id.encode(),
            hashlib.sha256).hexdigest()

        if not (signature and
                utils.constant_time_compare(expected_signature, signature)):
            LOG.warning('X-Instance-ID-Signature: %(signature)s does '
                        'not match the expected value: '
                        '%(expected_signature)s for id: '
                        '%(requester_id)s. Request From: '
                        '%(requester_ip)s',
                        {'signature': signature,
                         'expected_signature': expected_signature,
                         'requester_id': requester_id,
                         'requester_ip': requester_ip})

            msg = _('Invalid proxy request signature.')
            raise webob.exc.HTTPForbidden(explanation=msg)

    def _get_metadata(self, path_tokens, requester):
        context = ec2_context.get_os_admin_context()
        # NOTE(ft): substitute project_id for context to instance's one.
        # It's needed for correct describe and auto update DB operations.
        # It doesn't affect operations via OpenStack's clients because
        # these clients use auth_token field only
        context.project_id = requester['project_id']
        return api.get_metadata_item(context, path_tokens,
                                     requester['os_instance_id'],
                                     requester['private_ip'],
                                     self.cache_region)

    def _add_response_data(self, response, data):
        if isinstance(data, str):
            response.text = data
        else:
            response.body = data
        response.content_type = 'text/plain'
        return response
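The trust between the Neutron/NSX metadata proxy and the handler above rests on an HMAC-SHA256 of the instance (or provider) id keyed with metadata_proxy_shared_secret, compared in constant time. A self-contained sketch of the same scheme with made-up values; it uses the stdlib hmac.compare_digest where the module above uses utils.constant_time_compare::

    import hashlib
    import hmac

    shared_secret = 'not-a-real-secret'  # both proxy and handler know this
    instance_id = '11111111-2222-3333-4444-555555555555'  # illustrative

    # Proxy side: sign the instance id, sent as X-Instance-ID-Signature.
    signature = hmac.new(shared_secret.encode('utf-8'),
                         instance_id.encode(),
                         hashlib.sha256).hexdigest()

    # Handler side: recompute the signature and compare in constant time.
    expected = hmac.new(shared_secret.encode('utf-8'),
                        instance_id.encode(),
                        hashlib.sha256).hexdigest()
    assert hmac.compare_digest(expected, signature)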
@ -1,310 +0,0 @@
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import itertools

from novaclient import exceptions as nova_exception
from oslo_cache import core as cache_core
from oslo_config import cfg
from oslo_log import log as logging

from ec2api.api import clients
from ec2api.api import ec2utils
from ec2api.api import instance as instance_api
from ec2api import exception


CONF = cfg.CONF
LOG = logging.getLogger(__name__)

VERSIONS = [
    '1.0',
    '2007-01-19',
    '2007-03-01',
    '2007-08-29',
    '2007-10-10',
    '2007-12-15',
    '2008-02-01',
    '2008-09-01',
    '2009-04-04',
]

VERSION_DATA = {
    '1.0': ['ami-id',
            'ami-launch-index',
            'ami-manifest-path',
            'hostname',
            'instance-id',
            'local-ipv4',
            'public-keys',
            'reservation-id',
            'security-groups'],
    '2007-01-19': ['local-hostname',
                   'public-hostname',
                   'public-ipv4'],
    '2007-03-01': ['product-codes'],
    '2007-08-29': ['instance-type'],
    '2007-10-10': ['ancestor-ami-ids',
                   'ramdisk-id'],
    '2007-12-15': ['block-device-mapping'],
    '2008-02-01': ['kernel-id',
                   'placement'],
    '2008-09-01': ['instance-action'],
    '2009-04-04': [],
}


def get_version_list():
    return _format_metadata_item(VERSIONS + ["latest"])


def get_os_instance_and_project_id_by_provider_id(context, provider_id,
                                                  fixed_ip):
    neutron = clients.neutron(context)
    os_subnets = neutron.list_subnets(advanced_service_providers=[provider_id],
                                      fields=['network_id'])
    if not os_subnets:
        raise exception.EC2MetadataNotFound()
    os_networks = [subnet['network_id']
                   for subnet in os_subnets['subnets']]
    try:
        os_port = neutron.list_ports(
            fixed_ips='ip_address=' + fixed_ip,
            network_id=os_networks,
            fields=['device_id', 'tenant_id'])['ports'][0]
    except IndexError:
        raise exception.EC2MetadataNotFound()
    os_instance_id = os_port['device_id']
    project_id = os_port['tenant_id']
    return os_instance_id, project_id


def get_metadata_item(context, path_tokens, os_instance_id, remote_ip,
                      cache_region):
    version = path_tokens[0]
    if version == "latest":
        version = VERSIONS[-1]
    elif version not in VERSIONS:
        raise exception.EC2MetadataNotFound()

    cache_key = 'ec2api-metadata-%s' % os_instance_id
    cache = cache_region.get(
        cache_key, expiration_time=CONF.metadata.cache_expiration)
    if cache and cache != cache_core.NO_VALUE:
        _check_instance_owner(context, os_instance_id, cache['owner_id'])
        LOG.debug("Using cached metadata for instance %s", os_instance_id)
    else:
        ec2_instance, ec2_reservation = (
            _get_ec2_instance_and_reservation(context, os_instance_id))

        _check_instance_owner(context, os_instance_id,
                              ec2_reservation['ownerId'])

        metadata = _build_metadata(context, ec2_instance, ec2_reservation,
                                   os_instance_id, remote_ip)
        LOG.debug('get_metadata_item: result %s', str(metadata))
        cache = {'metadata': metadata,
                 'owner_id': ec2_reservation['ownerId']}

        cache_region.set(cache_key, cache)

    metadata = cache['metadata']
    metadata = _cut_down_to_version(metadata, version)
    metadata_item = _find_path_in_tree(metadata, path_tokens[1:])
    return _format_metadata_item(metadata_item)


def _get_ec2_instance_and_reservation(context, os_instance_id):
    instance_id = ec2utils.os_id_to_ec2_id(context, 'i', os_instance_id)
    LOG.debug('_get_ec2_instance_and_reservation(%s)', os_instance_id)
    try:
        ec2_reservations = instance_api.describe_instances(
            context, [instance_id])
        LOG.debug('_get_ec2_instance_and_reservation: result by id %s',
                  str(ec2_reservations))
    except exception.InvalidInstanceIDNotFound:
        ec2_reservations = instance_api.describe_instances(
            context, filter=[{'name': 'instance-id',
                              'value': [instance_id]}])
        LOG.debug('_get_ec2_instance_and_reservation: result by name %s',
                  str(ec2_reservations))
    if (len(ec2_reservations['reservationSet']) != 1 or
            len(ec2_reservations['reservationSet'][0]['instancesSet']) != 1):
        LOG.error('Failed to get metadata for instance id: %s',
                  os_instance_id)
        raise exception.EC2MetadataNotFound()

    ec2_reservation = ec2_reservations['reservationSet'][0]
    ec2_instance = ec2_reservation['instancesSet'][0]

    return ec2_instance, ec2_reservation


def _check_instance_owner(context, os_instance_id, owner_id):
    # NOTE(ft): check for case of Neutron metadata proxy.
    # It sends project_id as X-Tenant-ID HTTP header.
    # We make sure it's correct
    if context.project_id != owner_id:
        LOG.warning('Tenant_id %(tenant_id)s does not match tenant_id '
                    'of instance %(instance_id)s.',
                    {'tenant_id': context.project_id,
                     'instance_id': os_instance_id})
        raise exception.EC2MetadataNotFound()


def _build_metadata(context, ec2_instance, ec2_reservation,
                    os_instance_id, remote_ip):
    metadata = {
        'ami-id': ec2_instance['imageId'],
        'ami-launch-index': ec2_instance['amiLaunchIndex'],
        # NOTE (ft): the fake value as it is in Nova EC2 metadata
        'ami-manifest-path': 'FIXME',
        # NOTE (ft): empty value as it is in Nova EC2 metadata
        'ancestor-ami-ids': [],
        'block-device-mapping': _build_block_device_mappings(context,
                                                             ec2_instance,
                                                             os_instance_id),
        # NOTE(ft): Nova EC2 metadata returns instance's hostname with
        # dhcp_domain suffix if it's set in config.
        # But i don't see any reason to return a hostname differs from EC2
        # describe output one. If we need to consider dhcp_domain suffix
        # then we should do it in the describe operation
        'hostname': ec2_instance['privateDnsName'],
        # NOTE (ft): the fake value as it is in Nova EC2 metadata
        'instance-action': 'none',
        'instance-id': ec2_instance['instanceId'],
        'instance-type': ec2_instance['instanceType'],
        'local-hostname': ec2_instance['privateDnsName'],
        'local-ipv4': ec2_instance['privateIpAddress'] or remote_ip,
        'placement': {
            'availability-zone': ec2_instance['placement']['availabilityZone']
        },
        # NOTE (ft): empty value as it is in Nova EC2 metadata
        'product-codes': [],
        'public-hostname': ec2_instance['dnsName'],
        'public-ipv4': ec2_instance.get('ipAddress', ''),
        'reservation-id': ec2_reservation['reservationId'],
        'security-groups': [sg['groupName']
                            for sg in ec2_reservation.get('groupSet', [])],
    }
    if 'kernelId' in ec2_instance:
        metadata['kernel-id'] = ec2_instance['kernelId']
    if 'ramdiskId' in ec2_instance:
        metadata['ramdisk-id'] = ec2_instance['ramdiskId']
    # public keys are strangely rendered in ec2 metadata service
    # meta-data/public-keys/ returns '0=keyname' (with no trailing /)
    # and only if there is a public key given.
    # '0=keyname' means there is a normally rendered dict at
    # meta-data/public-keys/0
    #
    # meta-data/public-keys/ : '0=%s' % keyname
    # meta-data/public-keys/0/ : 'openssh-key'
    # meta-data/public-keys/0/openssh-key : '%s' % publickey
    if ec2_instance['keyName']:
        metadata['public-keys'] = {
            '0': {'_name': "0=" + ec2_instance['keyName']}}
        nova = clients.nova(context)
        os_instance = nova.servers.get(os_instance_id)
        try:
            keypair = nova.keypairs._get(
                '/%s/%s?user_id=%s' % (nova.keypairs.keypair_prefix,
                                       ec2_instance['keyName'],
                                       os_instance.user_id),
                'keypair')
        except nova_exception.NotFound:
            pass
        else:
            metadata['public-keys']['0']['openssh-key'] = keypair.public_key

    full_metadata = {'meta-data': metadata}

    userdata = instance_api.describe_instance_attribute(
        context, ec2_instance['instanceId'], 'userData')
    if 'userData' in userdata:
        userdata = userdata['userData']['value']
        userdata = base64.b64decode(userdata)
        try:
            userdata = userdata.decode("utf-8")
        except UnicodeDecodeError:
            pass
        full_metadata['user-data'] = userdata

    return full_metadata


def _build_block_device_mappings(context, ec2_instance, os_instance_id):
    mappings = {'root': ec2_instance.get('rootDeviceName', ''),
                'ami': ec2utils.block_device_strip_dev(
                    ec2_instance.get('rootDeviceName', ''))}
    if 'blockDeviceMapping' in ec2_instance:
        # NOTE(yamahata): I'm not sure how ebs device should be numbered.
        # Right now sort by device name for deterministic
        # result.
        ebs_devices = [ebs['deviceName']
                       for ebs in ec2_instance['blockDeviceMapping']]
        ebs_devices.sort()
        ebs_devices = {'ebs%d' % num: ebs
                       for num, ebs in enumerate(ebs_devices)}
        mappings.update(ebs_devices)

    # TODO(ft): extend Nova API to get ephemerals and swap
    return mappings


def _cut_down_to_version(metadata, version):
    version_number = VERSIONS.index(version) + 1
    if version_number == len(VERSIONS):
        return metadata
    return {attr: metadata[attr]
            for attr in itertools.chain(
                *(VERSION_DATA[ver] for ver in VERSIONS[:version_number]))
            if attr in metadata}


def _format_metadata_item(data):
    if isinstance(data, dict):
        output = ''
        for key in sorted(data.keys()):
            if key == '_name':
                continue
            if isinstance(data[key], dict):
                if '_name' in data[key]:
                    output += str(data[key]['_name'])
                else:
                    output += key + '/'
            else:
                output += key

            output += '\n'
        return output[:-1]
    elif isinstance(data, list):
        return '\n'.join(data)
    else:
        return str(data)


def _find_path_in_tree(data, path_tokens):
    # given a dict/list tree, and a path in that tree, return data found there.
    for i in range(0, len(path_tokens)):
        if isinstance(data, dict) or isinstance(data, list):
            if path_tokens[i] in data:
                data = data[path_tokens[i]]
            else:
                raise exception.EC2MetadataNotFound()
        else:
            if i != len(path_tokens) - 1:
                raise exception.EC2MetadataNotFound()
            data = data[path_tokens[i]]
    return data
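The listing format produced by _format_metadata_item() above mirrors the EC2 metadata service: dict keys render as directory-style entries (trailing '/'), dicts carrying a '_name' render as that name, and leaves render verbatim. A self-contained sketch with made-up data, re-implementing the same rules so the example runs on its own::

    def render(node):
        # Same rendering rules as _format_metadata_item() above.
        if isinstance(node, dict):
            lines = []
            for key in sorted(k for k in node if k != '_name'):
                child = node[key]
                if isinstance(child, dict):
                    lines.append(str(child.get('_name', key + '/')))
                else:
                    lines.append(key)
            return '\n'.join(lines)
        if isinstance(node, list):
            return '\n'.join(node)
        return str(node)

    meta_data = {
        'instance-id': 'i-00000001',
        'placement': {'availability-zone': 'nova'},
        'public-keys': {'0': {'_name': '0=mykey'}},
    }
    print(render(meta_data))                 # instance-id, placement/, public-keys/
    print(render(meta_data['public-keys']))  # 0=mykey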
@ -1,24 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools

import ec2api.metadata


def list_opts():
    return [
        ('metadata',
         itertools.chain(
             ec2api.metadata.metadata_opts,
         )),
    ]
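The list_opts() hook above (presumably ec2api/metadata/opts.py) is the usual oslo.config mechanism for letting the sample-config generator discover the [metadata] options. A hedged sketch of consuming such a hook directly; the loop is illustrative and assumes the package is still importable::

    from ec2api.metadata import opts

    for group, options in opts.list_opts():
        print('[%s]' % group)
        for opt in options:
            print('%s (default: %s)' % (opt.name, opt.default))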
Some files were not shown because too many files have changed in this diff.