diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index d6ed4d03..00000000
--- a/.coveragerc
+++ /dev/null
@@ -1,7 +0,0 @@
-[run]
-branch = True
-source = tricircle
-omit = tricircle/tests/*, tricircle/tempestplugin/*
-
-[report]
-ignore_errors = True
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 49e8d3a7..00000000
--- a/.gitignore
+++ /dev/null
@@ -1,55 +0,0 @@
-*.py[cod]
-
-# C extensions
-*.so
-
-# Packages
-*.egg
-*.egg-info
-dist
-build
-eggs
-parts
-var
-sdist
-develop-eggs
-.installed.cfg
-lib
-lib64
-
-# Installer logs
-pip-log.txt
-
-# Unit test / coverage reports
-.coverage
-.tox
-nosetests.xml
-.testrepository
-.venv
-.stestr
-
-# Translations
-*.mo
-
-# Mr Developer
-.mr.developer.cfg
-.project
-.pydevproject
-.idea
-
-# Complexity
-output/*.html
-output/*/index.html
-
-# Sphinx
-doc/build
-
-# pbr generates these
-AUTHORS
-ChangeLog
-
-# Editors
-*~
-.*.swp
-.*sw?
-
diff --git a/.stestr.conf b/.stestr.conf
deleted file mode 100644
index b5e41b4c..00000000
--- a/.stestr.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-test_path=./tricircle/tests/unit
-top_dir=./
\ No newline at end of file
diff --git a/.testr.conf b/.testr.conf
deleted file mode 100644
index 43200f9c..00000000
--- a/.testr.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
- OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
- OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
- ${PYTHON:-python} -m subunit.run discover $TRICIRCLE_TEST_DIRECTORY $LISTOPT $IDOPTION
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/.zuul.yaml b/.zuul.yaml
deleted file mode 100644
index 26f7afbe..00000000
--- a/.zuul.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-- job:
- name: tricircle-functional-python3
- parent: legacy-dsvm-base
- run: playbooks/tricircle-dsvm-functional/run.yaml
- post-run: playbooks/tricircle-dsvm-functional/post.yaml
- timeout: 7800
- required-projects:
- - openstack/devstack-gate
- - openstack/tricircle
- - openstack/neutron
- - openstack/networking-sfc
- vars:
- devstack_localrc:
- USE_PYTHON3: true
-
-- job:
- name: tricircle-multiregion
- parent: legacy-dsvm-base-multinode
- run: playbooks/tricircle-dsvm-multiregion/run.yaml
- post-run: playbooks/tricircle-dsvm-multiregion/post.yaml
- timeout: 7800
- required-projects:
- - openstack/devstack-gate
- - openstack/networking-sfc
- - openstack/tricircle
-
-- job:
- name: tricircle-tox-lower-constraints
- parent: openstack-tox-lower-constraints
- required-projects:
- - openstack/neutron
- - openstack/networking-sfc
-
-- job:
- name: tricircle-tox-cover
- parent: openstack-tox-cover
- required-projects:
- - openstack/neutron
- - openstack/networking-sfc
-
-
-- project:
- templates:
- - openstack-python3-victoria-jobs-neutron
- - openstack-python3-victoria-jobs
- - check-requirements
- - publish-openstack-docs-pti
- - release-notes-jobs-python3
- check:
- jobs:
- - tricircle-tox-cover
- - tricircle-tox-lower-constraints
- - openstack-tox-pep8:
- required-projects:
- - openstack/neutron
- - openstack/networking-sfc
- - openstack-tox-py36:
- required-projects:
- - openstack/neutron
- - openstack/networking-sfc
- - tricircle-functional-python3
- - tricircle-multiregion
- gate:
- jobs:
- - tricircle-tox-lower-constraints
- - openstack-tox-pep8:
- required-projects:
- - openstack/neutron
- - openstack/networking-sfc
- - openstack-tox-py36:
- required-projects:
- - openstack/neutron
- - openstack/networking-sfc
- - tricircle-multiregion
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
deleted file mode 100644
index be2f8e08..00000000
--- a/CONTRIBUTING.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-If you would like to contribute to the development of OpenStack, you should
-follow the steps on this page:
-
- https://docs.openstack.org/infra/manual/developers.html
-
-If you already know how the OpenStack CI system works and your
-OpenStack account is set up properly, you can start from the development
-workflow section in that documentation to learn how to submit your
-patch set for review via the Gerrit tool:
-
- https://docs.openstack.org/infra/manual/developers.html#development-workflow
-
-Any pull requests submitted through GitHub will be ignored.
-
-Any bug should be filed on Launchpad, not GitHub:
-
- https://bugs.launchpad.net/tricircle
diff --git a/HACKING.rst b/HACKING.rst
deleted file mode 100644
index 966afbf0..00000000
--- a/HACKING.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-================================
-The Tricircle Style Commandments
-================================
-
-Please read the OpenStack Style Commandments
- https://docs.openstack.org/hacking/latest/
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 8d968b6c..00000000
--- a/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/README.rst b/README.rst
index eb596ea4..86e34d67 100644
--- a/README.rst
+++ b/README.rst
@@ -1,57 +1,10 @@
-========================
-Team and repository tags
-========================
+This project is no longer maintained.
-.. image:: https://governance.openstack.org/tc/badges/tricircle.svg
- :target: https://governance.openstack.org/tc/reference/tags/index.html
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
-.. Change things from this point on
-
-=========
-Tricircle
-=========
-
-The purpose of the Tricircle project is to provide networking automation
-across Neutron servers in multi-region OpenStack cloud deployments.
-
-Each OpenStack cloud includes its own Nova, Cinder and Neutron. The Neutron
-servers in these OpenStack clouds are called local Neutron servers, and all of
-them are configured with the Tricircle Local Neutron Plugin. A separate
-Neutron server is installed and runs standalone as the coordinator of
-networking automation across the local Neutron servers; this server is
-configured with the Tricircle Central Neutron Plugin and is called the
-central Neutron server.
-
-By leveraging the Tricircle Central Neutron Plugin and the Tricircle Local
-Neutron Plugin configured in these Neutron servers, the Tricircle ensures that
-the IP address pool, IP/MAC address allocation and network segment allocation
-are managed globally without conflict. The Tricircle also handles
-tenant-oriented data link layer (Layer2) and network layer (Layer3) networking
-automation across the local Neutron servers, so that tenant resources such as
-VMs, bare metal servers or containers can communicate with each other via
-Layer2 or Layer3, no matter which OpenStack cloud they are running in.
-
-Note: the Tricircle uses its own definitions of Layer2/Layer3 networking
-across Neutron. For details, please read the design documentation, especially
-"6.5 L2 Networking across Neutron". The wiki and design documentation are
-linked below.
-
-The Tricircle and the multi-region OpenStack clouds use a shared Keystone
-(with centralized or distributed deployment) or federated Keystones.
-
-The Tricircle source code is distributed under the terms of the Apache
-License, Version 2.0. The full terms and conditions of this license are
-detailed in the LICENSE file.
-
-* Free software: Apache license
-* Design documentation: `Tricircle Design Blueprint `_
-* Wiki: https://wiki.openstack.org/wiki/tricircle
-* Installation guide: https://docs.openstack.org/tricircle/latest/install/index.html
-* Admin guide: https://docs.openstack.org/tricircle/latest/admin/index.html
-* Configuration guide: https://docs.openstack.org/tricircle/latest/configuration/index.html
-* Networking guide: https://docs.openstack.org/tricircle/latest/networking/index.html
-* Source: https://opendev.org/openstack/tricircle
-* Bugs: https://bugs.launchpad.net/tricircle
-* Blueprints: https://blueprints.launchpad.net/tricircle
-* Release notes: https://docs.openstack.org/releasenotes/tricircle
-* Contributing: https://docs.openstack.org/tricircle/latest/contributor/index.html
+For any further questions, please email
+openstack-discuss@lists.openstack.org or join #openstack-dev on
+Freenode.
diff --git a/devstack/admin-openrc.sh b/devstack/admin-openrc.sh
deleted file mode 100755
index c6063c3b..00000000
--- a/devstack/admin-openrc.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-export OS_PROJECT_DOMAIN_ID=default
-export OS_USER_DOMAIN_ID=default
-export OS_PROJECT_NAME=admin
-export OS_TENANT_NAME=admin
-export OS_USERNAME=admin
-export OS_PASSWORD=password
-export OS_AUTH_URL=http://127.0.0.1:5000
-export OS_IDENTITY_API_VERSION=3
-export OS_REGION_NAME=RegionOne
diff --git a/devstack/apache-tricircle-api.template b/devstack/apache-tricircle-api.template
deleted file mode 100644
index 10723045..00000000
--- a/devstack/apache-tricircle-api.template
+++ /dev/null
@@ -1,39 +0,0 @@
-# apache configuration template for tricircle-api
-
-Listen %PUBLICPORT%
-LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" tricircle_combined
-
-<Directory %TRICIRCLE_BIN%>
-    Require all granted
-</Directory>
-<VirtualHost *:%PUBLICPORT%>
-    WSGIDaemonProcess tricircle-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
-    WSGIProcessGroup tricircle-api
-    WSGIScriptAlias / %PUBLICWSGI%
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    <IfVersion >= 2.4>
-        ErrorLogFormat "%M"
-    </IfVersion>
-    ErrorLog /var/log/%APACHE_NAME%/tricircle-api.log
-    CustomLog /var/log/%APACHE_NAME%/tricircle_access.log tricircle_combined
-    %SSLENGINE%
-    %SSLCERTFILE%
-    %SSLKEYFILE%
-</VirtualHost>
-
-%SSLLISTEN%<VirtualHost *:443>
-%SSLLISTEN%    %SSLENGINE%
-%SSLLISTEN%    %SSLCERTFILE%
-%SSLLISTEN%    %SSLKEYFILE%
-%SSLLISTEN%</VirtualHost>
-
-Alias /tricircle %PUBLICWSGI%
-<Location /tricircle>
-    SetHandler wsgi-script
-    Options +ExecCGI
-
-    WSGIProcessGroup tricircle-api
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-</Location>
diff --git a/devstack/local.conf.node_1.sample b/devstack/local.conf.node_1.sample
deleted file mode 100644
index 6ba54863..00000000
--- a/devstack/local.conf.node_1.sample
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# Sample DevStack local.conf.
-#
-# This sample file is intended for a typical Tricircle DevStack multi-node
-# environment. With these configuration values, DevStack registers the central
-# Neutron service and the Tricircle Admin API service in CentralRegion, while
-# the local Neutron service and the remaining services (e.g. Nova, Cinder,
-# etc.) are placed in RegionOne. Keystone is registered in RegionOne and is
-# shared by the services in all regions.
-#
-# This file works with local.conf.node_2.sample to help you build a two-node
-# three-region Tricircle environment (CentralRegion, RegionOne and RegionTwo).
-#
-# Some options need to be changed to adapt to your environment, see README.rst
-# for detail.
-#
-
-[[local|localrc]]
-
-DATABASE_PASSWORD=password
-RABBIT_PASSWORD=password
-SERVICE_PASSWORD=password
-SERVICE_TOKEN=password
-ADMIN_PASSWORD=password
-
-HOST_IP=10.250.201.24
-
-Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:2001:3000,extern:3001:4000)
-Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=(vni_ranges=1001:2000)
-Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS=(flat_networks=bridge,extern)
-OVS_BRIDGE_MAPPINGS=bridge:br-vlan
-ML2_L3_PLUGIN=tricircle.network.local_l3_plugin.TricircleL3Plugin
-
-# Specify Central Region name
-# CENTRAL_REGION_NAME=CentralRegion
-
-# Specify port for central Neutron server
-# TRICIRCLE_NEUTRON_PORT=20001
-
-# Set to True to integrate Tricircle with Nova cell v2 (experimental)
-# TRICIRCLE_DEPLOY_WITH_CELL=True
-
-TRICIRCLE_START_SERVICES=True
-enable_plugin tricircle https://github.com/openstack/tricircle/
-
-# Configure Neutron LBaaS, which will be removed after tricircle plugin enabling
-# enable_plugin neutron-lbaas https://github.com/openstack/neutron-lbaas.git
-# enable_plugin octavia https://github.com/openstack/octavia.git
-# ENABLED_SERVICES+=,q-lbaasv2
-# ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
-
-disable_service horizon
-
-# Enable l2population for vxlan network
-[[post-config|/$Q_PLUGIN_CONF_FILE]]
-
-[ml2]
-mechanism_drivers = openvswitch,linuxbridge,l2population
-
-[agent]
-tunnel_types=vxlan
-l2_population=True
diff --git a/devstack/local.conf.node_2.sample b/devstack/local.conf.node_2.sample
deleted file mode 100644
index 35bfd2fd..00000000
--- a/devstack/local.conf.node_2.sample
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-# Sample DevStack local.conf.
-#
-# This sample file is intended for a typical Tricircle DevStack multi-node
-# environment. With these configuration values, DevStack sets up RegionTwo
-# running the original Nova, Cinder and Neutron, and the local Neutron is
-# configured with the Tricircle Local Neutron Plugin so that it works with the
-# central Neutron running the Tricircle Central Neutron Plugin.
-#
-# This file works with local.conf.node_1.sample to help you build a two-node
-# three-region environment (CentralRegion, RegionOne and RegionTwo). Keystone in
-# RegionOne is shared by services in all the regions.
-#
-# Some options need to be changed to adapt to your environment, see README.rst
-# for detail.
-#
-
-[[local|localrc]]
-
-DATABASE_PASSWORD=password
-RABBIT_PASSWORD=password
-SERVICE_PASSWORD=password
-SERVICE_TOKEN=password
-ADMIN_PASSWORD=password
-
-HOST_IP=10.250.201.25
-REGION_NAME=RegionTwo
-KEYSTONE_REGION_NAME=RegionOne
-SERVICE_HOST=$HOST_IP
-KEYSTONE_SERVICE_HOST=10.250.201.24
-KEYSTONE_AUTH_HOST=10.250.201.24
-
-Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:2001:3000,extern:3001:4000)
-Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=(vni_ranges=1001:2000)
-Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS=(flat_networks=bridge,extern)
-OVS_BRIDGE_MAPPINGS=bridge:br-vlan,extern:br-ext
-ML2_L3_PLUGIN=tricircle.network.local_l3_plugin.TricircleL3Plugin
-
-# Specify Central Region name
-# CENTRAL_REGION_NAME=CentralRegion
-
-# Specify port for central Neutron server
-# TRICIRCLE_NEUTRON_PORT=20001
-
-# Set to True to integrate Tricircle with Nova cell v2 (experimental)
-# TRICIRCLE_DEPLOY_WITH_CELL=True
-
-TRICIRCLE_START_SERVICES=False
-enable_plugin tricircle https://github.com/openstack/tricircle/
-
-# Configure Neutron LBaaS, which will be removed after tricircle plugin enabling
-# enable_plugin neutron-lbaas https://github.com/openstack/neutron-lbaas.git
-# enable_plugin octavia https://github.com/openstack/octavia.git
-# ENABLED_SERVICES+=,q-lbaasv2
-# ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
-
-disable_service horizon
-
-# Enable l2population for vxlan network
-[[post-config|/$Q_PLUGIN_CONF_FILE]]
-
-[ml2]
-mechanism_drivers = openvswitch,linuxbridge,l2population
-
-[agent]
-tunnel_types=vxlan
-l2_population=True
diff --git a/devstack/local.conf.sample b/devstack/local.conf.sample
deleted file mode 100644
index ef4a567e..00000000
--- a/devstack/local.conf.sample
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Sample DevStack local.conf.
-#
-# This sample file is intended to be used for your typical Tricircle DevStack
-# environment that's running all of OpenStack on a single host.
-#
-# No changes to this sample configuration are required for this to work.
-#
-
-[[local|localrc]]
-
-DATABASE_PASSWORD=password
-RABBIT_PASSWORD=password
-SERVICE_PASSWORD=password
-SERVICE_TOKEN=password
-ADMIN_PASSWORD=password
-
-HOST_IP=127.0.0.1
-
-# Specify Central Region name
-# CENTRAL_REGION_NAME=CentralRegion
-
-# Specify port for central Neutron server
-# TRICIRCLE_NEUTRON_PORT=20001
-
-enable_plugin tricircle https://github.com/openstack/tricircle/
-
-# disable_service horizon
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
deleted file mode 100755
index 5dbe3b17..00000000
--- a/devstack/plugin.sh
+++ /dev/null
@@ -1,476 +0,0 @@
-# Devstack extras script to install Tricircle
-
-# Test if any tricircle services are enabled
-# is_tricircle_enabled
-function is_tricircle_enabled {
- [[ ,${ENABLED_SERVICES} =~ ,"t-api" ]] && return 0
- return 1
-}
-
-# create_tricircle_accounts() - Set up common required tricircle
-# service accounts in keystone
-# Project User Roles
-# -------------------------------------------------------------------------
-# $SERVICE_TENANT_NAME tricircle service
-
-function create_tricircle_accounts {
- if [[ "$ENABLED_SERVICES" =~ "t-api" ]]; then
- create_service_user "tricircle" "admin"
- local tricircle_api=$(get_or_create_service "tricircle" \
- "tricircle" "Cross Neutron Networking Automation Service")
-
- local tricircle_api_url="$SERVICE_PROTOCOL://$TRICIRCLE_API_HOST/tricircle/v1.0"
- if [[ "$TRICIRCLE_DEPLOY_WITH_WSGI" == "False" ]]; then
- tricircle_api_url="$SERVICE_PROTOCOL://$TRICIRCLE_API_HOST:$TRICIRCLE_API_PORT/v1.0/"
- fi
-
- get_or_create_endpoint $tricircle_api \
- "$CENTRAL_REGION_NAME" \
- "$tricircle_api_url" \
- "$tricircle_api_url" \
- "$tricircle_api_url"
- fi
-}
-
-# create_tricircle_cache_dir() - Set up cache dir for tricircle
-function create_tricircle_cache_dir {
-
- # Delete existing dir
- sudo rm -rf $TRICIRCLE_AUTH_CACHE_DIR
- sudo mkdir -p $TRICIRCLE_AUTH_CACHE_DIR
- sudo chown `whoami` $TRICIRCLE_AUTH_CACHE_DIR
-}
-
-# common config-file configuration for tricircle services
-function init_common_tricircle_conf {
- local conf_file=$1
-
- touch $conf_file
- iniset $conf_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $conf_file DEFAULT verbose True
- iniset $conf_file DEFAULT use_syslog $SYSLOG
- iniset $conf_file DEFAULT tricircle_db_connection `database_connection_url tricircle`
-
- iniset $conf_file client auth_url http://$KEYSTONE_SERVICE_HOST/identity
- iniset $conf_file client identity_url http://$KEYSTONE_SERVICE_HOST/identity/v3
- iniset $conf_file client admin_username admin
- iniset $conf_file client admin_password $ADMIN_PASSWORD
- iniset $conf_file client admin_tenant demo
- iniset $conf_file client auto_refresh_endpoint True
- iniset $conf_file client top_region_name $CENTRAL_REGION_NAME
-
- iniset $conf_file oslo_concurrency lock_path $TRICIRCLE_STATE_PATH/lock
- iniset_rpc_backend tricircle $conf_file
-}
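-
-# For reference, the iniset calls above produce an api.conf/xjob.conf along
-# these lines (all values below are illustrative, not actual defaults):
-#
-#   [DEFAULT]
-#   debug = True
-#   tricircle_db_connection = mysql+pymysql://root:password@127.0.0.1/tricircle?charset=utf8
-#
-#   [client]
-#   auth_url = http://10.0.0.10/identity
-#   admin_username = admin
-#   admin_tenant = demo
-#   auto_refresh_endpoint = True
-#   top_region_name = CentralRegion
-#
-#   [oslo_concurrency]
-#   lock_path = /var/lib/tricircle/lock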
-
-function init_local_nova_conf {
- iniset $NOVA_CONF glance api_servers http://$KEYSTONE_SERVICE_HOST/image
- iniset $NOVA_CONF placement os_region_name $CENTRAL_REGION_NAME
-}
-
-# common config-file configuration for local Neutron(s)
-function init_local_neutron_conf {
-
- iniset $NEUTRON_CONF DEFAULT core_plugin tricircle.network.local_plugin.TricirclePlugin
- if [[ "$TRICIRCLE_DEPLOY_WITH_CELL" == "True" ]]; then
- iniset $NEUTRON_CONF nova region_name $CENTRAL_REGION_NAME
- fi
-
- iniset $NEUTRON_CONF client auth_url http://$KEYSTONE_SERVICE_HOST/identity
- iniset $NEUTRON_CONF client identity_url http://$KEYSTONE_SERVICE_HOST/identity/v3
- iniset $NEUTRON_CONF client admin_username admin
- iniset $NEUTRON_CONF client admin_password $ADMIN_PASSWORD
- iniset $NEUTRON_CONF client admin_tenant demo
- iniset $NEUTRON_CONF client auto_refresh_endpoint True
- iniset $NEUTRON_CONF client top_pod_name $CENTRAL_REGION_NAME
-
- iniset $NEUTRON_CONF tricircle real_core_plugin neutron.plugins.ml2.plugin.Ml2Plugin
- iniset $NEUTRON_CONF tricircle local_region_name $REGION_NAME
- iniset $NEUTRON_CONF tricircle central_neutron_url http://$KEYSTONE_SERVICE_HOST:$TRICIRCLE_NEUTRON_PORT
-}
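-
-# As an illustration (the host and region below are placeholders), the local
-# neutron.conf then contains fragments roughly like:
-#
-#   [DEFAULT]
-#   core_plugin = tricircle.network.local_plugin.TricirclePlugin
-#
-#   [tricircle]
-#   real_core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-#   local_region_name = RegionOne
-#   central_neutron_url = http://10.0.0.10:20001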
-
-# Set the environment variables for local Neutron(s)
-function init_local_neutron_variables {
-
- export Q_USE_PROVIDERNET_FOR_PUBLIC=True
-
- Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-}
- Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-}
- # if VLAN options were not set in local.conf, use default VLAN bridge
- # and VLAN options
- if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" == "" ]; then
-
- export TRICIRCLE_ADD_DEFAULT_BRIDGES=True
-
- local vlan_option="bridge:$TRICIRCLE_DEFAULT_VLAN_RANGE"
- local ext_option="extern:$TRICIRCLE_DEFAULT_EXT_RANGE"
- local vlan_ranges=(network_vlan_ranges=$vlan_option,$ext_option)
- Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=$vlan_ranges
- Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS="vni_ranges=$TRICIRCLE_DEFAULT_VXLAN_RANGE"
- Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=$TRICIRCLE_DEFAULT_FLAT_NETWORKS"
-
- local vlan_mapping="bridge:$TRICIRCLE_DEFAULT_VLAN_BRIDGE"
- local ext_mapping="extern:$TRICIRCLE_DEFAULT_EXT_BRIDGE"
- OVS_BRIDGE_MAPPINGS=$vlan_mapping,$ext_mapping
-
- fi
- if [ "$TRICIRCLE_ENABLE_TRUNK" == "True" ]; then
- _neutron_service_plugin_class_add trunk
- fi
-}
-
-function add_default_bridges {
-
- if [ "$TRICIRCLE_ADD_DEFAULT_BRIDGES" == "True" ]; then
- _neutron_ovs_base_add_bridge $TRICIRCLE_DEFAULT_VLAN_BRIDGE
- _neutron_ovs_base_add_bridge $TRICIRCLE_DEFAULT_EXT_BRIDGE
- fi
-}
-
-function configure_tricircle_api {
-
- if is_service_enabled t-api ; then
- echo "Configuring Tricircle API"
-
- init_common_tricircle_conf $TRICIRCLE_API_CONF
-
- setup_colorized_logging $TRICIRCLE_API_CONF DEFAULT tenant_name
-
- if is_service_enabled keystone; then
-
- create_tricircle_cache_dir
-
- # Configure auth token middleware
- configure_auth_token_middleware $TRICIRCLE_API_CONF tricircle \
- $TRICIRCLE_AUTH_CACHE_DIR
-
- else
- iniset $TRICIRCLE_API_CONF DEFAULT auth_strategy noauth
- fi
-
- fi
-}
-
-# configure_tricircle_api_wsgi() - Set WSGI config files
-function configure_tricircle_api_wsgi {
- local tricircle_api_apache_conf
- local venv_path=""
- local tricircle_bin_dir=""
- local tricircle_ssl_listen="#"
-
- tricircle_bin_dir=$(get_python_exec_prefix)
- tricircle_api_apache_conf=$(apache_site_config_for tricircle-api)
-
- if is_ssl_enabled_service "tricircle-api"; then
- tricircle_ssl_listen=""
- tricircle_ssl="SSLEngine On"
- tricircle_certfile="SSLCertificateFile $TRICIRCLE_SSL_CERT"
- tricircle_keyfile="SSLCertificateKeyFile $TRICIRCLE_SSL_KEY"
- fi
-
- # configure venv bin if VENV is used
- if [[ ${USE_VENV} = True ]]; then
- venv_path="python-path=${PROJECT_VENV["tricircle"]}/lib/$(python_version)/site-packages"
- tricircle_bin_dir=${PROJECT_VENV["tricircle"]}/bin
- fi
-
- sudo cp $TRICIRCLE_API_APACHE_TEMPLATE $tricircle_api_apache_conf
- sudo sed -e "
- s|%TRICIRCLE_BIN%|$tricircle_bin_dir|g;
- s|%PUBLICPORT%|$TRICIRCLE_API_PORT|g;
- s|%APACHE_NAME%|$APACHE_NAME|g;
- s|%PUBLICWSGI%|$tricircle_bin_dir/tricircle-api-wsgi|g;
- s|%SSLENGINE%|$tricircle_ssl|g;
- s|%SSLCERTFILE%|$tricircle_certfile|g;
- s|%SSLKEYFILE%|$tricircle_keyfile|g;
- s|%SSLLISTEN%|$tricircle_ssl_listen|g;
- s|%USER%|$STACK_USER|g;
- s|%VIRTUALENV%|$venv_path|g
- s|%APIWORKERS%|$API_WORKERS|g
- " -i $tricircle_api_apache_conf
-}
-
-# start_tricircle_api_wsgi() - Start the API processes ahead of other things
-function start_tricircle_api_wsgi {
- enable_apache_site tricircle-api
- restart_apache_server
- tail_log tricircle-api /var/log/$APACHE_NAME/tricircle-api.log
-
- echo "Waiting for tricircle-api to start..."
- if ! wait_for_service $SERVICE_TIMEOUT $TRICIRCLE_API_PROTOCOL://$TRICIRCLE_API_HOST/tricircle; then
- die $LINENO "tricircle-api did not start"
- fi
-}
-
-# stop_tricircle_api_wsgi() - Disable the api service and stop it.
-function stop_tricircle_api_wsgi {
- disable_apache_site tricircle-api
- restart_apache_server
-}
-
-# cleanup_tricircle_api_wsgi() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_tricircle_api_wsgi {
- sudo rm -f $(apache_site_config_for tricircle-api)
-}
-
-function configure_tricircle_xjob {
- if is_service_enabled t-job ; then
- echo "Configuring Tricircle xjob"
-
- init_common_tricircle_conf $TRICIRCLE_XJOB_CONF
-
- setup_colorized_logging $TRICIRCLE_XJOB_CONF DEFAULT
- fi
-}
-
-function start_central_nova_server {
- local local_region=$1
- local central_region=$2
- local central_neutron_port=$3
-
- echo "Configuring Nova API for Tricircle to work with cell V2"
-
- iniset $NOVA_CONF neutron region_name $central_region
- iniset $NOVA_CONF neutron url "$Q_PROTOCOL://$SERVICE_HOST:$central_neutron_port"
-
- # Here we create new endpoints for central region instead of updating the
- # endpoints in local region because at the end of devstack, the script tries
- # to query the nova api in local region to check whether the nova-compute
- # service is running. If we update the endpoint region from local region to
- # central region, the check will fail and thus devstack fails
- nova_url=$(openstack endpoint list --service compute --interface public --region $local_region -c URL -f value)
- get_or_create_endpoint "compute" "$central_region" "$nova_url"
- nova_legacy_url=$(openstack endpoint list --service compute_legacy --interface public --region $local_region -c URL -f value)
- get_or_create_endpoint "compute_legacy" "$central_region" "$nova_legacy_url"
-
- central_image_endpoint_id=$(openstack endpoint list --service image --interface public --region $central_region -c ID -f value)
- if [[ -z "$central_image_endpoint_id" ]]; then
- glance_url=$(openstack endpoint list --service image --interface public --region $local_region -c URL -f value)
- get_or_create_endpoint "image" "$central_region" "$glance_url"
- fi
-
- place_endpoint_id=$(openstack endpoint list --service placement --interface public --region $local_region -c ID -f value)
- openstack endpoint set --region $central_region $place_endpoint_id
-
- restart_service devstack@n-api
- restart_apache_server
-}
-
-function start_central_neutron_server {
- local server_index=0
- local region_name=$1
- local q_port=$2
-
- get_or_create_service "neutron" "network" "Neutron Service"
- get_or_create_endpoint "network" \
- "$region_name" \
- "$Q_PROTOCOL://$SERVICE_HOST:$q_port/" \
- "$Q_PROTOCOL://$SERVICE_HOST:$q_port/" \
- "$Q_PROTOCOL://$SERVICE_HOST:$q_port/"
-
- # reconfigure central neutron server to use our own central plugin
- echo "Configuring central Neutron plugin for Tricircle"
-
- cp $NEUTRON_CONF $NEUTRON_CONF.$server_index
- iniset $NEUTRON_CONF.$server_index database connection `database_connection_url $Q_DB_NAME$server_index`
- iniset $NEUTRON_CONF.$server_index DEFAULT bind_port $q_port
- iniset $NEUTRON_CONF.$server_index DEFAULT core_plugin "tricircle.network.central_plugin.TricirclePlugin"
- iniset $NEUTRON_CONF.$server_index DEFAULT service_plugins ""
- iniset $NEUTRON_CONF.$server_index DEFAULT tricircle_db_connection `database_connection_url tricircle`
- iniset $NEUTRON_CONF.$server_index DEFAULT notify_nova_on_port_data_changes False
- iniset $NEUTRON_CONF.$server_index DEFAULT notify_nova_on_port_status_changes False
- iniset $NEUTRON_CONF.$server_index client admin_username admin
- iniset $NEUTRON_CONF.$server_index client admin_password $ADMIN_PASSWORD
- iniset $NEUTRON_CONF.$server_index client admin_tenant demo
- iniset $NEUTRON_CONF.$server_index client auto_refresh_endpoint True
- iniset $NEUTRON_CONF.$server_index client top_region_name $CENTRAL_REGION_NAME
-
- local service_plugins=''
- if [ "$TRICIRCLE_ENABLE_TRUNK" == "True" ]; then
- service_plugins+=",tricircle.network.central_trunk_plugin.TricircleTrunkPlugin"
- fi
- if [ "$TRICIRCLE_ENABLE_SFC" == "True" ]; then
- service_plugins+=",networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,tricircle.network.central_sfc_plugin.TricircleSfcPlugin"
- iniset $NEUTRON_CONF.$server_index sfc drivers tricircle_sfc
- iniset $NEUTRON_CONF.$server_index flowclassifier drivers tricircle_fc
- fi
-
- if [ "$TRICIRCLE_ENABLE_QOS" == "True" ]; then
- service_plugins+=",tricircle.network.central_qos_plugin.TricircleQosPlugin"
- fi
-
- if [ -n "$service_plugins" ]; then
- service_plugins=$(echo $service_plugins| sed 's/^,//')
- iniset $NEUTRON_CONF.$server_index DEFAULT service_plugins "$service_plugins"
- fi
-
- local type_drivers=''
- local tenant_network_types=''
- if [ "$Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS" != "" ]; then
- type_drivers+=,vxlan
- tenant_network_types+=,vxlan
- iniset $NEUTRON_CONF.$server_index tricircle vni_ranges `echo $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS | awk -F= '{print $2}'`
- fi
- if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
- type_drivers+=,vlan
- tenant_network_types+=,vlan
- iniset $NEUTRON_CONF.$server_index tricircle network_vlan_ranges `echo $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS | awk -F= '{print $2}'`
- fi
- if [ "Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" != "" ]; then
- type_drivers+=,flat
- tenant_network_types+=,flat
- iniset $NEUTRON_CONF.$server_index tricircle flat_networks `echo $Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS | awk -F= '{print $2}'`
- fi
- type_drivers+=,local
- tenant_network_types+=,local
- # remove the leading ","
- type_drivers=$(echo $type_drivers | sed 's/^,//')
- tenant_network_types=$(echo $tenant_network_types | sed 's/^,//')
-
- iniset $NEUTRON_CONF.$server_index tricircle type_drivers $type_drivers
- iniset $NEUTRON_CONF.$server_index tricircle tenant_network_types $tenant_network_types
- iniset $NEUTRON_CONF.$server_index tricircle enable_api_gateway False
-
- # reconfigure api-paste.ini in central neutron server
- local API_PASTE_INI=$NEUTRON_CONF_DIR/api-paste.ini
- sudo sed -e "
- /^keystone.*neutronapiapp/s/neutronapiapp/request_source &/;
- /app:neutronapiapp/i\[filter:request_source]\npaste.filter_factory = tricircle.common.request_source:RequestSource.factory\n
- " -i $API_PASTE_INI
-
- # default value of bridge_network_type is vxlan
-
- if [ "$TRICIRCLE_ENABLE_QOS" == "True" ]; then
- local p_exist=$(grep "^extension_drivers" /$Q_PLUGIN_CONF_FILE)
- if [[ $p_exist != "" ]];then
- if ! [[ $(echo $p_exist | grep "qos") ]];then
- sed -i "s/$p_exist/$p_exist,qos/g" /$Q_PLUGIN_CONF_FILE
- fi
- else
- sed -i "s/^\[ml2\]/\[ml2\]\nextension_drivers = qos/g" /$Q_PLUGIN_CONF_FILE
- fi
- fi
-
- recreate_database $Q_DB_NAME$server_index
- $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF.$server_index --config-file /$Q_PLUGIN_CONF_FILE upgrade head
-
- enable_service q-svc$server_index
- run_process q-svc$server_index "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF.$server_index --config-file /$Q_PLUGIN_CONF_FILE"
-}
-
-# install_tricircleclient() - Collect source and prepare
-function install_tricircleclient {
- if use_library_from_git "python-tricircleclient"; then
- git_clone_by_name "python-tricircleclient"
- setup_dev_lib "python-tricircleclient"
- else
- pip_install_gr tricircleclient
- fi
-}
-
-
-# if the plugin is enabled to run, the Tricircle is enabled by default,
-# so there is no need to check the variable Q_ENABLE_TRICIRCLE
-
-if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
- echo_summary "Tricircle pre-install"
-
- # init_local_neutron_variables before installation
- init_local_neutron_variables
-
-elif [[ "$1" == "stack" && "$2" == "install" ]]; then
- echo_summary "Installing Tricircle"
-elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-
- echo_summary "Configuring Tricircle"
- install_tricircleclient
- export NEUTRON_CREATE_INITIAL_NETWORKS=False
- sudo install -d -o $STACK_USER -m 755 $TRICIRCLE_CONF_DIR
-
- if [[ "$TRICIRCLE_START_SERVICES" == "True" ]]; then
- enable_service t-api t-job
- configure_tricircle_api
- configure_tricircle_xjob
-
- if [[ "$TRICIRCLE_DEPLOY_WITH_WSGI" == "True" ]]; then
- configure_tricircle_api_wsgi
- fi
- fi
-
- echo export PYTHONPATH=\$PYTHONPATH:$TRICIRCLE_DIR >> $RC_DIR/.localrc.auto
-
- setup_package $TRICIRCLE_DIR -e
-
- if [[ "$TRICIRCLE_START_SERVICES" == "True" ]]; then
- recreate_database tricircle
- tricircle-db-manage --config-file="$TRICIRCLE_API_CONF" db_sync
-
- if is_service_enabled q-svc ; then
- start_central_neutron_server $CENTRAL_REGION_NAME $TRICIRCLE_NEUTRON_PORT
- fi
- fi
-
- # update the local neutron.conf after the central Neutron has started
- init_local_neutron_conf
-
- if [[ "$TRICIRCLE_DEPLOY_WITH_CELL" == "True" ]]; then
- # update the local nova.conf
- init_local_nova_conf
- else
- iniset $NOVA_CONF glance region_name $REGION_NAME
- fi
-
- # add default bridges br-vlan, br-ext if needed, ovs-vsctl
- # is just being installed before this stage
- add_default_bridges
-
-elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
- echo_summary "Initializing Tricircle Service"
-
- if [[ ${USE_VENV} = True ]]; then
- PROJECT_VENV["tricircle"]=${TRICIRCLE_DIR}.venv
- TRICIRCLE_BIN_DIR=${PROJECT_VENV["tricircle"]}/bin
- else
- TRICIRCLE_BIN_DIR=$(get_python_exec_prefix)
- fi
-
- if is_service_enabled t-api; then
-
- create_tricircle_accounts
-
- if [[ "$TRICIRCLE_DEPLOY_WITH_WSGI" == "True" ]]; then
- start_tricircle_api_wsgi
- else
- run_process t-api "$TRICIRCLE_BIN_DIR/tricircle-api --config-file $TRICIRCLE_API_CONF"
- fi
-
- if [[ "$TRICIRCLE_DEPLOY_WITH_CELL" == "True" && "$TRICIRCLE_START_SERVICES" == "True" ]]; then
- start_central_nova_server $REGION_NAME $CENTRAL_REGION_NAME $TRICIRCLE_NEUTRON_PORT
- fi
- fi
-
- if is_service_enabled t-job; then
- run_process t-job "$TRICIRCLE_BIN_DIR/tricircle-xjob --config-file $TRICIRCLE_XJOB_CONF"
- fi
-fi
-
-if [[ "$1" == "unstack" ]]; then
-
- if is_service_enabled t-api; then
- if [[ "$TRICIRCLE_DEPLOY_WITH_WSGI" == "True" ]]; then
- stop_tricircle_api_wsgi
- cleanup_tricircle_api_wsgi
- else
- stop_process t-api
- fi
- fi
-
- if is_service_enabled t-job; then
- stop_process t-job
- fi
-
- if is_service_enabled q-svc0; then
- stop_process q-svc0
- fi
-fi
diff --git a/devstack/settings b/devstack/settings
deleted file mode 100644
index 3f6a5339..00000000
--- a/devstack/settings
+++ /dev/null
@@ -1,50 +0,0 @@
-# Git information
-TRICIRCLE_REPO=${TRICIRCLE_REPO:-https://opendev.org/openstack/tricircle/}
-TRICIRCLE_DIR=$DEST/tricircle
-TRICIRCLE_BRANCH=${TRICIRCLE_BRANCH:-master}
-
-# common variables
-CENTRAL_REGION_NAME=${CENTRAL_REGION_NAME:-CentralRegion}
-TRICIRCLE_NEUTRON_PORT=${TRICIRCLE_NEUTRON_PORT:-20001}
-TRICIRCLE_START_SERVICES=${TRICIRCLE_START_SERVICES:-True}
-TRICIRCLE_DEPLOY_WITH_WSGI=${TRICIRCLE_DEPLOY_WITH_WSGI:-True}
-TRICIRCLE_DEPLOY_WITH_CELL=${TRICIRCLE_DEPLOY_WITH_CELL:-False}
-
-# extensions working with tricircle
-TRICIRCLE_ENABLE_TRUNK=${TRICIRCLE_ENABLE_TRUNK:-False}
-TRICIRCLE_ENABLE_SFC=${TRICIRCLE_ENABLE_SFC:-False}
-TRICIRCLE_ENABLE_QOS=${TRICIRCLE_ENABLE_QOS:-False}
-
-# these default settings are used for devstack based gate/check jobs
-TRICIRCLE_DEFAULT_VLAN_BRIDGE=${TRICIRCLE_DEFAULT_VLAN_BRIDGE:-br-vlan}
-TRICIRCLE_DEFAULT_VLAN_RANGE=${TRICIRCLE_DEFAULT_VLAN_RANGE:-101:150}
-TRICIRCLE_DEFAULT_EXT_BRIDGE=${TRICIRCLE_DEFAULT_EXT_BRIDGE:-br-ext}
-TRICIRCLE_DEFAULT_EXT_RANGE=${TRICIRCLE_DEFAULT_EXT_RANGE:-151:200}
-TRICIRCLE_ADD_DEFAULT_BRIDGES=${TRICIRCLE_ADD_DEFAULT_BRIDGES:-False}
-TRICIRCLE_DEFAULT_VXLAN_RANGE=${TRICIRCLE_DEFAULT_VXLAN_RANGE:-1001:2000}
-TRICIRCLE_DEFAULT_FLAT_NETWORKS=${TRICIRCLE_DEFAULT_FLAT_NETWORKS:-bridge,extern}
-
-TRICIRCLE_CONF_DIR=${TRICIRCLE_CONF_DIR:-/etc/tricircle}
-TRICIRCLE_STATE_PATH=${TRICIRCLE_STATE_PATH:-/var/lib/tricircle}
-
-# tricircle rest admin api
-TRICIRCLE_API=$TRICIRCLE_DIR/tricircle/cmd/api.py
-TRICIRCLE_API_CONF=$TRICIRCLE_CONF_DIR/api.conf
-TRICIRCLE_API_APACHE_TEMPLATE=$TRICIRCLE_DIR/devstack/apache-tricircle-api.template
-
-TRICIRCLE_API_LISTEN_ADDRESS=${TRICIRCLE_API_LISTEN_ADDRESS:-0.0.0.0}
-TRICIRCLE_API_HOST=${TRICIRCLE_API_HOST:-$SERVICE_HOST}
-TRICIRCLE_API_PORT=${TRICIRCLE_API_PORT:-19999}
-TRICIRCLE_API_PROTOCOL=${TRICIRCLE_API_PROTOCOL:-$SERVICE_PROTOCOL}
-
-# tricircle xjob
-TRICIRCLE_XJOB_CONF=$TRICIRCLE_CONF_DIR/xjob.conf
-
-TRICIRCLE_AUTH_CACHE_DIR=${TRICIRCLE_AUTH_CACHE_DIR:-/var/cache/tricircle}
-
-export PYTHONPATH=$PYTHONPATH:$TRICIRCLE_DIR
-
-# Set up default directories for client
-GITREPO["python-tricircleclient"]=${TRICIRCLE_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-tricircleclient.git}
-GITBRANCH["python-tricircleclient"]=${TRICIRCLE_PYTHONCLIENT_BRANCH:-master}
-GITDIR["python-tricircleclient"]=$DEST/python-tricircleclient
diff --git a/devstack/verify_cross_pod_install.sh b/devstack/verify_cross_pod_install.sh
deleted file mode 100755
index ba8f5fac..00000000
--- a/devstack/verify_cross_pod_install.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/bin/bash
-#
-# Script name: verify_cross_pod_install.sh
-# This script verifies the installation of Tricircle for cross-pod L3 networking.
-# It verifies both east-west and north-south networking.
-#
-# In this script, there are some parameters you need to consider before running it.
-#
-# 1. Whether the post URL is 127.0.0.1 or something else.
-# 2. This script creates two subnets, 10.0.1.0/24 and 10.0.2.0/24. Change these if needed.
-# 3. This script creates the external subnet ext-net 10.50.11.0/26. Change it according to
-#    your own environment.
-# 4. The floating ip is attached to the VM with ip 10.0.2.3 created by this script.
-#    Modify it according to your own environment.
-#
-# Change the parameters according to your own environment.
-# Finally, execute "verify_cross_pod_install.sh" in the Node1.
-#
-# Author: Pengfei Shi
-#
-
-set -o xtrace
-
-TEST_DIR=$(pwd)
-echo "Test work directory is $TEST_DIR."
-
-if [ ! -r admin-openrc.sh ];then
- set -o xtrace
- echo "Your work directory doesn't have admin-openrc.sh,"
- echo "Please check whether you are in tricircle/devstack/ or not and run this script."
-exit 1
-fi
-
-echo "Beginning the verify testing..."
-
-echo "Import client environment variables:"
-source $TEST_DIR/admin-openrc.sh
-
-echo "******************************"
-echo "* Verify Endpoint *"
-echo "******************************"
-
-echo "List openstack endpoint:"
-openstack --debug endpoint list
-
-token=$(openstack token issue | awk 'NR==5 {print $4}')
-
-echo $token
-
-openstack multiregion networking pod create --region-name RegionOne
-
-openstack multiregion networking pod create --region-name Pod1 --availability-zone az1
-
-openstack multiregion networking pod create --region-name Pod2 --availability-zone az2
-
-echo "******************************"
-echo "* Verify Nova *"
-echo "******************************"
-
-echo "Show nova aggregate:"
-nova aggregate-list
-
-neutron net-create --availability-zone-hint az1 net1
-
-neutron net-create --availability-zone-hint az2 net2
-
-echo "Create external network ext-net:"
-neutron net-create --router:external --provider:network_type vlan --provider:physical_network extern --availability-zone-hint Pod2 ext-net
-
-echo "Create test flavor:"
-nova flavor-create test 1 1024 10 1
-
-echo "******************************"
-echo "* Verify Neutron *"
-echo "******************************"
-
-echo "Create external subnet with floating ips:"
-neutron subnet-create --name ext-subnet --disable-dhcp ext-net 10.50.11.0/26 --allocation-pool start=10.50.11.30,end=10.50.11.50 --gateway 10.50.11.1
-
-echo "Create router for subnets:"
-neutron router-create router
-
-echo "Set router external gateway:"
-neutron router-gateway-set router ext-net
-
-echo "Create net1 in Node1:"
-neutron subnet-create net1 10.0.1.0/24
-
-echo "Create net2 in Node2:"
-neutron subnet-create net2 10.0.2.0/24
-
-net1_id=$(neutron net-list |grep net1 | awk '{print $2}')
-net2_id=$(neutron net-list |grep net2 | awk '{print $2}')
-image_id=$(glance image-list |awk 'NR==4 {print $2}')
-
-echo "Boot vm1 in az1:"
-nova boot --flavor 1 --image $image_id --nic net-id=$net1_id --availability-zone az1 vm1
-echo "Boot vm2 in az2:"
-nova boot --flavor 1 --image $image_id --nic net-id=$net2_id --availability-zone az2 vm2
-
-subnet1_id=$(neutron net-list |grep net1 |awk '{print $6}')
-subnet2_id=$(neutron net-list |grep net2 |awk '{print $6}')
-
-echo "Add interface of subnet1:"
-neutron router-interface-add router $subnet1_id
-echo "Add interface of subnet2:"
-neutron router-interface-add router $subnet2_id
-
-echo "******************************"
-echo "* Verify VNC connection *"
-echo "******************************"
-
-echo "Get the VNC url of vm1:"
-nova --os-region-name Pod1 get-vnc-console vm1 novnc
-echo "Get the VNC url of vm2:"
-nova --os-region-name Pod2 get-vnc-console vm2 novnc
-
-echo "**************************************"
-echo "* Verify External network *"
-echo "**************************************"
-
-echo "Create floating ip:"
-neutron floatingip-create ext-net
-
-echo "Show floating ips:"
-neutron floatingip-list
-
-echo "Show neutron ports:"
-neutron port-list
-
-floatingip_id=$(neutron floatingip-list | awk 'NR==4 {print $2}')
-port_id=$(neutron port-list |grep 10.0.2.3 |awk '{print $2}')
-
-echo "Associate floating ip:"
-neutron floatingip-associate $floatingip_id $port_id
diff --git a/devstack/verify_top_install.sh b/devstack/verify_top_install.sh
deleted file mode 100755
index d638b535..00000000
--- a/devstack/verify_top_install.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-#
-# Script name: verify_top_install.sh
-# This script is to verify the installation of Tricircle in Top OpenStack.
-#
-# In this script, there are some parameters you need to consider before running it.
-#
-# 1. Whether the post URL is 127.0.0.1 or something else.
-# 2. This script creates a network net1 with subnet 10.0.0.0/24. Change these if needed.
-#
-# Change the parameters according to your own environment.
-# Execute "verify_top_install.sh" in the top OpenStack
-#
-# Author: Pengfei Shi
-#
-
-set -o xtrace
-
-TEST_DIR=$(pwd)
-echo "Test work directory is $TEST_DIR."
-
-if [ ! -r admin-openrc.sh ];then
- set -o xtrace
- echo "Your work directory doesn't have admin-openrc.sh,"
- echo "Please check whether you are in tricircle/devstack/ or not and run this script."
-exit 1
-fi
-
-echo "Beginning the verify testing..."
-
-echo "Import client environment variables:"
-source $TEST_DIR/admin-openrc.sh
-
-echo "******************************"
-echo "* Verify Endpoint *"
-echo "******************************"
-
-echo "List openstack endpoint:"
-
-openstack --debug endpoint list
-
-token=$(openstack token issue | awk 'NR==5 {print $4}')
-
-echo $token
-
-openstack multiregion networking pod create --region-name RegionOne
-
-openstack multiregion networking pod create --region-name Pod1 --availability-zone az1
-
-echo "******************************"
-echo "* Verify Nova *"
-echo "******************************"
-
-echo "Show nova aggregate:"
-nova --debug aggregate-list
-
-echo "Create test flavor:"
-nova --debug flavor-create test 1 1024 10 1
-
-echo "******************************"
-echo "* Verify Neutron *"
-echo "******************************"
-
-echo "Create net1:"
-neutron --debug net-create net1
-
-echo "Create subnet of net1:"
-neutron --debug subnet-create net1 10.0.0.0/24
-
-image_id=$(glance image-list |awk 'NR==4 {print $2}')
-net_id=$(neutron net-list|grep net1 |awk '{print $2}')
-
-echo "Boot vm1 in az1:"
-nova --debug boot --flavor 1 --image $image_id --nic net-id=$net_id --availability-zone az1 vm1
-
-echo "******************************"
-echo "* Verify Cinder *"
-echo "******************************"
-
-echo "Create a volume in az1:"
-cinder --debug create --availability-zone=az1 1
-
-echo "Show volume list:"
-cinder --debug list
-volume_id=$(cinder list |grep lvmdriver-1 | awk '{print $2}')
-
-echo "Show detailed volume info:"
-cinder --debug show $volume_id
-
-echo "Delete test volume:"
-cinder --debug delete $volume_id
-cinder --debug list
diff --git a/doc/source/admin/api_v1.rst b/doc/source/admin/api_v1.rst
deleted file mode 100644
index a892f8b8..00000000
--- a/doc/source/admin/api_v1.rst
+++ /dev/null
@@ -1,1417 +0,0 @@
-=======================
-The Tricircle Admin API
-=======================
-This Admin API documentation describes how to interact with the Tricircle
-service over HTTP using Representational State Transfer (REST).
-
-API Versions
-============
-In order to bring new features to users over time, versioning is supported
-by the Tricircle. The latest version of the Tricircle is v1.0.
-
-The Version APIs work the same as other APIs as they still require
-authentication.
-
-+------------------+----------------+-----+-----------------------------------------------+
-|**GET** |/ | |List All Major versions |
-| | | | |
-|**GET** |/{api_version} | |Show Details of Specific API Version |
-+------------------+----------------+-----+-----------------------------------------------+
-
-Service URLs
-============
-All API calls in the rest of this document require authentication with
-the OpenStack Identity service. They also require a base service url that can
-be obtained from the OpenStack Tricircle endpoint. This is the root url to
-which every call below is appended to build a full path.
-
-For instance, if the Tricircle service url is ``http://127.0.0.1/tricircle/v1.0``
-then the full API call for ``/pods`` is ``http://127.0.0.1/tricircle/v1.0/pods``.
-
-As such, for the rest of this document we will leave out the root url, so that
-``GET /pods`` really means ``GET {tricircle_service_url}/pods``.
-
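-For illustration only (the host and token below are placeholders), a request
-built this way could look like::
-
-    curl -s -H "X-Auth-Token: $TOKEN" http://127.0.0.1/tricircle/v1.0/pods
-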
-Pod
-===
-A pod represents a region in Keystone. When operating on a pod, the Tricircle
-decides the correct endpoints to send requests to based on the region of the pod.
-Considering the architecture of the Tricircle, we have two kinds of pods: pod
-for central Neutron and pod for local Neutron.
-
-
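-As an illustrative example (the region and availability zone names here are
-placeholders), pods are usually registered through the Tricircle OpenStack
-client plugin, as in the verify scripts elsewhere in this repository::
-
-    openstack multiregion networking pod create --region-name RegionOne
-    openstack multiregion networking pod create --region-name Pod1 --availability-zone az1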
-+------------------+---------+-----------------------------------+------------------------+
-|**GET** |/pods | |Retrieve Pod List |
-+------------------+---------+-----------------------------------+------------------------+
-
-This fetches all the pods, including pod for central Neutron and pod(s) for
-local Neutron.
-
-Normal Response Code: 200
-
-**Response**
-
-Pods contains a list of pod instances whose attributes are described in the
-following table.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|pod_id |body | string |pod_id is a uuid attribute of the pod object. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|region_name|body | string |region_name is specified by user but must match the |
-| | | |region name registered in Keystone. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|az_name |body | string |When az_name is empty, it means this is a pod for |
-| | | |central Neutron. If az_name is not empty, it means |
-| | | |the pod will belong to this availability zone. |
-| | | |Multiple pods with the same az_name means that these |
-| | | |pods are under the same availability zone. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|pod_az_name|body | string |pod_az_name is the az name used in the pod for local |
-| | | |Neutron when creating network, router objects. It |
-| | | |could be empty. If it's empty, then no az parameter |
-| | | |will be added to the request forwarded to the pod for|
-| | | |local Neutron. If the pod_az_name is different from |
-| | | |az_name, then the az parameter will be replaced with |
-| | | |the pod_az_name when the request is forwarded to |
-| | | |relevant pod for local Neutron. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|dc_name |body | string |dc_name is the name of the data center where the pod |
-| | | |is located. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response Example**
-
-This is an example of response information for ``GET /pods``.
-
-::
-
- {
- "pods": [
- {
- "dc_name": "",
- "pod_az_name": "",
- "pod_id": "1a51bee7-10f0-47e8-bb4a-70f51394069c",
- "az_name": "",
- "region_name": "RegionOne"
- },
- {
- "dc_name": "",
- "pod_az_name": "",
- "pod_id": "22cca6ad-b791-4805-af14-923c5224fcd2",
- "az_name": "az2",
- "region_name": "Pod2"
- },
- {
- "dc_name": "",
- "pod_az_name": "",
- "pod_id": "3c22e5d4-5fed-45ed-a1e9-d532668cedc2",
- "az_name": "az1",
- "region_name": "Pod1"
- }
- ]
- }
-
-+------------------+-------------------+-----------------------+-------------------------------+
-|**GET** |/pods/{pod_id} | |Retrieve a Single Pod |
-+------------------+-------------------+-----------------------+-------------------------------+
-
-This fetches a pod for central Neutron or a pod for local Neutron.
-
-Normal Response Code: 200
-
-**Request**
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|pod_id |path | string |pod_id is a uuid attribute of the pod object. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-There are two kinds of pods: pods for central Neutron and pods for local
-Neutron. az_name is the attribute that distinguishes them: a pod with an
-empty az_name is for central Neutron, while a pod with az_name specified is
-for local Neutron. All pod attributes are described in the following table.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|pod_id |body | string |pod_id is a uuid attribute of the pod object. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|region_name|body | string |region_name is specified by user but must match the |
-| | | |region name registered in Keystone. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|az_name |body | string |When az_name is empty, it means this is a pod for |
-| | | |central Neutron. If az_name is not empty, it means |
-| | | |the pod will belong to this availability zone. |
-| | | |Multiple pods with the same az_name means that these |
-| | | |pods are under the same availability zone. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|pod_az_name|body | string |pod_az_name is the az name used in the pod for local |
-| | | |Neutron when creating network, router objects. It |
-| | | |could be empty. If it's empty, then no az parameter |
-| | | |will be added to the request forwarded to the pod for|
-| | | |local Neutron. If the pod_az_name is different from |
-| | | |az_name, then the az parameter will be replaced with |
-| | | |the pod_az_name when the request is forwarded to |
-| | | |relevant pod for local Neutron. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|dc_name |body | string |dc_name is the name of the data center where the pod |
-| | | |is located. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response Example**
-
-This is an example of response information for ``GET /pods/{pod_id}``.
-
-::
-
- {
- "pod": {
- "dc_name": "",
- "pod_az_name": "",
- "pod_id": "3c22e5d4-5fed-45ed-a1e9-d532668cedc2",
- "az_name": "az1",
- "region_name": "Pod1"
- }
- }
-
-+---------------+-------+------------------------------------+--------------------+
-|**POST** |/pods | |Create a Pod |
-+---------------+-------+------------------------------------+--------------------+
-
-This creates a pod for central Neutron or a pod for local Neutron.
-
-Normal Response Code: 200
-
-**Request**
-
-Some essential attributes of the pod instance are required and described
-in the following table.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|region_name|body | string |region_name is specified by user but must match the |
-| | | |region name registered in Keystone. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|az_name |body | string |When az_name is empty, it means this is a pod for |
-| | | |central Neutron. If az_name is not empty, it means |
-| | | |the pod will belong to this availability zone. |
-| | | |Multiple pods with the same az_name means that these |
-| | | |pods are under the same availability zone. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|pod_az_name|body | string |pod_az_name is the az name used in the pod for local |
-| | | |Neutron when creating network, router objects. It |
-| | | |could be empty. If it's empty, then no az parameter |
-| | | |will be added to the request forwarded to the pod for|
-| | | |local Neutron. If the pod_az_name is different from |
-| | | |az_name, then the az parameter will be replaced with |
-| | | |the pod_az_name when the request is forwarded to |
-| | | |relevant pod for local Neutron. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|dc_name |body | string |dc_name is the name of the data center where the pod |
-| | | |is located. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-An id is assigned to a pod instance when it's created. All of its attributes
-are listed below.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|pod_id |body | string |pod_id is automatically generated when creating a pod|
-+-----------+-------+---------------+-----------------------------------------------------+
-|region_name|body | string |region_name is specified by user but must match the |
-| | | |region name registered in Keystone. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|az_name |body | string |When az_name is empty, it means this is a pod for |
-| | | |central Neutron. If az_name is not empty, it means |
-| | | |the pod will belong to this availability zone. |
-| | | |Multiple pods with the same az_name means that these |
-| | | |pods are under the same availability zone. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|pod_az_name|body | string |pod_az_name is the az name used in the pod for local |
-| | | |Neutron when creating network, router objects. It |
-| | | |could be empty. If it's empty, then no az parameter |
-| | | |will be added to the request forwarded to the pod for|
-| | | |local Neutron. If the pod_az_name is different from |
-| | | |az_name, then the az parameter will be replaced with |
-| | | |the pod_az_name when the request is forwarded to |
-| | | |relevant pod for local Neutron. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|dc_name |body | string |dc_name is the name of the data center where the pod |
-| | | |is located. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Request Example**
-
-This is an example of request information for ``POST /pods``.
-
-::
-
- {
- "pod": {
- "region_name": "Pod3",
- "az_name": "az1",
- "pod_az_name": "az1",
- "dc_name": "data center 1"
- }
- }
-
-**Response Example**
-
-This is an example of response information for ``POST /pods``.
-
-::
-
- {
- "pod": {
- "dc_name": "data center 1",
- "pod_az_name": "az1",
- "pod_id": "e02e03b8-a94f-4eb1-991e-a8a271cc2313",
- "az_name": "az1",
- "region_name": "Pod3"
- }
- }
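-
-A hedged sketch of issuing this request with curl is shown below; the token
-and service URL are placeholders::
-
-    # create a pod named Pod3 in availability zone az1 (illustrative only)
-    curl -s -X POST -H "X-Auth-Token: $TOKEN" \
-        -H "Content-Type: application/json" \
-        -d '{"pod": {"region_name": "Pod3", "az_name": "az1",
-                     "pod_az_name": "az1", "dc_name": "data center 1"}}' \
-        http://127.0.0.1/tricircle/v1.0/pods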
-
-
-+------------------+-----------------+------------------------+-------------------------+
-|**DELETE** |/pods/{pod_id} | |Delete a Pod |
-+------------------+-----------------+------------------------+-------------------------+
-
-This deletes a pod for central Neutron or a pod for local Neutron from its
-availability zone.
-
-Normal Response Code: 200
-
-**Request**
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|pod_id |path | string |pod_id is a uuid attribute of the pod object. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-There is no response body, but we can list all the pods to verify whether the
-specific pod has been deleted.
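-
-As a rough illustration (the token, service URL and pod id are placeholders),
-the deletion and the follow-up check could look like this::
-
-    # delete the pod, then list the remaining pods to confirm (illustrative)
-    curl -s -X DELETE -H "X-Auth-Token: $TOKEN" \
-        http://127.0.0.1/tricircle/v1.0/pods/$POD_ID
-    curl -s -H "X-Auth-Token: $TOKEN" \
-        http://127.0.0.1/tricircle/v1.0/pods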
-
-Resource Routing
-================
-The Tricircle is responsible for resource (for example, network, subnet,
-port and router) creation in both local Neutron and central Neutron.
-
-In order to dispatch a resource operation request to the proper local
-Neutron, we need a resource routing table, which maps a resource in central
-Neutron to the local Neutron where it is located.
-
-When a user issues a resource update, query or delete request, central
-Neutron captures the request, extracts the resource id from it, and then
-dispatches the request to the target local Neutron based on the routing
-table.
-
-
-+------------------+-------------+--------------------+---------------------------------+
-|**GET** |/routings | |Retrieve All Resource Routings |
-+------------------+-------------+--------------------+---------------------------------+
-
-This fetches all the resource routing entries by default, but filter(s) can
-be applied to only show specific routing entries. The filtering condition(s)
-are appended to the service URL as a query string. For example, the default
-service URL is ``GET /routings``; with filtering applied it becomes
-``GET /routings?attribute=attribute_value``. One or multiple conditions are
-supported. Note that any project ID filter in the URL query string is
-ignored; only the project ID the user is authorized for is used as the
-filter.
-
-All returned items are sorted by ID in descending order. Because the ID is a
-big integer that grows as entries are added, a greater value means the entry
-was added to the resource routing table more recently, so the most recently
-created items are shown first.
-
-To reduce the load on the service, the list operation returns at most a
-limited number of items at a time through pagination. To navigate the
-collection, the parameters limit and marker can be set in the URI, for
-example: ``GET /v1.0/routings?limit=2000&marker=500``. The marker parameter
-is the ID of the last item in the previous list; if a marker is specified,
-the results start after that item. A marker with an invalid ID results in a
-bad request. The limit parameter sets the page size. If the client requests
-a limit beyond the maximum limit in the configuration, that maximum limit is
-used instead. For each list request, if there are more items to show beyond
-those already on the page, a link to the next page is returned; using this
-link and the same filtering conditions, the following items can be
-retrieved. If the total number of items is less than the limit, no next-page
-link appears in the response. If the user lists the routings without
-specifying a limit, the maximum limit value is used to control the page size
-and protect the service.
-
-Both the limit and marker parameters are optional; they can be specified
-together or separately. Pagination and filtering can be combined in routing
-list operations.
-
-Normal Response Code: 200
-
-**Response**
-
-The resource routing set contains a list of resource routing entries whose
-attributes are described in the following table.
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|id |body | biginteger |id is the unique identification of the resource |
-| | | |routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|top_id |body | string |top_id denotes the resource id on central Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|bottom_id |body | string |bottom_id denotes the resource id on local Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|pod_id |body | string |pod_id is the uuid of one pod(i.e., one region). |
-+-------------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|resource_type|body | string |resource_type denotes one of the available resource |
-| | | |types, including network, subnet, port, router and |
-| | | |security_group. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|created_at |body | timestamp |created time of the resource routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|updated_at |body | timestamp |updated time of the resource routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Response Example**
-
-This is an example of response information for ``GET /routings``. By
-default, all the resource routing entries are returned. Since the client
-provides no page size limit, the default maximum pagination limit is used.
-As shown, the total number of items is less than the limit, so there is no
-next-page link in the response.
-
-::
-
- {
- "routings": [
- {
-                "updated_at": "2016-09-25 03:16:33",
- "created_at": "2016-09-25 03:16:32",
- "top_id": "4487087e-34c7-40d8-8553-3a4206d0591b",
- "id": 3,
- "bottom_id": "834ef10b-a96f-460c-b448-b39b9f3e6b52",
- "project_id": "d937fe2ad1064a37968885a58808f7a3",
- "pod_id": "444a8ce3-9fb6-4a0f-b948-6b9d31d6b202",
- "resource_type": "security_group"
- },
- {
- "updated_at": "2016-09-25 03:16:31",
- "created_at": "2016-09-25 03:16:30",
- "top_id": "a4d786fd-0511-4fac-be45-8b9ee447324b",
- "id": 2,
- "bottom_id": "7a05748c-5d1a-485e-bd5c-e52bc39b5414",
- "project_id": "d937fe2ad1064a37968885a58808f7a3",
- "pod_id": "444a8ce3-9fb6-4a0f-b948-6b9d31d6b202",
- "resource_type": "network"
- }
- ]
- }
-
-This is an example of response information for ``GET /v1.0/routings?limit=2``.
-To retrieve the first page, only the limit value needs to be provided.
-
-::
-
- {
- "routings": [
- {
- "updated_at": null,
- "created_at": "2017-06-11 12:52:46",
- "top_id": "e091d3ad-a5a9-41a1-a948-54e2a1583b8d",
- "id": 8,
- "bottom_id": "e091d3ad-a5a9-41a1-a948-54e2a1583b8d",
- "project_id": "3b2a11d52ec44d7bb8c53a18fd5105d6",
- "pod_id": "07ce2e57-fdba-4a6a-a7ce-44528108380d",
- "resource_type": "security_group"
- },
- {
- "updated_at": null,
- "created_at": "2017-06-11 12:52:46",
- "top_id": "90806f6a-2c79-4cdf-8db4-de1f3e46fe1f",
- "id": 6,
- "bottom_id": "90806f6a-2c79-4cdf-8db4-de1f3e46fe1f",
- "project_id": "3b2a11d52ec44d7bb8c53a18fd5105d6",
- "pod_id": "07ce2e57-fdba-4a6a-a7ce-44528108380d",
- "resource_type": "network"
- }
- ],
- "routings_links": [
- {
- "href": "/v1.0/routings?limit=2&marker=6",
- "rel": "next"
- }
- ]
- }
-
-This is an example of response information for ``GET /v1.0/routings?limit=2&marker=6``.
-With the help of the next-page link, we can retrieve the following items.
-
-::
-
- {
- "routings": [
- {
- "updated_at": null,
- "created_at": "2017-06-11 12:52:46",
- "top_id": "724b5ae0-d4eb-4165-a2cc-e6428719cab3",
- "id": 5,
- "bottom_id": "724b5ae0-d4eb-4165-a2cc-e6428719cab3",
- "project_id": "3b2a11d52ec44d7bb8c53a18fd5105d6",
- "pod_id": "07ce2e57-fdba-4a6a-a7ce-44528108380d",
- "resource_type": "subnet"
- },
- {
- "updated_at": null,
- "created_at": "2017-06-11 12:50:01",
- "top_id": "64b886de-62ca-4713-9461-bd77c79e2282",
- "id": 4,
- "bottom_id": null,
- "project_id": "3b2a11d52ec44d7bb8c53a18fd5105d6",
- "pod_id": "8ae8c849-ce30-43bb-8346-d4da6678fc9c",
- "resource_type": "network"
- }
- ],
- "routings_links": [
- {
- "href": "/v1.0/routings?limit=2&marker=4",
- "rel": "next"
- }
- ]
- }
-
-This is an example of response information for ``GET /v1.0/routings?limit=2&resource_type=port``.
-When a filter and a limit are applied to the list operation together, both
-the kind and the number of routing entries returned per page are restricted.
-
-::
-
- {
- "routings": [
- {
- "updated_at": "2017-06-11 12:49:41",
- "created_at": "2017-06-11 12:49:41",
- "top_id": "interface_RegionOne_724b5ae0-d4eb-4165-a2cc-e6428719cab3",
- "id": 3,
- "bottom_id": "73845c04-a709-4b0d-a70e-71923c4c5bfc",
- "project_id": "3b2a11d52ec44d7bb8c53a18fd5105d6",
- "pod_id": "07ce2e57-fdba-4a6a-a7ce-44528108380d",
- "resource_type": "port"
- },
- {
- "updated_at": "2017-06-11 12:49:03",
- "created_at": "2017-06-11 12:49:03",
- "top_id": "dhcp_port_724b5ae0-d4eb-4165-a2cc-e6428719cab3",
- "id": 2,
- "bottom_id": "4c6f2e86-7455-4fe5-8cbc-7c3d6bc7625f",
- "project_id": "3b2a11d52ec44d7bb8c53a18fd5105d6",
- "pod_id": "6073e33e-4d4f-45dc-961a-d7d3b4a8e7f7",
- "resource_type": "port"
- }
- ],
- "routings_links": [
- {
- "href": "/v1.0/routings?limit=2&marker=2",
- "rel": "next"
- }
- ]
- }
-
-+------------------+---------------+---------------+-------------------------------------+
-|**GET** |/routings/{id} | |Retrieve a Single Resource Routing |
-+------------------+---------------+---------------+-------------------------------------+
-
-This fetches a single resource routing entry.
-
-Normal Response Code: 200
-
-**Request**
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|id |path | biginteger |id is the unique identification of the resource |
-| | | |routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-When the Tricircle creates a resource in central Neutron, that resource is
-mapped to the corresponding resource in local Neutron. A resource routing
-entry records this mapping relationship. All of its attributes are described
-in the following table.
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|id |body | biginteger |id is the unique identification of the resource |
-| | | |routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|top_id |body | string |top_id denotes the resource id on central Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|bottom_id |body | string |bottom_id denotes the resource id on local Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|pod_id |body | string |pod_id is the uuid of one pod(i.e., one region). |
-+-------------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|resource_type|body | string |resource_type denotes one of the available resource |
-| | | |types, including network, subnet, port, router and |
-| | | |security_group. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|created_at |body | timestamp |created time of the resource routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|updated_at |body | timestamp |updated time of the resource routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Response Example**
-
-This is an example of response information for ``GET /routings/{id}``.
-
-::
-
- {
- "routing": {
- "updated_at": null,
- "created_at": "2016-10-25 13:10:26",
- "top_id": "09fd7cc9-d169-4b5a-88e8-436ecf4d0bfe",
- "id": 43,
- "bottom_id": "dc80f9de-abb7-4ec6-ab7a-94f8fd1e20ef",
- "project_id": "d937fe2ad1064a37968885a58808f7a3",
- "pod_id": "444a8ce3-9fb6-4a0f-b948-6b9d31d6b202",
- "resource_type": "subnet"
- }
- }
-
-+------------------+---------------+-----------------+-----------------------------------+
-|**POST** |/routings | |Create a Resource Routing |
-+------------------+---------------+-----------------+-----------------------------------+
-
-This creates a resource routing entry, which maps a resource created in
-central Neutron to the corresponding resource in local Neutron.
-
-Normal Response Code: 200
-
-**Request**
-
-Some essential fields of the resource routing entry are required and described
-in the following table.
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|top_id |body | string |top_id denotes the resource id on central Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|bottom_id |body | string |bottom_id denotes the resource id on local Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|pod_id |body | string |pod_id is the uuid of one pod(i.e., one region). |
-+-------------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|resource_type|body | string |resource_type denotes one of the available resource |
-| | | |types, including network, subnet, port, router and |
-| | | |security_group. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-An id is assigned to the resource routing entry when it is created. All of
-the routing entry's attributes are listed below.
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|id |body | biginteger |id is the unique identification of the resource |
-| | | |routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|top_id |body | string |top_id denotes the resource id on central Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|bottom_id |body | string |bottom_id denotes the resource id on local Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|pod_id |body | string |pod_id is the uuid of one pod(i.e., one region). |
-+-------------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|resource_type|body | string |resource_type denotes one of the available resource |
-| | | |types, including network, subnet, port, router and |
-| | | |security_group. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|created_at |body | timestamp |created time of the resource routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|updated_at |body | timestamp |updated time of the resource routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Request Example**
-
-This is an example of request information for ``POST /routings``.
-
-::
-
- {
- "routing": {
- "top_id": "09fd7cc9-d169-4b5a-88e8-436ecf4d0bfg",
- "bottom_id": "dc80f9de-abb7-4ec6-ab7a-94f8fd1e20ek",
- "pod_id": "444a8ce3-9fb6-4a0f-b948-6b9d31d6b202",
- "project_id": "d937fe2ad1064a37968885a58808f7a3",
- "resource_type": "subnet"
- }
- }
-
-**Response Example**
-
-This is an example of response information for ``POST /routings``.
-
-::
-
- {
- "routing": {
- "updated_at": null,
- "created_at": "2016-11-03 03:06:38",
- "top_id": "09fd7cc9-d169-4b5a-88e8-436ecf4d0bfg",
- "id": 45,
- "bottom_id": "dc80f9de-abb7-4ec6-ab7a-94f8fd1e20ek",
- "project_id": "d937fe2ad1064a37968885a58808f7a3",
- "pod_id": "444a8ce3-9fb6-4a0f-b948-6b9d31d6b202",
- "resource_type": "subnet"
- }
- }
-
-+------------------+---------------+-----------------+-----------------------------------+
-|**DELETE** |/routings/{id} | |Delete a Resource Routing |
-+------------------+---------------+-----------------+-----------------------------------+
-
-This deletes a resource routing entry. Note that deleting a routing entry
-created by the Tricircle itself may cause problems: without the routing
-entry, central Neutron may make a wrong judgement on whether the resource
-exists, and related requests can no longer be forwarded to the proper local
-Neutron.
-
-Normal Response Code: 200
-
-**Request**
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|id |path | biginteger |id is the unique identification of the resource |
-| | | |routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-There is no response body, but we can list all the resource routing entries
-to verify whether the specific routing entry has been deleted.
-
-+------------------+---------------+-----------------+-----------------------------------+
-|**PUT** |/routings/{id} | |Update a Resource Routing |
-+------------------+---------------+-----------------+-----------------------------------+
-
-This updates an existing resource routing entry. Note that updating a
-routing entry created by the Tricircle itself may cause problems: without a
-correct routing entry, central Neutron may make a wrong judgement on whether
-the resource exists, and related requests can no longer be forwarded to the
-proper local Neutron.
-
-Normal Response Code: 200
-
-**Request**
-
-Only specific attributes of the resource routing entry can be updated; they
-are limited to the fields in the following table, and other fields cannot be
-updated manually.
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|top_id |body | string |top_id denotes the resource id on central Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|bottom_id |body | string |bottom_id denotes the resource id on local Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|pod_id |body | string |pod_id is the uuid of one pod(i.e., one region). |
-+-------------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|resource_type|body | string |resource_type denotes one of the available resource |
-| | | |types, including network, subnet, port, router and |
-| | | |security_group. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-The specified fields of the resource routing entry are updated. All
-attributes of the routing entry are listed below.
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|id |body | biginteger |id is the unique identification of the resource |
-| | | |routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|top_id |body | string |top_id denotes the resource id on central Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|bottom_id |body | string |bottom_id denotes the resource id on local Neutron. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|pod_id |body | string |pod_id is the uuid of one pod(i.e., one region). |
-+-------------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|resource_type|body | string |resource_type denotes one of the available resource |
-| | | |types, including network, subnet, port, router and |
-| | | |security_group. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|created_at |body | timestamp |created time of the resource routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-|updated_at |body | timestamp |updated time of the resource routing. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Request Example**
-
-This is an example of request information for ``PUT /routings/{id}``.
-
-::
-
- {
- "routing": {
- "resource_type": "router"
- }
- }
-
-**Response Example**
-
-This is an example of response information for ``PUT /routings/{id}``. The
-change to the updated_at field will be shown the next time we retrieve this
-routing entry from the database.
-
-::
-
- {
- "routing": {
- "updated_at": null,
- "created_at": "2016-11-03 03:06:38",
- "top_id": "09fd7cc9-d169-4b5a-88e8-436ecf4d0bfg",
- "id": 45,
- "bottom_id": "dc80f9de-abb7-4ec6-ab7a-94f8fd1e20ek",
- "project_id": "d937fe2ad1064a37968885a58808f7a3",
- "pod_id": "444a8ce3-9fb6-4a0f-b948-6b9d31d6b202",
- "resource_type": "router"
- }
- }
-
-Asynchronous Job
-================
-Tricircle XJob provides OpenStack multi-region functionality. It receives jobs
-from the Admin API or Tricircle Central Neutron Plugin and handles them
-asynchronously in Local Neutron(s).
-
-However, the XJob server may fail occasionally, so tenants or administrators
-need to know the job status and delete or redo the failed job if necessary.
-The asynchronous job management APIs provide such functionality and allow
-users to perform CRUD operations on a job. For example, when a resource
-needs to be synchronized between central Neutron and local Neutron, an
-administrator can create a job to process it.
-
-Jobs are categorized into different groups according to the phase of their
-lifespan. Right after a job is created, its status is NEW. After it is
-picked up by the job handler, its status becomes RUNNING. If it is executed
-successfully its status becomes SUCCESS, otherwise it is set to FAIL. Not
-all jobs go through all three phases: for a job whose status is NEW, if a
-newer job performing the same task arrives, the newer job is picked up by
-the job handler and the status of the older job stays unchanged until it is
-cleaned from the job queue. A NEW job may also expire if it waits too long;
-its status is then set to FAIL directly, skipping the RUNNING phase. The
-expiration time span is set by the administrator. All failed jobs get the
-opportunity to run again in the next cycle of a periodic task.
-
-After a job runs successfully, it is automatically moved to the job log
-table; older versions of this job, such as new and failed ones, are removed
-from the job table at the same time.
-
-Jobs are stored in two places. All active jobs are stored in the job table,
-including NEW, RUNNING and FAIL jobs as well as a small number of SUCCESS
-jobs that have not yet been moved to the job log table. The job log table
-contains only SUCCESS jobs; they can be listed and shown like jobs in the
-job table, but performing a delete or redo operation on them raises an
-exception.
-
-+------------------+----------------+---------------------------+------------------------+
-|**GET** |/jobs | |Retrieve Job List |
-+------------------+----------------+---------------------------+------------------------+
-
-By default, this fetches all of the jobs, including active jobs such as NEW,
-FAIL and RUNNING jobs as well as SUCCESS jobs from the job log. They can be
-filtered by job type and job status to only get specific kinds of job
-entries; a project ID filter in the URL query string is ignored, and only
-the project ID the user is authorized for is used as the filter. The
-filtering condition is appended to the service URL as a query string. For
-example, the default service URL is ``GET /jobs``; with a filter applied it
-becomes ``GET /jobs?filter_name=value``. One or multiple filtering
-conditions are supported. In particular, job status is case insensitive when
-filtering jobs, so both ``GET /jobs?status=NEW`` and ``GET /jobs?status=new``
-return the same job set.
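-
-This is a hedged sketch of such a filtered query with curl; the token and
-service URL are placeholders::
-
-    # list only failed jobs of type seg_rule_setup (illustrative only)
-    curl -s -H "X-Auth-Token: $TOKEN" \
-        "http://127.0.0.1/tricircle/v1.0/jobs?status=fail&type=seg_rule_setup"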
-
-To reduce the load on the service, the job list operation also supports
-pagination, as resource routing does. It takes the ``limit`` parameter as
-the page size, and the item following the ``marker`` parameter as the
-starting point of the next list operation.
-
-All items in the job log table are successful jobs, so nothing in it is new;
-the job table is therefore searched ahead of the job log table, and failed,
-new or running jobs are shown first. These jobs are sorted by timestamp in
-descending order; if two or more jobs have the same timestamp, they are
-further sorted by job id in descending order.
-
-Normal Response Code: 200
-
-**Response**
-
-In the normal case, the set of expected jobs is returned. For an invalid
-filtering value, an empty set is returned. For an unsupported filter name,
-an error is raised.
-
-The attributes of a single job are described in the following table.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|id |body | string |id is a uuid attribute of the job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|type |body | string |the type of a job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|status |body | string |job status, including NEW, RUNNING, SUCCESS, FAIL. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|resource |body | object |all kinds of resources that are needed to run the |
-| | | |job. It may also contain other necessary parameters |
-| | | |such as pod_id and project_id that are not Neutron |
-| | | |resources. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|timestamp |body | timestamp |create or update time of the job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response Example**
-
-This is an example of response information for ``GET /jobs``. By default, all the
-job entries will be retrieved.
-
-
-::
-
- {
- "jobs": [
- {
- "id": "3f4ecf30-0213-4f1f-9cb0-0233bcedb767",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "port_delete",
- "timestamp": "2017-03-03 11:05:36",
- "status": "NEW",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- },
- {
- "id": "b01fe514-5211-4758-bbd1-9f32141a7ac2",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "seg_rule_setup",
- "timestamp": "2017-03-01 17:14:44",
- "status": "FAIL",
- "resource": {
- "project_id": "d01246bc5792477d9062a76332b7514a"
- }
- }
- ]
- }
-
-This is an example of response information for ``GET /v1.0/jobs?limit=2``.
-When the total number of items is equal to or greater than the limit value,
-a link to the next page is returned.
-
-::
-
- {
- "jobs": [
- {
- "status": "SUCCESS",
- "resource": {
- "network_id": "7bf3ef1c-1f03-47b5-8191-a3d56938581b",
- "pod_id": "e6880238-3764-4de7-8644-3c09cff85b03"
- },
- "timestamp": "2017-07-26 22:36:48",
- "project_id": "cab94f5a2c6346fe956d3f45ccf84c82",
- "type": "shadow_port_setup",
- "id": "ee035edb-87a6-4dc4-ba00-06d6e62e9ad4"
- },
- {
- "status": "SUCCESS",
- "resource": {
- "router_id": "dbaa0b04-0686-45b6-8bac-a61269517c14"
- },
- "timestamp": "2017-07-26 22:36:39",
- "project_id": "cab94f5a2c6346fe956d3f45ccf84c82",
- "type": "configure_route",
- "id": "a10d9078-dee4-45d4-a352-d89d7072b766"
- }
- ],
- "jobs_links": [
- {
- "href": "/v1.0/jobs?limit=2&marker=a10d9078-dee4-45d4-a352-d89d7072b766",
- "rel": "next"
- }
- ]
- }
-
-This is an example of response information for ``GET /v1.0/jobs?limit=2&marker=a10d9078-dee4-45d4-a352-d89d7072b766``.
-When marker is provided, the next list operation will start from the item
-following the marker.
-
-::
-
- {
- "jobs": [
- {
- "status": "SUCCESS",
- "resource": {
- "router_id": "dbaa0b04-0686-45b6-8bac-a61269517c14"
- },
- "timestamp": "2017-07-26 22:36:20",
- "project_id": "cab94f5a2c6346fe956d3f45ccf84c82",
- "type": "configure_route",
- "id": "89ae3921-2349-49fb-85ad-804b8ca99053"
-
- },
- {
- "status": "SUCCESS",
- "resource": {
- "network_id": "7bf3ef1c-1f03-47b5-8191-a3d56938581b",
- "pod_id": "e6880238-3764-4de7-8644-3c09cff85b03"
- },
- "timestamp": "2017-07-26 22:36:12",
- "project_id": "cab94f5a2c6346fe956d3f45ccf84c82",
- "type": "shadow_port_setup",
- "id": "afd8761b-b082-4fce-af00-afee07c3b923"
- }
- ],
- "jobs_links": [
- {
- "href": "/v1.0/jobs?limit=2&marker=afd8761b-b082-4fce-af00-afee07c3b923",
- "rel": "next"
- }
- ]
- }
-
-This is an example of response information for ``GET /v1.0/jobs?limit=2&marker=a10d9078-dee4-45d4-a352-d89d7072b766&type=shadow_port_setup``.
-Pagination and filtering requirements can be met by specifying the limit and
-filtering conditions at the same time. If there are more items to show
-beyond those already on the page, a link to the next page is returned. Using
-this link and the same filtering conditions, we can retrieve the following
-items.
-
-::
-
- {
- "jobs": [
- {
- "status": "SUCCESS",
- "resource": {
- "network_id": "7bf3ef1c-1f03-47b5-8191-a3d56938581b",
- "pod_id": "e6880238-3764-4de7-8644-3c09cff85b03"
- },
- "timestamp": "2017-07-26 22:36:12",
- "project_id": "cab94f5a2c6346fe956d3f45ccf84c82",
- "type": "shadow_port_setup",
- "id": "afd8761b-b082-4fce-af00-afee07c3b923"
-
- },
- {
- "status": "SUCCESS",
- "resource": {
- "network_id": "fb53ea2d-a0e8-4ed5-a2b2-f0e2fce9ff4f",
- "pod_id": "e6880238-3764-4de7-8644-3c09cff85b03"
- },
- "timestamp": "2017-07-26 22:33:45",
- "project_id": "cab94f5a2c6346fe956d3f45ccf84c82",
- "type": "shadow_port_setup",
- "id": "592ade1c-12a5-4ca3-9f75-4810c25a1604"
- }
- ],
- "jobs_links": [
- {
- "href": "/v1.0/jobs?limit=2&marker=592ade1c-12a5-4ca3-9f75-4810c25a1604",
- "rel": "next"
- }
- ]
- }
-
-+------------------+-------------------+-----------------------+-------------------------------+
-|**GET** |/jobs/detail | |Retrieve Jobs with Filter(s) |
-+------------------+-------------------+-----------------------+-------------------------------+
-
-This retrieves jobs from the Tricircle database. They can be filtered by
-project ID, job type and job status. It works the same way as ``GET /jobs``.
-
-Normal Response Code: 200
-
-**Response**
-
-A list of jobs is returned. The attributes of a single job are described
-in the following table.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|id |body | string |id is a uuid attribute of the job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|type |body | string |the type of a job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|status |body | string |job status, including NEW, RUNNING, SUCCESS, FAIL. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|resource |body | object |all kinds of resources that are needed to run the |
-| | | |job. It may also contain other necessary parameters |
-| | | |such as pod_id and project_id that are not Neutron |
-| | | |resources. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|timestamp |body | timestamp |create or update time of the job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response Example**
-
-This is an example of response information for ``GET /jobs/detail``.
-
-::
-
- {
- "jobs": [
- {
- "id": "3f4ecf30-0213-4f1f-9cb0-0233bcedb767",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "port_delete",
- "timestamp": "2017-03-03 11:05:36",
- "status": "NEW",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- },
- {
- "id": "b01fe514-5211-4758-bbd1-9f32141a7ac2",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "seg_rule_setup",
- "timestamp": "2017-03-01 17:14:44",
- "status": "FAIL",
- "resource": {
- "project_id": "d01246bc5792477d9062a76332b7514a"
- }
- }
- ]
- }
-
-+------------------+---------------+---------------+-------------------------------------+
-|**GET** |/jobs/{id} | |Retrieve a Single Job |
-+------------------+---------------+---------------+-------------------------------------+
-
-This fetches a single job entry. The entry may come from the job table or
-the job log table.
-
-Normal Response Code: 200
-
-**Request**
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|id |path | string |id is a uuid attribute of the job. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-The attributes of the returned job are described in the following table.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|id |body | string |id is a uuid attribute of the job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|type |body | string |the type of a job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|status |body | string |job status, including NEW, RUNNING, SUCCESS, FAIL. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|resource |body | object |all kinds of resources that are needed to run the |
-| | | |job. It may also contain other necessary parameters |
-| | | |such as pod_id and project_id that are not Neutron |
-| | | |resources. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|timestamp |body | timestamp |create or update time of the job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response Example**
-
-This is an example of response information for ``GET /jobs/{id}``.
-
-::
-
- {
- "job": {
- "id": "3f4ecf30-0213-4f1f-9cb0-0233bcedb767",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "port_delete",
- "timestamp": "2017-03-03 11:05:36",
- "status": "NEW",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- }
- }
-
-+------------------+----------------+---------------------------+------------------------+
-|**GET** |/jobs/schemas | |Retrieve Jobs' Schemas |
-+------------------+----------------+---------------------------+------------------------+
-
-Retrieve all jobs' schemas.
-
-
-Normal Response Code: 200
-
-**Response**
-
-This returns a list of all jobs' schemas. The structure of a job schema is
-described in the following table.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|type |body | string |the type of a job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|resource |body | array |all kinds of resources that are needed to run the |
-| | | |job. It may also contain other necessary parameters |
-| | | |such as pod_id and project_id that are not Neutron |
-| | | |resources. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response Example**
-
-This is an example of response information for ``GET /jobs/schemas``.
-
-::
-
- {
- "schemas": [
- {
- "type": "configure_route",
- "resource": ["router_id"]
- },
- {
- "type": "router_setup",
- "resource": ["pod_id", "router_id", "network_id"]
- },
- {
- "type": "port_delete",
- "resource": ["pod_id", "port_id"]
- },
- {
- "type": "seg_rule_setup",
- "resource": ["project_id"]
- },
- {
- "type": "update_network",
- "resource": ["pod_id", "network_id"]
- },
- {
- "type": "subnet_update",
- "resource": ["pod_id", "subnet_id"]
- },
- {
- "type": "shadow_port_setup",
-                "resource": ["pod_id", "network_id"]
- }
- ]
- }
-
-+---------------+-------+------------------------------------+--------------------+
-|**POST**       |/jobs  |                                    |Create a Job        |
-+---------------+-------+------------------------------------+--------------------+
-
-This creates a new job. If the target job already exists in the job table
-with status NEW, the newer job will be the one picked up by the job handler.
-
-Normal Response Code: 202
-
-**Request**
-
-Some essential attributes of the job are required and they are described
-in the following table.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|type |body | string |the type of a job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|resource |body | object |all kinds of resources that are needed to run the |
-| | | |job. It may also contain other necessary parameters |
-| | | |such as pod_id and project_id that are not Neutron |
-| | | |resources. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-This returns a newly created job. Its attributes are described in the following
-table.
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|id |body | string |id is a uuid attribute of the job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|project_id |body | string |project_id is the uuid of a project object in |
-| | | |KeyStone. "Tenant" is an old term for a project in |
-| | | |Keystone. Starting in API version 3, "project" is the|
-| | | |preferred term. They are identical in the context. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|type |body | string |the type of a job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|status |body | string |job status, including NEW, RUNNING, SUCCESS, FAIL. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|resource |body | object |all kinds of resources that are needed to run the |
-| | | |job. It may also contain other necessary parameters |
-| | | |such as pod_id and project_id that are not Neutron |
-| | | |resources. |
-+-----------+-------+---------------+-----------------------------------------------------+
-|timestamp |body | timestamp |create time of the job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Request Example**
-
-This is an example of request information for ``POST /jobs``.
-
-::
-
- {
- "job": {
- "type": "port_delete",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- }
- }
-
-**Response Example**
-
-This is an example of response information for ``POST /jobs``.
-
-::
-
- {
- "job": {
- "id": "3f4ecf30-0213-4f1f-9cb0-0233bcedb767",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "port_delete",
- "timestamp": "2017-03-03 11:05:36",
- "status": "NEW",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- }
- }
-
-+------------------+-----------------+------------------------+-------------------------+
-|**DELETE** |/jobs/{id} | |Delete a Job |
-+------------------+-----------------+------------------------+-------------------------+
-
-This deletes a failed or duplicated job from the job table. If a user tries
-to delete a job that is in the job log table, an error is raised.
-
-Normal Response Code: 200
-
-**Request**
-
-+-----------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+===========+=======+===============+=====================================================+
-|id |path | string |id is a uuid attribute of the job. |
-+-----------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-A pair of curly braces (an empty JSON object) is returned if the deletion
-succeeds; otherwise an exception is thrown. We can list the jobs to verify
-whether the job has been deleted successfully.
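-
-A minimal sketch of the call with curl (the token, service URL and job id
-are placeholders); an empty JSON object is expected on success::
-
-    curl -s -X DELETE -H "X-Auth-Token: $TOKEN" \
-        http://127.0.0.1/tricircle/v1.0/jobs/$JOB_ID
-    # expected output on success: {}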
-
-+------------------+---------------+-----------------+-----------------------------------+
-|**PUT** |/jobs/{id} | |Redo a Job |
-+------------------+---------------+-----------------+-----------------------------------+
-
-This redoes a job halted by an XJob server failure or a network failure. The
-job handler redoes failed jobs periodically at a configured interval, while
-this Admin API redoes a job immediately. If a user tries to redo a job that
-is in the job log table, an error is raised.
-
-
-Normal Response Code: 200
-
-**Request**
-
-Only the job id is needed. The PUT method is used to redo a job. Normally a
-PUT request carries a request body, but since the redo operation needs no
-information other than the job id, the request is issued without a body.
-
-+-------------+-------+---------------+-----------------------------------------------------+
-|Name |In | Type | Description |
-+=============+=======+===============+=====================================================+
-|id |path | string |id is a uuid attribute of the job. |
-+-------------+-------+---------------+-----------------------------------------------------+
-
-**Response**
-
-Nothing is returned for this request, but we can monitor the job's progress
-by checking its status afterwards.
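-
-A rough sketch of the redo call with curl (the token, service URL and job id
-are placeholders); note that no request body is sent::
-
-    curl -s -X PUT -H "X-Auth-Token: $TOKEN" \
-        http://127.0.0.1/tricircle/v1.0/jobs/$JOB_ID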
-
diff --git a/doc/source/admin/cli.rst b/doc/source/admin/cli.rst
deleted file mode 100644
index f58c1be7..00000000
--- a/doc/source/admin/cli.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-================================
-Command-Line Interface Reference
-================================
-
-Synopsis
-========
-
-Follow the OpenStack CLI format ::
-
-    openstack [<global-options>] <command> [<command-arguments>]
-
-The CLI for Tricircle can be executed as follows ::
-
-    openstack multiregion networking <command> [<command-arguments>]
-
-All commands issue requests to the Tricircle Admin API.
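-
-For example, a hedged sketch of listing pods through the CLI, assuming the
-python-tricircleclient plugin is installed and the usual OpenStack
-authentication environment variables are set::
-
-    openstack multiregion networking pod list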
-
-
-Management commands
-===================
-
-.. toctree::
- :maxdepth: 1
-
- tricircle-status
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
deleted file mode 100644
index fc51a9d2..00000000
--- a/doc/source/admin/index.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-=====================
-Tricircle Admin Guide
-=====================
-
-.. toctree::
- :maxdepth: 3
-
- api_v1
- cli
diff --git a/doc/source/admin/tricircle-status.rst b/doc/source/admin/tricircle-status.rst
deleted file mode 100644
index 0c13f2e0..00000000
--- a/doc/source/admin/tricircle-status.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-================
-tricircle-status
-================
-
-Synopsis
-========
-
-::
-
- tricircle-status <category> <command> [<args>]
-
-Description
-===========
-
-:program:`tricircle-status` is a tool that provides routines for checking the
-status of a Tricircle deployment.
-
-Options
-=======
-
-The standard pattern for executing a :program:`tricircle-status` command is::
-
- tricircle-status <category> <command> [<args>]
-
-Run without arguments to see a list of available command categories::
-
- tricircle-status
-
-Categories are:
-
-* ``upgrade``
-
-Detailed descriptions are below.
-
-You can also run with a category argument such as ``upgrade`` to see a list of
-all commands in that category::
-
- tricircle-status upgrade
-
-These sections describe the available categories and arguments for
-:program:`tricircle-status`.
-
-Upgrade
-~~~~~~~
-
-.. _tricircle-status-checks:
-
-``tricircle-status upgrade check``
- Performs a release-specific readiness check before restarting services with
- new code. This command expects to have complete configuration and access
- to databases and services.
-
- **Return Codes**
-
- .. list-table::
- :widths: 20 80
- :header-rows: 1
-
- * - Return code
- - Description
- * - 0
- - All upgrade readiness checks passed successfully and there is nothing
- to do.
- * - 1
- - At least one check encountered an issue and requires further
- investigation. This is considered a warning but the upgrade may be OK.
- * - 2
- - There was an upgrade status check failure that needs to be
- investigated. This should be considered something that stops an
- upgrade.
- * - 255
- - An unexpected error occurred.
-
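-  For example, the command can be scripted based on its exit status; a minimal
-  sketch assuming a fully configured deployment::
-
-    tricircle-status upgrade check
-    echo $?    # prints one of the return codes listed above
-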
- **History of Checks**
-
- **6.0.0 (Stein)**
-
- * Placeholder to be filled in with checks as they are added in Stein.
diff --git a/doc/source/conf.py b/doc/source/conf.py
deleted file mode 100755
index a69bc202..00000000
--- a/doc/source/conf.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath('../..'))
-# -- General configuration ----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [
- 'sphinx.ext.autodoc',
- # 'sphinx.ext.intersphinx',
- 'openstackdocstheme'
-]
-
-# openstackdocstheme options
-repository_name = 'openstack/tricircle'
-bug_project = 'tricircle'
-bug_tag = ''
-
-# autodoc generation is a bit aggressive and a nuisance when doing heavy
-# text edit cycles.
-# execute "export SPHINX_DEBUG=1" in your terminal to disable
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'tricircle'
-copyright = u'2015, OpenStack Foundation'
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = True
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# -- Options for HTML output --------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-# html_theme_path = ["."]
-# html_theme = '_theme'
-# html_static_path = ['static']
-html_theme = 'openstackdocs'
-
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = '%sdoc' % project
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass
-# [howto/manual]).
-latex_documents = [
- ('index',
- '%s.tex' % project,
- u'%s Documentation' % project,
- u'OpenStack Foundation', 'manual'),
-]
-
-# Example configuration for intersphinx: refer to the Python standard library.
-# intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/source/configuration/configuration.rst b/doc/source/configuration/configuration.rst
deleted file mode 100644
index 0398878a..00000000
--- a/doc/source/configuration/configuration.rst
+++ /dev/null
@@ -1,226 +0,0 @@
-===================
-Configuration Guide
-===================
-A brief introduction to configuring the Tricircle service. Only the
-configuration items for the Tricircle are described here. Logging, messaging,
-database, keystonemiddleware and other configuration generated from the
-OpenStack Oslo library is not covered, since these configuration items are
-common to Nova, Cinder and Neutron; please refer to the corresponding
-descriptions in Nova, Cinder or Neutron.
-
-Common Options
-==============
-Among the common configuration options, the "client" group needs to be
-configured in the Admin API, XJob, Local Plugin and Central Plugin. The
-"tricircle_db_connection" option should be configured in the Admin API, XJob
-and Central Plugin.
-
-.. _Common:
-
-.. list-table:: Description of common configuration options
- :header-rows: 1
- :class: config-ref-table
-
- * - Configuration option = Default value
- - Description
- * - **[DEFAULT]**
- -
- * - ``tricircle_db_connection`` = ``None``
- - (String) database connection string for Tricircle, for example, mysql+pymysql://root:password@127.0.0.1/tricircle?charset=utf8
- * - **[client]**
- -
- * - ``admin_password`` = ``None``
- - (String) password of admin account, needed when auto_refresh_endpoint set to True, for example, password.
- * - ``admin_tenant`` = ``None``
- - (String) tenant name of admin account, needed when auto_refresh_endpoint set to True, for example, demo.
- * - ``admin_tenant_domain_name`` = ``Default``
- - (String) tenant domain name of admin account, needed when auto_refresh_endpoint set to True.
- * - ``admin_user_domain_name`` = ``Default``
- - (String) user domain name of admin account, needed when auto_refresh_endpoint set to True.
- * - ``admin_username`` = ``None``
- - (String) username of admin account, needed when auto_refresh_endpoint set to True.
- * - ``auth_url`` = ``http://127.0.0.1/identity``
- - (String) keystone authorization url; it's basically the internal or public endpoint of keystone, depending on how
- the common.client module can reach keystone, for example, http://$service_host/identity
- * - ``identity_url`` = ``http://127.0.0.1/identity/v3``
- - [Deprecated] (String) keystone service url, for example, http://$service_host/identity/v3 (this option is not
- used in code since Pike release, you can simply ignore this option)
- * - ``auto_refresh_endpoint`` = ``True``
- - (Boolean) if set to True, the endpoint will be automatically refreshed when a timeout occurs while accessing it.
- * - ``bridge_cidr`` = ``100.0.0.0/9``
- - (String) cidr pool of the bridge network, for example, 100.0.0.0/9
- * - ``neutron_timeout`` = ``60``
- - (Integer) timeout for neutron client in seconds.
- * - ``top_region_name`` = ``None``
- - (String) region name of the Central Neutron that the client needs to access, for example, CentralRegion.
- * - ``cross_pod_vxlan_mode`` = ``p2p``
- - (String) Cross-pod VxLAN networking support mode, possible choices are p2p, l2gw and noop
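-
-As an illustration, the snippet below shows how these options might appear in
-a configuration file such as api.conf, xjob.conf or the central plugin's
-neutron.conf; the values are only the examples from the table above, and the
-admin user name is an assumed placeholder.
-
-.. code-block:: ini
-
-    [DEFAULT]
-    # database connection string for Tricircle
-    tricircle_db_connection = mysql+pymysql://root:password@127.0.0.1/tricircle?charset=utf8
-
-    [client]
-    auth_url = http://127.0.0.1/identity
-    # placeholder admin credentials, used when auto_refresh_endpoint is True
-    admin_username = admin
-    admin_password = password
-    admin_tenant = demo
-    admin_user_domain_name = Default
-    admin_tenant_domain_name = Default
-    auto_refresh_endpoint = True
-    top_region_name = CentralRegion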
-
-
-
-
-Tricircle Admin API Settings
-============================
-
-The Tricircle Admin API serves for managing the mapping between OpenStack
-instances and availability zones, retrieving object uuid routing and exposing
-APIs for maintenance. The following items should be configured in Tricircle's
-api.conf.
-
-.. _Tricircle-Admin_API:
-
-.. list-table:: Description of Tricircle Admin API configuration options
- :header-rows: 1
- :class: config-ref-table
-
- * - Configuration option = Default value
- - Description
- * - **[DEFAULT]**
- -
- * - ``api_workers`` = ``1``
- - (Integer) Number of API workers
- * - ``auth_strategy`` = ``keystone``
- - (String) The type of authentication to use
- * - ``bind_host`` = ``0.0.0.0``
- - (String) The host IP to bind to
- * - ``bind_port`` = ``19999``
- - (Integer) The port to bind to
-
-
-Tricircle XJob Settings
-=======================
-
-Tricircle XJob receives and processes cross-Neutron functionality and other
-asynchronous jobs from the Admin API or the Tricircle Central Neutron Plugin.
-The following items should be configured in Tricircle's xjob.conf.
-
-.. _Tricircle-Xjob:
-
-.. list-table:: Description of Tricircle XJob configuration options
- :header-rows: 1
- :class: config-ref-table
-
- * - Configuration option = Default value
- - Description
- * - **[DEFAULT]**
- -
- * - ``periodic_enable`` = ``True``
- - (Boolean) Enable periodic tasks
- * - ``periodic_fuzzy_delay`` = ``60``
- - (Integer) Range of seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0)
- * - ``report_interval`` = ``10``
- - (Integer) Seconds between nodes reporting state to datastore
- * - ``host`` = ``tricircle.xhost``
- - (String) The host name for the RPC server; each node should have a different host name.
- * - ``job_run_expire`` = ``180``
- - (Integer) A running job is considered expired after this time, in seconds
- * - ``workers`` = ``1``
- - (Integer) Number of workers
- * - ``worker_handle_timeout`` = ``1800``
- - (Integer) Timeout for worker's one turn of processing, in seconds
- * - ``worker_sleep_time`` = ``60``
- - (Float) Seconds a worker sleeps after one run in a loop
- * - ``redo_time_span`` = ``172800``
- - (Integer) Time span in seconds, we calculate the latest job timestamp by
- subtracting this time span from the current timestamp, jobs created
- between these two timestamps will be redone
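-
-Put together, an xjob.conf fragment using the defaults listed above would look
-like the sketch below; in a real deployment each node should use a distinct
-``host`` value.
-
-.. code-block:: ini
-
-    [DEFAULT]
-    host = tricircle.xhost
-    workers = 1
-    periodic_enable = True
-    periodic_fuzzy_delay = 60
-    report_interval = 10
-    job_run_expire = 180
-    worker_handle_timeout = 1800
-    worker_sleep_time = 60
-    redo_time_span = 172800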
-
-Networking Setting for Tricircle
-================================
-To make the networking automation work, two plugins need to be configured:
-Tricircle Central Neutron Plugin and Tricircle Local Neutron Plugin.
-
-**Tricircle Central Neutron Plugin**
-
-The Tricircle Central Neutron Plugin provides tenant-level L2/L3 networking
-automation across multiple Neutron servers. The following items should be
-configured in the central Neutron's neutron.conf.
-
-.. _Central Neutron:
-
-.. list-table:: Description of Central Neutron configuration options
- :header-rows: 1
- :class: config-ref-table
-
- * - Configuration option = Default value
- - Description
- * - **[DEFAULT]**
- -
- * - ``core_plugin`` = ``None``
- - (String) core plugin central Neutron server uses, should be set to tricircle.network.central_plugin.TricirclePlugin
- * - **[tricircle]**
- -
- * - ``bridge_network_type`` = ``vxlan``
- - (String) Type of l3 bridge network, this type should be enabled in tenant_network_types and is not local type, for example, vlan or vxlan.
- * - ``default_region_for_external_network`` = ``RegionOne``
- - (String) Default region where the external network belongs to, it must exist, for example, RegionOne.
- * - ``network_vlan_ranges`` = ``None``
- - (String) List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> specifying physical_network names usable for VLAN provider and tenant networks, as well as ranges of VLAN tags on each available for allocation to tenant networks, for example, bridge:2001:3000.
- * - ``tenant_network_types`` = ``vxlan,local``
- - (String) Ordered list of network_types to allocate as tenant networks. The default value "local" is useful for single pod connectivity, for example, local vlan and vxlan.
- * - ``type_drivers`` = ``vxlan,local``
- - (String) List of network type driver entry points to be loaded from the tricircle.network.type_drivers namespace, for example, local vlan and vxlan.
- * - ``vni_ranges`` = ``None``
- - (String) Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation, for example, 1001:2000
- * - ``flat_networks`` = ``*``
- - (String) List of physical_network names with which flat networks can be created. Use default '*' to allow flat networks with arbitrary physical_network names. Use an empty list to disable flat networks.
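-
-For illustration, a central Neutron's neutron.conf might contain a fragment
-like the following; the values are simply the examples given in the table
-above, not recommendations.
-
-.. code-block:: ini
-
-    [DEFAULT]
-    core_plugin = tricircle.network.central_plugin.TricirclePlugin
-
-    [tricircle]
-    bridge_network_type = vxlan
-    default_region_for_external_network = RegionOne
-    network_vlan_ranges = bridge:2001:3000
-    tenant_network_types = vxlan,local
-    type_drivers = vxlan,local
-    vni_ranges = 1001:2000
-    flat_networks = *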
-
-
-**Tricircle Local Neutron Plugin**
-
-The Tricircle Local Neutron Plugin triggers cross-Neutron networking
-automation. It is a shim layer between the real core plugin and the Neutron
-API server. The following items should be configured in the local Neutron's
-neutron.conf.
-
-.. list-table:: Description of Local Neutron configuration options
- :header-rows: 1
- :class: config-ref-table
-
- * - Configuration option = Default value
- - Description and Example
- * - **[DEFAULT]**
- -
- * - ``core_plugin`` = ``None``
- - (String) core plugin local Neutron server uses, should be set to tricircle.network.local_plugin.TricirclePlugin
- * - **[tricircle]**
- -
- * - ``central_neutron_url`` = ``None``
- - (String) Central Neutron server url, for example, http://$service_host:9696
- * - ``real_core_plugin`` = ``None``
- - (String) The core plugin the Tricircle local plugin will invoke, for example, neutron.plugins.ml2.plugin.Ml2Plugin
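-
-Correspondingly, a minimal local Neutron's neutron.conf fragment might look
-like this, using the example values from the table above.
-
-.. code-block:: ini
-
-    [DEFAULT]
-    core_plugin = tricircle.network.local_plugin.TricirclePlugin
-
-    [tricircle]
-    central_neutron_url = http://$service_host:9696
-    real_core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin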
-
-
-**Tricircle Local Neutron L3 Plugin**
-
-In multiple OpenStack clouds, if the external network is located in the first
-OpenStack cloud but the port which will be associated with a floating ip is
-located in the second OpenStack cloud, then the network of this port may not
-be able to be added to the router in the first OpenStack cloud. To address
-this scenario, the Tricircle uses a bridge network to connect the routers in
-these two OpenStack clouds if the network is not a cross-Neutron L2 network.
-To make this work, the Tricircle Local Neutron L3 Plugin or another L3 service
-plugin must be able to associate a floating ip with a port whose network is
-not directly attached to the router. TricircleL3Plugin inherits from Neutron's
-original L3RouterPlugin and overrides the "get_router_for_floatingip"
-implementation to allow such an association. If you want to configure the
-local Neutron to use the original L3RouterPlugin, you need to patch the
-"get_router_for_floatingip" function in the same way as TricircleL3Plugin
-does.
-
-If only cross Neutron L2 networking is needed in the deployment, it's not
-necessary to configure the service plugins.
-
-The following item should be configured in the local Neutron's neutron.conf.
-
-.. list-table:: Description of Local Neutron configuration options
- :header-rows: 1
- :class: config-ref-table
-
- * - Configuration option = Default value
- - Description and Example
- * - **[DEFAULT]**
- -
- * - ``service_plugins`` = ``None``
- - (String) service plugins local Neutron server uses, can be set to tricircle.network.local_l3_plugin.TricircleL3Plugin
diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst
deleted file mode 100644
index 4f7dcb22..00000000
--- a/doc/source/configuration/index.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-=============================
-Tricircle Configuration Guide
-=============================
-
-.. toctree::
- :maxdepth: 3
-
- configuration
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
deleted file mode 100644
index 2aa07077..00000000
--- a/doc/source/contributor/contributing.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-============
-Contributing
-============
-.. include:: ../../../CONTRIBUTING.rst
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
deleted file mode 100644
index 1ae43b76..00000000
--- a/doc/source/contributor/index.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-============================
-Tricircle Contribution Guide
-============================
-
-.. toctree::
- :maxdepth: 1
-
- contributing
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index 17f25305..00000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,51 +0,0 @@
-.. tricircle documentation master file, created by
- sphinx-quickstart on Wed Dec 2 17:00:36 2015.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
-=====================================
-Welcome to Tricircle's documentation!
-=====================================
-
-Tricircle User Guide
-====================
-.. toctree::
- :maxdepth: 3
-
- user/index
-
-Tricircle Contribution Guide
-============================
-.. toctree::
- :maxdepth: 2
-
- contributor/index
-
-Tricircle Admin Guide
-=====================
-.. toctree::
- :maxdepth: 3
-
- admin/index
-
-Tricircle Installation Guide
-============================
-.. toctree::
- :maxdepth: 3
-
- install/index
-
-Tricircle Configuration Guide
-=============================
-.. toctree::
- :maxdepth: 3
-
- configuration/index
-
-Tricircle Networking Guide
-==========================
-.. toctree::
- :maxdepth: 4
-
- networking/index
-
diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst
deleted file mode 100644
index a42975e8..00000000
--- a/doc/source/install/index.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-============================
-Tricircle Installation Guide
-============================
-
-.. toctree::
- :maxdepth: 3
-
- installation-guide
diff --git a/doc/source/install/installation-cell.rst b/doc/source/install/installation-cell.rst
deleted file mode 100644
index 83d662b4..00000000
--- a/doc/source/install/installation-cell.rst
+++ /dev/null
@@ -1,152 +0,0 @@
-======================================
-Work with Nova cell v2 (experimental)
-======================================
-
-.. note:: Multi-cell support of Nova cell v2 is under development. DevStack
- doesn't support multi-cell deployment currently, so the steps discussed in
- this document may not seem that elegant. We will keep updating this document
- according to the progress of multi-cell development by the Nova team.
-
-Setup
-^^^^^
-
-- 1 Follow "Multi-pod Installation with DevStack" document to prepare your
- local.conf for both nodes, and set TRICIRCLE_DEPLOY_WITH_CELL to True for
- both nodes. Start DevStack in node1, then node2.
-
-.. note:: After running DevStack in both nodes, a multi-cell environment will
- be prepared: there is one CentralRegion, where Nova API and central Neutron
- will be registered. Nova has two cells, node1 belongs to cell1, node2 belongs
- to cell2, and each cell will be configured to use a dedicated local Neutron.
- For cell1, it's RegionOne Neutron in node1; for cell2, it's RegionTwo Neutron
- in node2 (you can set the region name in local.conf to make the name more
- friendly). End users can access the CentralRegion endpoint of Nova and Neutron
- to experience the integration of Nova cell v2 and the Tricircle.
-
-- 2 Stop the following services in node2::
-
- systemctl stop devstack@n-sch.service
- systemctl stop devstack@n-super-cond.service
- systemctl stop devstack@n-api.service
-
- If devstack@n-api-meta.service exists, stop it as well::
-
- systemctl stop devstack@n-api-meta.service
-
-.. note:: Actually for cell v2, only one Nova API is required. We enable n-api
- in node2 because we need DevStack to help us create the necessary cell
- database. If n-api is disabled, neither API database nor cell database will
- be created.
-
-- 3 In node2, run the following command::
-
- mysql -u$user -p$password -Dnova_cell1 -e 'select host, mapped from compute_nodes'
-
- you can see that this command returns one row, showing that the host of node2
- is already mapped::
-
- +-----------+--------+
- | host | mapped |
- +-----------+--------+
- | zhiyuan-2 | 1 |
- +-----------+--------+
-
- This host is registered to the Nova API in node2, which we have already
- stopped. We need to update this row to set "mapped" to 0::
-
- mysql -u$user -p$password -Dnova_cell1 -e 'update compute_nodes set mapped = 0 where host = "zhiyuan-2"'
-
- then we can register this host again in step 4.
-
-- 4 In node1, run the following commands to register the new cell::
-
- nova-manage cell_v2 create_cell --name cell2 \
- --transport-url rabbit://$rabbit_user:$rabbit_passwd@$node2_ip:5672/nova_cell1 \
- --database_connection mysql+pymysql://$db_user:$db_passwd@$node2_ip/nova_cell1?charset=utf8
-
- nova-manage cell_v2 discover_hosts
-
- then you can see the new cell and host are added in the database::
-
- mysql -u$user -p$password -Dnova_api -e 'select cell_id, host from host_mappings'
-
- +---------+-----------+
- | cell_id | host |
- +---------+-----------+
- | 2 | zhiyuan-1 |
- | 3 | zhiyuan-2 |
- +---------+-----------+
-
- mysql -u$user -p$password -Dnova_api -e 'select id, name from cell_mappings'
-
- +----+-------+
- | id | name |
- +----+-------+
- | 1 | cell0 |
- | 2 | cell1 |
- | 3 | cell2 |
- +----+-------+
-
-- 5 In node1, run the following command::
-
- systemctl restart devstack@n-sch.service
-
-- 6 In node1, check if compute services in both hosts are registered::
-
- openstack --os-region-name CentralRegion compute service list
-
- +----+------------------+-----------+----------+---------+-------+----------------------------+
- | ID | Binary | Host | Zone | Status | State | Updated At |
- +----+------------------+-----------+----------+---------+-------+----------------------------+
- | 5 | nova-scheduler | zhiyuan-1 | internal | enabled | up | 2017-09-20T06:56:02.000000 |
- | 6 | nova-conductor | zhiyuan-1 | internal | enabled | up | 2017-09-20T06:56:09.000000 |
- | 8 | nova-consoleauth | zhiyuan-1 | internal | enabled | up | 2017-09-20T06:56:01.000000 |
- | 1 | nova-conductor | zhiyuan-1 | internal | enabled | up | 2017-09-20T06:56:07.000000 |
- | 3 | nova-compute | zhiyuan-1 | nova | enabled | up | 2017-09-20T06:56:10.000000 |
- | 1 | nova-conductor | zhiyuan-2 | internal | enabled | up | 2017-09-20T06:56:07.000000 |
- | 3 | nova-compute | zhiyuan-2 | nova | enabled | up | 2017-09-20T06:56:09.000000 |
- +----+------------------+-----------+----------+---------+-------+----------------------------+
-
- zhiyuan-1 has two nova-conductor services, because one of them is a super
- conductor service.
-
-- 7 Create two aggregates and put the two hosts in each aggregate::
-
- nova --os-region-name CentralRegion aggregate-create ag1 az1
- nova --os-region-name CentralRegion aggregate-create ag2 az2
- nova --os-region-name CentralRegion aggregate-add-host ag1 zhiyuan-1
- nova --os-region-name CentralRegion aggregate-add-host ag2 zhiyuan-2
-
-- 8 Create pods using the Tricircle client::
-
- openstack --os-region-name CentralRegion multiregion networking pod create --region-name CentralRegion
- openstack --os-region-name CentralRegion multiregion networking pod create --region-name RegionOne --availability-zone az1
- openstack --os-region-name CentralRegion multiregion networking pod create --region-name RegionTwo --availability-zone az2
-
-- 9 Create a network and boot virtual machines::
-
- net_id=$(openstack --os-region-name CentralRegion network create --provider-network-type vxlan net1 -c id -f value)
- openstack --os-region-name CentralRegion subnet create --subnet-range 10.0.1.0/24 --network net1 subnet1
- image_id=$(openstack --os-region-name CentralRegion image list -c ID -f value)
-
- openstack --os-region-name CentralRegion server create --flavor 1 --image $image_id --nic net-id=$net_id --availability-zone az1 vm1
- openstack --os-region-name CentralRegion server create --flavor 1 --image $image_id --nic net-id=$net_id --availability-zone az2 vm2
-
-Troubleshooting
-^^^^^^^^^^^^^^^
-
-- 1 After you run "compute service list" in step 6, you only see services in node1, like::
-
- +----+------------------+-----------+----------+---------+-------+----------------------------+
- | ID | Binary | Host | Zone | Status | State | Updated At |
- +----+------------------+-----------+----------+---------+-------+----------------------------+
- | 5 | nova-scheduler | zhiyuan-1 | internal | enabled | up | 2017-09-20T06:55:52.000000 |
- | 6 | nova-conductor | zhiyuan-1 | internal | enabled | up | 2017-09-20T06:55:59.000000 |
- | 8 | nova-consoleauth | zhiyuan-1 | internal | enabled | up | 2017-09-20T06:56:01.000000 |
- | 1 | nova-conductor | zhiyuan-1 | internal | enabled | up | 2017-09-20T06:55:57.000000 |
- | 3 | nova-compute | zhiyuan-1 | nova | enabled | up | 2017-09-20T06:56:00.000000 |
- +----+------------------+-----------+----------+---------+-------+----------------------------+
-
- Though the new cell has been registered in the database, the running n-api
- process in node1 may not recognize it. We find that restarting n-api in node1
- solves this problem.
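-
- For example, restart the Nova API service in node1::
-
-    systemctl restart devstack@n-api.service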
diff --git a/doc/source/install/installation-guide.rst b/doc/source/install/installation-guide.rst
deleted file mode 100644
index d9e30f4d..00000000
--- a/doc/source/install/installation-guide.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-Installation Guide
-------------------
-The Tricircle can now be deployed with DevStack for all-in-one single-pod and
-multi-pod setups. You can build different Tricircle environments with DevStack
-according to your needs. In the near future a manual installation guide will be
-added here, discussing how to install the Tricircle step by step without
-DevStack for users who install OpenStack manually.
-
-
-
-
-
-.. include:: ./single-pod-installation-devstack.rst
-.. include:: ./multi-pod-installation-devstack.rst
-.. include:: ./installation-manual.rst
-.. include:: ./installation-cell.rst
-.. include:: ./installation-lbaas.rst
-.. include:: ./installation-lbaas_with_nova_cell_v2.rst
-.. include:: ./installation-tricircle_work_with_container.rst
\ No newline at end of file
diff --git a/doc/source/install/installation-lbaas.rst b/doc/source/install/installation-lbaas.rst
deleted file mode 100644
index fac5adee..00000000
--- a/doc/source/install/installation-lbaas.rst
+++ /dev/null
@@ -1,1191 +0,0 @@
-=========================================
-Installation guide for LBaaS in Tricircle
-=========================================
-
-.. note:: Since Octavia does not support multiple region scenarios, some
- modifications are required to install the Tricircle and Octavia in multiple
- pods. As a result, we will keep updating this document to support automatic
- installation and testing of the Tricircle and Octavia in multiple regions.
-
-Setup & Installation
-^^^^^^^^^^^^^^^^^^^^
-
-- 1 For node1 in RegionOne, clone the code from the Octavia repository to /opt/stack/ .
- Then make some changes to Octavia, so that we can build the management network in multiple regions manually. The lines to comment out are listed below.
-
- - First, comment the following lines in the **octavia_init** function in octavia/devstack/plugin.sh .
-
- Line 586-588:
-
- - **build_mgmt_network**
- - **OCTAVIA_AMP_NETWORK_ID=$(openstack network show lb-mgmt-net -f value -c id)**
- - **iniset $OCTAVIA_CONF controller_worker amp_boot_network_list ${OCTAVIA_AMP_NETWORK_ID}**
-
- Line 593-595:
-
- - **if is_service_enabled tempest; then**
- - **configure_octavia_tempest ${OCTAVIA_AMP_NETWORK_ID}**
- - **fi**
-
- Line 602-604:
-
- - **if is_service_enabled tempest; then**
- - **configure_octavia_tempest ${OCTAVIA_AMP_NETWORK_ID}**
- - **fi**
-
- Line 610:
-
- - **create_mgmt_network_interface**
-
- Line 612:
-
- - **configure_lb_mgmt_sec_grp**
-
- - Second, comment the following three lines in the **octavia_start** function in octavia/devstack/plugin.sh .
-
- Line 465-467:
-
- - **if ! ps aux | grep -q [o]-hm0 && [ $OCTAVIA_NODE != 'api' ] ; then**
- - **sudo dhclient -v o-hm0 -cf $OCTAVIA_DHCLIENT_CONF**
- - **fi**
-
-- 2 Follow the "Multi-pod Installation with DevStack" document
- to prepare your local.conf for node1 in RegionOne, and add the
- following lines before installation. Start DevStack in node1.
-
- .. code-block:: console
-
- enable_plugin neutron-lbaas https://github.com/openstack/neutron-lbaas.git
- enable_plugin octavia https://github.com/openstack/octavia.git
- ENABLED_SERVICES+=,q-lbaasv2
- ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
-
-- 3 If users only want to deploy Octavia in RegionOne, the following two
- steps can be skipped. After the DevStack installation in node1 is completed,
- for node2 in RegionTwo clone the code from the Octavia repository to
- /opt/stack/. Here we need to modify plugin.sh in five sub-steps; a sed
- sketch for the last three replacements is given after this list.
-
- - First, since Keystone is installed in RegionOne and shared by other
- regions, we need to comment all **add_load-balancer_roles** lines in
- the **octavia_init** function in octavia/devstack/plugin.sh .
-
- Line 597 and Line 606:
-
- - **add_load-balancer_roles**
-
- - Second, the same as in Step 1, comment out a total of fourteen lines that create networking resources in the **octavia_init** function.
-
- - Third, replace all **'openstack keypair'** with
- **'openstack --os-region-name=$REGION_NAME keypair'**.
-
- - Fourth, replace all **'openstack image'** with
- **'openstack --os-region-name=$REGION_NAME image'**.
-
- - Fifth, replace all **'openstack flavor'** with
- **'openstack --os-region-name=$REGION_NAME flavor'**.
-
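- A possible way to apply the last three replacements in one pass is the sed
- sketch below (assuming the Octavia checkout lives at /opt/stack/octavia).
-
- .. code-block:: console
-
-    $ cd /opt/stack/octavia
-    $ sed -i 's/openstack keypair/openstack --os-region-name=$REGION_NAME keypair/g' devstack/plugin.sh
-    $ sed -i 's/openstack image/openstack --os-region-name=$REGION_NAME image/g' devstack/plugin.sh
-    $ sed -i 's/openstack flavor/openstack --os-region-name=$REGION_NAME flavor/g' devstack/plugin.sh
-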
-- 4 Follow the "Multi-pod Installation with DevStack" document
- to prepare your local.conf for node2 in RegionTwo, and add the
- following lines before installation. Start DevStack in node2.
-
- .. code-block:: console
-
- enable_plugin neutron-lbaas https://github.com/openstack/neutron-lbaas.git
- enable_plugin octavia https://github.com/openstack/octavia.git
- ENABLED_SERVICES+=,q-lbaasv2
- ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
-
-Prerequisite
-^^^^^^^^^^^^
-
-- 1 After DevStack successfully starts, we must create environment variables
- for the admin user and use the admin project, since the Octavia controller
- will use the admin account to query and use the management network as well
- as the security group created in the following steps.
-
- .. code-block:: console
-
- $ source openrc admin admin
-
-- 2 Then unset the region name environment variable, so that the following
- commands can be issued to the specified region as needed.
-
- .. code-block:: console
-
- $ unset OS_REGION_NAME
-
-- 3 Before configuring LBaaS, we need to create pods in CentralRegion, i.e., node1.
-
- .. code-block:: console
-
- $ openstack multiregion networking pod create --region-name CentralRegion
- $ openstack multiregion networking pod create --region-name RegionOne --availability-zone az1
- $ openstack multiregion networking pod create --region-name RegionTwo --availability-zone az2
-
-Configuration
-^^^^^^^^^^^^^
-
-- 1 Create security groups.
-
- - Create security group and rules for load balancer management network.
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion security group create lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol icmp lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 80 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol icmpv6 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 22 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 80 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 9443 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp
-
- .. note:: The output in the console is omitted.
-
- - Create security group and rules for the health manager
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion security group create lb-health-mgr-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol udp --dst-port 5555 lb-health-mgr-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol udp --dst-port 5555 --ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp
-
- .. note:: The output in the console is omitted.
-
-- 2 Configure LBaaS in node1
-
- - Create an amphora management network in CentralRegion
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion network create lb-mgmt-net1
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | 9c3bd3f7-b581-4686-b35a-434b2fe5c1d5 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | None |
- | mtu | None |
- | name | lb-mgmt-net1 |
- | port_security_enabled | False |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 1094 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a subnet in lb-mgmt-net1
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion subnet create --subnet-range 192.168.10.0/24 --network lb-mgmt-net1 lb-mgmt-subnet1
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 192.168.10.2-192.168.10.254 |
- | cidr | 192.168.10.0/24 |
- | created_at | 2019-01-01T06:31:10Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 192.168.10.1 |
- | host_routes | |
- | id | 84562c3a-55be-4c0f-9e50-3a5206670077 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | None |
- | name | lb-mgmt-subnet1 |
- | network_id | 9c3bd3f7-b581-4686-b35a-434b2fe5c1d5 |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2019-01-01T06:31:10Z |
- +-------------------+--------------------------------------+
-
- - Create the health management interface for Octavia in RegionOne.
-
- .. code-block:: console
-
- $ id_and_mac=$(openstack --os-region-name CentralRegion port create --security-group lb-health-mgr-sec-grp --device-owner Octavia:health-mgr --network lb-mgmt-net1 octavia-health-manager-region-one-listen-port | awk '/ id | mac_address / {print $4}')
- $ id_and_mac=($id_and_mac)
- $ MGMT_PORT_ID=${id_and_mac[0]}
- $ MGMT_PORT_MAC=${id_and_mac[1]}
- $ MGMT_PORT_IP=$(openstack --os-region-name RegionOne port show -f value -c fixed_ips $MGMT_PORT_ID | awk '{FS=",| "; gsub(",",""); gsub("'\''",""); for(i = 1; i <= NF; ++i) {if ($i ~ /^ip_address/) {n=index($i, "="); if (substr($i, n+1) ~ "\\.") print substr($i, n+1)}}}')
- $ openstack --os-region-name RegionOne port set --host $(hostname) $MGMT_PORT_ID
- $ sudo ovs-vsctl -- --may-exist add-port ${OVS_BRIDGE:-br-int} o-hm0 -- set Interface o-hm0 type=internal -- set Interface o-hm0 external-ids:iface-status=active -- set Interface o-hm0 external-ids:attached-mac=$MGMT_PORT_MAC -- set Interface o-hm0 external-ids:iface-id=$MGMT_PORT_ID -- set Interface o-hm0 external-ids:skip_cleanup=true
- $ OCTAVIA_DHCLIENT_CONF=/etc/octavia/dhcp/dhclient.conf
- $ sudo ip link set dev o-hm0 address $MGMT_PORT_MAC
- $ sudo dhclient -v o-hm0 -cf $OCTAVIA_DHCLIENT_CONF
-
- Listening on LPF/o-hm0/fa:16:3e:54:16:8e
- Sending on LPF/o-hm0/fa:16:3e:54:16:8e
- Sending on Socket/fallback
- DHCPDISCOVER on o-hm0 to 255.255.255.255 port 67 interval 3 (xid=0xd3e7353)
- DHCPREQUEST of 192.168.10.194 on o-hm0 to 255.255.255.255 port 67 (xid=0x53733e0d)
- DHCPOFFER of 192.168.10.194 from 192.168.10.2
- DHCPACK of 192.168.10.194 from 192.168.10.2
- bound to 192.168.10.194 -- renewal in 42514 seconds.
-
- $ sudo iptables -I INPUT -i o-hm0 -p udp --dport 5555 -j ACCEPT
-
- .. note:: As shown in the console, the DHCP server allocates 192.168.10.194 as
- the IP of the health management interface, i.e., o-hm0. Hence, we need to
- modify the /etc/octavia/octavia.conf file to make Octavia aware of it and
- use the resources we just created, including the health management interface,
- the amphora security group and so on.
-
- .. csv-table::
- :header: "Option", "Description", "Example"
-
- [health_manager] bind_ip, "the ip of health manager in RegionOne", 192.168.10.194
- [health_manager] bind_port, "the port health manager listens on", 5555
- [health_manager] controller_ip_port_list, "the ip and port of health manager binds in RegionOne", 192.168.10.194:5555
- [controller_worker] amp_boot_network_list, "the id of amphora management network in RegionOne", "query neutron to obtain it, i.e., the id of lb-mgmt-net1 in this doc"
- [controller_worker] amp_secgroup_list, "the id of security group created for amphora in central region", "query neutron to obtain it, i.e., the id of lb-mgmt-sec-grp"
- [neutron] service_name, "The name of the neutron service in the keystone catalog", neutron
- [neutron] endpoint, "Central neutron endpoint if override is necessary", http://192.168.57.9:20001/
- [neutron] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", CentralRegion
- [neutron] endpoint_type, "Endpoint type", public
- [nova] service_name, "The name of the nova service in the keystone catalog", nova
- [nova] endpoint, "Custom nova endpoint if override is necessary", http://192.168.57.9/compute/v2.1
- [nova] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", RegionOne
- [nova] endpoint_type, "Endpoint type in Identity service catalog to use for communication with the OpenStack services", public
- [glance] service_name, "The name of the glance service in the keystone catalog", glance
- [glance] endpoint, "Custom glance endpoint if override is necessary", http://192.168.57.9/image
- [glance] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", RegionOne
- [glance] endpoint_type, "Endpoint type in Identity service catalog to use for communication with the OpenStack services", public
-
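-  A minimal sketch of the resulting octavia.conf fragment in node1; the
-  network and security group IDs are placeholders to be replaced with the real
-  UUIDs queried from neutron, and the endpoints are the examples from the
-  table above.
-
-  .. code-block:: ini
-
-    [health_manager]
-    bind_ip = 192.168.10.194
-    bind_port = 5555
-    controller_ip_port_list = 192.168.10.194:5555
-
-    [controller_worker]
-    amp_boot_network_list = <id of lb-mgmt-net1>
-    amp_secgroup_list = <id of lb-mgmt-sec-grp>
-
-    [neutron]
-    service_name = neutron
-    endpoint = http://192.168.57.9:20001/
-    region_name = CentralRegion
-    endpoint_type = public
-
-    [nova]
-    service_name = nova
-    endpoint = http://192.168.57.9/compute/v2.1
-    region_name = RegionOne
-    endpoint_type = public
-
-    [glance]
-    service_name = glance
-    endpoint = http://192.168.57.9/image
-    region_name = RegionOne
-    endpoint_type = public
-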
- Restart all the services of Octavia in node1.
-
- .. code-block:: console
-
- $ sudo systemctl restart devstack@o-*
-
-- 3 If users only deploy Octavia in RegionOne, this step can be skipped.
- Otherwise, configure LBaaS in node2.
-
- - Create an amphora management network in CentralRegion
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion network create lb-mgmt-net2
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | 6494d887-25a8-4b07-8422-93f7acc21ecd |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | None |
- | mtu | None |
- | name | lb-mgmt-net2 |
- | port_security_enabled | False |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 1085 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a subnet in lb-mgmt-net2
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion subnet create --subnet-range 192.168.20.0/24 --network lb-mgmt-net2 lb-mgmt-subnet2
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 192.168.20.2-192.168.20.254 |
- | cidr | 192.168.20.0/24 |
- | created_at | 2019-01-01T06:53:28Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 192.168.20.1 |
- | host_routes | |
- | id | de2e9e76-e3c8-490f-b030-4374b22c2d95 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | None |
- | name | lb-mgmt-subnet2 |
- | network_id | 6494d887-25a8-4b07-8422-93f7acc21ecd |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2019-01-01T06:53:28Z |
- +-------------------+--------------------------------------+
-
- - Create the health management interface for Octavia in RegionTwo.
-
- .. code-block:: console
-
- $ id_and_mac=$(openstack --os-region-name CentralRegion port create --security-group lb-health-mgr-sec-grp --device-owner Octavia:health-mgr --network lb-mgmt-net2 octavia-health-manager-region-two-listen-port | awk '/ id | mac_address / {print $4}')
- $ id_and_mac=($id_and_mac)
- $ MGMT_PORT_ID=${id_and_mac[0]}
- $ MGMT_PORT_MAC=${id_and_mac[1]}
- $ MGMT_PORT_IP=$(openstack --os-region-name RegionTwo port show -f value -c fixed_ips $MGMT_PORT_ID | awk '{FS=",| "; gsub(",",""); gsub("'\''",""); for(i = 1; i <= NF; ++i) {if ($i ~ /^ip_address/) {n=index($i, "="); if (substr($i, n+1) ~ "\\.") print substr($i, n+1)}}}')
- $ openstack --os-region-name RegionTwo port set --host $(hostname) $MGMT_PORT_ID
- $ sudo ovs-vsctl -- --may-exist add-port ${OVS_BRIDGE:-br-int} o-hm0 -- set Interface o-hm0 type=internal -- set Interface o-hm0 external-ids:iface-status=active -- set Interface o-hm0 external-ids:attached-mac=$MGMT_PORT_MAC -- set Interface o-hm0 external-ids:iface-id=$MGMT_PORT_ID -- set Interface o-hm0 external-ids:skip_cleanup=true
- $ OCTAVIA_DHCLIENT_CONF=/etc/octavia/dhcp/dhclient.conf
- $ sudo ip link set dev o-hm0 address $MGMT_PORT_MAC
- $ sudo dhclient -v o-hm0 -cf $OCTAVIA_DHCLIENT_CONF
-
- Listening on LPF/o-hm0/fa:16:3e:c0:bf:30
- Sending on LPF/o-hm0/fa:16:3e:c0:bf:30
- Sending on Socket/fallback
- DHCPDISCOVER on o-hm0 to 255.255.255.255 port 67 interval 3 (xid=0xad6d3a1a)
- DHCPREQUEST of 192.168.20.3 on o-hm0 to 255.255.255.255 port 67 (xid=0x1a3a6dad)
- DHCPOFFER of 192.168.20.3 from 192.168.20.2
- DHCPACK of 192.168.20.3 from 192.168.20.2
- bound to 192.168.20.3 -- renewal in 37208 seconds.
-
- $ sudo iptables -I INPUT -i o-hm0 -p udp --dport 5555 -j ACCEPT
-
- .. note:: The ip allocated by the DHCP server, i.e., 192.168.20.3 in this case,
- is bound to and listened on by the health manager of Octavia. Please note
- that it will be used in the configuration file of Octavia.
-
- Modify the /etc/octavia/octavia.conf in node2.
-
- .. csv-table::
- :header: "Option", "Description", "Example"
-
- [health_manager] bind_ip, "the ip of health manager in RegionTwo", 192.168.20.3
- [health_manager] bind_port, "the port health manager listens on in RegionTwo", 5555
- [health_manager] controller_ip_port_list, "the ip and port of health manager binds in RegionTwo", 192.168.20.3:5555
- [controller_worker] amp_boot_network_list, "the id of amphora management network in RegionTwo", "query neutron to obtain it, i.e., the id of lb-mgmt-net2 in this doc"
- [controller_worker] amp_secgroup_list, "the id of security group created for amphora in central region", "query neutron to obtain it, i.e., the id of lb-mgmt-sec-grp"
- [neutron] service_name, "The name of the neutron service in the keystone catalog", neutron
- [neutron] endpoint, "Central neutron endpoint if override is necessary", http://192.168.57.9:20001/
- [neutron] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", CentralRegion
- [neutron] endpoint_type, "Endpoint type", public
- [nova] service_name, "The name of the nova service in the keystone catalog", nova
- [nova] endpoint, "Custom nova endpoint if override is necessary", http://192.168.57.10/compute/v2.1
- [nova] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", RegionTwo
- [nova] endpoint_type, "Endpoint type in Identity service catalog to use for communication with the OpenStack services", public
- [glance] service_name, "The name of the glance service in the keystone catalog", glance
- [glance] endpoint, "Custom glance endpoint if override is necessary", http://192.168.57.10/image
- [glance] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", RegionTwo
- [glance] endpoint_type, "Endpoint type in Identity service catalog to use for communication with the OpenStack services", public
-
- Restart all the services of Octavia in node2.
-
- .. code-block:: console
-
- $ sudo systemctl restart devstack@o-*
-
- - By now, we have finished installing LBaaS.
-
-How to play
-^^^^^^^^^^^
-
-- 1 LBaaS members in one network and in the same region
-
- Here we take VxLAN as an example.
-
- - Create net1 in CentralRegion
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion network create net1
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | 9dcdcb56-358f-40b1-9e3f-6ed6bae6db7d |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | None |
- | mtu | None |
- | name | net1 |
- | port_security_enabled | False |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 1102 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a subnet in net1
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion subnet create --subnet-range 10.0.10.0/24 --gateway none --network net1 subnet1
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 10.0.10.1-10.0.10.254 |
- | cidr | 10.0.10.0/24 |
- | created_at | 2019-01-01T07:22:45Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | None |
- | host_routes | |
- | id | 39ccf811-b188-4ccf-a643-dd7669a413c2 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | None |
- | name | subnet1 |
- | network_id | 9dcdcb56-358f-40b1-9e3f-6ed6bae6db7d |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2019-01-01T07:22:45Z |
- +-------------------+--------------------------------------+
-
- .. note:: To enable adding instances as members with VIP, amphora adds a
- new route table to route the traffic sent from VIP to its gateway. However,
- in Tricircle, the gateway obtained from central neutron is not the real
- gateway in local neutron. As a result, we do not set any gateway for
- the subnet for now. We will remove this limitation in the future.
-
- - List all available flavors in RegionOne
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne flavor list
-
- +----+-----------+-------+------+-----------+-------+-----------+
- | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
- +----+-----------+-------+------+-----------+-------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | 1 | True |
- | 2 | m1.small | 2048 | 20 | 0 | 1 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | 2 | True |
- | 4 | m1.large | 8192 | 80 | 0 | 4 | True |
- | 42 | m1.nano | 64 | 0 | 0 | 1 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True |
- | 84 | m1.micro | 128 | 0 | 0 | 1 | True |
- | c1 | cirros256 | 256 | 0 | 0 | 1 | True |
- | d1 | ds512M | 512 | 5 | 0 | 1 | True |
- | d2 | ds1G | 1024 | 10 | 0 | 1 | True |
- | d3 | ds2G | 2048 | 10 | 0 | 2 | True |
- | d4 | ds4G | 4096 | 20 | 0 | 4 | True |
- +----+-----------+-------+------+-----------+-------+-----------+
-
- - List all available images in RegionOne
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne image list
-
- +--------------------------------------+--------------------------+--------+
- | ID | Name | Status |
- +--------------------------------------+--------------------------+--------+
- | 471ed2cb-8004-4973-9210-b96463b2c668 | amphora-x64-haproxy | active |
- | 85d165f0-bc7a-43d5-850b-4a8e89e57a66 | cirros-0.3.6-x86_64-disk | active |
- +--------------------------------------+--------------------------+--------+
-
- - Create two instances, i.e., backend1 and backend2, in RegionOne, which reside in subnet1.
-
- .. code-block:: console
-
- $ nova --os-region-name=RegionOne boot --flavor 1 --image $image_id --nic net-id=$net1_id backend1
- $ nova --os-region-name=RegionOne boot --flavor 1 --image $image_id --nic net-id=$net1_id backend2
-
- +--------------------------------------+-----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+-----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | backend1 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | |
- | OS-EXT-SRV-ATTR:reservation_id | r-0m1suyvm |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | 7poPJnDxV3Mz |
- | config_drive | |
- | created | 2019-01-01T07:30:26Z |
- | description | - |
- | flavor:disk | 1 |
- | flavor:ephemeral | 0 |
- | flavor:extra_specs | {} |
- | flavor:original_name | m1.tiny |
- | flavor:ram | 512 |
- | flavor:swap | 0 |
- | flavor:vcpus | 1 |
- | hostId | |
- | host_status | |
- | id | d330f73f-2d78-4f59-8ea2-6fa1b878d6a5 |
- | image | cirros-0.3.6-x86_64-disk (85d165f0-bc7a-43d5-850b-4a8e89e57a66) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | backend1 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | trusted_image_certificates | - |
- | updated | 2019-01-01T07:30:27Z |
- | user_id | fdf37c6259544a9294ae8463e9be063c |
- +--------------------------------------+-----------------------------------------------------------------+
-
- $ nova --os-region-name=RegionOne list
-
- +--------------------------------------+----------+--------+------------+-------------+------------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+----------+--------+------------+-------------+------------------+
- | d330f73f-2d78-4f59-8ea2-6fa1b878d6a5 | backend1 | ACTIVE | - | Running | net1=10.0.10.152 |
- | 72a4d0b0-88bc-41c5-9cb1-0965a5f3008f | backend2 | ACTIVE | - | Running | net1=10.0.10.176 |
- +--------------------------------------+----------+--------+------------+-------------+------------------+
-
- - Log in to the instances' consoles with user 'cirros' and password 'cubswin:)'.
- Then run the following commands to simulate a web server.
-
- .. note::
-
- If using cirros 0.4.0 or above, log in to the instances with user
- 'cirros' and password 'gocubsgo'.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id ssh cirros@10.0.10.152
- $ sudo ip netns exec dhcp-$net1_id ssh cirros@10.0.10.176
-
- $ MYIP=$(ifconfig eth0| grep 'inet addr'| awk -F: '{print $2}'| awk '{print $1}')
- $ while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-
- The Octavia services installed in node1 and node2 are standalone;
- here we take RegionOne as an example.
-
- - Create a load balancer for subnet1 in RegionOne.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer create --name lb1 --vip-subnet-id $subnet1_id
-
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | admin_state_up | True |
- | created_at | 2019-01-01T07:37:46 |
- | description | |
- | flavor | |
- | id | bbb5480a-a6ec-4cea-a77d-4872a94aca5c |
- | listeners | |
- | name | lb1 |
- | operating_status | OFFLINE |
- | pools | |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider | amphora |
- | provisioning_status | PENDING_CREATE |
- | updated_at | None |
- | vip_address | 10.0.10.189 |
- | vip_network_id | 9dcdcb56-358f-40b1-9e3f-6ed6bae6db7d |
- | vip_port_id | 759370eb-5f50-4229-be7e-0ca7aefe04db |
- | vip_qos_policy_id | None |
- | vip_subnet_id | 39ccf811-b188-4ccf-a643-dd7669a413c2 |
- +---------------------+--------------------------------------+
-
- Create a listener for the load balancer after the status of the load
- balancer is 'ACTIVE'. Please note that it may take some time for the
- load balancer to become 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer list
-
- +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+
- | id | name | project_id | vip_address | provisioning_status | provider |
- +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+
- | bbb5480a-a6ec-4cea-a77d-4872a94aca5c | lb1 | d3b83ed3f2504a8699c9528a2297fea7 | 10.0.10.189 | ACTIVE | amphora |
- +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+
-
- $ openstack --os-region-name RegionOne loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | connection_limit | -1 |
- | created_at | 2019-01-01T07:44:21 |
- | default_pool_id | None |
- | default_tls_container_ref | None |
- | description | |
- | id | ec9d2e51-25ab-4c50-83cb-15f726d366ec |
- | insert_headers | None |
- | l7policies | |
- | loadbalancers | bbb5480a-a6ec-4cea-a77d-4872a94aca5c |
- | name | listener1 |
- | operating_status | OFFLINE |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | protocol | HTTP |
- | protocol_port | 80 |
- | provisioning_status | PENDING_CREATE |
- | sni_container_refs | [] |
- | timeout_client_data | 50000 |
- | timeout_member_connect | 5000 |
- | timeout_member_data | 50000 |
- | timeout_tcp_inspect | 0 |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a pool for the listener after the status of the load balancer is 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
-
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | admin_state_up | True |
- | created_at | 2019-01-01T07:46:21 |
- | description | |
- | healthmonitor_id | |
- | id | 7560b064-cdbe-4fa2-ae50-f66ad67fb575 |
- | lb_algorithm | ROUND_ROBIN |
- | listeners | ec9d2e51-25ab-4c50-83cb-15f726d366ec |
- | loadbalancers | bbb5480a-a6ec-4cea-a77d-4872a94aca5c |
- | members | |
- | name | pool1 |
- | operating_status | OFFLINE |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | protocol | HTTP |
- | provisioning_status | PENDING_CREATE |
- | session_persistence | None |
- | updated_at | None |
- +---------------------+--------------------------------------+
-
- - Add two instances to the pool as members, after the status of the load
- balancer is 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer member create --subnet $subnet1_id --address $backend1_ip --protocol-port 80 pool1
-
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | address | 10.0.10.152 |
- | admin_state_up | True |
- | created_at | 2019-01-01T07:49:04 |
- | id | 4e6ce567-0710-4a29-a98f-ab766e4963ab |
- | name | |
- | operating_status | NO_MONITOR |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | protocol_port | 80 |
- | provisioning_status | PENDING_CREATE |
- | subnet_id | 39ccf811-b188-4ccf-a643-dd7669a413c2 |
- | updated_at | None |
- | weight | 1 |
- | monitor_port | None |
- | monitor_address | None |
- | backup | False |
- +---------------------+--------------------------------------+
-
- $ openstack --os-region-name RegionOne loadbalancer member create --subnet $subnet1_id --address $backend2_ip --protocol-port 80 pool1
-
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | address | 10.0.10.176 |
- | admin_state_up | True |
- | created_at | 2019-01-01T07:50:06 |
- | id | 1e8ab609-a7e9-44af-b37f-69b494b40d01 |
- | name | |
- | operating_status | NO_MONITOR |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | protocol_port | 80 |
- | provisioning_status | PENDING_CREATE |
- | subnet_id | 39ccf811-b188-4ccf-a643-dd7669a413c2 |
- | updated_at | None |
- | weight | 1 |
- | monitor_port | None |
- | monitor_address | None |
- | backup | False |
- +---------------------+--------------------------------------+
-
- - Verify load balancing. Request the VIP twice.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id curl -v $VIP
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.152
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.176
- * Closing connection 0
-
-- 2 LBaaS members in one network but in different regions
-
- - List all available flavors in RegionTwo
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionTwo flavor list
-
- +----+-----------+-------+------+-----------+-------+-----------+
- | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
- +----+-----------+-------+------+-----------+-------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | 1 | True |
- | 2 | m1.small | 2048 | 20 | 0 | 1 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | 2 | True |
- | 4 | m1.large | 8192 | 80 | 0 | 4 | True |
- | 42 | m1.nano | 64 | 0 | 0 | 1 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True |
- | 84 | m1.micro | 128 | 0 | 0 | 1 | True |
- | c1 | cirros256 | 256 | 0 | 0 | 1 | True |
- | d1 | ds512M | 512 | 5 | 0 | 1 | True |
- | d2 | ds1G | 1024 | 10 | 0 | 1 | True |
- | d3 | ds2G | 2048 | 10 | 0 | 2 | True |
- | d4 | ds4G | 4096 | 20 | 0 | 4 | True |
- +----+-----------+-------+------+-----------+-------+-----------+
-
- - List all available images in RegionTwo
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionTwo image list
-
- +--------------------------------------+--------------------------+--------+
- | ID | Name | Status |
- +--------------------------------------+--------------------------+--------+
- | 471ed2cb-8004-4973-9210-b96463b2c668 | amphora-x64-haproxy | active |
- | 85d165f0-bc7a-43d5-850b-4a8e89e57a66 | cirros-0.3.6-x86_64-disk | active |
- +--------------------------------------+--------------------------+--------+
-
- - Create an instance in RegionTwo, which resides in subnet1
-
- .. code-block:: console
-
- $ nova --os-region-name=RegionTwo boot --flavor 1 --image $image_id --nic net-id=$net1_id backend3
-
- +-------------------------------------+-----------------------------------------------------------------+
- | Field | Value |
- +-------------------------------------+-----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | az2 |
- | OS-EXT-SRV-ATTR:host | None |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | None |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-STS:power_state | NOSTATE |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | None |
- | OS-SRV-USG:terminated_at | None |
- | accessIPv4 | |
- | accessIPv6 | |
- | addresses | |
- | adminPass | rpV9MLzPGSvB |
- | config_drive | |
- | created | 2019-01-01T07:56:41Z |
- | flavor | m1.tiny (1) |
- | hostId | |
- | id | b27539fb-4c98-4f0c-b3f8-bc6744659f67 |
- | image | cirros-0.3.6-x86_64-disk (85d165f0-bc7a-43d5-850b-4a8e89e57a66) |
- | key_name | None |
- | name | backend3 |
- | progress | 0 |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | properties | |
- | security_groups | name='default' |
- | status | BUILD |
- | updated | 2019-01-01T07:56:42Z |
- | user_id | fdf37c6259544a9294ae8463e9be063c |
- | volumes_attached | |
- +-------------------------------------+-----------------------------------------------------------------+
-
- Log in to the instance console with user 'cirros' and password 'cubswin:)'.
- Then run the following commands to simulate a web server.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id ssh cirros@$backend3_ip
-
- $ MYIP=$(ifconfig eth0| grep 'inet addr'| awk -F: '{print $2}'| awk '{print $1}')
- $ while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-
- - Add backend3 to the pool as a member, after the status of the load balancer is 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer member create --subnet $subnet1_id --address $backend3_ip --protocol-port 80 pool1
-
- - Verify load balancing. Request the VIP three times.
-
- .. note:: Please note that if the subnet has already been created in the
- region, as in the cases before this step, either the unique name or the id
- of the subnet can be used as the hint. But if the subnet has not been
- created in that region yet, as is the case for backend3, users are required
- to use the subnet id as the hint instead of the subnet name, because the
- local neutron needs to query the central neutron for the subnet by id.
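-
- For example, the subnet id can be retrieved from central neutron first and
- then passed as the hint (a small sketch using the subnet created earlier in
- this guide):
-
- .. code-block:: console
-
- $ subnet1_id=$(openstack --os-region-name CentralRegion subnet show subnet1 -f value -c id)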
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id curl -v $VIP
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.152
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.176
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.186
- * Closing connection 0
-
-- 3 LBaaS members in different networks and different regions
-
- - Create net2 in CentralRegion
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion network create net2
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | f0ea9608-2d6e-4272-a596-2dc3a725eddc |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | None |
- | mtu | None |
- | name | net2 |
- | port_security_enabled | False |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 1088 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a subnet in net2
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion subnet create --subnet-range 10.0.20.0/24 --gateway none --network net2 subnet2
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 10.0.20.1-10.0.20.254 |
- | cidr | 10.0.20.0/24 |
- | created_at | 2019-01-01T07:59:53Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | None |
- | host_routes | |
- | id | 4c05a73d-fa1c-46a9-982f-6683b0d1cb2a |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | None |
- | name | subnet2 |
- | network_id | f0ea9608-2d6e-4272-a596-2dc3a725eddc |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2019-01-01T07:59:53Z |
- +-------------------+--------------------------------------+
-
- - List all available flavors in RegionTwo
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionTwo flavor list
-
- +----+-----------+-------+------+-----------+-------+-----------+
- | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
- +----+-----------+-------+------+-----------+-------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | 1 | True |
- | 2 | m1.small | 2048 | 20 | 0 | 1 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | 2 | True |
- | 4 | m1.large | 8192 | 80 | 0 | 4 | True |
- | 42 | m1.nano | 64 | 0 | 0 | 1 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True |
- | 84 | m1.micro | 128 | 0 | 0 | 1 | True |
- | c1 | cirros256 | 256 | 0 | 0 | 1 | True |
- | d1 | ds512M | 512 | 5 | 0 | 1 | True |
- | d2 | ds1G | 1024 | 10 | 0 | 1 | True |
- | d3 | ds2G | 2048 | 10 | 0 | 2 | True |
- | d4 | ds4G | 4096 | 20 | 0 | 4 | True |
- +----+-----------+-------+------+-----------+-------+-----------+
-
- - List all available images in RegionTwo
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionTwo image list
-
- +--------------------------------------+--------------------------+--------+
- | ID | Name | Status |
- +--------------------------------------+--------------------------+--------+
- | 471ed2cb-8004-4973-9210-b96463b2c668 | amphora-x64-haproxy | active |
- | 85d165f0-bc7a-43d5-850b-4a8e89e57a66 | cirros-0.3.6-x86_64-disk | active |
- +--------------------------------------+--------------------------+--------+
-
- - Create an instance in RegionTwo, which resides in subnet2
-
- .. code-block:: console
-
- $ nova --os-region-name=RegionTwo boot --flavor 1 --image $image_id --nic net-id=$net2_id backend4
-
- +-------------------------------------+-----------------------------------------------------------------+
- | Field | Value |
- +-------------------------------------+-----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | az2 |
- | OS-EXT-SRV-ATTR:host | None |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | None |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-STS:power_state | NOSTATE |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | None |
- | OS-SRV-USG:terminated_at | None |
- | accessIPv4 | |
- | accessIPv6 | |
- | addresses | |
- | adminPass | jHY5xdqgxezb |
- | config_drive | |
- | created | 2019-01-01T08:02:50Z |
- | flavor | m1.tiny (1) |
- | hostId | |
- | id | 43bcdc80-6492-4a88-90dd-a979c73219a1 |
- | image | cirros-0.3.6-x86_64-disk (85d165f0-bc7a-43d5-850b-4a8e89e57a66) |
- | key_name | None |
- | name | backend4 |
- | progress | 0 |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | properties | |
- | security_groups | name='default' |
- | status | BUILD |
- | updated | 2019-01-01T08:02:51Z |
- | user_id | fdf37c6259544a9294ae8463e9be063c |
- | volumes_attached | |
- +-------------------------------------+-----------------------------------------------------------------+
-
- - Log in to the instance console with user 'cirros' and password 'cubswin:)'. Then run the following commands to simulate a web server.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net2_id ssh cirros@$backend4_ip
-
- $ MYIP=$(ifconfig eth0| grep 'inet addr'| awk -F: '{print $2}'| awk '{print $1}')
- $ while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-
- - Add the instance to the pool as a member, after the status of the load balancer is 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer member create --subnet $subnet2_id --address $backend4_ip --protocol-port 80 pool1
-
- - Verify load balancing. Request the VIP four times.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id curl -v $VIP
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.152
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.176
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.186
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.20.64
- * Closing connection 0
\ No newline at end of file
diff --git a/doc/source/install/installation-lbaas_with_nova_cell_v2.rst b/doc/source/install/installation-lbaas_with_nova_cell_v2.rst
deleted file mode 100644
index 569b6de4..00000000
--- a/doc/source/install/installation-lbaas_with_nova_cell_v2.rst
+++ /dev/null
@@ -1,1323 +0,0 @@
-===========================================================
-Installation guide for LBaaS with nova cell v2 in Tricircle
-===========================================================
-
-.. note:: Since Octavia does not support multiple region scenarios, some
- modifications are required to install the Tricircle and Octavia in multiple
- pods. As a result, we will keep updating this document to support automatic
- installation and testing of Tricircle and Octavia in multiple regions.
-
-.. note:: Multi-cell support of Nova cell v2 is under development. DevStack
- doesn't support multi-cell deployment currently, so the steps discussed in
- this document may not seem that elegant. We will keep updating this document
- according to the progress of multi-cell development by the Nova team.
-
-Setup & Installation
-^^^^^^^^^^^^^^^^^^^^
-
-- 1 For node1 in RegionOne, clone the code from the Octavia repository to /opt/stack/ .
- Then make some changes to Octavia so that we can build the management network in multiple regions manually. The lines to comment out are listed below.
-
- - First, comment out the following lines in the **octavia_init** function in octavia/devstack/plugin.sh.
-
- Line 586-588:
-
- - **build_mgmt_network**
- - **OCTAVIA_AMP_NETWORK_ID=$(openstack network show lb-mgmt-net -f value -c id)**
- - **iniset $OCTAVIA_CONF controller_worker amp_boot_network_list ${OCTAVIA_AMP_NETWORK_ID}**
-
- Line 593-595:
-
- - **if is_service_enabled tempest; then**
- - **configure_octavia_tempest ${OCTAVIA_AMP_NETWORK_ID}**
- - **fi**
-
- Line 602-604:
-
- - **if is_service_enabled tempest; then**
- - **configure_octavia_tempest ${OCTAVIA_AMP_NETWORK_ID}**
- - **fi**
-
- Line 610:
-
- - **create_mgmt_network_interface**
-
- Line 612:
-
- - **configure_lb_mgmt_sec_grp**
-
- - Second, comment out the following three lines in the **octavia_start** function in octavia/devstack/plugin.sh.
-
- Line 465-467:
-
- - **if ! ps aux | grep -q [o]-hm0 && [ $OCTAVIA_NODE != 'api' ] ; then**
- - **sudo dhclient -v o-hm0 -cf $OCTAVIA_DHCLIENT_CONF**
- - **fi**
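-
- For instance, after this change the dhclient block in the **octavia_start**
- function would look like the following (a sketch; the exact line numbers may
- differ across Octavia versions):
-
- .. code-block:: bash
-
- # if ! ps aux | grep -q [o]-hm0 && [ $OCTAVIA_NODE != 'api' ] ; then
- #     sudo dhclient -v o-hm0 -cf $OCTAVIA_DHCLIENT_CONF
- # fi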
-
-- 2 Follow the "Multi-pod Installation with DevStack" document
- to prepare your local.conf for node1 in RegionOne, and add the
- following lines before installation. Then start DevStack in node1.
-
- .. code-block:: console
-
- TRICIRCLE_DEPLOY_WITH_CELL=True
-
- enable_plugin neutron-lbaas https://github.com/openstack/neutron-lbaas.git
- enable_plugin octavia https://github.com/openstack/octavia.git
- ENABLED_SERVICES+=,q-lbaasv2
- ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
-
-- 3 If users only want to deploy Octavia in RegionOne, the following two
- steps can be skipped. After the DevStack installation in node1 is completed,
- clone the code from the Octavia repository to /opt/stack/ on node2 in
- RegionTwo. Here we need to modify plugin.sh in five sub-steps.
-
- - First, since Keystone is installed in RegionOne and shared by other
- regions, we need to comment out all **add_load-balancer_roles** lines in
- the **octavia_init** function in octavia/devstack/plugin.sh.
-
- Line 597 and Line 606:
-
- - **add_load-balancer_roles**
-
- - Second, the same as Step 1, comment out the fourteen lines that create networking resources in the **octavia_init** function.
-
- - Third, replace all **'openstack keypair'** with
- **'openstack --os-region-name=$REGION_NAME keypair'**.
-
- - Fourth, replace all **'openstack image'** with
- **'openstack --os-region-name=$REGION_NAME image'**.
-
- - Fifth, replace all **'openstack flavor'** with
- **'openstack --os-region-name=$REGION_NAME flavor'**.
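-
- For example, the three replacements above can be done with sed (a rough
- sketch, assuming the script is located at /opt/stack/octavia/devstack/plugin.sh):
-
- .. code-block:: console
-
- $ cd /opt/stack/octavia/devstack
- $ sed -i 's/openstack keypair/openstack --os-region-name=$REGION_NAME keypair/g' plugin.sh
- $ sed -i 's/openstack image/openstack --os-region-name=$REGION_NAME image/g' plugin.sh
- $ sed -i 's/openstack flavor/openstack --os-region-name=$REGION_NAME flavor/g' plugin.sh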
-
-- 4 Follow the "Multi-pod Installation with DevStack" document
- to prepare your local.conf for node2 in RegionTwo, and add the
- following lines before installation. Then start DevStack in node2.
-
- .. code-block:: console
-
- TRICIRCLE_DEPLOY_WITH_CELL=True
-
- enable_plugin neutron-lbaas https://github.com/openstack/neutron-lbaas.git
- enable_plugin octavia https://github.com/openstack/octavia.git
- ENABLED_SERVICES+=,q-lbaasv2
- ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
-
-- 5 After DevStack successfully starts, we must create environment variables
- for the admin user and use the admin project, since the Octavia controller
- will use the admin account to query and use the management network as well
- as the security group created in the following steps.
-
- .. code-block:: console
-
- $ source openrc admin admin
-
-- 6 Then unset the region name environment variable, so that commands can be
- issued to the specified region as needed.
-
- .. code-block:: console
-
- $ unset OS_REGION_NAME
-
-.. note:: After running DevStack in both nodes, a multi-cell environment will
- be prepared: there is one CentralRegion, where the Nova API and central Neutron
- will be registered. Nova has two cells: node1 belongs to cell1, node2 belongs
- to cell2, and each cell will be configured to use a dedicated local Neutron.
- For cell1, it's the RegionOne Neutron in node1; for cell2, it's the RegionTwo
- Neutron in node2 (you can set the region name in local.conf to make the name
- more friendly). End users can access the CentralRegion endpoints of Nova and
- Neutron to experience the integration of Nova cell v2 and Tricircle.
-
-- 7 Stop the following services in node2::
-
- systemctl stop devstack@n-sch.service
- systemctl stop devstack@n-super-cond.service
- systemctl stop devstack@n-api.service
-
- If the devstack@n-api-meta.service service exists, stop it as well::
-
- systemctl stop devstack@n-api-meta.service
-
-.. note:: Actually for cell v2, only one Nova API is required. We enable n-api
- in node2 because we need DevStack to help us create the necessary cell
- database. If n-api is disabled, neither API database nor cell database will
- be created.
-
-- 8 In node2, run the following command::
-
- mysql -u $user -p $password -D nova_cell1 -e 'select host, mapped from compute_nodes'
-
- You can see that this command returns one row, showing that the host of
- node2 is already mapped::
-
- +--------+--------+
- | host | mapped |
- +--------+--------+
- | stack2 | 1 |
- +--------+--------+
-
- This host is registered to the Nova API in node2, which we have already
- stopped. We need to update this row to set "mapped" to 0::
-
- mysql -u $user -p $password -D nova_cell1 -e 'update compute_nodes set mapped = 0 where host = "stack2"'
- mysql -u $user -p $password -D nova_cell1 -e 'select host, mapped from compute_nodes'
-
- +--------+--------+
- | host | mapped |
- +--------+--------+
- | stack2 | 0 |
- +--------+--------+
-
- Then we can register this host again in the next step.
-
-- 9 In node1, run the following commands to register the new cell::
-
- nova-manage cell_v2 create_cell --name cell2 \
- --transport-url rabbit://$rabbit_user:$rabbit_passwd@$node2_ip:5672/nova_cell1 \
- --database_connection mysql+pymysql://$db_user:$db_passwd@$node2_ip/nova_cell1?charset=utf8
-
- nova-manage cell_v2 discover_hosts
-
- Then you can see that the new cell and host have been added to the database::
-
- mysql -u $user -p $password -D nova_api -e 'select cell_id, host from host_mappings'
-
- +---------+--------+
- | cell_id | host |
- +---------+--------+
- | 2 | stack1 |
- | 3 | stack2 |
- +---------+--------+
-
- mysql -u $user -p $password -D nova_api -e 'select id, name from cell_mappings'
-
- +----+-------+
- | id | name |
- +----+-------+
- | 1 | cell0 |
- | 2 | cell1 |
- | 3 | cell2 |
- +----+-------+
-
-- 10 In node1, run the following command::
-
- systemctl restart devstack@n-sch.service
-
-- 11 In node1, check if compute services in both hosts are registered::
-
- openstack --os-region-name CentralRegion compute service list
-
- +----+------------------+--------+----------+---------+-------+----------------------------+
- | ID | Binary | Host | Zone | Status | State | Updated At |
- +----+------------------+--------+----------+---------+-------+----------------------------+
- | 3 | nova-scheduler | stack1 | internal | enabled | up | 2019-01-01T05:31:31.000000 |
- | 5 | nova-consoleauth | stack1 | internal | enabled | up | 2019-01-01T05:31:37.000000 |
- | 7 | nova-conductor | stack1 | internal | enabled | up | 2019-01-01T05:31:30.000000 |
- | 1 | nova-conductor | stack1 | internal | enabled | up | 2019-01-01T05:31:38.000000 |
- | 3 | nova-compute | stack1 | nova | enabled | up | 2019-01-01T05:31:38.000000 |
- | 1 | nova-conductor | stack2 | internal | enabled | up | 2019-01-01T05:31:36.000000 |
- | 3 | nova-compute | stack2 | nova | enabled | up | 2019-01-01T05:31:31.000000 |
- +----+------------------+--------+----------+---------+-------+----------------------------+
-
- stack1 has two nova-conductor services, because one of them is a super
- conductor service.
-
- After you run the "compute service list" command above, if you only see services in node1, like::
-
- +----+------------------+--------+----------+---------+-------+----------------------------+
- | ID | Binary | Host | Zone | Status | State | Updated At |
- +----+------------------+--------+----------+---------+-------+----------------------------+
- | 1 | nova-conductor | stack1 | internal | enabled | up | 2019-01-01T05:30:58.000000 |
- | 3 | nova-compute | stack1 | nova | enabled | up | 2019-01-01T05:30:58.000000 |
- | 3 | nova-scheduler | stack1 | internal | enabled | up | 2019-01-01T05:31:01.000000 |
- | 5 | nova-consoleauth | stack1 | internal | enabled | up | 2019-01-01T05:30:57.000000 |
- | 7 | nova-conductor | stack1 | internal | enabled | up | 2019-01-01T05:31:00.000000 |
- +----+------------------+--------+----------+---------+-------+----------------------------+
-
- Though the new cell has been registered in the database, the running n-api
- process in node1 may not recognize it. We find that restarting n-api solves
- this problem.
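-
- In that case, restart the Nova API service in node1 and check the service
- list again, for example::
-
- systemctl restart devstack@n-api.service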
-
-- 12 Create two aggregates and put the two hosts in each aggregate::
-
- nova --os-region-name CentralRegion aggregate-create ag1 az1
- nova --os-region-name CentralRegion aggregate-create ag2 az2
- nova --os-region-name CentralRegion aggregate-add-host ag1 stack1
- nova --os-region-name CentralRegion aggregate-add-host ag2 stack2
-
-- 13 Create pods via the Tricircle client::
-
- openstack --os-region-name CentralRegion multiregion networking pod create --region-name CentralRegion
- openstack --os-region-name CentralRegion multiregion networking pod create --region-name RegionOne --availability-zone az1
- openstack --os-region-name CentralRegion multiregion networking pod create --region-name RegionTwo --availability-zone az2
-
-
-Configuration
-^^^^^^^^^^^^^
-
-- 1 Create security groups.
-
- - Create a security group and rules for the load balancer management network.
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion security group create lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol icmp lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 80 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol icmpv6 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 22 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 80 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol tcp --dst-port 9443 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp
- $ openstack --os-region-name RegionOne security group show $lb-mgmt-sec-grp_ID
-
- .. note:: The output in the console is omitted.
-
- - Create a security group and rules for the health manager.
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion security group create lb-health-mgr-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol udp --dst-port 5555 lb-health-mgr-sec-grp
- $ openstack --os-region-name CentralRegion security group rule create --protocol udp --dst-port 5555 --ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp
-
- .. note:: The output in the console is omitted.
-
-- 2 Configure LBaaS in node1
-
- - Create an amphora management network in CentralRegion
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion network create lb-mgmt-net1
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | 9c3bd3f7-b581-4686-b35a-434b2fe5c1d5 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | None |
- | mtu | None |
- | name | lb-mgmt-net1 |
- | port_security_enabled | False |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 1094 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a subnet in lb-mgmt-net1
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion subnet create --subnet-range 192.168.10.0/24 --network lb-mgmt-net1 lb-mgmt-subnet1
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 192.168.10.2-192.168.10.254 |
- | cidr | 192.168.10.0/24 |
- | created_at | 2019-01-01T06:31:10Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 192.168.10.1 |
- | host_routes | |
- | id | 84562c3a-55be-4c0f-9e50-3a5206670077 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | None |
- | name | lb-mgmt-subnet1 |
- | network_id | 9c3bd3f7-b581-4686-b35a-434b2fe5c1d5 |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2019-01-01T06:31:10Z |
- +-------------------+--------------------------------------+
-
- - Create the health management interface for Octavia in RegionOne.
-
- .. code-block:: console
-
- $ id_and_mac=$(openstack --os-region-name CentralRegion port create --security-group lb-health-mgr-sec-grp --device-owner Octavia:health-mgr --network lb-mgmt-net1 octavia-health-manager-region-one-listen-port | awk '/ id | mac_address / {print $4}')
- $ id_and_mac=($id_and_mac)
- $ MGMT_PORT_ID=${id_and_mac[0]}
- $ MGMT_PORT_MAC=${id_and_mac[1]}
- $ MGMT_PORT_IP=$(openstack --os-region-name RegionOne port show -f value -c fixed_ips $MGMT_PORT_ID | awk '{FS=",| "; gsub(",",""); gsub("'\''",""); for(i = 1; i <= NF; ++i) {if ($i ~ /^ip_address/) {n=index($i, "="); if (substr($i, n+1) ~ "\\.") print substr($i, n+1)}}}')
- $ openstack --os-region-name RegionOne port set --host $(hostname) $MGMT_PORT_ID
- $ sudo ovs-vsctl -- --may-exist add-port ${OVS_BRIDGE:-br-int} o-hm0 -- set Interface o-hm0 type=internal -- set Interface o-hm0 external-ids:iface-status=active -- set Interface o-hm0 external-ids:attached-mac=$MGMT_PORT_MAC -- set Interface o-hm0 external-ids:iface-id=$MGMT_PORT_ID -- set Interface o-hm0 external-ids:skip_cleanup=true
- $ OCTAVIA_DHCLIENT_CONF=/etc/octavia/dhcp/dhclient.conf
- $ sudo ip link set dev o-hm0 address $MGMT_PORT_MAC
- $ sudo dhclient -v o-hm0 -cf $OCTAVIA_DHCLIENT_CONF
-
- Listening on LPF/o-hm0/fa:16:3e:54:16:8e
- Sending on LPF/o-hm0/fa:16:3e:54:16:8e
- Sending on Socket/fallback
- DHCPDISCOVER on o-hm0 to 255.255.255.255 port 67 interval 3 (xid=0xd3e7353)
- DHCPREQUEST of 192.168.10.194 on o-hm0 to 255.255.255.255 port 67 (xid=0x53733e0d)
- DHCPOFFER of 192.168.10.194 from 192.168.10.2
- DHCPACK of 192.168.10.194 from 192.168.10.2
- bound to 192.168.10.194 -- renewal in 42514 seconds.
-
- $ sudo iptables -I INPUT -i o-hm0 -p udp --dport 5555 -j ACCEPT
-
- .. note:: As shown in the console, the DHCP server allocates 192.168.10.194
- as the IP of the health management interface, i.e., o-hm0. Hence, we need to
- modify the /etc/octavia/octavia.conf file to make Octavia aware of it and
- use the resources we just created, including the health management
- interface, the amphora security group and so on.
-
- .. csv-table::
- :header: "Option", "Description", "Example"
-
- [health_manager] bind_ip, "the ip of health manager in RegionOne", 192.168.10.194
- [health_manager] bind_port, "the port health manager listens on", 5555
- [health_manager] controller_ip_port_list, "the ip and port of health manager binds in RegionOne", 192.168.10.194:5555
- [controller_worker] amp_boot_network_list, "the id of amphora management network in RegionOne", "query neutron to obtain it, i.e., the id of lb-mgmt-net1 in this doc"
- [controller_worker] amp_secgroup_list, "the id of security group created for amphora in central region", "query neutron to obtain it, i.e., the id of lb-mgmt-sec-grp"
- [neutron] service_name, "The name of the neutron service in the keystone catalog", neutron
- [neutron] endpoint, "Central neutron endpoint if override is necessary", http://192.168.57.9:20001/
- [neutron] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", CentralRegion
- [neutron] endpoint_type, "Endpoint type", public
- [nova] service_name, "The name of the nova service in the keystone catalog", nova
- [nova] endpoint, "Custom nova endpoint if override is necessary", http://192.168.57.9/compute/v2.1
- [nova] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", RegionOne
- [nova] endpoint_type, "Endpoint type in Identity service catalog to use for communication with the OpenStack services", public
- [glance] service_name, "The name of the glance service in the keystone catalog", glance
- [glance] endpoint, "Custom glance endpoint if override is necessary", http://192.168.57.9/image
- [glance] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", RegionOne
- [glance] endpoint_type, "Endpoint type in Identity service catalog to use for communication with the OpenStack services", public
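-
- Putting the values above together, the relevant part of
- /etc/octavia/octavia.conf in node1 would look roughly like this (a sketch;
- replace the ids and endpoints with the real values from your environment):
-
- .. code-block:: ini
-
- [health_manager]
- bind_ip = 192.168.10.194
- bind_port = 5555
- controller_ip_port_list = 192.168.10.194:5555
-
- [controller_worker]
- amp_boot_network_list = <id of lb-mgmt-net1>
- amp_secgroup_list = <id of lb-mgmt-sec-grp>
-
- [neutron]
- service_name = neutron
- endpoint = http://192.168.57.9:20001/
- region_name = CentralRegion
- endpoint_type = public
-
- [nova]
- service_name = nova
- endpoint = http://192.168.57.9/compute/v2.1
- region_name = RegionOne
- endpoint_type = public
-
- [glance]
- service_name = glance
- endpoint = http://192.168.57.9/image
- region_name = RegionOne
- endpoint_type = public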
-
- Restart all the services of Octavia in node1.
-
- .. code-block:: console
-
- $ sudo systemctl restart devstack@o-*
-
-- 3 If users only deploy Octavia in RegionOne, this step can be skipped.
- Configure LBaaS in node2.
-
- - Create an amphora management network in CentralRegion
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion network create lb-mgmt-net2
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | 6494d887-25a8-4b07-8422-93f7acc21ecd |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | None |
- | mtu | None |
- | name | lb-mgmt-net2 |
- | port_security_enabled | False |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 1085 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a subnet in lb-mgmt-net2
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion subnet create --subnet-range 192.168.20.0/24 --network lb-mgmt-net2 lb-mgmt-subnet2
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 192.168.20.2-192.168.20.254 |
- | cidr | 192.168.20.0/24 |
- | created_at | 2019-01-01T06:53:28Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 192.168.20.1 |
- | host_routes | |
- | id | de2e9e76-e3c8-490f-b030-4374b22c2d95 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | None |
- | name | lb-mgmt-subnet2 |
- | network_id | 6494d887-25a8-4b07-8422-93f7acc21ecd |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2019-01-01T06:53:28Z |
- +-------------------+--------------------------------------+
-
- - Create the health management interface for Octavia in RegionTwo.
-
- .. code-block:: console
-
- $ id_and_mac=$(openstack --os-region-name CentralRegion port create --security-group lb-health-mgr-sec-grp --device-owner Octavia:health-mgr --network lb-mgmt-net2 octavia-health-manager-region-two-listen-port | awk '/ id | mac_address / {print $4}')
- $ id_and_mac=($id_and_mac)
- $ MGMT_PORT_ID=${id_and_mac[0]}
- $ MGMT_PORT_MAC=${id_and_mac[1]}
- $ MGMT_PORT_IP=$(openstack --os-region-name RegionTwo port show -f value -c fixed_ips $MGMT_PORT_ID | awk '{FS=",| "; gsub(",",""); gsub("'\''",""); for(i = 1; i <= NF; ++i) {if ($i ~ /^ip_address/) {n=index($i, "="); if (substr($i, n+1) ~ "\\.") print substr($i, n+1)}}}')
- $ openstack --os-region-name RegionTwo port set --host $(hostname) $MGMT_PORT_ID
- $ sudo ovs-vsctl -- --may-exist add-port ${OVS_BRIDGE:-br-int} o-hm0 -- set Interface o-hm0 type=internal -- set Interface o-hm0 external-ids:iface-status=active -- set Interface o-hm0 external-ids:attached-mac=$MGMT_PORT_MAC -- set Interface o-hm0 external-ids:iface-id=$MGMT_PORT_ID -- set Interface o-hm0 external-ids:skip_cleanup=true
- $ OCTAVIA_DHCLIENT_CONF=/etc/octavia/dhcp/dhclient.conf
- $ sudo ip link set dev o-hm0 address $MGMT_PORT_MAC
- $ sudo dhclient -v o-hm0 -cf $OCTAVIA_DHCLIENT_CONF
-
- Listening on LPF/o-hm0/fa:16:3e:c0:bf:30
- Sending on LPF/o-hm0/fa:16:3e:c0:bf:30
- Sending on Socket/fallback
- DHCPDISCOVER on o-hm0 to 255.255.255.255 port 67 interval 3 (xid=0xad6d3a1a)
- DHCPREQUEST of 192.168.20.3 on o-hm0 to 255.255.255.255 port 67 (xid=0x1a3a6dad)
- DHCPOFFER of 192.168.20.3 from 192.168.20.2
- DHCPACK of 192.168.20.3 from 192.168.20.2
- bound to 192.168.20.3 -- renewal in 37208 seconds.
-
- $ sudo iptables -I INPUT -i o-hm0 -p udp --dport 5555 -j ACCEPT
-
- .. note:: The IP allocated by the DHCP server, i.e., 192.168.20.3 in this
- case, is bound to and listened on by the health manager of Octavia. Please
- note that it will be used in the configuration file of Octavia.
-
- Modify the /etc/octavia/octavia.conf in node2.
-
- .. csv-table::
- :header: "Option", "Description", "Example"
-
- [health_manager] bind_ip, "the ip of health manager in RegionTwo", 192.168.20.3
- [health_manager] bind_port, "the port health manager listens on in RegionTwo", 5555
- [health_manager] controller_ip_port_list, "the ip and port of health manager binds in RegionTwo", 192.168.20.3:5555
- [controller_worker] amp_boot_network_list, "the id of amphora management network in RegionTwo", "query neutron to obtain it, i.e., the id of lb-mgmt-net2 in this doc"
- [controller_worker] amp_secgroup_list, "the id of security group created for amphora in central region", "query neutron to obtain it, i.e., the id of lb-mgmt-sec-grp"
- [neutron] service_name, "The name of the neutron service in the keystone catalog", neutron
- [neutron] endpoint, "Central neutron endpoint if override is necessary", http://192.168.57.9:20001/
- [neutron] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", CentralRegion
- [neutron] endpoint_type, "Endpoint type", public
- [nova] service_name, "The name of the nova service in the keystone catalog", nova
- [nova] endpoint, "Custom nova endpoint if override is necessary", http://192.168.57.10/compute/v2.1
- [nova] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", RegionTwo
- [nova] endpoint_type, "Endpoint type in Identity service catalog to use for communication with the OpenStack services", public
- [glance] service_name, "The name of the glance service in the keystone catalog", glance
- [glance] endpoint, "Custom glance endpoint if override is necessary", http://192.168.57.10/image
- [glance] region_name, "Region in Identity service catalog to use for communication with the OpenStack services", RegionTwo
- [glance] endpoint_type, "Endpoint type in Identity service catalog to use for communication with the OpenStack services", public
-
- Restart all the services of Octavia in node2.
-
- .. code-block:: console
-
- $ sudo systemctl restart devstack@o-*
-
- - By now, we have finished installing LBaaS.
-
-How to play
-^^^^^^^^^^^
-
-- 1 LBaaS members in one network and in the same region
-
- Here we take VxLAN as an example.
-
- - Create net1 in CentralRegion
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion network create net1
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | 9dcdcb56-358f-40b1-9e3f-6ed6bae6db7d |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | None |
- | mtu | None |
- | name | net1 |
- | port_security_enabled | False |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 1102 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a subnet in net1
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion subnet create --subnet-range 10.0.10.0/24 --gateway none --network net1 subnet1
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 10.0.10.1-10.0.10.254 |
- | cidr | 10.0.10.0/24 |
- | created_at | 2019-01-01T07:22:45Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | None |
- | host_routes | |
- | id | 39ccf811-b188-4ccf-a643-dd7669a413c2 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | None |
- | name | subnet1 |
- | network_id | 9dcdcb56-358f-40b1-9e3f-6ed6bae6db7d |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2019-01-01T07:22:45Z |
- +-------------------+--------------------------------------+
-
- .. note:: To enable adding instances as members with VIP, the amphora adds a
- new route table to route the traffic sent from the VIP to its gateway. However,
- in Tricircle, the gateway obtained from central neutron is not the real
- gateway in local neutron. As a result, we temporarily do not set any gateway
- for the subnet. We will remove this limitation in the future.
-
- - List all available flavors in RegionOne
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne flavor list
-
- +----+-----------+-------+------+-----------+-------+-----------+
- | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
- +----+-----------+-------+------+-----------+-------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | 1 | True |
- | 2 | m1.small | 2048 | 20 | 0 | 1 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | 2 | True |
- | 4 | m1.large | 8192 | 80 | 0 | 4 | True |
- | 42 | m1.nano | 64 | 0 | 0 | 1 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True |
- | 84 | m1.micro | 128 | 0 | 0 | 1 | True |
- | c1 | cirros256 | 256 | 0 | 0 | 1 | True |
- | d1 | ds512M | 512 | 5 | 0 | 1 | True |
- | d2 | ds1G | 1024 | 10 | 0 | 1 | True |
- | d3 | ds2G | 2048 | 10 | 0 | 2 | True |
- | d4 | ds4G | 4096 | 20 | 0 | 4 | True |
- +----+-----------+-------+------+-----------+-------+-----------+
-
- - List all available images in RegionOne
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne image list
-
- +--------------------------------------+--------------------------+--------+
- | ID | Name | Status |
- +--------------------------------------+--------------------------+--------+
- | 471ed2cb-8004-4973-9210-b96463b2c668 | amphora-x64-haproxy | active |
- | 85d165f0-bc7a-43d5-850b-4a8e89e57a66 | cirros-0.3.6-x86_64-disk | active |
- +--------------------------------------+--------------------------+--------+
-
- - Create two instances, i.e., backend1 and backend2, in RegionOne, which reside in subnet1.
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion server create --flavor 1 --image $image_id --nic net-id=$net1_id --availability-zone az1 backend1
- $ openstack --os-region-name CentralRegion server create --flavor 1 --image $image_id --nic net-id=$net1_id --availability-zone az1 backend2
-
- +--------------------------------------+-----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+-----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | backend1 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | |
- | OS-EXT-SRV-ATTR:reservation_id | r-0m1suyvm |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | 7poPJnDxV3Mz |
- | config_drive | |
- | created | 2019-01-01T07:30:26Z |
- | description | - |
- | flavor:disk | 1 |
- | flavor:ephemeral | 0 |
- | flavor:extra_specs | {} |
- | flavor:original_name | m1.tiny |
- | flavor:ram | 512 |
- | flavor:swap | 0 |
- | flavor:vcpus | 1 |
- | hostId | |
- | host_status | |
- | id | d330f73f-2d78-4f59-8ea2-6fa1b878d6a5 |
- | image | cirros-0.3.6-x86_64-disk (85d165f0-bc7a-43d5-850b-4a8e89e57a66) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | backend1 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | trusted_image_certificates | - |
- | updated | 2019-01-01T07:30:27Z |
- | user_id | fdf37c6259544a9294ae8463e9be063c |
- +--------------------------------------+-----------------------------------------------------------------+
-
- $ openstack --os-region-name CentralRegion server list
-
- +--------------------------------------+----------+--------+------------+-------------+------------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+----------+--------+------------+-------------+------------------+
- | d330f73f-2d78-4f59-8ea2-6fa1b878d6a5 | backend1 | ACTIVE | - | Running | net1=10.0.10.152 |
- | 72a4d0b0-88bc-41c5-9cb1-0965a5f3008f | backend2 | ACTIVE | - | Running | net1=10.0.10.176 |
- +--------------------------------------+----------+--------+------------+-------------+------------------+
-
- - Log in to the instance consoles with user 'cirros' and password 'cubswin:)'.
- Then run the following commands to simulate a web server.
-
- .. note::
-
- If using cirros 0.4.0 and above, log in to the instance consoles with user
- 'cirros' and password 'gocubsgo'.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id ssh cirros@10.0.10.152
- $ sudo ip netns exec dhcp-$net1_id ssh cirros@10.0.10.176
-
- $ MYIP=$(ifconfig eth0| grep 'inet addr'| awk -F: '{print $2}'| awk '{print $1}')
- $ while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-
- The Octavia services installed in node1 and node2 are standalone; here we
- take RegionOne as an example.
-
- - Create a load balancer for subnet1 in RegionOne.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer create --name lb1 --vip-subnet-id $subnet1_id
-
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | admin_state_up | True |
- | created_at | 2019-01-01T07:37:46 |
- | description | |
- | flavor | |
- | id | bbb5480a-a6ec-4cea-a77d-4872a94aca5c |
- | listeners | |
- | name | lb1 |
- | operating_status | OFFLINE |
- | pools | |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider | amphora |
- | provisioning_status | PENDING_CREATE |
- | updated_at | None |
- | vip_address | 10.0.10.189 |
- | vip_network_id | 9dcdcb56-358f-40b1-9e3f-6ed6bae6db7d |
- | vip_port_id | 759370eb-5f50-4229-be7e-0ca7aefe04db |
- | vip_qos_policy_id | None |
- | vip_subnet_id | 39ccf811-b188-4ccf-a643-dd7669a413c2 |
- +---------------------+--------------------------------------+
-
- Create a listener for the load balancer after the status of the load
- balancer is 'ACTIVE'. Please note that it may take some time for the
- load balancer to become 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer list
-
- +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+
- | id | name | project_id | vip_address | provisioning_status | provider |
- +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+
- | bbb5480a-a6ec-4cea-a77d-4872a94aca5c | lb1 | d3b83ed3f2504a8699c9528a2297fea7 | 10.0.10.189 | ACTIVE | amphora |
- +--------------------------------------+------+----------------------------------+-------------+---------------------+----------+
-
- $ openstack --os-region-name RegionOne loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | connection_limit | -1 |
- | created_at | 2019-01-01T07:44:21 |
- | default_pool_id | None |
- | default_tls_container_ref | None |
- | description | |
- | id | ec9d2e51-25ab-4c50-83cb-15f726d366ec |
- | insert_headers | None |
- | l7policies | |
- | loadbalancers | bbb5480a-a6ec-4cea-a77d-4872a94aca5c |
- | name | listener1 |
- | operating_status | OFFLINE |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | protocol | HTTP |
- | protocol_port | 80 |
- | provisioning_status | PENDING_CREATE |
- | sni_container_refs | [] |
- | timeout_client_data | 50000 |
- | timeout_member_connect | 5000 |
- | timeout_member_data | 50000 |
- | timeout_tcp_inspect | 0 |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a pool for the listener after the status of the load balancer is 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
-
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | admin_state_up | True |
- | created_at | 2019-01-01T07:46:21 |
- | description | |
- | healthmonitor_id | |
- | id | 7560b064-cdbe-4fa2-ae50-f66ad67fb575 |
- | lb_algorithm | ROUND_ROBIN |
- | listeners | ec9d2e51-25ab-4c50-83cb-15f726d366ec |
- | loadbalancers | bbb5480a-a6ec-4cea-a77d-4872a94aca5c |
- | members | |
- | name | pool1 |
- | operating_status | OFFLINE |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | protocol | HTTP |
- | provisioning_status | PENDING_CREATE |
- | session_persistence | None |
- | updated_at | None |
- +---------------------+--------------------------------------+
-
- - Add two instances to the pool as members, after the status of the load
- balancer is 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer member create --subnet $subnet1_id --address $backend1_ip --protocol-port 80 pool1
-
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | address | 10.0.10.152 |
- | admin_state_up | True |
- | created_at | 2019-01-01T07:49:04 |
- | id | 4e6ce567-0710-4a29-a98f-ab766e4963ab |
- | name | |
- | operating_status | NO_MONITOR |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | protocol_port | 80 |
- | provisioning_status | PENDING_CREATE |
- | subnet_id | 39ccf811-b188-4ccf-a643-dd7669a413c2 |
- | updated_at | None |
- | weight | 1 |
- | monitor_port | None |
- | monitor_address | None |
- | backup | False |
- +---------------------+--------------------------------------+
-
- $ openstack --os-region-name RegionOne loadbalancer member create --subnet $subnet1_id --address $backend2_ip --protocol-port 80 pool1
-
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | address | 10.0.10.176 |
- | admin_state_up | True |
- | created_at | 2019-01-01T07:50:06 |
- | id | 1e8ab609-a7e9-44af-b37f-69b494b40d01 |
- | name | |
- | operating_status | NO_MONITOR |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | protocol_port | 80 |
- | provisioning_status | PENDING_CREATE |
- | subnet_id | 39ccf811-b188-4ccf-a643-dd7669a413c2 |
- | updated_at | None |
- | weight | 1 |
- | monitor_port | None |
- | monitor_address | None |
- | backup | False |
- +---------------------+--------------------------------------+
-
- - Verify load balancing. Request the VIP twice.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id curl -v $VIP
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.152
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.176
- * Closing connection 0
-
-- 2 LBaaS members in one network but in different regions
-
- - List all available flavors in RegionTwo
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionTwo flavor list
-
- +----+-----------+-------+------+-----------+-------+-----------+
- | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
- +----+-----------+-------+------+-----------+-------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | 1 | True |
- | 2 | m1.small | 2048 | 20 | 0 | 1 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | 2 | True |
- | 4 | m1.large | 8192 | 80 | 0 | 4 | True |
- | 42 | m1.nano | 64 | 0 | 0 | 1 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True |
- | 84 | m1.micro | 128 | 0 | 0 | 1 | True |
- | c1 | cirros256 | 256 | 0 | 0 | 1 | True |
- | d1 | ds512M | 512 | 5 | 0 | 1 | True |
- | d2 | ds1G | 1024 | 10 | 0 | 1 | True |
- | d3 | ds2G | 2048 | 10 | 0 | 2 | True |
- | d4 | ds4G | 4096 | 20 | 0 | 4 | True |
- +----+-----------+-------+------+-----------+-------+-----------+
-
- - List all available images in RegionTwo
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionTwo image list
-
- +--------------------------------------+--------------------------+--------+
- | ID | Name | Status |
- +--------------------------------------+--------------------------+--------+
- | 471ed2cb-8004-4973-9210-b96463b2c668 | amphora-x64-haproxy | active |
- | 85d165f0-bc7a-43d5-850b-4a8e89e57a66 | cirros-0.3.6-x86_64-disk | active |
- +--------------------------------------+--------------------------+--------+
-
- - Create an instance in RegionTwo, which resides in subnet1
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion server create --flavor 1 --image $image_id --nic net-id=$net1_id --availability-zone az2 backend3
-
- +-------------------------------------+-----------------------------------------------------------------+
- | Field | Value |
- +-------------------------------------+-----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | az2 |
- | OS-EXT-SRV-ATTR:host | None |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | None |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-STS:power_state | NOSTATE |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | None |
- | OS-SRV-USG:terminated_at | None |
- | accessIPv4 | |
- | accessIPv6 | |
- | addresses | |
- | adminPass | rpV9MLzPGSvB |
- | config_drive | |
- | created | 2019-01-01T07:56:41Z |
- | flavor | m1.tiny (1) |
- | hostId | |
- | id | b27539fb-4c98-4f0c-b3f8-bc6744659f67 |
- | image | cirros-0.3.6-x86_64-disk (85d165f0-bc7a-43d5-850b-4a8e89e57a66) |
- | key_name | None |
- | name | backend3 |
- | progress | 0 |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | properties | |
- | security_groups | name='default' |
- | status | BUILD |
- | updated | 2019-01-01T07:56:42Z |
- | user_id | fdf37c6259544a9294ae8463e9be063c |
- | volumes_attached | |
- +-------------------------------------+-----------------------------------------------------------------+
-
- - Log in to the instance console with user 'cirros' and password 'cubswin:)'.
- Then run the following commands to simulate a web server.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id ssh cirros@$backend3_ip
-
- $ MYIP=$(ifconfig eth0| grep 'inet addr'| awk -F: '{print $2}'| awk '{print $1}')
- $ while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-
- - Add backend3 to the pool as a member, after the status of the load balancer is 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer member create --subnet $subnet1_id --address $backend3_ip --protocol-port 80 pool1
-
- - Verify load balancing. Request the VIP three times.
-
- .. note:: If the subnet has already been created in the region, as in the
- cases before this step, either the unique name or the id of the subnet can
- be used as the hint. But if the subnet has not been created in that region
- yet, as is the case for backend3, the subnet id must be used as the hint
- instead of the subnet name, because local Neutron in RegionOne needs to
- query central Neutron for the subnet by id.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id curl -v $VIP
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.152
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.176
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.186
- * Closing connection 0
-
-- 3 LBaaS members in different networks and different regions
-
- - Create net2 in CentralRegion
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion network create net2
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | f0ea9608-2d6e-4272-a596-2dc3a725eddc |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | None |
- | mtu | None |
- | name | net2 |
- | port_security_enabled | False |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 1088 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
- - Create a subnet in net2
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion subnet create --subnet-range 10.0.20.0/24 --gateway none --network net2 subnet2
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 10.0.20.1-10.0.20.254 |
- | cidr | 10.0.20.0/24 |
- | created_at | 2019-01-01T07:59:53Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | None |
- | host_routes | |
- | id | 4c05a73d-fa1c-46a9-982f-6683b0d1cb2a |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | None |
- | name | subnet2 |
- | network_id | f0ea9608-2d6e-4272-a596-2dc3a725eddc |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2019-01-01T07:59:53Z |
- +-------------------+--------------------------------------+
-
- - List all available flavors in RegionTwo
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionTwo flavor list
-
- +----+-----------+-------+------+-----------+-------+-----------+
- | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
- +----+-----------+-------+------+-----------+-------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | 1 | True |
- | 2 | m1.small | 2048 | 20 | 0 | 1 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | 2 | True |
- | 4 | m1.large | 8192 | 80 | 0 | 4 | True |
- | 42 | m1.nano | 64 | 0 | 0 | 1 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True |
- | 84 | m1.micro | 128 | 0 | 0 | 1 | True |
- | c1 | cirros256 | 256 | 0 | 0 | 1 | True |
- | d1 | ds512M | 512 | 5 | 0 | 1 | True |
- | d2 | ds1G | 1024 | 10 | 0 | 1 | True |
- | d3 | ds2G | 2048 | 10 | 0 | 2 | True |
- | d4 | ds4G | 4096 | 20 | 0 | 4 | True |
- +----+-----------+-------+------+-----------+-------+-----------+
-
- - List all available images in RegionTwo
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionTwo image list
-
- +--------------------------------------+--------------------------+--------+
- | ID | Name | Status |
- +--------------------------------------+--------------------------+--------+
- | 471ed2cb-8004-4973-9210-b96463b2c668 | amphora-x64-haproxy | active |
- | 85d165f0-bc7a-43d5-850b-4a8e89e57a66 | cirros-0.3.6-x86_64-disk | active |
- +--------------------------------------+--------------------------+--------+
-
- - Create an instance in RegionTwo, which resides in subnet2
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion server create --flavor 1 --image $image_id --nic net-id=$net2_id --availability-zone az2 backend4
-
- +-------------------------------------+-----------------------------------------------------------------+
- | Field | Value |
- +-------------------------------------+-----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | az2 |
- | OS-EXT-SRV-ATTR:host | None |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | None |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-STS:power_state | NOSTATE |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | None |
- | OS-SRV-USG:terminated_at | None |
- | accessIPv4 | |
- | accessIPv6 | |
- | addresses | |
- | adminPass | jHY5xdqgxezb |
- | config_drive | |
- | created | 2019-01-01T08:02:50Z |
- | flavor | m1.tiny (1) |
- | hostId | |
- | id | 43bcdc80-6492-4a88-90dd-a979c73219a1 |
- | image | cirros-0.3.6-x86_64-disk (85d165f0-bc7a-43d5-850b-4a8e89e57a66) |
- | key_name | None |
- | name | backend4 |
- | progress | 0 |
- | project_id | d3b83ed3f2504a8699c9528a2297fea7 |
- | properties | |
- | security_groups | name='default' |
- | status | BUILD |
- | updated | 2019-01-01T08:02:51Z |
- | user_id | fdf37c6259544a9294ae8463e9be063c |
- | volumes_attached | |
- +-------------------------------------+-----------------------------------------------------------------+
-
- - Log in to the instance console with user 'cirros' and password 'cubswin:)'. Then run the following commands to simulate a web server.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net2_id ssh cirros@$backend4_ip
-
- $ MYIP=$(ifconfig eth0| grep 'inet addr'| awk -F: '{print $2}'| awk '{print $1}')
- $ while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-
- - Add the instance to the pool as a member, after the status of the load balancer is 'ACTIVE'.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne loadbalancer member create --subnet $subnet2_id --address $backend4_ip --protocol-port 80 pool1
-
- - Verify load balancing. Request the VIP four times.
-
- .. code-block:: console
-
- $ sudo ip netns exec dhcp-$net1_id curl -v $VIP
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.152
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.176
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.10.186
- * Closing connection 0
-
- * Rebuilt URL to: 10.0.10.189/
- * Trying 10.0.10.189...
- * Connected to 10.0.10.189 (10.0.10.189) port 80 (#0)
- > GET / HTTP/1.1
- > Host: 10.0.10.189
- > User-Agent: curl/7.47.0
- > Accept: */*
- >
- * HTTP 1.0, assume close after body
- < HTTP/1.0 200 OK
- <
- Welcome to 10.0.20.64
- * Closing connection 0
\ No newline at end of file
diff --git a/doc/source/install/installation-manual.rst b/doc/source/install/installation-manual.rst
deleted file mode 100644
index 96194be9..00000000
--- a/doc/source/install/installation-manual.rst
+++ /dev/null
@@ -1,296 +0,0 @@
-===================
-Manual Installation
-===================
-
-The Tricircle works with Neutron to provide networking automation across
-Neutron servers in a multi-region OpenStack deployment. In this guide we
-discuss how to manually install the Tricircle with the local and central
-Neutron servers.
-
-The local Neutron server, running with the Tricircle local plugin, is responsible
-for triggering cross-Neutron networking automation. Every OpenStack instance
-has one local Neutron service, registered in the same region with other core
-services like Nova, Cinder, Glance, etc. The central Neutron server, running with
-the Tricircle central plugin, is responsible for unified resource allocation
-and for building cross-Neutron networking. Besides the regions for each OpenStack
-instance, we also need one specific region for the central Neutron service. Only
-the Tricircle administrator service needs to be registered in this region along
-with the central Neutron service; other core services are not mandatory.
-
-Installation with Central Neutron Server
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-- 1 Install the Tricircle package::
-
- git clone https://github.com/openstack/tricircle.git
- cd tricircle
- pip install -e .
-
-- 2 Register the Tricircle administrator API to Keystone::
-
- openstack user create tricircle --password password
- openstack role add --project service --user tricircle service
- openstack service create tricircle --name tricircle --description "Cross Neutron Networking Automation Service"
- service_id=$(openstack service show tricircle -f value -c id)
- service_host=162.3.124.201
- service_port=19999
- service_region=CentralRegion
- service_url=http://$service_host:$service_port/v1.0
- openstack endpoint create $service_id public $service_url --region $service_region
- openstack endpoint create $service_id admin $service_url --region $service_region
- openstack endpoint create $service_id internal $service_url --region $service_region
-
- Change password, service_host, service_port and service_region in the above
- commands to match your deployment. The OpenStack CLI will automatically find
- the endpoints to send registration requests to. If you would like to specify
- the region for the endpoints explicitly, use::
-
- openstack --os-region-name
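-
- For example, a minimal sketch (the region name and the sub-command here are
- illustrative) of pointing the client at a specific region::
-
- openstack --os-region-name=RegionOne endpoint list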
-
-- 3 Generate the Tricircle configuration sample::
-
- cd tricircle
- oslo-config-generator --config-file=etc/api-cfg-gen.conf
- oslo-config-generator --config-file=etc/xjob-cfg-gen.conf
-
- The generated sample files are located in tricircle/etc
-
-- 4 Configure the Tricircle administrator API::
-
- cd tricircle/etc
- cp api.conf.sample api.conf
-
- Edit etc/api.conf. For detailed configuration information, please refer to the
- configuration guide. Only the options that need to be changed are listed below.
-
-.. csv-table::
- :header: "Option", "Description", "Example"
-
- [DEFAULT] tricircle_db_connection, "database connection string for tricircle", mysql+pymysql://root:password@ 127.0.0.1/tricircle?charset=utf8
- [DEFAULT] transport_url, "a URL representing the used messaging driver and its full configuration", rabbit://user:password@ 127.0.0.1:5672
- [keystone_authtoken] auth_type, "authentication method", password
- [keystone_authtoken] auth_url, "keystone authorization url", http://$keystone_service_host/identity
- [keystone_authtoken] username, "username of service account, needed for password authentication", tricircle
- [keystone_authtoken] password, "password of service account, needed for password authentication", password
- [keystone_authtoken] user_domain_name, "user domain name of service account, needed for password authentication", Default
- [keystone_authtoken] project_name, "project name of service account, needed for password authentication", service
- [keystone_authtoken] project_domain_name, "project domain name of service account, needed for password authentication", Default
- [keystone_authtoken] www_authenticate_uri, "complete public Identity API endpoint", http://$keystone_service_host/identity
- [keystone_authtoken] cafile, "A PEM encoded Certificate Authority to use when verifying HTTPs", /opt/stack/data/ca-bundle.pem
- [keystone_authtoken] signing_dir, "Directory used to cache files related to PKI tokens", /var/cache/tricircle
- [keystone_authtoken] memcached_servers, "Optionally specify a list of memcached server(s) to use for caching", $keystone_service_host:11211
- [client] auth_url, "keystone authorization url", http://$keystone_service_host/identity
- [client] identity_url, "keystone service url", http://$keystone_service_host/identity/v3
- [client] auto_refresh_endpoint, "if set to True, endpoint will be automatically refreshed if timeout accessing", True
- [client] top_region_name, "name of central region which client needs to access", CentralRegion
- [client] admin_username, "username of admin account", admin
- [client] admin_password, "password of admin account", password
- [client] admin_tenant, "project name of admin account", demo
- [client] admin_user_domain_name, "user domain name of admin account", Default
- [client] admin_tenant_domain_name, "project name of admin account", Default
-
-.. note:: The Tricircle utilizes the Oslo library to setup service, database,
- log and RPC, please refer to the configuration guide of the corresponding
- Oslo library if you need further configuration of these modules. Change
- keystone_service_host to the address of Keystone service.
-
-.. note:: It's worth explaining the following options, which can easily confuse users. **keystone_authtoken.auth_url**
- is the keystone endpoint url used by services to validate user tokens. **keystone_authtoken.www_authenticate_uri** will be put in
- the "WWW-Authenticate: Keystone uri=%s" header in the 401 response to tell users where they can get authenticated.
- These two URLs can be the same, but sometimes people would like to use an internal URL for auth_url and a public URL
- for www_authenticate_uri. **client.auth_url** is used by the common.client module to construct a client to get authentication and
- access other services; it can be either the internal or the public endpoint of keystone, depending on how the module can
- reach keystone. **client.identity_url** is no longer used in code since the Pike release, so you can simply ignore it; we
- will deprecate and remove this option later.
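-
-As an illustration only, using the example values from the table above (adapt
-them to your deployment), the relevant parts of api.conf might look like::
-
- [DEFAULT]
- tricircle_db_connection = mysql+pymysql://root:password@127.0.0.1/tricircle?charset=utf8
- transport_url = rabbit://user:password@127.0.0.1:5672
-
- [keystone_authtoken]
- auth_type = password
- auth_url = http://$keystone_service_host/identity
- username = tricircle
- password = password
- user_domain_name = Default
- project_name = service
- project_domain_name = Default
- www_authenticate_uri = http://$keystone_service_host/identity
-
- [client]
- auth_url = http://$keystone_service_host/identity
- auto_refresh_endpoint = True
- top_region_name = CentralRegion
- admin_username = admin
- admin_password = password
- admin_tenant = demo
- admin_user_domain_name = Default
- admin_tenant_domain_name = Default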
-
-- 5 Create the Tricircle database (take MySQL as an example)::
-
- mysql -uroot -p -e "create database tricircle character set utf8;"
- cd tricircle
- tricircle-db-manage --config-file etc/api.conf db_sync
-
-- 6 Start the Tricircle administrator API::
-
- sudo mkdir /var/cache/tricircle
- sudo chown $(whoami) /var/cache/tricircle/
- cd tricircle
- tricircle-api --config-file etc/api.conf
-
-- 7 Configure the Tricircle Xjob daemon::
-
- cd tricircle/etc
- cp xjob.conf.sample xjob.conf
-
- Edit etc/xjob.conf. For detailed configuration information, please refer to the
- configuration guide. Only the options that need to be changed are listed below.
-
-.. csv-table::
- :header: "Option", "Description", "Example"
-
- [DEFAULT] tricircle_db_connection, "database connection string for tricircle", mysql+pymysql://root:password@ 127.0.0.1/tricircle?charset=utf8
- [DEFAULT] transport_url, "a URL representing the used messaging driver and its full configuration", rabbit://user:password@ 127.0.0.1:5672
- [client] auth_url, "keystone authorization url", http://$keystone_service_host/identity
- [client] identity_url, "keystone service url", http://$keystone_service_host/identity/v3
- [client] auto_refresh_endpoint, "if set to True, endpoint will be automatically refreshed if timeout accessing", True
- [client] top_region_name, "name of central region which client needs to access", CentralRegion
- [client] admin_username, "username of admin account", admin
- [client] admin_password, "password of admin account", password
- [client] admin_tenant, "project name of admin account", demo
- [client] admin_user_domain_name, "user domain name of admin account", Default
- [client] admin_tenant_domain_name, "project name of admin account", Default
-
-.. note:: The Tricircle utilizes the Oslo library to setup service, database,
- log and RPC, please refer to the configuration guide of the corresponding
- Oslo library if you need further configuration of these modules. Change
- keystone_service_host to the address of Keystone service.
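-
-As an illustration only, using the example values from the table above, the
-[DEFAULT] part of xjob.conf might look like the following; the [client] section
-is filled in exactly as shown for api.conf above::
-
- [DEFAULT]
- tricircle_db_connection = mysql+pymysql://root:password@127.0.0.1/tricircle?charset=utf8
- transport_url = rabbit://user:password@127.0.0.1:5672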
-
-- 8 Start the Tricircle Xjob daemon::
-
- cd tricircle
- tricircle-xjob --config-file etc/xjob.conf
-
-- 9 Setup central Neutron server
-
- In this guide we assume readers are familiar with how to install a Neutron
- server, so we just briefly discuss the steps and the extra configuration needed
- by the central Neutron server. For detailed information about the configuration
- options in the "client" and "tricircle" groups, please refer to the configuration
- guide. The Neutron server can be installed alone, or you can install a full
- OpenStack instance and then remove or stop the other services.
-
- - install Neutron package
-
- - configure central Neutron server
-
- edit neutron.conf
-
- .. csv-table::
- :header: "Option", "Description", "Example"
-
- [database] connection, "database connection string for central Neutron server", mysql+pymysql://root:password@ 127.0.0.1/neutron?charset=utf8
- [DEFAULT] bind_port, "Port central Neutron server binds to", change to a different value rather than 9696 if you run central and local Neutron server in the same host
- [DEFAULT] core_plugin, "core plugin central Neutron server uses", tricircle.network.central_plugin. TricirclePlugin
- [DEFAULT] service_plugins, "service plugin central Neutron server uses", "(leave empty)"
- [DEFAULT] tricircle_db_connection, "database connection string for tricircle", mysql+pymysql://root:password@ 127.0.0.1/tricircle?charset=utf8
- [client] auth_url, "keystone authorization url", http://$keystone_service_host/identity
- [client] identity_url, "keystone service url", http://$keystone_service_host/identity/v3
- [client] auto_refresh_endpoint, "if set to True, endpoint will be automatically refreshed if timeout accessing", True
- [client] top_region_name, "name of central region which client needs to access", CentralRegion
- [client] admin_username, "username of admin account", admin
- [client] admin_password, "password of admin account", password
- [client] admin_tenant, "project name of admin account", demo
- [client] admin_user_domain_name, "user domain name of admin account", Default
- [client] admin_tenant_domain_name, "project name of admin account", Default
- [tricircle] type_drivers, "list of network type driver entry points to be loaded", "vxlan,vlan,flat,local"
- [tricircle] tenant_network_types, "ordered list of network_types to allocate as tenant networks", "vxlan,vlan,flat,local"
- [tricircle] network_vlan_ranges, "physical network names and VLAN tags range usable of VLAN provider", "bridge:2001:3000"
- [tricircle] vni_ranges, "VxLAN VNI range", "1001:2000"
- [tricircle] flat_networks, "physical network names with which flat networks can be created", bridge
- [tricircle] bridge_network_type, "l3 bridge network type which is enabled in tenant_network_types and is not local type", vxlan
- [tricircle] default_region_for_external_network, "Default Region where the external network belongs to", RegionOne
- [tricircle] enable_api_gateway, "whether the API gateway is enabled", False
-
- .. note:: Change keystone_service_host to the address of Keystone service.
-
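- As an illustration only, using the example values from the table above, the
- Tricircle-specific parts of the central neutron.conf might look like the
- following (the [client] section is filled in as shown for api.conf earlier)::
-
- [DEFAULT]
- core_plugin = tricircle.network.central_plugin.TricirclePlugin
- service_plugins =
- tricircle_db_connection = mysql+pymysql://root:password@127.0.0.1/tricircle?charset=utf8
-
- [tricircle]
- type_drivers = vxlan,vlan,flat,local
- tenant_network_types = vxlan,vlan,flat,local
- network_vlan_ranges = bridge:2001:3000
- vni_ranges = 1001:2000
- flat_networks = bridge
- bridge_network_type = vxlan
- default_region_for_external_network = RegionOne
- enable_api_gateway = False
-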
- - create database for central Neutron server
-
- - register central Neutron server endpoint in Keystone; central Neutron
- should be registered in the same region as the Tricircle (a sketch of
- these two steps is shown after this list)
-
- - start central Neutron server
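-
- The following is only a sketch of the database creation and endpoint
- registration steps; the service description, host variable and port are
- illustrative and should be adapted to your deployment::
-
- mysql -uroot -p -e "create database neutron character set utf8;"
- openstack service create network --name neutron --description "Central Neutron Service"
- neutron_service_id=$(openstack service show neutron -f value -c id)
- openstack endpoint create $neutron_service_id public http://$neutron_service_host:9696 --region CentralRegion
- openstack endpoint create $neutron_service_id admin http://$neutron_service_host:9696 --region CentralRegion
- openstack endpoint create $neutron_service_id internal http://$neutron_service_host:9696 --region CentralRegion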
-
-Installation with Local Neutron Server
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-- 1 Install the Tricircle package::
-
- git clone https://github.com/openstack/tricircle.git
- cd tricircle
- pip install -e .
-
-- 2 Setup local Neutron server
-
- In this guide we assume readers have already installed a complete OpenStack
- instance running services like Nova, Cinder, Neutron, etc., so we just discuss
- how to configure the Neutron server to work with the Tricircle. For detailed
- information about the configuration options in the "client" and "tricircle"
- groups, please refer to the configuration guide. After the changes, simply
- restart the Neutron server.
-
- edit neutron.conf.
-
- .. note::
-
- Pay attention to the service_plugins configuration item; make sure
- the configured plugin supports associating a floating IP with a port
- whose network is not directly attached to the router. To support
- this, TricircleL3Plugin inherits from Neutron's original L3RouterPlugin
- and overrides the original "get_router_for_floatingip" implementation.
- If you want local Neutron to use the original L3RouterPlugin, you
- will need to patch the "get_router_for_floatingip" function in the same
- way as has been done for TricircleL3Plugin.
-
- It's not necessary to configure the service plugins if cross-Neutron L2
- networking is the only need in the deployment.
-
- .. csv-table::
- :header: "Option", "Description", "Example"
-
- [DEFAULT] core_plugin, "core plugin local Neutron server uses", tricircle.network.local_plugin. TricirclePlugin
- [DEFAULT] service_plugins, "service plugins local Neutron server uses", tricircle.network.local_l3_plugin. TricircleL3Plugin
- [client] auth_url, "keystone authorization url", http://$keystone_service_host/identity
- [client] identity_url, "keystone service url", http://$keystone_service_host/identity/v3
- [client] auto_refresh_endpoint, "if set to True, endpoint will be automatically refreshed if timeout accessing", True
- [client] top_region_name, "name of central region which client needs to access", CentralRegion
- [client] admin_username, "username of admin account", admin
- [client] admin_password, "password of admin account", password
- [client] admin_tenant, "project name of admin account", demo
- [client] admin_user_domain_name, "user domain name of admin account", Default
- [client] admin_tenant_domain_name, "project name of admin account", Default
- [tricircle] real_core_plugin, "the core plugin the Tricircle local plugin invokes", neutron.plugins.ml2.plugin. Ml2Plugin
- [tricircle] central_neutron_url, "central Neutron server url", http://$neutron_service_host :9696
-
- .. note:: Change keystone_service_host to the address of Keystone service,
- and neutron_service_host to the address of central Neutron service.
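-
- As an illustration only, using the example values from the table above, the
- Tricircle-specific parts of the local neutron.conf might look like the
- following (the [client] section is filled in as shown for api.conf earlier)::
-
- [DEFAULT]
- core_plugin = tricircle.network.local_plugin.TricirclePlugin
- service_plugins = tricircle.network.local_l3_plugin.TricircleL3Plugin
-
- [tricircle]
- real_core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
- central_neutron_url = http://$neutron_service_host:9696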
-
- edit ml2_conf.ini
-
- .. list-table::
- :header-rows: 1
-
- * - Option
- - Description
- - Example
- * - [ml2] mechanism_drivers
- - add l2population if vxlan network is used
- - openvswitch,l2population
- * - [agent] l2_population
- - set to True if vxlan network is used
- - True
- * - [agent] tunnel_types
- - set to vxlan if vxlan network is used
- - vxlan
- * - [ml2_type_vlan] network_vlan_ranges
- - for a specific physical network, the vlan range should be the same with
- tricircle.network_vlan_ranges option for central Neutron, configure this
- option if vlan network is used
- - bridge:2001:3000
- * - [ml2_type_vxlan] vni_ranges
- - should be the same with tricircle.vni_ranges option for central Neutron,
- configure this option if vxlan network is used
- - 1001:2000
- * - [ml2_type_flat] flat_networks
- - should be part of the tricircle.network_vlan_ranges option for central
- Neutron, configure this option if flat network is used
- - bridge
- * - [ovs] bridge_mappings
- - map the physical network to an ovs bridge
- - bridge:br-bridge
-
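- As an illustration only, using the example values from the table above, the
- corresponding ml2_conf.ini sections might look like::
-
- [ml2]
- mechanism_drivers = openvswitch,l2population
-
- [agent]
- l2_population = True
- tunnel_types = vxlan
-
- [ml2_type_vlan]
- network_vlan_ranges = bridge:2001:3000
-
- [ml2_type_vxlan]
- vni_ranges = 1001:2000
-
- [ml2_type_flat]
- flat_networks = bridge
-
- [ovs]
- bridge_mappings = bridge:br-bridge
-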
- .. note:: In the tricircle.network_vlan_ranges option for central Neutron, all
- the available physical networks in all pods and their vlan ranges should
- be configured without duplication. It's possible that one local Neutron
- doesn't contain some of the physical networks configured in
- tricircle.network_vlan_ranges; in this case, users need to specify
- availability zone hints when creating networks or booting instances in the
- correct pod, to ensure that the required physical network is available in
- the target pod.
diff --git a/doc/source/install/installation-tricircle_work_with_container.rst b/doc/source/install/installation-tricircle_work_with_container.rst
deleted file mode 100644
index b6b8572a..00000000
--- a/doc/source/install/installation-tricircle_work_with_container.rst
+++ /dev/null
@@ -1,394 +0,0 @@
-====================================================
-Installation guide for Tricircle work with Container
-====================================================
-
-Introduction
-^^^^^^^^^^^^
-
-In the `Multi-pod Installation with DevStack `_ ,
-we have discussed how to deploy the Tricircle in a multi-region scenario with
-DevStack. However, the previous installation guides only cover how to manage
-virtual machines with the Tricircle and Nova in a cross-region OpenStack cloud
-environment, so multi-region container management has not yet been addressed
-for the Tricircle. Meanwhile, OpenStack uses the Zun component to provide the
-container management service, and the kuryr and kuryr-libnetwork components to
-provide container networking. Given the Tricircle Central_Neutron-Local_Neutron
-fashion, the Tricircle working with Zun and Kuryr provides a cross-region
-container management solution. This guide describes how the Tricircle works
-with container management and how to deploy a multi-region container environment.
-
-
-Prerequisite
-^^^^^^^^^^^^
-
-In this guide, we need specific versions of the Zun project and
-Kuryr project source code. The source code of both projects
-must be the Train version or later. If not, we need to manually modify
-the source code of both projects. The modification examples are as follows:
-
-- 1 Zun Source Code Modification:
- For the Zun project, we need to modify the **neutron** function
- in the /zun/zun/common/clients.py file.
- (The '+' sign represents the added line)
-
- .. code-block:: console
-
- def neutron(self):
- if self._neutron:
- return self._neutron
-
- session = self.keystone().session
- session.verify = self._get_client_option('neutron', 'ca_file') or True
- if self._get_client_option('neutron', 'insecure'):
- session.verify = False
- endpoint_type = self._get_client_option('neutron', 'endpoint_type')
- + region_name = self._get_client_option('neutron', 'region_name')
- self._neutron = neutronclient.Client(session=session,
- endpoint_type=endpoint_type,
- + region_name=region_name)
-
- return self._neutron
-
-- 2 Kuryr Source Code Modification:
- For the Kuryr project, we need to modify the **get_neutron_client** function
- in the /kuryr/kuryr/lib/utils.py file.
- (The '+' sign represents the added line)
-
- .. code-block:: console
-
- def get_neutron_client(*args, **kwargs):
- conf_group = kuryr_config.neutron_group.name
- auth_plugin = get_auth_plugin(conf_group)
- session = get_keystone_session(conf_group, auth_plugin)
- endpoint_type = getattr(getattr(cfg.CONF, conf_group), 'endpoint_type')
- + region_name = getattr(getattr(cfg.CONF, conf_group), 'region_name')
-
- return client.Client(session=session,
- auth=auth_plugin,
- endpoint_type=endpoint_type,
- + region_name=region_name)
-
-
-Setup
-^^^^^
-
-In this guide we take a two-node deployment as an example: node1 runs as RegionOne
-and CentralRegion, and node2 runs as RegionTwo.
-
-- 1 For node1 in RegionOne and node2 in RegionTwo, clone the code from the Zun repository
- and the Kuryr repository to /opt/stack/ . If the code does not meet the requirements described
- in the Prerequisite section, modify it with reference to the modification examples in that section.
-
-- 2 Follow the "Multi-pod Installation with DevStack" document `Multi-pod Installation with DevStack `_
- to prepare your local.conf for node1 in RegionOne and node2 in RegionTwo, and add the
- following lines before installation. Start DevStack in node1 and node2.
-
- .. code-block:: console
-
- enable_plugin zun https://git.openstack.org/openstack/zun
- enable_plugin zun-tempest-plugin https://git.openstack.org/openstack/zun-tempest-plugin
- enable_plugin devstack-plugin-container https://git.openstack.org/openstack/devstack-plugin-container
- enable_plugin kuryr-libnetwork https://git.openstack.org/openstack/kuryr-libnetwork
-
- KURYR_CAPABILITY_SCOPE=local
- KURYR_PROCESS_EXTERNAL_CONNECTIVITY=False
-
-- 3 After DevStack has successfully started and finished, we need to make some configuration
- changes to the Zun and Kuryr components in node1 and node2 (a consolidated sketch of these
- edits is shown after the sub-steps below).
-
- - For Zun in node1, modify the /etc/zun/zun.conf
-
- .. csv-table::
- :header: "Group", "Option", "Value"
-
- [neutron_client], region_name, RegionOne
-
- - Restart all the services of Zun in node1.
-
- .. code-block:: console
-
- $ sudo systemctl restart devstack@zun*
-
- - For Kuryr in node1, modify the /etc/kuryr/kuryr.conf
-
- .. csv-table::
- :header: "Group", "Option", "Value"
-
- [neutron], region_name, RegionOne
-
- - Restart all the services of Kuryr in node1.
-
- .. code-block:: console
-
- $ sudo systemctl restart devstack@kur*
-
- - For Zun in node2, modify the /etc/zun/zun.conf
-
- .. csv-table::
- :header: "Group", "Option", "Value"
-
- [neutron_client], region_name, RegionTwo
-
- - Restart all the services of Zun in node2.
-
- .. code-block:: console
-
- $ sudo systemctl restart devstack@zun*
-
- - For Kuryr in node2, modify the /etc/kuryr/kuryr.conf
-
- .. csv-table::
- :header: "Group", "Option", "Value"
-
- [neutron], region_name, RegionTwo
-
- - Restart all the services of Kuryr in node2.
-
- .. code-block:: console
-
- $ sudo systemctl restart devstack@kur*
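-
- As an illustration only, the edits above amount to the following snippets on
- node1; node2 is identical except that RegionTwo is used instead of RegionOne.
-
- .. code-block:: console
-
- # /etc/zun/zun.conf
- [neutron_client]
- region_name = RegionOne
-
- # /etc/kuryr/kuryr.conf
- [neutron]
- region_name = RegionOne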
-
-- 4 Then, we must create environment variables for the admin user and use the admin project.
-
- .. code-block:: console
-
- $ source openrc admin admin
- $ unset OS_REGION_NAME
-
-- 5 Finally, use tricircle client to create pods for multi-region.
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion multiregion networking pod create --region-name CentralRegion
- $ openstack --os-region-name CentralRegion multiregion networking pod create --region-name RegionOne --availability-zone az1
- $ openstack --os-region-name CentralRegion multiregion networking pod create --region-name RegionTwo --availability-zone az2
-
-
-How to play
-^^^^^^^^^^^
-
-- 1 Create container glance image in RegionOne and RegionTwo.
-
- - Get the docker image from Docker Hub. Run these commands in node1 and node2.
-
- .. code-block:: console
-
- $ docker pull cirros
- $ docker save cirros -o /opt/stack/container_cirros
-
- - Use glance client to create container image.
-
- .. code-block:: console
-
- $ glance --os-region-name=RegionOne image-create --file /opt/stack/container_cirros --container-format=docker --disk-format=raw --name container_cirros --progress
- $ glance --os-region-name=RegionTwo image-create --file /opt/stack/container_cirros --container-format=docker --disk-format=raw --name container_cirros --progress
-
- $ openstack --os-region-name RegionOne image list
-
- +--------------------------------------+--------------------------+--------+
- | ID | Name | Status |
- +--------------------------------------+--------------------------+--------+
- | 11186baf-4381-4e52-956c-22878b0642df | cirros-0.4.0-x86_64-disk | active |
- | 87864205-4352-4a2c-b9b1-ca95df52c93c | container_cirros | active |
- +--------------------------------------+--------------------------+--------+
-
- $ openstack --os-region-name RegionTwo image list
-
- +--------------------------------------+--------------------------+--------+
- | ID | Name | Status |
- +--------------------------------------+--------------------------+--------+
- | cd062c19-bb3a-4f60-b5ef-9688eb67b3da | container_cirros | active |
- | cf4a2dc7-6d6e-4b7e-a772-44247246e1ff | cirros-0.4.0-x86_64-disk | active |
- +--------------------------------------+--------------------------+--------+
-
-- 2 Create container network in CentralRegion.
-
- - Create a net in CentralRegion.
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion network create container-net
-
- +---------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +---------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | 5e73dda5-902b-4322-b5b6-4121437fde26 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | cloud='', project.domain_id='default', project.domain_name=, project.id='2f314a39de10467bb62745bd96c5fe4d', project.name='admin', region_name='CentralRegion', zone= |
- | mtu | None |
- | name | container-net |
- | port_security_enabled | False |
- | project_id | 2f314a39de10467bb62745bd96c5fe4d |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 1070 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-
- - Create a subnet in container-net
-
- .. code-block:: console
-
- $ openstack --os-region-name CentralRegion subnet create --subnet-range 10.0.60.0/24 --network container-net container-subnet
-
- +-------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- | allocation_pools | 10.0.60.2-10.0.60.254 |
- | cidr | 10.0.60.0/24 |
- | created_at | 2019-12-10T07:13:21Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.60.1 |
- | host_routes | |
- | id | b7a7adbd-afd3-4449-9cbc-fbce16c7a2e7 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | cloud='', project.domain_id='default', project.domain_name=, project.id='2f314a39de10467bb62745bd96c5fe4d', project.name='admin', region_name='CentralRegion', zone= |
- | name | container-subnet |
- | network_id | 5e73dda5-902b-4322-b5b6-4121437fde26 |
- | prefix_length | None |
- | project_id | 2f314a39de10467bb62745bd96c5fe4d |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2019-12-10T07:13:21Z |
- +-------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-
-- 3 Create container in RegionOne and RegionTwo.
-
- .. note:: We can give the container a specific command so that it keeps running, e.g. "sudo nc -l -p 5000" .
-
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne appcontainer run --name container01 --net network=$container_net_id --image-driver glance $RegionTwo_container_cirros_id sudo nc -l -p 5000
-
- +-------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- | tty | False |
- | addresses | None |
- | links | [{u'href': u'http://192.168.1.81/v1/containers/ca67055c-635d-4603-9b0b-19c16eed7ef9', u'rel': u'self'}, {u'href': u'http://192.168.1.81/containers/ca67055c-635d-4603-9b0b-19c16eed7ef9', u'rel': u'bookmark'}] |
- | image | 87864205-4352-4a2c-b9b1-ca95df52c93c |
- | labels | {} |
- | disk | 0 |
- | security_groups | None |
- | image_pull_policy | None |
- | user_id | 57df611fd8c7415dad6d2530bf962ecd |
- | uuid | ca67055c-635d-4603-9b0b-19c16eed7ef9 |
- | hostname | None |
- | auto_heal | False |
- | environment | {} |
- | memory | 0 |
- | project_id | 2f314a39de10467bb62745bd96c5fe4d |
- | privileged | False |
- | status | Creating |
- | workdir | None |
- | healthcheck | None |
- | auto_remove | False |
- | status_detail | None |
- | cpu_policy | shared |
- | host | None |
- | image_driver | glance |
- | task_state | None |
- | status_reason | None |
- | name | container01 |
- | restart_policy | None |
- | ports | None |
- | command | [u'sudo', u'nc', u'-l', u'-p', u'5000'] |
- | runtime | None |
- | registry_id | None |
- | cpu | 0.0 |
- | interactive | False |
- +-------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-
- $ openstack --os-region-name RegionOne appcontainer list
-
- +--------------------------------------+-------------+--------------------------------------+---------+------------+------------+-------+
- | uuid | name | image | status | task_state | addresses | ports |
- +--------------------------------------+-------------+--------------------------------------+---------+------------+------------+-------+
- | ca67055c-635d-4603-9b0b-19c16eed7ef9 | container01 | 87864205-4352-4a2c-b9b1-ca95df52c93c | Running | None | 10.0.60.62 | [] |
- +--------------------------------------+-------------+--------------------------------------+---------+------------+------------+-------+
-
-
- $ openstack --os-region-name RegionTwo appcontainer run --name container02 --net network=$container_net_id --image-driver glance $RegionTwo_container_cirros_id sudo nc -l -p 5000
-
- +-------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- | tty | False |
- | addresses | None |
- | links | [{u'href': u'http://192.168.1.82/v1/containers/c359e48c-7637-4d9f-8219-95a4577683c3', u'rel': u'self'}, {u'href': u'http://192.168.1.82/containers/c359e48c-7637-4d9f-8219-95a4577683c3', u'rel': u'bookmark'}] |
- | image | cd062c19-bb3a-4f60-b5ef-9688eb67b3da |
- | labels | {} |
- | disk | 0 |
- | security_groups | None |
- | image_pull_policy | None |
- | user_id | 57df611fd8c7415dad6d2530bf962ecd |
- | uuid | c359e48c-7637-4d9f-8219-95a4577683c3 |
- | hostname | None |
- | auto_heal | False |
- | environment | {} |
- | memory | 0 |
- | project_id | 2f314a39de10467bb62745bd96c5fe4d |
- | privileged | False |
- | status | Creating |
- | workdir | None |
- | healthcheck | None |
- | auto_remove | False |
- | status_detail | None |
- | cpu_policy | shared |
- | host | None |
- | image_driver | glance |
- | task_state | None |
- | status_reason | None |
- | name | container02 |
- | restart_policy | None |
- | ports | None |
- | command | [u'sudo', u'nc', u'-l', u'-p', u'5000'] |
- | runtime | None |
- | registry_id | None |
- | cpu | 0.0 |
- | interactive | False |
- +-------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-
- $ openstack --os-region-name RegionTwo appcontainer list
-
- +--------------------------------------+-------------+--------------------------------------+---------+------------+-------------+-------+
- | uuid | name | image | status | task_state | addresses | ports |
- +--------------------------------------+-------------+--------------------------------------+---------+------------+-------------+-------+
- | c359e48c-7637-4d9f-8219-95a4577683c3 | container02 | cd062c19-bb3a-4f60-b5ef-9688eb67b3da | Running | None | 10.0.60.134 | [] |
- +--------------------------------------+-------------+--------------------------------------+---------+------------+-------------+-------+
-
-- 4 Execute a shell in the containers in RegionOne and RegionTwo.
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne appcontainer exec --interactive container01 /bin/sh
- $ openstack --os-region-name RegionTwo appcontainer exec --interactive container02 /bin/sh
-
-- 5 By now, we have successfully created a multi-region container scenario, so we can exercise
- cross-region container features, e.g. 1) a RegionOne container pinging a RegionTwo container
- (see the sketch below), 2) cross-region container load balancing.
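-
- For example, a minimal connectivity-check sketch (the target address is the
- address listed for container02 above; adapt it to your environment):
-
- .. code-block:: console
-
- $ openstack --os-region-name RegionOne appcontainer exec --interactive container01 /bin/sh
- # ping -c 3 10.0.60.134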
\ No newline at end of file
diff --git a/doc/source/install/multi-pod-installation-devstack.rst b/doc/source/install/multi-pod-installation-devstack.rst
deleted file mode 100644
index 004df428..00000000
--- a/doc/source/install/multi-pod-installation-devstack.rst
+++ /dev/null
@@ -1,409 +0,0 @@
-====================================
-Multi-pod Installation with DevStack
-====================================
-
-Introduction
-^^^^^^^^^^^^
-
-In the single pod installation guide, we discuss how to deploy the Tricircle in
-one single pod with DevStack. Besides the Tricircle API and the central Neutron
-server, only one pod (one pod means one OpenStack instance) is running. The network
-is created with the default network type: local. A local type network is only
-present in one pod. If a local type network is already hosting virtual machines
-in one pod, you cannot use it to boot virtual machines in another pod. That is
-to say, local type networks don't support cross-Neutron l2 networking.
-
-With multi-pod installation of the Tricircle, you can try out cross-Neutron l2
-networking and cross-Neutron l3 networking features.
-
-To support cross-Neutron l2 networking, we have added both VLAN and VxLAN
-network type to the Tricircle. When a VLAN type network created via the
-central Neutron server is used to boot virtual machines in different pods, local
-Neutron server in each pod will create a VLAN type network with the same VLAN
-ID and physical network as the central network, so each pod should be configured
-with the same VLAN allocation pool and physical network. Then virtual machines
-in different pods can communicate with each other in the same physical network
-with the same VLAN tag. Similarly, for VxLAN network type, each pod should be
-configured with the same VxLAN allocation pool, so local Neutron server in each
-pod can create a VxLAN type network with the same VxLAN ID as is allocated by
-the central Neutron server.
-
-Cross-Neutron l3 networking is supported in two ways in the Tricircle. If two
-networks connected to the router are of local type, we utilize a shared
-VLAN or VxLAN network to achieve cross-Neutron l3 networking. When a subnet is
-attached to a router via the central Neutron server, the Tricircle not only
-creates corresponding subnet and router in the pod, but also creates a "bridge"
-network. Both tenant network and "bridge" network are attached to the router.
-Each tenant will have one allocated VLAN or VxLAN ID, which is shared by the
-tenant's "bridge" networks across Neutron servers. The CIDRs of "bridge" networks for one
-tenant are also the same, so the router interfaces in "bridge" networks across
-different Neutron servers can communicate with each other. By adding an extra route as
-following::
-
- destination: CIDR of tenant network in another pod
- nexthop: "bridge" network interface ip in another pod
-
-When a virtual machine sends a packet whose receiver is in another network and
-in another pod, the packet first goes to router, then is forwarded to the router
-in another pod according to the extra route, at last the packet is sent to the
-target virtual machine. This route configuration job is triggered when user
-attaches a subnet to a router via the central Neutron server and the job is
-finished asynchronously.
-
-If one of the networks connected to the router is not of local type, meaning that
-cross-Neutron l2 networking is supported in this network (like VLAN type), and
-the l2 network can be stretched into the current pod, packets sent to the virtual
-machine in this network will not pass through the "bridge" network. Instead,
-packets first go to the router, then are directly forwarded to the target virtual
-machine via the l2 network. An l2 network's presence scope is determined by the
-network's availability zone hint. If the l2 network cannot be stretched
-into the current pod, the packets will still pass through the "bridge" network.
-For example, let's say we have two pods, pod1 and pod2, and two availability
-zones, az1 and az2. Pod1 belongs to az1 and pod2 belongs to az2. If the
-availability zone hint of one VLAN type network is set to az1, this
-network can not be stretched to pod2. So packets sent from pod2 to virtual
-machines in this network still need to pass through the "bridge network".
-
-Prerequisite
-^^^^^^^^^^^^
-
-In this guide we take a two-node deployment as an example. One node runs the
-Tricircle API, the central Neutron server and one pod; the other node runs
-another pod. For VLAN network, both nodes should have two network interfaces,
-which are connected to the management network and provider VLAN network. The
-physical network infrastructure should support VLAN tagging. For VxLAN network,
-you can combine the management plane and data plane, in this case, only one
-network interface is needed. If you would like to try north-south networking,
-too, you should prepare one more network interface in the second node for the
-external network. In this guide, the external network is also VLAN type, so the
-local.conf sample is based on VLAN type external network setup. For the resource
-requirements to setup each node, please refer to
-`All-In-One Single Machine `_
-for installing DevStack in bare metal server and
-`All-In-One Single VM `_
-for installing DevStack in virtual machine.
-
-If you want to experience cross Neutron VxLAN network, please make sure
-compute nodes are routable to each other on data plane, and enable L2
-population mechanism driver in OpenStack RegionOne and OpenStack RegionTwo.
-
-
-Setup
-^^^^^
-
-In pod1 in node1 for Tricircle service, central Neutron and OpenStack
-RegionOne,
-
-- 1 Install DevStack. Please refer to
- `DevStack document `_
- on how to install DevStack into single VM or bare metal server.
-
-- 2 In DevStack folder, create a file local.conf, and copy the content of
- `local.conf node1 sample `_
- to local.conf, change password in the file if needed.
-
-- 3 Change the following options according to your environment
-
- - change HOST_IP to your management interface ip::
-
- HOST_IP=10.250.201.24
-
- - the format of Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS is
- (network_vlan_ranges=<physical network name>:<min vlan>:<max vlan>),
- you can change the physical network name, but remember to adapt your change
- to the commands shown in this guide; also, change min vlan and max vlan
- to match the VLAN range your physical network supports. You need to
- additionally specify the physical network "extern" to ensure that the
- central neutron can create the "extern" physical network which is located in
- other pods::
-
- Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:2001:3000,extern:3001:4000)
-
- - if you would like to also configure vxlan network, you can set
- Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS. Its format is
- (vni_ranges=<min vxlan>:<max vxlan>)::
-
- Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=(vni_ranges=1001:2000)
-
- - the format of OVS_BRIDGE_MAPPINGS is <physical network name>:<ovs bridge name>,
- you can change these names, but remember to adapt your change to the
- commands shown in this guide. You do not need to specify the bridge mapping
- for "extern", because this physical network is located in other pods::
-
- OVS_BRIDGE_MAPPINGS=bridge:br-vlan
-
- this option can be omitted if only VxLAN networks are needed
-
- - if you would like to also configure flat network, you can set
- Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS, the format of it is
- (flat_networks=phy_net1,phy_net2,...). Besides specifying a list of
- physical network names, you can also use '*' to allow flat networks with
- arbitrary physical network names; or use an empty list to disable flat
- networks. For simplicity, we use the same physical networks and bridge
- mappings for vlan and flat network configuration. Similar to the vlan network,
- you need to additionally specify the physical network "extern" to ensure that
- the central neutron can create the "extern" physical network which is located in
- other pods::
-
- Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS=(flat_networks=bridge,extern)
-
- - set TRICIRCLE_START_SERVICES to True to install the Tricircle service and
- central Neutron in node1::
-
- TRICIRCLE_START_SERVICES=True
-
-- 4 Create OVS bridge and attach the VLAN network interface to it ::
-
- sudo ovs-vsctl add-br br-vlan
- sudo ovs-vsctl add-port br-vlan eth1
-
- br-vlan is the OVS bridge name you configure on OVS_PHYSICAL_BRIDGE, eth1 is
- the device name of your VLAN network interface, this step can be omitted if
- only VxLAN networks are provided to tenants.
-
-- 5 Run DevStack. In DevStack folder, run ::
-
- ./stack.sh
-
-- 6 After DevStack successfully starts, begin to setup node2.
-
-In pod2 in node2 for OpenStack RegionTwo,
-
-- 1 Install DevStack. Please refer to
- `DevStack document `_
- on how to install DevStack into single VM or bare metal server.
-
-- 2 In DevStack folder, create a file local.conf, and copy the content of
- `local.conf node2 sample `_
- to local.conf, change password in the file if needed.
-
-- 3 Change the following options according to your environment
-
- - change HOST_IP to your management interface ip::
-
- HOST_IP=10.250.201.25
-
- - change KEYSTONE_SERVICE_HOST to management interface ip of node1::
-
- KEYSTONE_SERVICE_HOST=10.250.201.24
-
- - change KEYSTONE_AUTH_HOST to management interface ip of node1::
-
- KEYSTONE_AUTH_HOST=10.250.201.24
-
- - the format of Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS is
- (network_vlan_ranges=<physical network name>:<min vlan>:<max vlan>),
- you can change the physical network name, but remember to adapt your change
- to the commands shown in this guide; also, change min vlan and max vlan
- to match the vlan range your physical network supports::
-
- Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:2001:3000,extern:3001:4000)
-
- - if you would like to also configure vxlan network, you can set
- Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS. Its format is
- (vni_ranges=<min vxlan>:<max vxlan>)::
-
- Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=(vni_ranges=1001:2000)
-
- - the format of OVS_BRIDGE_MAPPINGS is <physical network name>:<ovs bridge name>,
- you can change these names, but remember to adapt your change to the commands
- shown in this guide::
-
- OVS_BRIDGE_MAPPINGS=bridge:br-vlan,extern:br-ext
-
- if you only use vlan network for external network, it can be configured like::
-
- OVS_BRIDGE_MAPPINGS=extern:br-ext
-
- - if you would like to also configure flat network, you can set
- Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS, the format of it is
- (flat_networks=phy_net1,phy_net2,...). Besides specifying a list of
- physical network names, you can also use '*' to allow flat networks with
- arbitrary physical network names; or use an empty list to disable flat
- networks. For simplicity, we use the same physical networks and bridge
- mappings for vlan and flat network configuration::
-
- Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS=(flat_networks=bridge,extern)
-
- - set TRICIRCLE_START_SERVICES to False (it's True by default) so the Tricircle
- services and central Neutron will not be started in node2::
-
- TRICIRCLE_START_SERVICES=False
-
- In this guide, we define two physical networks in node2: one is "bridge" for
- the bridge network, the other is "extern" for the external network. If you do
- not want to try l3 north-south networking, you can simply remove the "extern"
- part. The external network type we use in this guide is VLAN; if you want to
- use another network type like flat, please refer to the
- `DevStack document `_.
-
-- 4 Create OVS bridges and attach the VLAN network interfaces to them ::
-
- sudo ovs-vsctl add-br br-vlan
- sudo ovs-vsctl add-port br-vlan eth1
- sudo ovs-vsctl add-br br-ext
- sudo ovs-vsctl add-port br-ext eth2
-
- br-vlan and br-ext are the OVS bridge names you configured in
- OVS_PHYSICAL_BRIDGE, and eth1 and eth2 are the device names of your VLAN
- network interfaces for the "bridge" network and the external network. Omit
- br-vlan if you only use vxlan networks as tenant networks.
-
-- 5 Run DevStack. In DevStack folder, run ::
-
- ./stack.sh
-
-- 6 After DevStack successfully starts, the setup is finished.
-
-.. note:: With the newest version of the code, we may fail to boot an instance
- in node2. The reason is that the Apache configuration file of the Nova
- placement API doesn't grant access to the placement API bin folder. You can
- use "screen -r" to check whether the placement API is working. If the
- placement API is stuck, manually update the placement API configuration file
- "/etc/apache2/sites-enabled/placement-api.conf" in node2 to add the
- following section::
-
- <Directory /usr/local/bin>
- Require all granted
- </Directory>
-
- After the update, restart the Apache service first, and then the placement API.
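-
- For example, on an Ubuntu based DevStack host the restart might look like
- the following (the service name is an assumption and may differ in your
- deployment; the placement API itself can then be restarted from its DevStack
- screen session)::
-
- sudo systemctl restart apache2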
-
- **This problem no longer exists after this patch:**
-
- https://github.com/openstack-dev/devstack/commit/6ed53156b6198e69d59d1cf3a3497e96f5b7a870
-
-How to play
-^^^^^^^^^^^
-
-- 1 After DevStack successfully starts, we need to create environment variables
- for the user (the admin user is used as an example in this guide). In the DevStack folder ::
-
- source openrc admin demo
-
-- 2 Unset the region name environment variable, so that in the following
- commands each command can be issued to the specified region as needed ::
-
- unset OS_REGION_NAME
-
-- 3 Check if services have been correctly registered. Run ::
-
- openstack --os-region-name=RegionOne endpoint list
-
- you should get output that looks like the following ::
-
- +----------------------------------+---------------+--------------+----------------+
- | ID | Region | Service Name | Service Type |
- +----------------------------------+---------------+--------------+----------------+
- | 4adaab1426d94959be46314b4bd277c2 | RegionOne | glance | image |
- | 5314a11d168042ed85a1f32d40030b31 | RegionTwo | nova_legacy | compute_legacy |
- | ea43c53a8ab7493dacc4db079525c9b1 | RegionOne | keystone | identity |
- | a1f263473edf4749853150178be1328d | RegionOne | neutron | network |
- | ebea16ec07d94ed2b5356fb0a2a3223d | RegionTwo | neutron | network |
- | 8d374672c09845f297755117ec868e11 | CentralRegion | tricircle | Tricircle |
- | e62e543bb9cf45f593641b2d00d72700 | RegionOne | nova_legacy | compute_legacy |
- | 540bdedfc449403b9befef3c2bfe3510 | RegionOne | nova | compute |
- | d533429712954b29b9f37debb4f07605 | RegionTwo | glance | image |
- | c8bdae9506cd443995ee3c89e811fb45 | CentralRegion | neutron | network |
- | 991d304dfcc14ccf8de4f00271fbfa22 | RegionTwo | nova | compute |
- +----------------------------------+---------------+--------------+----------------+
-
- "CentralRegion" is the region you set in local.conf via CENTRAL_REGION_NAME,
- whose default value is "CentralRegion", we use it as the region for the
- Tricircle API and central Neutron server. "RegionOne" and "RegionTwo" are the
- normal OpenStack regions which includes Nova, Neutron and Glance. Shared
- Keystone service is registered in "RegionOne".
-
-- 4 Create pod instances for the Tricircle to manage the mapping between
- availability zones and OpenStack instances ::
-
- openstack multiregion networking pod create --region-name CentralRegion
-
- openstack multiregion networking pod create --region-name RegionOne --availability-zone az1
-
- openstack multiregion networking pod create --region-name RegionTwo --availability-zone az2
-
- Pay attention to "region_name" parameter we specify when creating pod. Pod name
- should exactly match the region name registered in Keystone. In the above
- commands, we create pods named "CentralRegion", "RegionOne" and "RegionTwo".
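-
- If your installed Tricircle client supports it, you can optionally list the
- created pods to double check the mappings (a sketch; the exact command set
- depends on the python-tricircleclient version)::
-
- openstack multiregion networking pod list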
-
-- 5 Create necessary resources in central Neutron server ::
-
- neutron --os-region-name=CentralRegion net-create --availability-zone-hint RegionOne net1
- neutron --os-region-name=CentralRegion subnet-create net1 10.0.1.0/24
- neutron --os-region-name=CentralRegion net-create --availability-zone-hint RegionTwo net2
- neutron --os-region-name=CentralRegion subnet-create net2 10.0.2.0/24
-
- Please note that the net1 and net2 IDs will be used in a later step to boot the VMs.
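-
- As a convenience, you can capture these IDs into the shell variables used in
- the following steps (a sketch using the OpenStack client; the variable names
- are just placeholders)::
-
- net1_id=$(openstack --os-region-name=CentralRegion network show net1 -f value -c id)
- net2_id=$(openstack --os-region-name=CentralRegion network show net2 -f value -c id)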
-
-- 6 Get image ID and flavor ID which will be used in VM booting ::
-
- glance --os-region-name=RegionOne image-list
- nova --os-region-name=RegionOne flavor-list
- glance --os-region-name=RegionTwo image-list
- nova --os-region-name=RegionTwo flavor-list
-
-- 7 Boot virtual machines ::
-
- nova --os-region-name=RegionOne boot --flavor 1 --image $image1_id --nic net-id=$net1_id vm1
- nova --os-region-name=RegionTwo boot --flavor 1 --image $image2_id --nic net-id=$net2_id vm2
-
-- 8 Verify the VMs are connected to the networks ::
-
- neutron --os-region-name=CentralRegion port-list
- neutron --os-region-name=RegionOne port-list
- nova --os-region-name=RegionOne list
- neutron --os-region-name=RegionTwo port-list
- nova --os-region-name=RegionTwo list
-
- The IP address of each VM can be found in both the local Neutron server and
- the central Neutron server. A port has the same UUID in the local Neutron
- server and the central Neutron server.
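-
- For example, you can pick one port ID from the central Neutron listing and
- show it in RegionOne to confirm the UUIDs match ($port_id is a placeholder
- for the ID you picked)::
-
- neutron --os-region-name=CentralRegion port-show $port_id
- neutron --os-region-name=RegionOne port-show $port_id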
-
-- 9 Create external network and subnet ::
-
- neutron --os-region-name=CentralRegion net-create --router:external --provider:network_type vlan --provider:physical_network extern --availability-zone-hint RegionTwo ext-net
- neutron --os-region-name=CentralRegion subnet-create --name ext-subnet --disable-dhcp ext-net 163.3.124.0/24
-
- Pay attention that when creating the external network, we need to pass the
- "availability_zone_hints" parameter, which is the name of the pod that will
- host the external network.
-
- *Currently the external network needs to be created before attaching a subnet
- to the router, because the plugin needs to utilize the external network
- information to set up the bridge network when handling the interface adding
- operation. This limitation will be removed later.*
-
-- 10 Create router and attach subnets in central Neutron server ::
-
- neutron --os-region-name=CentralRegion router-create router
- neutron --os-region-name=CentralRegion router-interface-add router $subnet1_id
- neutron --os-region-name=CentralRegion router-interface-add router $subnet2_id
-
-- 11 Set router external gateway in central Neutron server ::
-
- neutron --os-region-name=CentralRegion router-gateway-set router ext-net
-
- Now a virtual machine in a subnet attached to the router should be able to
- ping machines in the external network. In our test, we used a hypervisor tool
- to directly start a virtual machine in the external network to check the
- network connectivity.
-
-- 12 Launch VNC console and test connection ::
-
- nova --os-region-name=RegionOne get-vnc-console vm1 novnc
- nova --os-region-name=RegionTwo get-vnc-console vm2 novnc
-
- You should be able to ping vm1 from vm2 and vice versa.
-
-- 13 Create floating ip in central Neutron server ::
-
- neutron --os-region-name=CentralRegion floatingip-create ext-net
-
-- 14 Associate floating ip ::
-
- neutron --os-region-name=CentralRegion floatingip-list
- neutron --os-region-name=CentralRegion port-list
- neutron --os-region-name=CentralRegion floatingip-associate $floatingip_id $port_id
-
- Now you should be able to access the virtual machine from the external
- network via the floating IP bound to it.
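-
- For example, from a host that can reach the external network you could try
- the following ($floatingip_address is a placeholder for the floating IP
- created above; make sure your security group rules allow ICMP and SSH
- ingress first, and use the cirros login only if you booted the default
- cirros image)::
-
- ping $floatingip_address
- ssh cirros@$floatingip_address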
diff --git a/doc/source/install/single-pod-installation-devstack.rst b/doc/source/install/single-pod-installation-devstack.rst
deleted file mode 100644
index 33cfaee5..00000000
--- a/doc/source/install/single-pod-installation-devstack.rst
+++ /dev/null
@@ -1,97 +0,0 @@
-=====================================
-Single pod installation with DevStack
-=====================================
-
-Now the Tricircle can be played with an all-in-one single pod DevStack. For
-the resource requirements to set up a single pod DevStack, please refer
-to `All-In-One Single Machine `_ for
-installing DevStack on a bare metal server
-or `All-In-One Single VM `_ for
-installing DevStack in a virtual machine.
-
-- 1 Install DevStack. Please refer to `DevStack document
- `_
- on how to install DevStack into single VM or bare metal server.
-
-- 2 In the DevStack folder, create a file local.conf, copy the content of
- https://github.com/openstack/tricircle/blob/master/devstack/local.conf.sample
- to local.conf, and change the password in the file if needed.
-
-- 3 Run DevStack. In DevStack folder, run ::
-
- ./stack.sh
-
-- 4 After DevStack successfully starts, we need to create environment variables for
- the user (the admin user is used as an example in this document). In the DevStack folder ::
-
- source openrc admin demo
-
-- 5 Unset the region name environment variable, so that in the following
- commands each command can be issued to the specified region as needed ::
-
- unset OS_REGION_NAME
-
-- 6 Check if services have been correctly registered. Run ::
-
- openstack --os-region-name=RegionOne endpoint list
-
- you should get output that looks like the following ::
-
- +----------------------------------+---------------+--------------+----------------+
- | ID | Region | Service Name | Service Type |
- +----------------------------------+---------------+--------------+----------------+
- | 3944592550764e349d0e82dba19a8e64 | RegionOne | cinder | volume |
- | 2ce48c73cca44e66a558ad69f1aa4436 | CentralRegion | tricircle | Tricircle |
- | d214b688923a4348b908525266db66ed | RegionOne | nova_legacy | compute_legacy |
- | c5dd60f23f2e4442865f601758a73982 | RegionOne | keystone | identity |
- | a99d5742c76a4069bb8621e0303c6004 | RegionOne | cinderv3 | volumev3 |
- | 8a3c711a24b2443a9a4420bcc302ed2c | RegionOne | glance | image |
- | e136af00d64a4cdf8b6b367210476f49 | RegionOne | nova | compute |
- | 4c3e5d52a90e493ab720213199ab22cd | RegionOne | neutron | network |
- | 8a1312afb6944492b47c5a35f1e5caeb | RegionOne | cinderv2 | volumev2 |
- | e0a5530abff749e1853a342b5747492e | CentralRegion | neutron | network |
- +----------------------------------+---------------+--------------+----------------+
-
- "CentralRegion" is the region you set in local.conf via CENTRAL_REGION_NAME,
- whose default value is "CentralRegion", we use it as the region for the
- central Neutron server and Tricircle Admin API(ID is
- 2ce48c73cca44e66a558ad69f1aa4436 in the above list).
- "RegionOne" is the normal OpenStack region which includes Nova, Cinder,
- Neutron.
-
-- 7 Create pod instances for the Tricircle to manage the mapping between
- availability zone and OpenStack instances ::
-
- openstack multiregion networking pod create --region-name CentralRegion
-
- openstack multiregion networking pod create --region-name RegionOne --availability-zone az1
-
- Pay attention to "region_name" parameter we specify when creating pod. Pod name
- should exactly match the region name registered in Keystone. In the above
- commands, we create pods named "CentralRegion" and "RegionOne".
-
-- 8 Create necessary resources in central Neutron server ::
-
- neutron --os-region-name=CentralRegion net-create --availability-zone-hint RegionOne net1
- neutron --os-region-name=CentralRegion subnet-create net1 10.0.0.0/24
-
- Please note that the net1 ID will be used in a later step to boot the VM.
-
-- 9 Get image ID and flavor ID which will be used in VM booting ::
-
- glance --os-region-name=RegionOne image-list
- nova --os-region-name=RegionOne flavor-list
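-
- Optionally, capture the IDs into the shell variables used in the next step
- (a sketch; the image name below assumes the default DevStack cirros image
- and may differ in your environment)::
-
- image_id=$(openstack --os-region-name=RegionOne image show cirros-0.3.4-x86_64-uec -f value -c id)
- net_id=$(openstack --os-region-name=CentralRegion network show net1 -f value -c id)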
-
-- 10 Boot a virtual machine ::
-
- nova --os-region-name=RegionOne boot --flavor 1 --image $image_id --nic net-id=$net_id vm1
-
-- 11 Verify the VM is connected to the net1 ::
-
- neutron --os-region-name=CentralRegion port-list
- neutron --os-region-name=RegionOne port-list
- nova --os-region-name=RegionOne list
-
- The IP address of the VM can be found in both the local Neutron server and
- the central Neutron server. The port has the same UUID in the local Neutron
- server and the central Neutron server.
diff --git a/doc/source/networking/index.rst b/doc/source/networking/index.rst
deleted file mode 100644
index a9b06174..00000000
--- a/doc/source/networking/index.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-==========================
-Tricircle Networking Guide
-==========================
-
-.. toctree::
- :maxdepth: 4
-
- networking-guide
diff --git a/doc/source/networking/networking-guide-direct-provider-networks.rst b/doc/source/networking/networking-guide-direct-provider-networks.rst
deleted file mode 100644
index d2e84402..00000000
--- a/doc/source/networking/networking-guide-direct-provider-networks.rst
+++ /dev/null
@@ -1,406 +0,0 @@
-===================================================
-North South Networking via Direct Provider Networks
-===================================================
-
-The following figure illustrates one typical networking mode: instances have
-two interfaces, one connected to net1 for heartbeat or data replication, the
-other connected to phy_net1 or phy_net2 to provide service. A different
-physical network is used in each region to support service redundancy in case
-of a region level failure.
-
-.. code-block:: console
-
- +-----------------+ +-----------------+
- |RegionOne | |RegionTwo |
- | | | |
- | phy_net1 | | phy_net2 |
- | +--+---------+ | | +--+---------+ |
- | | | | | |
- | | | | | |
- | +--+--------+ | | +--+--------+ |
- | | | | | | | |
- | | Instance1 | | | | Instance2 | |
- | +------+----+ | | +------+----+ |
- | | | | | |
- | | | | | |
- | net1 | | | | |
- | +------+-------------------------+---+ |
- | | | |
- +-----------------+ +-----------------+
-
-How to create this network topology
-===================================
-
-Create provider network phy_net1, which will be located in RegionOne.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vlan --provider:physical_network extern --availability-zone-hint RegionOne phy_net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | b7832cbb-d399-4d5d-bcfd-d1b804506a1a |
- | name | phy_net1 |
- | project_id | ce444c8be6da447bb412db7d30cd7023 |
- | provider:network_type | vlan |
- | provider:physical_network | extern |
- | provider:segmentation_id | 170 |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | ce444c8be6da447bb412db7d30cd7023 |
- +---------------------------+--------------------------------------+
-
-Create subnet in phy_net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create phy_net1 202.96.1.0/24
- +-------------------+------------------------------------------------+
- | Field | Value |
- +-------------------+------------------------------------------------+
- | allocation_pools | {"start": "202.96.1.2", "end": "202.96.1.254"} |
- | cidr | 202.96.1.0/24 |
- | created_at | 2017-01-11T08:43:48Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 202.96.1.1 |
- | host_routes | |
- | id | 4941c48e-5602-40fc-a117-e84833b85ed3 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | |
- | network_id | b7832cbb-d399-4d5d-bcfd-d1b804506a1a |
- | project_id | ce444c8be6da447bb412db7d30cd7023 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | ce444c8be6da447bb412db7d30cd7023 |
- | updated_at | 2017-01-11T08:43:48Z |
- +-------------------+------------------------------------------------+
-
-Create provider network phy_net2, which will be located in RegionTwo.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vlan --provider:physical_network extern --availability-zone-hint RegionTwo phy_net2
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | id | 731293af-e68f-4677-b433-f46afd6431f3 |
- | name | phy_net2 |
- | project_id | ce444c8be6da447bb412db7d30cd7023 |
- | provider:network_type | vlan |
- | provider:physical_network | extern |
- | provider:segmentation_id | 168 |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | ce444c8be6da447bb412db7d30cd7023 |
- +---------------------------+--------------------------------------+
-
-Create subnet in phy_net2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create phy_net2 202.96.2.0/24
- +-------------------+------------------------------------------------+
- | Field | Value |
- +-------------------+------------------------------------------------+
- | allocation_pools | {"start": "202.96.2.2", "end": "202.96.2.254"} |
- | cidr | 202.96.2.0/24 |
- | created_at | 2017-01-11T08:47:07Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 202.96.2.1 |
- | host_routes | |
- | id | f5fb4f11-4bc1-4911-bcca-b0eaccc6eaf9 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | |
- | network_id | 731293af-e68f-4677-b433-f46afd6431f3 |
- | project_id | ce444c8be6da447bb412db7d30cd7023 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | ce444c8be6da447bb412db7d30cd7023 |
- | updated_at | 2017-01-11T08:47:08Z |
- +-------------------+------------------------------------------------+
-
-Create net1 which will work as the L2 network across RegionOne and RegionTwo.
-
-.. code-block:: console
-
- If net1 is vlan based cross-Neutron L2 network
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vlan --provider:physical_network bridge --availability-zone-hint az1 --availability-zone-hint az2 net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | az1 |
- | | az2 |
- | id | 1897a446-bf6a-4bce-9374-6a3825ee5051 |
- | name | net1 |
- | project_id | ce444c8be6da447bb412db7d30cd7023 |
- | provider:network_type | vlan |
- | provider:physical_network | bridge |
- | provider:segmentation_id | 132 |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | ce444c8be6da447bb412db7d30cd7023 |
- +---------------------------+--------------------------------------+
-
- If net1 is vxlan based cross-Neutron L2 network
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vxlan --availability-zone-hint az1 --availability-zone-hint az2 net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | az1 |
- | | az2 |
- | id | 0093f32c-2ecd-4888-a8c2-a6a424bddfe8 |
- | name | net1 |
- | project_id | ce444c8be6da447bb412db7d30cd7023 |
- | provider:network_type | vxlan |
- | provider:physical_network | |
- | provider:segmentation_id | 1036 |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | ce444c8be6da447bb412db7d30cd7023 |
- +---------------------------+--------------------------------------+
-
-Create subnet in net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create net1 10.0.1.0/24
- +-------------------+--------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------+
- | allocation_pools | {"start": "10.0.1.2", "end": "10.0.1.254"} |
- | cidr | 10.0.1.0/24 |
- | created_at | 2017-01-11T08:49:53Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.1.1 |
- | host_routes | |
- | id | 6a6c63b4-7f41-4a8f-9393-55cd79380e5a |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | |
- | network_id | 1897a446-bf6a-4bce-9374-6a3825ee5051 |
- | project_id | ce444c8be6da447bb412db7d30cd7023 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | ce444c8be6da447bb412db7d30cd7023 |
- | updated_at | 2017-01-11T08:49:53Z |
- +-------------------+--------------------------------------------+
-
-List available images in RegionOne.
-
-.. code-block:: console
-
- $ glance --os-region-name=RegionOne image-list
- +--------------------------------------+---------------------------------+
- | ID | Name |
- +--------------------------------------+---------------------------------+
- | 924a5078-efe5-4abf-85e8-992b7e5f6ac3 | cirros-0.3.4-x86_64-uec |
- | d3e8349d-d58d-4d17-b0ab-951c095fbbc4 | cirros-0.3.4-x86_64-uec-kernel |
- | c4cd7482-a145-4f26-9f41-a9ac17b9492c | cirros-0.3.4-x86_64-uec-ramdisk |
- +--------------------------------------+---------------------------------+
-
-List available flavors in RegionOne.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne flavor-list
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
- | 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
- | 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
- | c1 | cirros256 | 256 | 0 | 0 | | 1 | 1.0 | True |
- | d1 | ds512M | 512 | 5 | 0 | | 1 | 1.0 | True |
- | d2 | ds1G | 1024 | 10 | 0 | | 1 | 1.0 | True |
- | d3 | ds2G | 2048 | 10 | 0 | | 2 | 1.0 | True |
- | d4 | ds4G | 4096 | 20 | 0 | | 4 | 1.0 | True |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
-
-Boot instance1 in RegionOne, and connect this instance to net1 and phy_net1.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne boot --flavor 1 --image 924a5078-efe5-4abf-85e8-992b7e5f6ac3 --nic net-id=1897a446-bf6a-4bce-9374-6a3825ee5051 --nic net-id=b7832cbb-d399-4d5d-bcfd-d1b804506a1a instance1
- +--------------------------------------+----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance1 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | d3e8349d-d58d-4d17-b0ab-951c095fbbc4 |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | c4cd7482-a145-4f26-9f41-a9ac17b9492c |
- | OS-EXT-SRV-ATTR:reservation_id | r-eeu5hjq7 |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | ZB3Ve3nPS66g |
- | config_drive | |
- | created | 2017-01-11T10:49:32Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | 5fd0f616-1077-46df-bebd-b8b53d09663c |
- | image | cirros-0.3.4-x86_64-uec (924a5078-efe5-4abf-85e8-992b7e5f6ac3) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance1 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | ce444c8be6da447bb412db7d30cd7023 |
- | updated | 2017-01-11T10:49:33Z |
- | user_id | 66d7b31664a840939f7d3f2de5e717a9 |
- +--------------------------------------+----------------------------------------------------------------+
-
-List available images in RegionTwo.
-
-.. code-block:: console
-
- $ glance --os-region-name=RegionTwo image-list
- +--------------------------------------+---------------------------------+
- | ID | Name |
- +--------------------------------------+---------------------------------+
- | 1da4303c-96bf-4714-a4dc-cbd5709eda29 | cirros-0.3.4-x86_64-uec |
- | fb35d578-a984-4807-8234-f0d0ca393e89 | cirros-0.3.4-x86_64-uec-kernel |
- | a615d6df-be63-4d5a-9a05-5cf7e23a438a | cirros-0.3.4-x86_64-uec-ramdisk |
- +--------------------------------------+---------------------------------+
-
-List available flavors in RegionTwo.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo flavor-list
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
- | 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
- | 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
- | c1 | cirros256 | 256 | 0 | 0 | | 1 | 1.0 | True |
- | d1 | ds512M | 512 | 5 | 0 | | 1 | 1.0 | True |
- | d2 | ds1G | 1024 | 10 | 0 | | 1 | 1.0 | True |
- | d3 | ds2G | 2048 | 10 | 0 | | 2 | 1.0 | True |
- | d4 | ds4G | 4096 | 20 | 0 | | 4 | 1.0 | True |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
-
-Boot instance2 in RegionTwo, and connect this instance to net1 and phy_net2.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo boot --flavor 1 --image 1da4303c-96bf-4714-a4dc-cbd5709eda29 --nic net-id=1897a446-bf6a-4bce-9374-6a3825ee5051 --nic net-id=731293af-e68f-4677-b433-f46afd6431f3 instance2
- +--------------------------------------+----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance2 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | fb35d578-a984-4807-8234-f0d0ca393e89 |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | a615d6df-be63-4d5a-9a05-5cf7e23a438a |
- | OS-EXT-SRV-ATTR:reservation_id | r-m0duhg40 |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | M5FodqwcsTiJ |
- | config_drive | |
- | created | 2017-01-11T12:55:35Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | 010a0a24-0453-4e73-ae8d-21c7275a9df5 |
- | image | cirros-0.3.4-x86_64-uec (1da4303c-96bf-4714-a4dc-cbd5709eda29) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance2 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | ce444c8be6da447bb412db7d30cd7023 |
- | updated | 2017-01-11T12:55:35Z |
- | user_id | 66d7b31664a840939f7d3f2de5e717a9 |
- +--------------------------------------+----------------------------------------------------------------+
-
-Make sure instance1 is active in RegionOne.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne list
- +--------------------------------------+-----------+--------+------------+-------------+-------------------------------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+-----------+--------+------------+-------------+-------------------------------------+
- | 5fd0f616-1077-46df-bebd-b8b53d09663c | instance1 | ACTIVE | - | Running | net1=10.0.1.4; phy_net1=202.96.1.13 |
- +--------------------------------------+-----------+--------+------------+-------------+-------------------------------------+
-
-Make sure instance2 is active in RegionTwo.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo list
- +--------------------------------------+-----------+--------+------------+-------------+------------------------------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+-----------+--------+------------+-------------+------------------------------------+
- | 010a0a24-0453-4e73-ae8d-21c7275a9df5 | instance2 | ACTIVE | - | Running | phy_net2=202.96.2.5; net1=10.0.1.5 |
- +--------------------------------------+-----------+--------+------------+-------------+------------------------------------+
-
-Now you can ping instance2's IP address 10.0.1.5 from instance1, or ping
-instance1's IP address 10.0.1.4 from instance2.
-
-Note: Not all images will bring up the second NIC automatically, so you can
-SSH into instance1 or instance2, use "ifconfig -a" to check whether all NICs
-are created, and bring up all NICs if necessary.
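-
-On the default cirros image, for example, the second NIC can usually be
-brought up from inside the instance as follows (an assumption; other images
-may use different tools).
-
-.. code-block:: console
-
- $ sudo ifconfig eth1 up
- $ sudo udhcpc -i eth1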
diff --git a/doc/source/networking/networking-guide-local-networking.rst b/doc/source/networking/networking-guide-local-networking.rst
deleted file mode 100644
index 00fbc15c..00000000
--- a/doc/source/networking/networking-guide-local-networking.rst
+++ /dev/null
@@ -1,420 +0,0 @@
-================
-Local Networking
-================
-
-The following figure illustrates one networking mode without any cross-Neutron
-networking requirement; only networking inside one region is needed.
-
-.. code-block:: console
-
- +-----------------+ +-----------------+
- | RegionOne | | RegionTwo |
- | | | |
- | ext-net1 | | ext-net2 |
- | +-----+-----+ | | +-----+-----+ |
- | | | | | |
- | | | | | |
- | +--+--+ | | +--+--+ |
- | | | | | | | |
- | | R1 | | | | R2 | |
- | | | | | | | |
- | +--+--+ | | +--+--+ |
- | | | | | |
- | | | | | |
- | +---+-+-+ | | +---++--+ |
- | net1 | | | net2 | |
- | | | | | |
- | +-------+---+ | | +-------+----+ |
- | | instance1 | | | | instance2 | |
- | +-----------+ | | +------------+ |
- +-----------------+ +-----------------+
-
-How to create this network topology
-===================================
-
-Create external network ext-net1, which will be located in RegionOne.
-You need to specify the region name as the value of availability-zone-hint.
-If availability-zone-hint is not provided, then the external network
-will be created in a default region.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vlan --provider:physical_network extern --router:external --availability-zone-hint RegionOne ext-net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | a3a23b20-b0c1-461a-bc00-3db04ce212ca |
- | name | ext-net1 |
- | project_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | provider:network_type | vlan |
- | provider:physical_network | extern |
- | provider:segmentation_id | 170 |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- +---------------------------+--------------------------------------+
-
-Now you can also create a flat type external network.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type flat --provider:physical_network extern --router:external --availability-zone-hint RegionOne ext-net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | df2c8e3a-3f25-4cba-a902-33289f3a8aee |
- | name | ext-net1 |
- | project_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | provider:network_type | flat |
- | provider:physical_network | extern |
- | provider:segmentation_id | |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- +---------------------------+--------------------------------------+
-
-For an external network, the network will be created in the region specified in
-availability-zone-hint too.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-list
- +--------------------------------------+----------+---------+
- | id | name | subnets |
- +--------------------------------------+----------+---------+
- | a3a23b20-b0c1-461a-bc00-3db04ce212ca | ext-net1 | |
- +--------------------------------------+----------+---------+
-
- $ neutron --os-region-name=RegionOne net-list
- +--------------------------------------+--------------------------------------+---------+
- | id | name | subnets |
- +--------------------------------------+--------------------------------------+---------+
- | a3a23b20-b0c1-461a-bc00-3db04ce212ca | a3a23b20-b0c1-461a-bc00-3db04ce212ca | |
- +--------------------------------------+--------------------------------------+---------+
-
-Create subnet in ext-net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create --name ext-subnet1 --disable-dhcp ext-net1 163.3.124.0/24
- +-------------------+--------------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------------+
- | allocation_pools | {"start": "163.3.124.2", "end": "163.3.124.254"} |
- | cidr | 163.3.124.0/24 |
- | created_at | 2017-01-10T04:49:16Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | False |
- | gateway_ip | 163.3.124.1 |
- | host_routes | |
- | id | 055ec17a-5b64-4cff-878c-c898427aabe3 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | ext-subnet1 |
- | network_id | a3a23b20-b0c1-461a-bc00-3db04ce212ca |
- | project_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | updated_at | 2017-01-10T04:49:16Z |
- +-------------------+--------------------------------------------------+
-
-Create local router R1 in RegionOne.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-create --availability-zone-hint RegionOne R1
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | availability_zones | |
- | created_at | 2017-01-10T04:50:06Z |
- | description | |
- | external_gateway_info | |
- | id | 7ce3282f-3864-4c55-84bf-fc5edc3293cb |
- | name | R1 |
- | project_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | revision_number | 1 |
- | status | ACTIVE |
- | tenant_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | updated_at | 2017-01-10T04:50:06Z |
- +-------------------------+--------------------------------------+
-
-Set the router gateway to ext-net1 for R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-gateway-set R1 ext-net1
- Set gateway for router R1
-
- $ neutron --os-region-name=CentralRegion router-show R1
- +-----------------------+------------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+------------------------------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | created_at | 2017-01-10T04:50:06Z |
- | description | |
- | external_gateway_info | {"network_id": "a3a23b20-b0c1-461a-bc00-3db04ce212ca", "external_fixed_ips": [{"subnet_id": "055ec17a-5b64 |
- | | -4cff-878c-c898427aabe3", "ip_address": "163.3.124.5"}]} |
- | id | 7ce3282f-3864-4c55-84bf-fc5edc3293cb |
- | name | R1 |
- | project_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | revision_number | 3 |
- | status | ACTIVE |
- | tenant_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | updated_at | 2017-01-10T04:51:19Z |
- +-----------------------+------------------------------------------------------------------------------------------------------------+
-
-Create local network net1 in RegionOne.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --availability-zone-hint RegionOne net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | beaf59eb-c597-4b69-bd41-8bf9fee2dc6a |
- | name | net1 |
- | project_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | provider:network_type | local |
- | provider:physical_network | |
- | provider:segmentation_id | |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- +---------------------------+--------------------------------------+
-
-Create a subnet in net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create net1 10.0.1.0/24
- +-------------------+--------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------+
- | allocation_pools | {"start": "10.0.1.2", "end": "10.0.1.254"} |
- | cidr | 10.0.1.0/24 |
- | created_at | 2017-01-10T04:54:29Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.1.1 |
- | host_routes | |
- | id | ab812ed5-1a4c-4b12-859c-6c9b3df21642 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | |
- | network_id | beaf59eb-c597-4b69-bd41-8bf9fee2dc6a |
- | project_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | updated_at | 2017-01-10T04:54:29Z |
- +-------------------+--------------------------------------------+
-
-Add this subnet to router R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-interface-add R1 ab812ed5-1a4c-4b12-859c-6c9b3df21642
- Added interface 2b7eceaf-8333-49cd-a7fe-aa101d5c9598 to router R1.
-
-List the available images in RegionOne.
-
-.. code-block:: console
-
- $ glance --os-region-name=RegionOne image-list
- +--------------------------------------+---------------------------------+
- | ID | Name |
- +--------------------------------------+---------------------------------+
- | 2f73b93e-8b8a-4e07-8732-87f968852d82 | cirros-0.3.4-x86_64-uec |
- | 4040ca54-2ebc-4ccd-8a0d-4284f4713ef1 | cirros-0.3.4-x86_64-uec-kernel |
- | 7e86341f-2d6e-4a2a-b01a-e334fa904cf0 | cirros-0.3.4-x86_64-uec-ramdisk |
- +--------------------------------------+---------------------------------+
-
-List the available flavors in RegionOne.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne flavor-list
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
- | 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
- | 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
- | c1 | cirros256 | 256 | 0 | 0 | | 1 | 1.0 | True |
- | d1 | ds512M | 512 | 5 | 0 | | 1 | 1.0 | True |
- | d2 | ds1G | 1024 | 10 | 0 | | 1 | 1.0 | True |
- | d3 | ds2G | 2048 | 10 | 0 | | 2 | 1.0 | True |
- | d4 | ds4G | 4096 | 20 | 0 | | 4 | 1.0 | True |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
-
-Boot instance1 in RegionOne, and connect this instance to net1.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne boot --flavor 1 --image 2f73b93e-8b8a-4e07-8732-87f968852d82 --nic net-id=beaf59eb-c597-4b69-bd41-8bf9fee2dc6a instance1
- +--------------------------------------+----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance1 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | 4040ca54-2ebc-4ccd-8a0d-4284f4713ef1 |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | 7e86341f-2d6e-4a2a-b01a-e334fa904cf0 |
- | OS-EXT-SRV-ATTR:reservation_id | r-5t409rww |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | 23DipTvrpCvn |
- | config_drive | |
- | created | 2017-01-10T04:59:25Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | 301546be-b675-49eb-b6c2-c5c986235ecb |
- | image | cirros-0.3.4-x86_64-uec (2f73b93e-8b8a-4e07-8732-87f968852d82) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance1 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | updated | 2017-01-10T04:59:26Z |
- | user_id | a7b7420bd76c48c2bb5cb97c16bb165d |
- +--------------------------------------+----------------------------------------------------------------+
-
-Make sure instance1 is active in RegionOne.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne list
- +--------------------------------------+-----------+--------+------------+-------------+---------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+-----------+--------+------------+-------------+---------------+
- | 301546be-b675-49eb-b6c2-c5c986235ecb | instance1 | ACTIVE | - | Running | net1=10.0.1.4 |
- +--------------------------------------+-----------+--------+------------+-------------+---------------+
-
-Verify that the corresponding networking resources are provisioned in RegionOne.
-
-.. code-block:: console
-
- $ neutron --os-region-name=RegionOne router-list
- +------------------------------------+------------------------------------+------------------------------------+-------------+-------+
- | id | name | external_gateway_info | distributed | ha |
- +------------------------------------+------------------------------------+------------------------------------+-------------+-------+
- | d6cd0978-f3cc-4a0b-b45b- | 7ce3282f-3864-4c55-84bf- | {"network_id": "a3a23b20-b0c1 | False | False |
- | a427ebc51382 | fc5edc3293cb | -461a-bc00-3db04ce212ca", | | |
- | | | "enable_snat": true, | | |
- | | | "external_fixed_ips": | | |
- | | | [{"subnet_id": "055ec17a-5b64 | | |
- | | | -4cff-878c-c898427aabe3", | | |
- | | | "ip_address": "163.3.124.5"}]} | | |
- +------------------------------------+------------------------------------+------------------------------------+-------------+-------+
-
-
-Create a floating IP for instance1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-create ext-net1
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | created_at | 2017-01-10T05:17:48Z |
- | description | |
- | fixed_ip_address | |
- | floating_ip_address | 163.3.124.7 |
- | floating_network_id | a3a23b20-b0c1-461a-bc00-3db04ce212ca |
- | id | 0c031c3f-93ba-49bf-9c98-03bf4b0c7b2b |
- | port_id | |
- | project_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | revision_number | 1 |
- | router_id | |
- | status | DOWN |
- | tenant_id | c0e194dfadd44fc1983fd6dd7c8ed384 |
- | updated_at | 2017-01-10T05:17:48Z |
- +---------------------+--------------------------------------+
-
-List the port in net1 for instance1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion port-list
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | id | name | mac_address | fixed_ips |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | 0b55c3b3-ae5f-4d03-899b- | | fa:16:3e:b5:1d:95 | {"subnet_id": "ab812ed5-1a4c-4b12 |
- | f056d967942e | | | -859c-6c9b3df21642", "ip_address": |
- | | | | "10.0.1.4"} |
- | 2b7eceaf-8333-49cd-a7fe- | | fa:16:3e:59:b3:ef | {"subnet_id": "ab812ed5-1a4c-4b12 |
- | aa101d5c9598 | | | -859c-6c9b3df21642", "ip_address": |
- | | | | "10.0.1.1"} |
- | 572ad59f- | dhcp_port_ab812ed5-1a4c-4b12-859c- | fa:16:3e:56:7f:2b | {"subnet_id": "ab812ed5-1a4c-4b12 |
- | 5a15-4662-9fb8-f92a49389b28 | 6c9b3df21642 | | -859c-6c9b3df21642", "ip_address": |
- | | | | "10.0.1.2"} |
- | bf398883-c435-4cb2-8693-017a790825 | interface_RegionOne_ab812ed5-1a4c- | fa:16:3e:15:ef:1f | {"subnet_id": "ab812ed5-1a4c-4b12 |
- | 9e | 4b12-859c-6c9b3df21642 | | -859c-6c9b3df21642", "ip_address": |
- | | | | "10.0.1.7"} |
- | 452b8ebf- | | fa:16:3e:1f:59:b2 | {"subnet_id": "055ec17a-5b64-4cff- |
- | c9c6-4990-9048-644a3a6fde1a | | | 878c-c898427aabe3", "ip_address": |
- | | | | "163.3.124.5"} |
- | 8e77c6ab-2884-4779-91e2-c3a4975fdf | | fa:16:3e:3c:88:7d | {"subnet_id": "055ec17a-5b64-4cff- |
- | 50 | | | 878c-c898427aabe3", "ip_address": |
- | | | | "163.3.124.7"} |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
-
-Associate the floating IP to instance1's IP in net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-associate 0c031c3f-93ba-49bf-9c98-03bf4b0c7b2b 0b55c3b3-ae5f-4d03-899b-f056d967942e
- Associated floating IP 0c031c3f-93ba-49bf-9c98-03bf4b0c7b2b
-
-Verify the floating IP is associated in RegionOne too.
-
-.. code-block:: console
-
- $ neutron --os-region-name=RegionOne floatingip-list
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | id | fixed_ip_address | floating_ip_address | port_id |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | b28baa80-d798-43e7-baff-e65873bd1ec2 | 10.0.1.4 | 163.3.124.7 | 0b55c3b3-ae5f-4d03-899b-f056d967942e |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
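-
-To verify end-to-end connectivity, you could ping the floating IP 163.3.124.7
-from a machine attached to ext-net1 (reachability depends on your physical
-external network and on security group rules allowing ICMP).
-
-.. code-block:: console
-
- $ ping 163.3.124.7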
-
-You can create the topology in RegionTwo in the same way as has been done in RegionOne.
diff --git a/doc/source/networking/networking-guide-multiple-external-networks.rst b/doc/source/networking/networking-guide-multiple-external-networks.rst
deleted file mode 100644
index 13b11f09..00000000
--- a/doc/source/networking/networking-guide-multiple-external-networks.rst
+++ /dev/null
@@ -1,919 +0,0 @@
-=====================================================
-North South Networking via Multiple External Networks
-=====================================================
-
-The following figure illustrates one typical networking mode: instances have
-two interfaces, one connected to net3 for heartbeat or data replication, the
-other connected to net1 or net2 to provide service. A different external
-network is used in each region to support service redundancy in case of a
-region level failure.
-
-.. code-block:: console
-
- +-----------------+ +-----------------+
- | RegionOne | | RegionTwo |
- | | | |
- | ext_net1 | | ext_net2 |
- | +-----+-----+ | | +-----+-----+ |
- | | | | | |
- | | | | | |
- | +--+--+ | | +--+--+ |
- | | | | | | | |
- | | R1 | | | | R2 | |
- | | | | | | | |
- | +--+--+ | | +--+--+ |
- | | | | | |
- | | | | | |
- | +---+-+-+ | | +---+-+-+ |
- | net1 | | | net2 | |
- | | | | | |
- | +--------+--+ | | +--------+--+ |
- | | Instance1 | | | | Instance2 | |
- | +-----------+ | | +-----------+ |
- | | | | | |
- | | | net3 | | |
- | +------+-------------------------+----+ |
- | | | |
- +-----------------+ +-----------------+
-
-How to create this network topology
-===================================
-
-Create external network ext-net1, which will be located in RegionOne.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vlan --provider:physical_network extern --router:external --availability-zone-hint RegionOne ext-net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | 9b3d04be-0c00-40ed-88ff-088da6fcd8bd |
- | name | ext-net1 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | provider:network_type | vlan |
- | provider:physical_network | extern |
- | provider:segmentation_id | 170 |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- +---------------------------+--------------------------------------+
-
-Now you can also create a flat type external network.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type flat --provider:physical_network extern --router:external --availability-zone-hint RegionOne ext-net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | 17d969a5-efe3-407f-9657-61658a4a5193 |
- | name | ext-net1 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | provider:network_type | flat |
- | provider:physical_network | extern |
- | provider:segmentation_id | |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- +---------------------------+--------------------------------------+
-
-Create subnet in ext-net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create --name ext-subnet1 --disable-dhcp ext-net1 163.3.124.0/24
- +-------------------+--------------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------------+
- | allocation_pools | {"start": "163.3.124.2", "end": "163.3.124.254"} |
- | cidr | 163.3.124.0/24 |
- | created_at | 2017-01-12T07:03:45Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | False |
- | gateway_ip | 163.3.124.1 |
- | host_routes | |
- | id | a2eecc16-deb8-42a6-a41b-5058847ed20a |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | ext-subnet1 |
- | network_id | 9b3d04be-0c00-40ed-88ff-088da6fcd8bd |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:03:45Z |
- +-------------------+--------------------------------------------------+
-
-Create router R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-create --availability-zone-hint RegionOne R1
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | availability_zones | |
- | created_at | 2017-01-12T07:04:13Z |
- | description | |
- | external_gateway_info | |
- | id | 063de74b-d962-4fc2-96d9-87e2cb35c082 |
- | name | R1 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 1 |
- | status | ACTIVE |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:04:13Z |
- +-------------------------+--------------------------------------+
-
-Set the router gateway to ext-net1 for R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-gateway-set R1 ext-net1
- Set gateway for router R1
-
- $ neutron --os-region-name=CentralRegion router-show R1
- +-----------------------+------------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+------------------------------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | created_at | 2017-01-12T07:04:13Z |
- | description | |
- | external_gateway_info | {"network_id": "9b3d04be-0c00-40ed-88ff-088da6fcd8bd", "external_fixed_ips": [{"subnet_id": |
- | | "a2eecc16-deb8-42a6-a41b-5058847ed20a", "ip_address": "163.3.124.5"}]} |
- | id | 063de74b-d962-4fc2-96d9-87e2cb35c082 |
- | name | R1 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 3 |
- | status | ACTIVE |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:04:36Z |
- +-----------------------+------------------------------------------------------------------------------------------------------------+
-
-Create local network net1, which will reside in RegionOne, so use RegionOne
-as the value of availability-zone-hint.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --availability-zone-hint RegionOne net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | de4fda27-e4f7-4448-80f6-79ee5ea2478b |
- | name | net1 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | provider:network_type | local |
- | provider:physical_network | |
- | provider:segmentation_id | |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- +---------------------------+--------------------------------------+
-
-Create a subnet in net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create net1 10.0.1.0/24
- +-------------------+--------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------+
- | allocation_pools | {"start": "10.0.1.2", "end": "10.0.1.254"} |
- | cidr | 10.0.1.0/24 |
- | created_at | 2017-01-12T07:05:57Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.1.1 |
- | host_routes | |
- | id | 2c8f446f-ba02-4140-a793-913033aa3580 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | |
- | network_id | de4fda27-e4f7-4448-80f6-79ee5ea2478b |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:05:57Z |
- +-------------------+--------------------------------------------+
-
-Add this subnet to router R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-interface-add R1 2c8f446f-ba02-4140-a793-913033aa3580
- Added interface d48a8e87-61a0-494b-bc06-54f7a008ea78 to router R1.
-
-Create net3 which will work as the L2 network across RegionOne and RegionTwo.
-
-.. code-block:: console
-
- If net3 is vlan based cross-Neutron L2 network
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vlan --provider:physical_network bridge --availability-zone-hint az1 --availability-zone-hint az2 net3
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | az1 |
- | | az2 |
- | id | 68d04c60-469d-495d-bb23-0d36d56235bd |
- | name | net3 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | provider:network_type | vlan |
- | provider:physical_network | bridge |
- | provider:segmentation_id | 138 |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- +---------------------------+--------------------------------------+
-
- If net3 is vxlan based cross-Neutron L2 network
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vxlan --availability-zone-hint az1 --availability-zone-hint az2 net3
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | az1 |
- | | az2 |
- | id | 0f171049-0c15-4d1b-95cd-ede8dc554b44 |
- | name | net3 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | provider:network_type | vxlan |
- | provider:physical_network | |
- | provider:segmentation_id | 1031 |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- +---------------------------+--------------------------------------+
-
-Create a subnet in net3.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create net3 10.0.3.0/24
- +-------------------+--------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------+
- | allocation_pools | {"start": "10.0.3.2", "end": "10.0.3.254"} |
- | cidr | 10.0.3.0/24 |
- | created_at | 2017-01-12T07:07:42Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.3.1 |
- | host_routes | |
- | id | 5ab92c3c-b799-451c-b5d5-b72274fb0fcc |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | |
- | network_id | 68d04c60-469d-495d-bb23-0d36d56235bd |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:07:42Z |
- +-------------------+--------------------------------------------+
-
-List the available images in RegionOne.
-
-.. code-block:: console
-
- $ glance --os-region-name=RegionOne image-list
- +--------------------------------------+---------------------------------+
- | ID | Name |
- +--------------------------------------+---------------------------------+
- | 8747fd6a-72aa-4075-b936-a24bc48ed57b | cirros-0.3.4-x86_64-uec |
- | 3a54e6fd-d215-437b-9d67-eac840c97f9c | cirros-0.3.4-x86_64-uec-kernel |
- | 02b06834-2a9f-4dad-8d59-2a77963af8a5 | cirros-0.3.4-x86_64-uec-ramdisk |
- +--------------------------------------+---------------------------------+
-
-List the available flavors in RegionOne.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne flavor-list
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
- | 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
- | 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
- | c1 | cirros256 | 256 | 0 | 0 | | 1 | 1.0 | True |
- | d1 | ds512M | 512 | 5 | 0 | | 1 | 1.0 | True |
- | d2 | ds1G | 1024 | 10 | 0 | | 1 | 1.0 | True |
- | d3 | ds2G | 2048 | 10 | 0 | | 2 | 1.0 | True |
- | d4 | ds4G | 4096 | 20 | 0 | | 4 | 1.0 | True |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
-
-
-Boot instance1 in RegionOne, and connect this instance to net1 and net3.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne boot --flavor 1 --image 8747fd6a-72aa-4075-b936-a24bc48ed57b --nic net-id=68d04c60-469d-495d-bb23-0d36d56235bd --nic net-id=de4fda27-e4f7-4448-80f6-79ee5ea2478b instance1
- +--------------------------------------+----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance1 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | 3a54e6fd-d215-437b-9d67-eac840c97f9c |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | 02b06834-2a9f-4dad-8d59-2a77963af8a5 |
- | OS-EXT-SRV-ATTR:reservation_id | r-9cnhvave |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | zDFR3x8pDDKi |
- | config_drive | |
- | created | 2017-01-12T07:09:53Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | 3d53560e-4e04-43a0-b774-cfa3deecbca4 |
- | image | cirros-0.3.4-x86_64-uec (8747fd6a-72aa-4075-b936-a24bc48ed57b) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance1 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated | 2017-01-12T07:09:54Z |
- | user_id | d2521e53aa8c4916b3a8e444f20cf1da |
- +--------------------------------------+----------------------------------------------------------------+
-
-Make sure instance1 is active in RegionOne.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne list
- +--------------------------------------+-----------+--------+------------+-------------+-------------------------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+-----------+--------+------------+-------------+-------------------------------+
- | 3d53560e-4e04-43a0-b774-cfa3deecbca4 | instance1 | ACTIVE | - | Running | net3=10.0.3.7; net1=10.0.1.13 |
- +--------------------------------------+-----------+--------+------------+-------------+-------------------------------+
-
-
-Create a floating IP for instance1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-create ext-net1
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | created_at | 2017-01-12T07:12:50Z |
- | description | |
- | fixed_ip_address | |
- | floating_ip_address | 163.3.124.6 |
- | floating_network_id | 9b3d04be-0c00-40ed-88ff-088da6fcd8bd |
- | id | 645f9cd6-d8d4-427a-88fe-770240c96d09 |
- | port_id | |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 1 |
- | router_id | |
- | status | DOWN |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:12:50Z |
- +---------------------+--------------------------------------+
-
-List the ports and find instance1's port in net1 (IP address 10.0.1.13).
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion port-list
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | id | name | mac_address | fixed_ips |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | 185b5185-0254-486c-9d8b- | | fa:16:3e:da:ae:99 | {"subnet_id": "2c8f446f- |
- | 198af4b4d40e | | | ba02-4140-a793-913033aa3580", |
- | | | | "ip_address": "10.0.1.13"} |
- | 248f9072-76d6-405a- | | fa:16:3e:dc:2f:b3 | {"subnet_id": "5ab92c3c-b799-451c- |
- | 8eb5-f0d3475c542d | | | b5d5-b72274fb0fcc", "ip_address": |
- | | | | "10.0.3.7"} |
- | d48a8e87-61a0-494b- | | fa:16:3e:c6:8e:c5 | {"subnet_id": "2c8f446f- |
- | bc06-54f7a008ea78 | | | ba02-4140-a793-913033aa3580", |
- | | | | "ip_address": "10.0.1.1"} |
- | ce3a1530-20f4-4760-a451-81e5f939aa | dhcp_port_2c8f446f- | fa:16:3e:e6:32:0f | {"subnet_id": "2c8f446f- |
- | fc | ba02-4140-a793-913033aa3580 | | ba02-4140-a793-913033aa3580", |
- | | | | "ip_address": "10.0.1.2"} |
- | 7925a3cc- | interface_RegionOne_2c8f446f- | fa:16:3e:c5:ad:6f | {"subnet_id": "2c8f446f- |
- | 6c36-4bc3-a798-a6145fed442a | ba02-4140-a793-913033aa3580 | | ba02-4140-a793-913033aa3580", |
- | | | | "ip_address": "10.0.1.3"} |
- | 077c63b6-0184-4bf7-b3aa- | dhcp_port_5ab92c3c-b799-451c- | fa:16:3e:d2:a3:53 | {"subnet_id": "5ab92c3c-b799-451c- |
- | b071de6f39be | b5d5-b72274fb0fcc | | b5d5-b72274fb0fcc", "ip_address": |
- | | | | "10.0.3.2"} |
- | c90be7bc- | interface_RegionOne_5ab92c3c-b799 | fa:16:3e:b6:e4:bc | {"subnet_id": "5ab92c3c-b799-451c- |
- | 31ea-4015-a432-2bef62e343d1 | -451c-b5d5-b72274fb0fcc | | b5d5-b72274fb0fcc", "ip_address": |
- | | | | "10.0.3.9"} |
- | 3053fcb9-b6ad-4a9c-b89e- | bridge_port_532890c765604609a8d2ef | fa:16:3e:fc:d0:fc | {"subnet_id": "53def0ac-59ef- |
- | ffe6aff6523b | 6fc8e5f6ef_0c4faa42-5230-4adc- | | 4c7b-b694-3375598954da", |
- | | bab5-10ee53ebf888 | | "ip_address": "100.0.0.11"} |
- | ce787983-a140-4c53-96d2-71f62e1545 | | fa:16:3e:1a:62:7f | {"subnet_id": "a2eecc16-deb8-42a6 |
- | 3a | | | -a41b-5058847ed20a", "ip_address": |
- | | | | "163.3.124.5"} |
- | 2d9fc640-1858-4c7e-b42c- | | fa:16:3e:00:7c:6e | {"subnet_id": "a2eecc16-deb8-42a6 |
- | d3ed3f338b8a | | | -a41b-5058847ed20a", "ip_address": |
- | | | | "163.3.124.6"} |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
-
-Associate the floating IP to instance1's IP in net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-associate 645f9cd6-d8d4-427a-88fe-770240c96d09 185b5185-0254-486c-9d8b-198af4b4d40e
- Associated floating IP 645f9cd6-d8d4-427a-88fe-770240c96d09
-
-Verify the floating IP was associated.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-list
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | id | fixed_ip_address | floating_ip_address | port_id |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | 645f9cd6-d8d4-427a-88fe-770240c96d09 | 10.0.1.13 | 163.3.124.6 | 185b5185-0254-486c-9d8b-198af4b4d40e |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
-
-You can also verify the association in RegionOne.
-
-.. code-block:: console
-
- $ neutron --os-region-name=RegionOne floatingip-list
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | id | fixed_ip_address | floating_ip_address | port_id |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | d59362fa-aea0-4e35-917e-8e586212c867 | 10.0.1.13 | 163.3.124.6 | 185b5185-0254-486c-9d8b-198af4b4d40e |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
-
- $ neutron --os-region-name=RegionOne router-list
- +------------------------------------+------------------------------------+------------------------------------+-------------+-------+
- | id | name | external_gateway_info | distributed | ha |
- +------------------------------------+------------------------------------+------------------------------------+-------------+-------+
- | 0c4faa42-5230-4adc- | 063de74b-d962-4fc2-96d9-87e2cb35c0 | {"network_id": "6932cd71-3cd4-4560 | False | False |
- | bab5-10ee53ebf888 | 82 | -88f3-2a112fff0cea", | | |
- | | | "enable_snat": false, | | |
- | | | "external_fixed_ips": | | |
- | | | [{"subnet_id": "53def0ac-59ef- | | |
- | | | 4c7b-b694-3375598954da", | | |
- | | | "ip_address": "100.0.0.11"}]} | | |
- | f99dcc0c-d94a- | ns_router_063de74b-d962-4fc2-96d9- | {"network_id": "9b3d04be-0c00 | False | False |
- | 4b41-9236-2c0169f3ab7d | 87e2cb35c082 | -40ed-88ff-088da6fcd8bd", | | |
- | | | "enable_snat": true, | | |
- | | | "external_fixed_ips": | | |
- | | | [{"subnet_id": "a2eecc16-deb8-42a6 | | |
- | | | -a41b-5058847ed20a", "ip_address": | | |
- | | | "163.3.124.5"}]} | | |
- +------------------------------------+------------------------------------+------------------------------------+-------------+-------+
-
-Now create the network topology in RegionTwo.
-
-Create external network ext-net2, which will be located in RegionTwo.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vlan --provider:physical_network extern --router:external --availability-zone-hint RegionTwo ext-net2
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | id | ae806ecb-fa3e-4b3c-a582-caef3d8cd9b4 |
- | name | ext-net2 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | provider:network_type | vlan |
- | provider:physical_network | extern |
- | provider:segmentation_id | 183 |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- +---------------------------+--------------------------------------+
-
-Alternatively, you can create ext-net2 as a flat-type external network.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type flat --provider:physical_network extern --router:external --availability-zone-hint RegionTwo ext-net2
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | id | 0b6d43d1-a837-4f91-930e-dfcc74ef483b |
- | name | ext-net2 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | provider:network_type | flat |
- | provider:physical_network | extern |
- | provider:segmentation_id | |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- +---------------------------+--------------------------------------+
-
-Create a subnet in ext-net2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create --name ext-subnet2 --disable-dhcp ext-net2 163.3.125.0/24
- +-------------------+--------------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------------+
- | allocation_pools | {"start": "163.3.125.2", "end": "163.3.125.254"} |
- | cidr | 163.3.125.0/24 |
- | created_at | 2017-01-12T07:43:04Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | False |
- | gateway_ip | 163.3.125.1 |
- | host_routes | |
- | id | 9fb32423-95a8-4589-b69c-e2955234ae56 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | ext-subnet2 |
- | network_id | ae806ecb-fa3e-4b3c-a582-caef3d8cd9b4 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:43:04Z |
- +-------------------+--------------------------------------------------+
-
-Create router R2, which will work in RegionTwo.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-create --availability-zone-hint RegionTwo R2
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | availability_zones | |
- | created_at | 2017-01-12T07:19:23Z |
- | description | |
- | external_gateway_info | |
- | id | 8a8571db-e3ba-4b78-98ca-13d4dc1a4fb0 |
- | name | R2 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 1 |
- | status | ACTIVE |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:19:23Z |
- +-------------------------+--------------------------------------+
-
-Set the router gateway to ext-net2 for R2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-gateway-set R2 ext-net2
- Set gateway for router R2
-
-Check router R2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-show R2
- +-----------------------+------------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+------------------------------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | created_at | 2017-01-12T07:19:23Z |
- | description | |
- | external_gateway_info | {"network_id": "ae806ecb-fa3e-4b3c-a582-caef3d8cd9b4", "external_fixed_ips": [{"subnet_id": |
- | | "9fb32423-95a8-4589-b69c-e2955234ae56", "ip_address": "163.3.125.3"}]} |
- | id | 8a8571db-e3ba-4b78-98ca-13d4dc1a4fb0 |
- | name | R2 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 7 |
- | status | ACTIVE |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:44:00Z |
- +-----------------------+------------------------------------------------------------------------------------------------------------+
-
-
-Create net2 in RegionTwo.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --availability-zone-hint RegionTwo net2
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | id | 71b06c5d-2eb8-4ef4-a978-c5c98874811b |
- | name | net2 |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | provider:network_type | local |
- | provider:physical_network | |
- | provider:segmentation_id | |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- +---------------------------+--------------------------------------+
-
-Create a subnet in net2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create net2 10.0.2.0/24
- +-------------------+--------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------+
- | allocation_pools | {"start": "10.0.2.2", "end": "10.0.2.254"} |
- | cidr | 10.0.2.0/24 |
- | created_at | 2017-01-12T07:45:55Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.2.1 |
- | host_routes | |
- | id | 356947cf-88e2-408b-ab49-7c0e79110a25 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | |
- | network_id | 71b06c5d-2eb8-4ef4-a978-c5c98874811b |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-12T07:45:55Z |
- +-------------------+--------------------------------------------+
-
-Add this subnet to router R2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-interface-add R2 356947cf-88e2-408b-ab49-7c0e79110a25
- Added interface 805b16de-fbe9-4b54-b891-b39bc2f73a86 to router R2.
-
-List available images in RegionTwo.
-
-.. code-block:: console
-
- $ glance --os-region-name=RegionTwo image-list
- +--------------------------------------+---------------------------------+
- | ID | Name |
- +--------------------------------------+---------------------------------+
- | 6fbad28b-d5f1-4924-a330-f9d5a6cf6c62 | cirros-0.3.4-x86_64-uec |
- | cc912d30-5cbe-406d-89f2-8c09a73012c4 | cirros-0.3.4-x86_64-uec-kernel |
- | 8660610d-d362-4f20-8f99-4d64c7c21284 | cirros-0.3.4-x86_64-uec-ramdisk |
- +--------------------------------------+---------------------------------+
-
-List available flavors in RegionTwo.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo flavor-list
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
- | 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
- | 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
- | c1 | cirros256 | 256 | 0 | 0 | | 1 | 1.0 | True |
- | d1 | ds512M | 512 | 5 | 0 | | 1 | 1.0 | True |
- | d2 | ds1G | 1024 | 10 | 0 | | 1 | 1.0 | True |
- | d3 | ds2G | 2048 | 10 | 0 | | 2 | 1.0 | True |
- | d4 | ds4G | 4096 | 20 | 0 | | 4 | 1.0 | True |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
-
-Boot instance2 in RegionTwo, and connect this instance to net2 and net3.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo boot --flavor 1 --image 6fbad28b-d5f1-4924-a330-f9d5a6cf6c62 --nic net-id=68d04c60-469d-495d-bb23-0d36d56235bd --nic net-id=71b06c5d-2eb8-4ef4-a978-c5c98874811b instance2
- +--------------------------------------+----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance2 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | cc912d30-5cbe-406d-89f2-8c09a73012c4 |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | 8660610d-d362-4f20-8f99-4d64c7c21284 |
- | OS-EXT-SRV-ATTR:reservation_id | r-xylwc16h |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | Lmanqrz9GN77 |
- | config_drive | |
- | created | 2017-01-13T01:41:19Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | dbcfef20-0794-4b5e-aa3f-d08dc6086eb6 |
- | image | cirros-0.3.4-x86_64-uec (6fbad28b-d5f1-4924-a330-f9d5a6cf6c62) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance2 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated | 2017-01-13T01:41:19Z |
- | user_id | d2521e53aa8c4916b3a8e444f20cf1da |
- +--------------------------------------+----------------------------------------------------------------+
-
-Check to see if instance2 is active.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo list
- +--------------------------------------+-----------+--------+------------+-------------+------------------------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+-----------+--------+------------+-------------+------------------------------+
- | dbcfef20-0794-4b5e-aa3f-d08dc6086eb6 | instance2 | ACTIVE | - | Running | net3=10.0.3.4; net2=10.0.2.3 |
- +--------------------------------------+-----------+--------+------------+-------------+------------------------------+
-
-Create a floating IP for instance2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-create ext-net2
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | created_at | 2017-01-13T01:45:10Z |
- | description | |
- | fixed_ip_address | |
- | floating_ip_address | 163.3.125.4 |
- | floating_network_id | ae806ecb-fa3e-4b3c-a582-caef3d8cd9b4 |
- | id | e0dcbe62-0023-41a8-a099-a4c4b5285e03 |
- | port_id | |
- | project_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | revision_number | 1 |
- | router_id | |
- | status | DOWN |
- | tenant_id | 532890c765604609a8d2ef6fc8e5f6ef |
- | updated_at | 2017-01-13T01:45:10Z |
- +---------------------+--------------------------------------+
-
-List the ports and find instance2's port in net2 (IP address 10.0.2.3).
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion port-list
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | id | name | mac_address | fixed_ips |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | 185b5185-0254-486c-9d8b- | | fa:16:3e:da:ae:99 | {"subnet_id": "2c8f446f- |
- | 198af4b4d40e | | | ba02-4140-a793-913033aa3580", |
- | | | | "ip_address": "10.0.1.13"} |
- | 248f9072-76d6-405a- | | fa:16:3e:dc:2f:b3 | {"subnet_id": "5ab92c3c-b799-451c- |
- | 8eb5-f0d3475c542d | | | b5d5-b72274fb0fcc", "ip_address": |
- | | | | "10.0.3.7"} |
- | 6b0fe2e0-a236-40db-bcbf- | | fa:16:3e:73:21:6c | {"subnet_id": "356947cf-88e2-408b- |
- | 2f31f7124d83 | | | ab49-7c0e79110a25", "ip_address": |
- | | | | "10.0.2.3"} |
- | ab6dd6f4-b48a-4a3e- | | fa:16:3e:67:03:73 | {"subnet_id": "5ab92c3c-b799-451c- |
- | 9f43-90d0fccc181a | | | b5d5-b72274fb0fcc", "ip_address": |
- | | | | "10.0.3.4"} |
- | 5c0e0e7a-0faf- | | fa:16:3e:7b:11:c6 | |
- | 44c4-a735-c8745faa9920 | | | |
- | d48a8e87-61a0-494b- | | fa:16:3e:c6:8e:c5 | {"subnet_id": "2c8f446f- |
- | bc06-54f7a008ea78 | | | ba02-4140-a793-913033aa3580", |
- | | | | "ip_address": "10.0.1.1"} |
- | ce3a1530-20f4-4760-a451-81e5f939aa | dhcp_port_2c8f446f- | fa:16:3e:e6:32:0f | {"subnet_id": "2c8f446f- |
- | fc | ba02-4140-a793-913033aa3580 | | ba02-4140-a793-913033aa3580", |
- | | | | "ip_address": "10.0.1.2"} |
- | 7925a3cc- | interface_RegionOne_2c8f446f- | fa:16:3e:c5:ad:6f | {"subnet_id": "2c8f446f- |
- | 6c36-4bc3-a798-a6145fed442a | ba02-4140-a793-913033aa3580 | | ba02-4140-a793-913033aa3580", |
- | | | | "ip_address": "10.0.1.3"} |
- | 805b16de- | | fa:16:3e:94:cd:82 | {"subnet_id": "356947cf-88e2-408b- |
- | fbe9-4b54-b891-b39bc2f73a86 | | | ab49-7c0e79110a25", "ip_address": |
- | | | | "10.0.2.1"} |
- | 30243711-d113-42b7-b712-81ca0d7454 | dhcp_port_356947cf-88e2-408b- | fa:16:3e:83:3d:c8 | {"subnet_id": "356947cf-88e2-408b- |
- | 6d | ab49-7c0e79110a25 | | ab49-7c0e79110a25", "ip_address": |
- | | | | "10.0.2.2"} |
- | 27fab5a2-0710-4742-a731-331f6c2150 | interface_RegionTwo_356947cf-88e2 | fa:16:3e:39:0a:f5 | {"subnet_id": "356947cf-88e2-408b- |
- | fa | -408b-ab49-7c0e79110a25 | | ab49-7c0e79110a25", "ip_address": |
- | | | | "10.0.2.6"} |
- | a7d0bae1-51de- | interface_RegionTwo_5ab92c3c-b799 | fa:16:3e:d6:3f:ca | {"subnet_id": "5ab92c3c-b799-451c- |
- | 4b47-9f81-b012e511e4a7 | -451c-b5d5-b72274fb0fcc | | b5d5-b72274fb0fcc", "ip_address": |
- | | | | "10.0.3.11"} |
- | 077c63b6-0184-4bf7-b3aa- | dhcp_port_5ab92c3c-b799-451c- | fa:16:3e:d2:a3:53 | {"subnet_id": "5ab92c3c-b799-451c- |
- | b071de6f39be | b5d5-b72274fb0fcc | | b5d5-b72274fb0fcc", "ip_address": |
- | | | | "10.0.3.2"} |
- | c90be7bc- | interface_RegionOne_5ab92c3c-b799 | fa:16:3e:b6:e4:bc | {"subnet_id": "5ab92c3c-b799-451c- |
- | 31ea-4015-a432-2bef62e343d1 | -451c-b5d5-b72274fb0fcc | | b5d5-b72274fb0fcc", "ip_address": |
- | | | | "10.0.3.9"} |
- | 3053fcb9-b6ad-4a9c-b89e- | bridge_port_532890c765604609a8d2ef | fa:16:3e:fc:d0:fc | {"subnet_id": "53def0ac-59ef- |
- | ffe6aff6523b | 6fc8e5f6ef_0c4faa42-5230-4adc- | | 4c7b-b694-3375598954da", |
- | | bab5-10ee53ebf888 | | "ip_address": "100.0.0.11"} |
- | 5a10c53f-1f8f-43c1-a61c- | bridge_port_532890c765604609a8d2ef | fa:16:3e:dc:f7:4a | {"subnet_id": "53def0ac-59ef- |
- | 6cdbd052985e | 6fc8e5f6ef_cf71a43d-6df1-491d- | | 4c7b-b694-3375598954da", |
- | | 894d-bd2e6620acfc | | "ip_address": "100.0.0.8"} |
- | ce787983-a140-4c53-96d2-71f62e1545 | | fa:16:3e:1a:62:7f | {"subnet_id": "a2eecc16-deb8-42a6 |
- | 3a | | | -a41b-5058847ed20a", "ip_address": |
- | | | | "163.3.124.5"} |
- | 2d9fc640-1858-4c7e-b42c- | | fa:16:3e:00:7c:6e | {"subnet_id": "a2eecc16-deb8-42a6 |
- | d3ed3f338b8a | | | -a41b-5058847ed20a", "ip_address": |
- | | | | "163.3.124.6"} |
- | bfd53cea-6135-4515-ae63-f346125335 | | fa:16:3e:ae:81:6f | {"subnet_id": "9fb32423-95a8-4589 |
- | 27 | | | -b69c-e2955234ae56", "ip_address": |
- | | | | "163.3.125.3"} |
- | 12495d5b-5346-48d0-8ed2-daea6ad42a | | fa:16:3e:d4:83:cc | {"subnet_id": "9fb32423-95a8-4589 |
- | 3a | | | -b69c-e2955234ae56", "ip_address": |
- | | | | "163.3.125.4"} |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
-
-Associate the floating IP to instance2's IP address in net2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-associate e0dcbe62-0023-41a8-a099-a4c4b5285e03 6b0fe2e0-a236-40db-bcbf-2f31f7124d83
- Associated floating IP e0dcbe62-0023-41a8-a099-a4c4b5285e03
-
-Make sure the floating IP association works.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-list
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | id | fixed_ip_address | floating_ip_address | port_id |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | 645f9cd6-d8d4-427a-88fe-770240c96d09 | 10.0.1.13 | 163.3.124.6 | 185b5185-0254-486c-9d8b-198af4b4d40e |
- | e0dcbe62-0023-41a8-a099-a4c4b5285e03 | 10.0.2.3 | 163.3.125.4 | 6b0fe2e0-a236-40db-bcbf-2f31f7124d83 |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
-
-You can also verify the association in RegionTwo.
-
-.. code-block:: console
-
- $ neutron --os-region-name=RegionTwo floatingip-list
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | id | fixed_ip_address | floating_ip_address | port_id |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | b8a6b83a-cc8f-4335-894c-ef71e7504ee1 | 10.0.2.3 | 163.3.125.4 | 6b0fe2e0-a236-40db-bcbf-2f31f7124d83 |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
-
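-If the physical external networks are reachable from your test host, you can
-optionally check north-south connectivity by pinging the floating IPs. This
-assumes the instances' security group allows ingress ICMP; add such a rule
-first if it does not.
-
-.. code-block:: console
-
-   $ ping -c 4 163.3.124.6
-   $ ping -c 4 163.3.125.4
-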
-Instance1 can now ping instance2 through its IP address in net3, and vice
-versa.
-
-Note: not all images bring up the second NIC automatically, so you may need
-to ssh into instance1 or instance2, run ifconfig -a to check whether all
-NICs are present, and bring them up manually if necessary, as shown in the
-example below.
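-
-For example, in a CirrOS guest you could bring up the second NIC and check
-east-west reachability roughly as follows. This is only a sketch: it assumes
-the net3 NIC shows up as eth1 (the name may differ with other images), uses
-the CirrOS cirros-dhcpc helper (use udhcpc or dhclient on other images), and
-uses the net3 addresses listed above (10.0.3.7 for instance1, 10.0.3.4 for
-instance2).
-
-.. code-block:: console
-
-   # inside instance1, assuming eth1 is the net3 NIC
-   $ sudo ifconfig eth1 up
-   $ sudo cirros-dhcpc up eth1
-   $ ping -c 4 10.0.3.4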
diff --git a/doc/source/networking/networking-guide-multiple-ns-with-ew-enabled.rst b/doc/source/networking/networking-guide-multiple-ns-with-ew-enabled.rst
deleted file mode 100644
index a7455301..00000000
--- a/doc/source/networking/networking-guide-multiple-ns-with-ew-enabled.rst
+++ /dev/null
@@ -1,1263 +0,0 @@
-===============================================================
-Multiple North-South gateways with East-West Networking enabled
-===============================================================
-
-The following figure illustrates another typical networking mode.
-In a multi-region cloud deployment, each OpenStack cloud is often required
-to provide its own external network, so that north-south traffic is handled
-locally for the shortest path and/or multiple external networks can be used
-for north-south traffic redundancy of applications, while east-west
-networking among the tenant's networks in different OpenStack clouds is
-still needed.
-
-.. code-block:: console
-
- +-------------------------------+ +------------------------------+
- | RegionOne ext-net1 | | RegionTwo ext-net2 |
- | +-------+ | | +--+---+ |
- | | | | | |
- | +---+-------+ | | +---------------+--+ |
- | | R1 | | | | R2 | |
- | +--+--+-----+ | | +-------+-----+----+ |
- | net1 | | | | | | net3 |
- | +-+-------+--++ | | | | +-++----+----+ |
- | | | | net2 | | net4 | | | |
- | +---+-----+ | ++-+---+--+ | | +-+-----+---++ | +--+------+ |
- | |Instance1| | | | | | | | | |Instance3| |
- | +---------+ | | +---+-----+ | | +-+-------+ | | +---------+ |
- | | | |Instance2| | | |Instance4| | | |
- | | | +---------+ | | +---------+ | | |
- | +----+---+--------+ | bridge-net | +----------+--+-----+ |
- | | R3(1) +---------------------+ R3(2) | |
- | +-----------------+ | | +-------------------+ |
- +-------------------------------+ +------------------------------+
-
-The logical topology to be composed in Tricircle is as follows. R3(1), R3(2)
-and bridge-net together form one logical router R3, which is used only for
-cross-Neutron east-west traffic. North-south traffic of net1 and net2 goes
-through R1, while north-south traffic of net3 and net4 goes through R2.
-
-.. code-block:: console
-
- ext-net1 ext-net2
- +-------+ +--+---+
- | |
- +---+----------+ +---------------+--+
- | R1(RegionOne)| | R2(RegionTwo) |
- +--+--+--------+ +-------+-----+----+
- net1(RegionOne) | | | | net3(RegionTwo)
- +-+-------+--++ | net4 | +-++----+----+
- | | | net2(RegionOne) (RegionTwo) | | |
- +---+-----+ | ++-+---+--+ +-+-----+---++ | +--+------+
- |Instance1| | | | | | | |Instance3|
- +---------+ | | +---+-----+ +-+-------+ | | +---------+
- | | |Instance2| |Instance4| | |
- | | +---------+ +---------+ | |
- +----+---+-----------------------------------------+--+-----+
- | R3(RegionOne,RegionTwo) |
- +-----------------------------------------------------------+
-
-
-How to create this network topology
-===================================
-
-Create external network ext-net1, which will be located in RegionOne.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type flat --provider:physical_network extern --router:external --availability-zone-hint RegionOne ext-net1
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | ff7375f3-5bc6-4349-b097-72e42a90648a |
- | name | ext-net1 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | provider:network_type | flat |
- | provider:physical_network | extern |
- | provider:segmentation_id | |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- +---------------------------+--------------------------------------+
-
-Create a subnet in ext-net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create --name ext-subnet1 --disable-dhcp ext-net1 163.3.3.0/24
- +-------------------+----------------------------------------------+
- | Field | Value |
- +-------------------+----------------------------------------------+
- | allocation_pools | {"start": "163.3.3.2", "end": "163.3.3.254"} |
- | cidr | 163.3.3.0/24 |
- | created_at | 2017-04-19T06:04:07Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | False |
- | gateway_ip | 163.3.3.1 |
- | host_routes | |
- | id | 3d0cfacc-ce90-4924-94b9-a95d567568b9 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | ext-subnet1 |
- | network_id | ff7375f3-5bc6-4349-b097-72e42a90648a |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tags | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated_at | 2017-04-19T06:04:07Z |
- +-------------------+----------------------------------------------+
-
-Create router R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-create --availability-zone-hint RegionOne R1
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | availability_zones | |
- | created_at | 2017-04-19T06:04:37Z |
- | description | |
- | distributed | False |
- | external_gateway_info | |
- | id | a665d383-bb0b-478a-b4c7-d0b316a01806 |
- | name | R1 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 4 |
- | status | ACTIVE |
- | tags | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated_at | 2017-04-19T06:04:37Z |
- +-------------------------+--------------------------------------+
-
-Set the router gateway to ext-net1 for R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-gateway-set R1 ext-net1
- Set gateway for router R1
-
- $ neutron --os-region-name=CentralRegion router-show R1
- +-------------------------+----------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-------------------------+----------------------------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | availability_zones | |
- | created_at | 2017-04-19T06:04:37Z |
- | description | |
- | distributed | False |
- | external_gateway_info | {"network_id": "ff7375f3-5bc6-4349-b097-72e42a90648a", "external_fixed_ips": [{"subnet_id": "3d0cfacc- |
- | | ce90-4924-94b9-a95d567568b9", "ip_address": "163.3.3.7"}]} |
- | id | a665d383-bb0b-478a-b4c7-d0b316a01806 |
- | name | R1 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 6 |
- | status | ACTIVE |
- | tags | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated_at | 2017-04-19T06:05:11Z |
- +-------------------------+----------------------------------------------------------------------------------------------------------+
-
-Create local network net1, which will reside in RegionOne. You can use
-RegionOne as the value of availability-zone-hint to create a local network.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --availability-zone-hint RegionOne net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | bbc5527d-25a5-4ea1-9ef6-47e7dca9029b |
- | name | net1 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | provider:network_type | local |
- | provider:physical_network | |
- | provider:segmentation_id | |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- +---------------------------+--------------------------------------+
-
-Create a subnet in net1.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion subnet create --network=net1 --subnet-range 10.0.1.0/24 subnet-net1
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 10.0.1.2-10.0.1.254 |
- | cidr | 10.0.1.0/24 |
- | created_at | 2017-04-19T06:16:32Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.1.1 |
- | host_routes | |
- | id | b501197b-53c8-44a6-8e4a-ee36260da239 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-net1 |
- | network_id | bbc5527d-25a5-4ea1-9ef6-47e7dca9029b |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 2 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | updated_at | 2017-04-19T06:16:32Z |
- +-------------------+--------------------------------------+
-
-Add this subnet to router R1.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion router add subnet R1 subnet-net1
-
-Create local network net2, which will reside in RegionOne. You can use
-RegionOne as the value of availability-zone-hint to create a local network.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion network create --availability-zone-hint=RegionOne net2
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | RegionOne |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | 3779cfd5-790c-43a7-9231-ed473789dc93 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | mtu | None |
- | name | net2 |
- | port_security_enabled | False |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | provider:network_type | local |
- | provider:physical_network | None |
- | provider:segmentation_id | None |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
-Create a subnet in net2.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion subnet create --network=net2 --subnet-range 10.0.2.0/24 subnet-net2
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 10.0.2.2-10.0.2.254 |
- | cidr | 10.0.2.0/24 |
- | created_at | 2017-04-19T06:28:19Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.2.1 |
- | host_routes | |
- | id | d0222001-e80f-49c3-9f0a-7f3688843e66 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-net2 |
- | network_id | 3779cfd5-790c-43a7-9231-ed473789dc93 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 2 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | updated_at | 2017-04-19T06:28:19Z |
- +-------------------+--------------------------------------+
-
-Add this subnet to router R1.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion router add subnet R1 subnet-net2
-
-Create external network ext-net2, which will be located in RegionTwo.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type flat --provider:physical_network extern --router:external --availability-zone-hint RegionTwo ext-net2
-
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | id | 6f0f139d-6857-45f5-925d-419b5f896c2a |
- | name | ext-net2 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | provider:network_type | flat |
- | provider:physical_network | extern |
- | provider:segmentation_id | |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- +---------------------------+--------------------------------------+
-
-Create a subnet in ext-net2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create --name ext-subnet2 --disable-dhcp ext-net2 163.3.5.0/24
- +-------------------+----------------------------------------------+
- | Field | Value |
- +-------------------+----------------------------------------------+
- | allocation_pools | {"start": "163.3.5.2", "end": "163.3.5.254"} |
- | cidr | 163.3.5.0/24 |
- | created_at | 2017-04-19T07:00:01Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | False |
- | gateway_ip | 163.3.5.1 |
- | host_routes | |
- | id | 7680acd4-db7c-44f0-bf7d-6f76e2de5778 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | ext-subnet2 |
- | network_id | 6f0f139d-6857-45f5-925d-419b5f896c2a |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tags | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated_at | 2017-04-19T07:00:01Z |
- +-------------------+----------------------------------------------+
-
-Create router R2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-create --availability-zone-hint RegionTwo R2
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | availability_zones | |
- | created_at | 2017-04-19T07:00:31Z |
- | description | |
- | distributed | False |
- | external_gateway_info | |
- | id | 643cc4ec-cdd5-4b14-bcc6-328b86035d50 |
- | name | R2 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 4 |
- | status | ACTIVE |
- | tags | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated_at | 2017-04-19T07:00:31Z |
- +-------------------------+--------------------------------------+
-
-Set the router gateway to ext-net2 for R2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-gateway-set R2 ext-net2
- Set gateway for router R2
-
- $ neutron --os-region-name=CentralRegion router-show R2
- +-------------------------+----------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-------------------------+----------------------------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | availability_zones | |
- | created_at | 2017-04-19T07:00:31Z |
- | description | |
- | distributed | False |
- | external_gateway_info | {"network_id": "6f0f139d-6857-45f5-925d-419b5f896c2a", "external_fixed_ips": [{"subnet_id": "7680acd4 |
- | | -db7c-44f0-bf7d-6f76e2de5778", "ip_address": "163.3.5.10"}]} |
- | id | 643cc4ec-cdd5-4b14-bcc6-328b86035d50 |
- | name | R2 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 6 |
- | status | ACTIVE |
- | tags | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated_at | 2017-04-19T07:00:54Z |
- +-------------------------+----------------------------------------------------------------------------------------------------------+
-
-Create local network net3, which will reside in RegionTwo. You can use
-RegionTwo as the value of availability-zone-hint to create a local network.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion network create --availability-zone-hint=RegionTwo net3
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | RegionTwo |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | a914edd9-629e-41bd-98ef-ec52736aeaa2 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | mtu | None |
- | name | net3 |
- | port_security_enabled | False |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | provider:network_type | local |
- | provider:physical_network | None |
- | provider:segmentation_id | None |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
-Create a subnet in net3.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion subnet create --network=net3 --subnet-range 10.0.3.0/24 subnet-net3
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 10.0.3.2-10.0.3.254 |
- | cidr | 10.0.3.0/24 |
- | created_at | 2017-04-19T07:15:46Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.3.1 |
- | host_routes | |
- | id | a2582af0-ab39-43e7-8b23-f2911804633b |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-net3 |
- | network_id | a914edd9-629e-41bd-98ef-ec52736aeaa2 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 2 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | updated_at | 2017-04-19T07:15:46Z |
- +-------------------+--------------------------------------+
-
-Add this subnet to router R2.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion router add subnet R2 subnet-net3
-
-
-Create local network net4, which will reside in RegionTwo. You can use
-RegionTwo as the value of availability-zone-hint to create a local network.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion network create --availability-zone-hint=RegionTwo net4
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | RegionTwo |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | 60c2e42a-3875-4d11-9850-59148aee24c2 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | mtu | None |
- | name | net4 |
- | port_security_enabled | False |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | provider:network_type | local |
- | provider:physical_network | None |
- | provider:segmentation_id | None |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
-
-Create a subnet in net4.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion subnet create --network=net4 --subnet-range 10.0.4.0/24 subnet-net4
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 10.0.4.2-10.0.4.254 |
- | cidr | 10.0.4.0/24 |
- | created_at | 2017-04-19T07:19:25Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.4.1 |
- | host_routes | |
- | id | 5a76080f-efe5-4890-855e-56bd9068c6cf |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-net4 |
- | network_id | 60c2e42a-3875-4d11-9850-59148aee24c2 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 2 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | updated_at | 2017-04-19T07:19:25Z |
- +-------------------+--------------------------------------+
-
-Add this subnet to router R2.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion router add subnet R2 subnet-net4
-
-Create router R3 in RegionOne and RegionTwo.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-create --availability-zone-hint RegionOne --availability-zone-hint RegionTwo R3
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | | RegionTwo |
- | availability_zones | |
- | created_at | 2017-04-19T07:21:37Z |
- | description | |
- | distributed | False |
- | external_gateway_info | |
- | id | 01fb7cf9-7b24-486f-8170-0282ebe2fc06 |
- | name | R3 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 4 |
- | status | ACTIVE |
- | tags | |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated_at | 2017-04-19T07:21:37Z |
- +-------------------------+--------------------------------------+
-
-Create a port in net1 and attach net1 to R3 using this port. A separate port
-is used here, instead of adding the subnet directly, because the subnet's
-gateway IP is already occupied by R1.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion port create --network=net1 net1-R3-interface
- +-----------------------+-------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+-------------------------------------------------------------------------+
- | admin_state_up | UP |
- | allowed_address_pairs | None |
- | binding_host_id | None |
- | binding_profile | None |
- | binding_vif_details | None |
- | binding_vif_type | None |
- | binding_vnic_type | None |
- | created_at | 2017-04-19T07:22:40Z |
- | description | |
- | device_id | |
- | device_owner | |
- | dns_assignment | None |
- | dns_name | None |
- | extra_dhcp_opts | |
- | fixed_ips | ip_address='10.0.1.8', subnet_id='b501197b-53c8-44a6-8e4a-ee36260da239' |
- | id | 53b28b73-9aaf-4432-9c11-24243a92c931 |
- | ip_address | None |
- | mac_address | fa:16:3e:1e:c7:fe |
- | name | net1-R3-interface |
- | network_id | bbc5527d-25a5-4ea1-9ef6-47e7dca9029b |
- | option_name | None |
- | option_value | None |
- | port_security_enabled | False |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | qos_policy_id | None |
- | revision_number | 3 |
- | security_groups | dee6ea7c-eec5-426a-9385-c40d00565a3a |
- | status | ACTIVE |
- | subnet_id | None |
- | updated_at | 2017-04-19T07:22:40Z |
- +-----------------------+-------------------------------------------------------------------------+
-
- $ openstack --os-region-name=CentralRegion router add port R3 net1-R3-interface
-
-Create a port in net2 and attach net2 to R3 using this port.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion port create --network=net2 net2-R3-interface
- +-----------------------+-------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+-------------------------------------------------------------------------+
- | admin_state_up | UP |
- | allowed_address_pairs | None |
- | binding_host_id | None |
- | binding_profile | None |
- | binding_vif_details | None |
- | binding_vif_type | None |
- | binding_vnic_type | None |
- | created_at | 2017-04-19T07:24:07Z |
- | description | |
- | device_id | |
- | device_owner | |
- | dns_assignment | None |
- | dns_name | None |
- | extra_dhcp_opts | |
- | fixed_ips | ip_address='10.0.2.5', subnet_id='d0222001-e80f-49c3-9f0a-7f3688843e66' |
- | id | a0d7a00b-db0b-48e8-9ec4-62a7aa15de98 |
- | ip_address | None |
- | mac_address | fa:16:3e:1c:e4:10 |
- | name | net2-R3-interface |
- | network_id | 3779cfd5-790c-43a7-9231-ed473789dc93 |
- | option_name | None |
- | option_value | None |
- | port_security_enabled | False |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | qos_policy_id | None |
- | revision_number | 3 |
- | security_groups | dee6ea7c-eec5-426a-9385-c40d00565a3a |
- | status | ACTIVE |
- | subnet_id | None |
- | updated_at | 2017-04-19T07:24:07Z |
- +-----------------------+-------------------------------------------------------------------------+
-
- $ openstack --os-region-name=CentralRegion router add port R3 net2-R3-interface
-
-Create a port in net3 and attach net3 to R3 using this port.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion port create --network=net3 net3-R3-interface
- +-----------------------+--------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+--------------------------------------------------------------------------+
- | admin_state_up | UP |
- | allowed_address_pairs | None |
- | binding_host_id | None |
- | binding_profile | None |
- | binding_vif_details | None |
- | binding_vif_type | None |
- | binding_vnic_type | None |
- | created_at | 2017-04-19T07:25:21Z |
- | description | |
- | device_id | |
- | device_owner | |
- | dns_assignment | None |
- | dns_name | None |
- | extra_dhcp_opts | |
- | fixed_ips | ip_address='10.0.3.11', subnet_id='a2582af0-ab39-43e7-8b23-f2911804633b' |
- | id | 95a73056-e75c-46cf-911e-a979bc46f2c4 |
- | ip_address | None |
- | mac_address | fa:16:3e:0d:a3:be |
- | name | net3-R3-interface |
- | network_id | a914edd9-629e-41bd-98ef-ec52736aeaa2 |
- | option_name | None |
- | option_value | None |
- | port_security_enabled | False |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | qos_policy_id | None |
- | revision_number | 3 |
- | security_groups | dee6ea7c-eec5-426a-9385-c40d00565a3a |
- | status | ACTIVE |
- | subnet_id | None |
- | updated_at | 2017-04-19T07:25:21Z |
- +-----------------------+--------------------------------------------------------------------------+
-
- $ openstack --os-region-name=CentralRegion router add port R3 net3-R3-interface
-
-Create port in net4 and attach net4 to R3 using this port.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion port create --network=net4 net4-R3-interface
- +-----------------------+-------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+-------------------------------------------------------------------------+
- | admin_state_up | UP |
- | allowed_address_pairs | None |
- | binding_host_id | None |
- | binding_profile | None |
- | binding_vif_details | None |
- | binding_vif_type | None |
- | binding_vnic_type | None |
- | created_at | 2017-04-19T07:26:18Z |
- | description | |
- | device_id | |
- | device_owner | |
- | dns_assignment | None |
- | dns_name | None |
- | extra_dhcp_opts | |
- | fixed_ips | ip_address='10.0.4.4', subnet_id='5a76080f-efe5-4890-855e-56bd9068c6cf' |
- | id | 2d29593b-ad4a-4904-9053-9dbdddfcfc05 |
- | ip_address | None |
- | mac_address | fa:16:3e:df:4c:d0 |
- | name | net4-R3-interface |
- | network_id | 60c2e42a-3875-4d11-9850-59148aee24c2 |
- | option_name | None |
- | option_value | None |
- | port_security_enabled | False |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | qos_policy_id | None |
- | revision_number | 3 |
- | security_groups | dee6ea7c-eec5-426a-9385-c40d00565a3a |
- | status | ACTIVE |
- | subnet_id | None |
- | updated_at | 2017-04-19T07:26:18Z |
- +-----------------------+-------------------------------------------------------------------------+
-
- $ openstack --os-region-name=CentralRegion router add port R3 net4-R3-interface
-
-Now the networking topology has been composed. Boot instances in the different networks.
-
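-Before booting, you can optionally double-check that R3 now has the four
-interfaces attached; a quick way to do so (the exact fields shown depend on
-the client version) is:
-
-.. code-block:: console
-
-    $ openstack --os-region-name=CentralRegion router show R3
-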
-List the available images in RegionOne.
-
-.. code-block:: console
-
- $ glance --os-region-name=RegionOne image-list
-
- +--------------------------------------+---------------------------------+
- | ID | Name |
- +--------------------------------------+---------------------------------+
- | 1f87a3d9-9de1-47c9-bae3-9d1c02ec6ea1 | cirros-0.3.4-x86_64-uec |
- | be37ca60-aaa1-4b6f-854e-1610be8fc32a | cirros-0.3.4-x86_64-uec-kernel |
- | ea820854-2655-4aff-b6b3-8ca234bb8c85 | cirros-0.3.4-x86_64-uec-ramdisk |
- +--------------------------------------+---------------------------------+
-
-List the available flavors in RegionOne.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne flavor-list
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
- | 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
- | 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
- | c1 | cirros256 | 256 | 0 | 0 | | 1 | 1.0 | True |
- | d1 | ds512M | 512 | 5 | 0 | | 1 | 1.0 | True |
- | d2 | ds1G | 1024 | 10 | 0 | | 1 | 1.0 | True |
- | d3 | ds2G | 2048 | 10 | 0 | | 2 | 1.0 | True |
- | d4 | ds4G | 4096 | 20 | 0 | | 4 | 1.0 | True |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
-
-Boot instance1 in RegionOne, and connect this instance to net1.
-
-.. code-block:: console
-
- $ regionone_image_id=$(openstack --os-region-name=RegionOne image list | awk 'NR==4 {print $2}')
- $ net1_id=$(openstack --os-region-name=CentralRegion network show net1 -f value -c id)
- $ nova --os-region-name=RegionOne boot --flavor 1 --image $regionone_image_id --nic net-id=$net1_id instance1
- +--------------------------------------+----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance1 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | be37ca60-aaa1-4b6f-854e-1610be8fc32a |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | ea820854-2655-4aff-b6b3-8ca234bb8c85 |
- | OS-EXT-SRV-ATTR:reservation_id | r-2dutlvrz |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | u9D7s45DxcaP |
- | config_drive | |
- | created | 2017-04-19T07:43:27Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | 94d25a05-81e7-4c71-bcf5-5953a225574a |
- | image | cirros-0.3.4-x86_64-uec (1f87a3d9-9de1-47c9-bae3-9d1c02ec6ea1) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance1 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated | 2017-04-19T07:43:27Z |
- | user_id | 76ae1ba819994f37a0ca2563641421da |
- +--------------------------------------+----------------------------------------------------------------+
-
-Boot instance2 in RegionOne, and connect this instance to net2.
-
-.. code-block:: console
-
- $ regionone_image_id=$(openstack --os-region-name=RegionOne image list | awk 'NR==4 {print $2}')
- $ net2_id=$(openstack --os-region-name=CentralRegion network show net2 -f value -c id)
- $ nova --os-region-name=RegionOne boot --flavor 1 --image $regionone_image_id --nic net-id=$net2_id instance2
- +--------------------------------------+----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance2 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | be37ca60-aaa1-4b6f-854e-1610be8fc32a |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | ea820854-2655-4aff-b6b3-8ca234bb8c85 |
- | OS-EXT-SRV-ATTR:reservation_id | r-n0qb1dot |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | YSB6w9Yb6dZb |
- | config_drive | |
- | created | 2017-04-19T07:45:36Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | 2d53907a-8de9-4c8f-a330-28e5057e1ce5 |
- | image | cirros-0.3.4-x86_64-uec (1f87a3d9-9de1-47c9-bae3-9d1c02ec6ea1) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance2 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated | 2017-04-19T07:45:35Z |
- | user_id | 76ae1ba819994f37a0ca2563641421da |
- +--------------------------------------+----------------------------------------------------------------+
-
-List the available images in RegionTwo.
-
-.. code-block:: console
-
- $ glance --os-region-name=RegionTwo image-list
- +--------------------------------------+--------------------------+
- | ID | Name |
- +--------------------------------------+--------------------------+
- | f5100ea6-f4c9-4e79-b5fc-96a4b6c6dcd2 | cirros-0.3.5-x86_64-disk |
- +--------------------------------------+--------------------------+
-
-List the available flavors in RegionTwo.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo flavor-list
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
- | 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
- | 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
- | c1 | cirros256 | 256 | 0 | 0 | | 1 | 1.0 | True |
- | d1 | ds512M | 512 | 5 | 0 | | 1 | 1.0 | True |
- | d2 | ds1G | 1024 | 10 | 0 | | 1 | 1.0 | True |
- | d3 | ds2G | 2048 | 10 | 0 | | 2 | 1.0 | True |
- | d4 | ds4G | 4096 | 20 | 0 | | 4 | 1.0 | True |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
-
-Boot instance3 in RegionTwo, and connect this instance to net3.
-
-.. code-block:: console
-
- $ regiontwo_image_id=$(openstack --os-region-name=RegionTwo image list | awk 'NR==4 {print $2}')
- $ net3_id=$(openstack --os-region-name=CentralRegion network show net3 -f value -c id)
- $ nova --os-region-name=RegionTwo boot --flavor 1 --image $regiontwo_image_id --nic net-id=$net3_id instance3
- +--------------------------------------+-----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+-----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance3 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | |
- | OS-EXT-SRV-ATTR:reservation_id | r-3tokqjyn |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | 6WPPfJg2uyp4 |
- | config_drive | |
- | created | 2017-04-19T07:57:17Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | d60a0fd6-15d7-4220-92e6-4a5b71e10f34 |
- | image | cirros-0.3.5-x86_64-disk (f5100ea6-f4c9-4e79-b5fc-96a4b6c6dcd2) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance3 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated | 2017-04-19T07:57:17Z |
- | user_id | 76ae1ba819994f37a0ca2563641421da |
- +--------------------------------------+-----------------------------------------------------------------+
-
-Boot instance4 in RegionTwo, and connect this instance to net4.
-
-.. code-block:: console
-
- $ regiontwo_image_id=$(openstack --os-region-name=RegionTwo image list | awk 'NR==4 {print $2}')
- $ net4_id=$(openstack --os-region-name=CentralRegion network show net4 -f value -c id)
- $ nova --os-region-name=RegionTwo boot --flavor 1 --image $regiontwo_image_id --nic net-id=$net4_id instance4
- +--------------------------------------+-----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+-----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance4 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | |
- | OS-EXT-SRV-ATTR:reservation_id | r-d0i6qz01 |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | QzU8ttUdiTEk |
- | config_drive | |
- | created | 2017-04-19T07:57:47Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | 8746772e-889b-4132-9d6b-9fb44350f336 |
- | image | cirros-0.3.5-x86_64-disk (f5100ea6-f4c9-4e79-b5fc-96a4b6c6dcd2) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance4 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | a79642b4c1674be1b306d8c436d07793 |
- | updated | 2017-04-19T07:57:47Z |
- | user_id | 76ae1ba819994f37a0ca2563641421da |
- +--------------------------------------+-----------------------------------------------------------------+
-
-Check to see if all instances are booted successfully.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne list
- +--------------------------------------+-----------+--------+------------+-------------+---------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+-----------+--------+------------+-------------+---------------+
- | 94d25a05-81e7-4c71-bcf5-5953a225574a | instance1 | ACTIVE | - | Running | net1=10.0.1.7 |
- | 2d53907a-8de9-4c8f-a330-28e5057e1ce5 | instance2 | ACTIVE | - | Running | net2=10.0.2.9 |
- +--------------------------------------+-----------+--------+------------+-------------+---------------+
-
- $ nova --os-region-name=RegionTwo list
- +--------------------------------------+-----------+--------+------------+-------------+----------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+-----------+--------+------------+-------------+----------------+
- | d60a0fd6-15d7-4220-92e6-4a5b71e10f34 | instance3 | ACTIVE | - | Running | net3=10.0.3.6 |
- | 8746772e-889b-4132-9d6b-9fb44350f336 | instance4 | ACTIVE | - | Running | net4=10.0.4.14 |
- +--------------------------------------+-----------+--------+------------+-------------+----------------+
-
-Check to see if the east-west routes were set correctly.
-
-.. code-block:: console
-
- $ openstack --os-region-name=RegionOne subnet show subnet-net1
- +-------------------+-----------------------------------------------+
- | Field | Value |
- +-------------------+-----------------------------------------------+
- | allocation_pools | 10.0.1.2-10.0.1.254 |
- | cidr | 10.0.1.0/24 |
- | created_at | 2017-04-19T06:24:11Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.1.1 |
- | host_routes | destination='10.0.3.0/24', gateway='10.0.1.6' |
- | | destination='10.0.4.0/24', gateway='10.0.1.6' |
- | id | b501197b-53c8-44a6-8e4a-ee36260da239 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-net1 |
- | network_id | bbc5527d-25a5-4ea1-9ef6-47e7dca9029b |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 11 |
- | segment_id | None |
- | service_types | |
- | subnetpool_id | None |
- | updated_at | 2017-04-19T07:58:25Z |
- +-------------------+-----------------------------------------------+
-
- $ openstack --os-region-name=RegionOne subnet show subnet-net2
- +-------------------+------------------------------------------------+
- | Field | Value |
- +-------------------+------------------------------------------------+
- | allocation_pools | 10.0.2.2-10.0.2.254 |
- | cidr | 10.0.2.0/24 |
- | created_at | 2017-04-19T06:29:50Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.2.1 |
- | host_routes | destination='10.0.3.0/24', gateway='10.0.2.12' |
- | | destination='10.0.4.0/24', gateway='10.0.2.12' |
- | id | d0222001-e80f-49c3-9f0a-7f3688843e66 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-net2 |
- | network_id | 3779cfd5-790c-43a7-9231-ed473789dc93 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 10 |
- | segment_id | None |
- | service_types | |
- | subnetpool_id | None |
- | updated_at | 2017-04-19T07:58:28Z |
- +-------------------+------------------------------------------------+
-
- $ openstack --os-region-name=RegionTwo subnet show subnet-net3
- +-------------------+-----------------------------------------------+
- | Field | Value |
- +-------------------+-----------------------------------------------+
- | allocation_pools | 10.0.3.2-10.0.3.254 |
- | cidr | 10.0.3.0/24 |
- | created_at | 2017-04-19T07:17:46Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.3.1 |
- | host_routes | destination='10.0.1.0/24', gateway='10.0.3.4' |
- | | destination='10.0.2.0/24', gateway='10.0.3.4' |
- | id | a2582af0-ab39-43e7-8b23-f2911804633b |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-net3 |
- | network_id | a914edd9-629e-41bd-98ef-ec52736aeaa2 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 9 |
- | segment_id | None |
- | service_types | |
- | subnetpool_id | None |
- | updated_at | 2017-04-19T07:58:30Z |
- +-------------------+-----------------------------------------------+
-
- $ openstack --os-region-name=RegionTwo subnet show subnet-net4
- +-------------------+------------------------------------------------+
- | Field | Value |
- +-------------------+------------------------------------------------+
- | allocation_pools | 10.0.4.2-10.0.4.254 |
- | cidr | 10.0.4.0/24 |
- | created_at | 2017-04-19T07:20:39Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.4.1 |
- | host_routes | destination='10.0.1.0/24', gateway='10.0.4.13' |
- | | destination='10.0.2.0/24', gateway='10.0.4.13' |
- | id | 5a76080f-efe5-4890-855e-56bd9068c6cf |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-net4 |
- | network_id | 60c2e42a-3875-4d11-9850-59148aee24c2 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 8 |
- | segment_id | None |
- | service_types | |
- | subnetpool_id | None |
- | updated_at | 2017-04-19T07:58:32Z |
- +-------------------+------------------------------------------------+
-
-
-Create a floating IP and associate it with instance1. The port ID in local
-Neutron is the same as that in central Neutron. Because there is no Nova in
-CentralRegion, the command that gets the port ID and stores it in an
-environment variable is issued to RegionOne or RegionTwo. You can also get
-the port ID manually if you issue the command to CentralRegion without
-specifying --server.
-
-.. code-block:: console
-
- $ instance1_net1_port_id=$(openstack --os-region-name=RegionOne port list --network net1 --server instance1 -f value -c ID)
- $ openstack --os-region-name=CentralRegion floating ip create --port=$instance1_net1_port_id ext-net1
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | created_at | 2017-04-19T08:29:45Z |
- | description | |
- | fixed_ip_address | 10.0.1.7 |
- | floating_ip_address | 163.3.3.13 |
- | floating_network_id | ff7375f3-5bc6-4349-b097-72e42a90648a |
- | id | 7af7503a-5b9b-441c-bf90-bdce47cf1e16 |
- | name | None |
- | port_id | 588e9a9c-67f8-47b5-ab3b-8f5f93f00c15 |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 1 |
- | router_id | a665d383-bb0b-478a-b4c7-d0b316a01806 |
- | status | DOWN |
- | updated_at | 2017-04-19T08:29:45Z |
- +---------------------+--------------------------------------+
-
- $ instance3_net3_port_id=$(openstack --os-region-name=RegionTwo port list --network net3 --server instance3 -f value -c ID)
- $ openstack --os-region-name=CentralRegion floating ip create --port=$instance3_net3_port_id ext-net2
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | created_at | 2017-04-19T08:32:09Z |
- | description | |
- | fixed_ip_address | 10.0.3.6 |
- | floating_ip_address | 163.3.5.4 |
- | floating_network_id | 6f0f139d-6857-45f5-925d-419b5f896c2a |
- | id | f84c92eb-a724-4658-bae9-09e140f59705 |
- | name | None |
- | port_id | 72d062e1-8a30-4c3e-a2f2-6414200c135a |
- | project_id | a79642b4c1674be1b306d8c436d07793 |
- | revision_number | 1 |
- | router_id | 643cc4ec-cdd5-4b14-bcc6-328b86035d50 |
- | status | DOWN |
- | updated_at | 2017-04-19T08:32:09Z |
- +---------------------+--------------------------------------+
-
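-If you prefer the manual approach mentioned above, you can look the port up in
-CentralRegion by its fixed IP instead of using --server. A minimal sketch,
-assuming the client supports the --fixed-ip filter (the address below is the
-one assigned to instance1 in this example and is only illustrative):
-
-.. code-block:: console
-
-    $ openstack --os-region-name=CentralRegion port list --network net1 --fixed-ip ip-address=10.0.1.7 -f value -c ID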
-
-Create a security group and add rules to it that allow ping and SSH to instance1 and instance3.
-
-.. code-block:: console
-
- $ openstack --os-region-name=CentralRegion security group create icmpssh
- $ openstack --os-region-name=CentralRegion security group rule create --protocol icmp icmpssh
- $ openstack --os-region-name=CentralRegion security group rule create --protocol tcp --dst-port 22:22 icmpssh
- $ neutron --os-region-name=CentralRegion port-update --security-group=icmpssh $instance1_net1_port_id
- $ neutron --os-region-name=CentralRegion port-update --security-group=icmpssh $instance3_net3_port_id
-
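-The same change can be made with the unified OpenStack client; a sketch,
-assuming a recent python-openstackclient where ``port set`` is available
-(note that ``port set --security-group`` appends the group to the port's
-existing list rather than replacing it):
-
-.. code-block:: console
-
-    $ openstack --os-region-name=CentralRegion port set --security-group icmpssh $instance1_net1_port_id
-    $ openstack --os-region-name=CentralRegion port set --security-group icmpssh $instance3_net3_port_id
-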
-N-S and E-W networking now work. Use network namespaces to run the tests.
-Because instance1 and instance3 are allowed by security group rules for SSH
-and ping, the two instances, although in different subnets of different
-clouds, can ping each other. And if you check the routes in each instance,
-the default N-S gateway is R1 or R2.
-
-.. code-block:: console
-
- $ openstack --os-region-name=RegionOne subnet list
- +--------------------------------------+--------------------------------------+--------------------------------------+--------------+
- | ID | Name | Network | Subnet |
- +--------------------------------------+--------------------------------------+--------------------------------------+--------------+
- | 3d0cfacc-ce90-4924-94b9-a95d567568b9 | 3d0cfacc-ce90-4924-94b9-a95d567568b9 | ff7375f3-5bc6-4349-b097-72e42a90648a | 163.3.3.0/24 |
- | b501197b-53c8-44a6-8e4a-ee36260da239 | subnet-net1 | bbc5527d-25a5-4ea1-9ef6-47e7dca9029b | 10.0.1.0/24 |
- | d0222001-e80f-49c3-9f0a-7f3688843e66 | subnet-net2 | 3779cfd5-790c-43a7-9231-ed473789dc93 | 10.0.2.0/24 |
- | d0dc980f-e21e-4d97-b397-6e9067ca3ee4 | d0dc980f-e21e-4d97-b397-6e9067ca3ee4 | 73633eb0-7771-410a-82c1-942f5c7a9726 | 100.0.0.0/24 |
- +--------------------------------------+--------------------------------------+--------------------------------------+--------------+
-
- $ ip netns
- qrouter-fb892c30-6368-4595-9194-fa8933bc65cc
- qdhcp-3779cfd5-790c-43a7-9231-ed473789dc93
- qdhcp-bbc5527d-25a5-4ea1-9ef6-47e7dca9029b
- qrouter-7f8aa44e-df15-4737-92be-58fde99e14c6
-
- $ sudo ip netns exec qdhcp-bbc5527d-25a5-4ea1-9ef6-47e7dca9029b ping 10.0.1.7
-
- $ sudo ip netns exec qdhcp-bbc5527d-25a5-4ea1-9ef6-47e7dca9029b ssh cirros@10.0.1.7
-
- In instance1:
-
- $ route
- Kernel IP routing table
- Destination Gateway Genmask Flags Metric Ref Use Iface
- default host-10-0-1-1.o 0.0.0.0 UG 0 0 0 eth0
- 10.0.1.0 * 255.255.255.0 U 0 0 0 eth0
- 10.0.3.0 host-10-0-1-6.o 255.255.255.0 UG 0 0 0 eth0
- 10.0.4.0 host-10-0-1-6.o 255.255.255.0 UG 0 0 0 eth0
- 169.254.169.254 host-10-0-1-1.o 255.255.255.255 UGH 0 0 0 eth0
-
- $ ping 10.0.3.6
-
- $ ssh cirros@10.0.3.6
-
- In instance3:
-
- $ ping 10.0.1.7
diff --git a/doc/source/networking/networking-guide-newL3-using-segment.rst b/doc/source/networking/networking-guide-newL3-using-segment.rst
deleted file mode 100644
index 84ee8b11..00000000
--- a/doc/source/networking/networking-guide-newL3-using-segment.rst
+++ /dev/null
@@ -1,228 +0,0 @@
-================================================================
-How to use the new layer-3 networking model for multi-NS-with-EW
-================================================================
-
-The following figure illustrates the new layer-3 networking model for multi-NS-with-EW::
-
- ext-net1 ext-net2
-
- +---+---+ +---+---+
- | |
- +---+---+ +---+---+
- | R1 | | R2 |
- +---+---+ +---+---+
- | |
- +---+--------------------+---+
- | bridge-net |
- +-------------+--------------+
- |
- |
- +-------------+--------------+
- | R3 |
- +---+--------------------+---+
- | net1 net2 |
- +---+-----+-+ +---+-+---+
- | |
- +---------+-+ +--+--------+
- | Instance1 | | Instance2 |
- +-----------+ +-----------+
-
- Figure 1 Logical topology in central Neutron
-
-As shown in Fig. 1, each external network (i.e., ext-net1, ext-net2) connects to a router (i.e., R1, R2).
-These routers take charge of routing NS traffic and connect with the logical (non-local) router through the
-bridge network. This is the networking model described in the spec [1]_, where a routed network is used to
-manage the external networks in central Neutron.
-
-When we create a logical router (i.e., R3) in central Neutron, Tricircle creates a local router in each region.
-The networks (i.e., net1, net2) are then attached to the central router (i.e., R3), which takes charge of all
-traffic (both NS and EW).
-
-For EW traffic from net1 to net2, R3 (in net1's region) forwards packets to the
-interface of net2 in the router namespace of R3 (in net2's region). For NS traffic,
-R3 forwards packets to the interface of an available local router (i.e., R1 or R2)
-which is attached to the real external network.
-
-More details are in the spec A New Layer-3 Networking multi-NS-with-EW-enabled [1]_.
-
-How to use segment for managing multiple networks in this network topology
-==========================================================================
-
-1. Enable the enable_l3_route_network option defined in /tricircle/network/central_plugin.py
-
-.. code-block:: console
-
- cfg.BoolOpt('enable_l3_route_network',
- default=True,
- help=_('Whether using new l3 networking model. When it is'
- 'set True, Tricircle will create a local router'
- 'automatically after creating an external network'))
-
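-Rather than editing the plugin source, the option can usually be set in the
-central Neutron server configuration file instead. A minimal sketch, assuming
-(as in the Tricircle DevStack setup) that the central plugin options live in
-the ``[tricircle]`` group of /etc/neutron/neutron.conf.0:
-
-.. code-block:: console
-
-    [tricircle]
-    enable_l3_route_network = True
-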
-2. Add the segment service plugin to /etc/neutron/neutron.conf.0
-
-.. code-block:: console
-
- service_plugins = tricircle.network.segment_plugin.TricircleSegmentPlugin
-
-Now we start to create segments and subnetworks.
-
-.. code-block:: console
-
- stack@stack-atom:~/devstack$ openstack multiregion networking pod list
- stack@stack-atom:~/devstack$ openstack multiregion networking pod create --region-name CentralRegion
- +-------------+--------------------------------------+
- | Field | Value |
- +-------------+--------------------------------------+
- | az_name | |
- | dc_name | |
- | pod_az_name | |
- | pod_id | f2f5757d-350f-4278-91a4-3baca12ebccc |
- | region_name | CentralRegion |
- +-------------+--------------------------------------+
- stack@stack-atom:~/devstack$ openstack multiregion networking pod create --region-name RegionOne --availability-zone az1
- +-------------+--------------------------------------+
- | Field | Value |
- +-------------+--------------------------------------+
- | az_name | az1 |
- | dc_name | |
- | pod_az_name | |
- | pod_id | 7c34177a-a210-4edc-a5ca-b9615a7061b3 |
- | region_name | RegionOne |
- +-------------+--------------------------------------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion network create --share --provider-physical-network extern --provider-network-type vlan --provider-segment 3005 multisegment
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | None |
- | created_at | None |
- | description | None |
- | dns_domain | None |
- | id | e848d653-e777-4715-9596-bd0427d9fd27 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | is_vlan_transparent | None |
- | location | None |
- | mtu | None |
- | name | multisegment |
- | port_security_enabled | False |
- | project_id | 1f31124fadd247f18098a20a6da207ec |
- | provider:network_type | vlan |
- | provider:physical_network | extern |
- | provider:segmentation_id | 3005 |
- | qos_policy_id | None |
- | revision_number | None |
- | router:external | Internal |
- | segments | None |
- | shared | True |
- | status | ACTIVE |
- | subnets | |
- | tags | |
- | updated_at | None |
- +---------------------------+--------------------------------------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion network segment create --physical-network extern --network-type vlan --segment 3005 --network multisegment newl3-RegionOne-sgmtnet01
- +------------------+--------------------------------------+
- | Field | Value |
- +------------------+--------------------------------------+
- | description | |
- | id | 802ccc73-1c99-455e-858a-1c19d77d1995 |
- | location | None |
- | name | newl3-RegionOne-sgmtnet01 |
- | network_id | e848d653-e777-4715-9596-bd0427d9fd27 |
- | network_type | vlan |
- | physical_network | extern |
- | segmentation_id | 3005 |
- +------------------+--------------------------------------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion network list
- +--------------------------------------+---------------------------+---------+
- | ID | Name | Subnets |
- +--------------------------------------+---------------------------+---------+
- | 5596d53f-d6ed-4ac5-9722-ad7e3e82e187 | newl3-RegionOne-sgmtnet01 | |
- | e848d653-e777-4715-9596-bd0427d9fd27 | multisegment | |
- +--------------------------------------+---------------------------+---------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=RegionOne network list
- +--------------------------------------+---------------------------+---------+
- | ID | Name | Subnets |
- +--------------------------------------+---------------------------+---------+
- | 2b9f4e56-57be-4624-87b9-ab745ec321c0 | newl3-RegionOne-sgmtnet01 | |
- +--------------------------------------+---------------------------+---------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion subnet create --network newl3-RegionOne-sgmtnet01 --subnet-range 10.0.0.0/24 newl3segment01-subnet-v4
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 10.0.0.2-10.0.0.254 |
- | cidr | 10.0.0.0/24 |
- | created_at | 2018-11-28T09:22:39Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.0.1 |
- | host_routes | |
- | id | f00f7eb0-a72a-4c25-8f71-46e3d872064a |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | location | None |
- | name | newl3segment01-subnet-v4 |
- | network_id | 5596d53f-d6ed-4ac5-9722-ad7e3e82e187 |
- | project_id | 1f31124fadd247f18098a20a6da207ec |
- | revision_number | 0 |
- | segment_id | None |
- | service_types | None |
- | subnetpool_id | None |
- | tags | |
- | updated_at | 2018-11-28T09:22:39Z |
- +-------------------+--------------------------------------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion network list
- +--------------------------------------+---------------------------+--------------------------------------+
- | ID | Name | Subnets |
- +--------------------------------------+---------------------------+--------------------------------------+
- | 5596d53f-d6ed-4ac5-9722-ad7e3e82e187 | newl3-RegionOne-sgmtnet01 | f00f7eb0-a72a-4c25-8f71-46e3d872064a |
- | e848d653-e777-4715-9596-bd0427d9fd27 | multisegment | |
- +--------------------------------------+---------------------------+--------------------------------------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion subnet list
- +--------------------------------------+--------------------------+--------------------------------------+-------------+
- | ID | Name | Network | Subnet |
- +--------------------------------------+--------------------------+--------------------------------------+-------------+
- | f00f7eb0-a72a-4c25-8f71-46e3d872064a | newl3segment01-subnet-v4 | 5596d53f-d6ed-4ac5-9722-ad7e3e82e187 | 10.0.0.0/24 |
- +--------------------------------------+--------------------------+--------------------------------------+-------------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=RegionOne network list
- +--------------------------------------+---------------------------+--------------------------------------+
- | ID | Name | Subnets |
- +--------------------------------------+---------------------------+--------------------------------------+
- | 2b9f4e56-57be-4624-87b9-ab745ec321c0 | newl3-RegionOne-sgmtnet01 | f00f7eb0-a72a-4c25-8f71-46e3d872064a |
- +--------------------------------------+---------------------------+--------------------------------------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=RegionOne subnet list
- +--------------------------------------+--------------------------------------+--------------------------------------+-------------+
- | ID | Name | Network | Subnet |
- +--------------------------------------+--------------------------------------+--------------------------------------+-------------+
- | f00f7eb0-a72a-4c25-8f71-46e3d872064a | f00f7eb0-a72a-4c25-8f71-46e3d872064a | 2b9f4e56-57be-4624-87b9-ab745ec321c0 | 10.0.0.0/24 |
- +--------------------------------------+--------------------------------------+--------------------------------------+-------------+
-
-This part shows how to delete segments and subnets.
-
-.. code-block:: console
-
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion subnet delete newl3segment01-subnet-v4
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion subnet list
-
- stack@stack-atom:~/devstack$ openstack --os-region-name=RegionOne subnet list
-
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion network delete newl3-RegionOne-sgmtnet01
- stack@stack-atom:~/devstack$ openstack --os-region-name=CentralRegion network list
- +--------------------------------------+--------------+---------+
- | ID | Name | Subnets |
- +--------------------------------------+--------------+---------+
- | e848d653-e777-4715-9596-bd0427d9fd27 | multisegment | |
- +--------------------------------------+--------------+---------+
- stack@stack-atom:~/devstack$ openstack --os-region-name=RegionOne network list
-
- stack@stack-atom:~/devstack$
-
-
-Reference
-=========
-
-.. [1] https://github.com/openstack/tricircle/blob/master/specs/stein/new-l3-networking-mulit-NS-with-EW.rst
diff --git a/doc/source/networking/networking-guide-single-external-network.rst b/doc/source/networking/networking-guide-single-external-network.rst
deleted file mode 100644
index a50ece2c..00000000
--- a/doc/source/networking/networking-guide-single-external-network.rst
+++ /dev/null
@@ -1,664 +0,0 @@
-==================================================
-North South Networking via Single External Network
-==================================================
-
-The following figure illustrates one typical networking mode, in which the
-north-south traffic for the tenant is centralized through a single external
-network. Only one virtual non-local router R1 is needed even if the tenant's
-networks are located in multiple OpenStack regions.
-
-Only Neutron and the Tricircle Local Neutron Plugin need to be deployed in
-RegionThree if you want to make the external network floating and applicable
-to all of the tenant's networks.
-
-.. code-block:: console
-
- +-------------+
- | RegionThree |
- | |
- | ext-net1 |
- | +----+----+ |
- | | |
- | +--+--+ |
- | | R1 | |
- | +-+---+ |
- | | |
- +-----+-------+
- +-----------------+ | +-----------------+
- | RegionOne | | | RegionTwo |
- | | bridge | net | |
- | ++-----------------+-----------------+-+ |
- | | | | | |
- | +--+--+ | | +-+---+ |
- | | R1 | | | | R1 | |
- | +--+--+ | | +--+--+ |
- | | net1 | | net2 | |
- | +---+--+-+ | | +-+--+---+ |
- | | | | | |
- | +---------+-+ | | +--+--------+ |
- | | Instance1 | | | | Instance2 | |
- | +-----------+ | | +-----------+ |
- +-----------------+ +-----------------+
-
- Figure 1 North South Networking via Single External Network
-
-.. note:: Please note that if the local network and the external network are
-   located in the same region, attaching a router interface to the non-local
-   router will lead to one additional logical router for east-west networking.
-   For example, in the following figure, external network ext-net1 is in
-   RegionTwo, and if local network net2 is attached to the router R1, then an
-   additional logical router R1 for east-west networking is created.
-
-.. code-block:: console
-
- +-----------------+ +-----------------+ +-----------------+ +-----------------+
- | RegionOne | | RegionTwo | | RegionOne | | RegionTwo |
- | | | | | | | |
- | | | ext-net1 | | | | ext-net1 |
- | | | +-------+---+ | | | | +-------+---+ |
- | bridge net | | | bridge net | |
- | -+-------+-------+---+-+ | | | -+-------+-------+-+-+-+ | |
- | | | | | +--+--+ | | | | | | | +--+--+ |
- | +--+--+ | | +----+ R1 | | | +--+--+ | | | +----+ R1 | |
- | | R1 | | | +-----+ | ---> | | R1 | | | | +-----+ |
- | +--+--+ | | | | +--+--+ | | | |
- | | | | | | | | | | +-----+ |
- | | | | | | | | | +---+ R1 | |
- | | | | | | | | | +--+--+ |
- | | | | | | | | | | |
- | | net1 | | | | | net1 | | net2 | |
- | +---+--+-+ | | | | +---+--+-+ | | +-+--+---+ |
- | | | | | | | | | | |
- | | | | | | | | | | |
- | +---------+-+ | | | | +---------+-+ | | +--+--------+ |
- | | Instance1 | | | | | | Instance1 | | | | Instance2 | |
- | +-----------+ | | | | +-----------+ | | +-----------+ |
- +-----------------+ +-----------------+ +-----------------+ +-----------------+
-
- Figure 2 What happens if local network and external network are in the same region
-
-How to create this network topology
-===================================
-
-The following commands are executed to create the Figure 1 topology. A
-different order is also possible: for example, create the router and tenant
-networks first, then boot the instances, set the router gateway, and
-associate the floating IPs as the last step.
-
-Create external network ext-net1, which will be located in RegionThree.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type vlan --provider:physical_network extern --router:external --availability-zone-hint RegionThree ext-net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionThree |
- | id | 494a1d2f-9a0f-4d0d-a5e9-f926fce912ac |
- | name | ext-net1 |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | provider:network_type | vlan |
- | provider:physical_network | extern |
- | provider:segmentation_id | 170 |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- +---------------------------+--------------------------------------+
-
-Now you can also create a flat type external network.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --provider:network_type flat --provider:physical_network extern --router:external --availability-zone-hint RegionTwo ext-net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | id | c151c1a2-ec8c-4975-bb85-9a8e143100b0 |
- | name | ext-net1 |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | provider:network_type | flat |
- | provider:physical_network | extern |
- | provider:segmentation_id | |
- | router:external | True |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- +---------------------------+--------------------------------------+
-
-Create subnet in ext-net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create --name ext-subnet1 --disable-dhcp ext-net1 163.3.124.0/24
- +-------------------+--------------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------------+
- | allocation_pools | {"start": "163.3.124.2", "end": "163.3.124.254"} |
- | cidr | 163.3.124.0/24 |
- | created_at | 2017-01-14T02:11:48Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | False |
- | gateway_ip | 163.3.124.1 |
- | host_routes | |
- | id | 5485feab-f843-4ffe-abd5-6afe5319ad82 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | ext-subnet1 |
- | network_id | 494a1d2f-9a0f-4d0d-a5e9-f926fce912ac |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- | updated_at | 2017-01-14T02:11:48Z |
- +-------------------+--------------------------------------------------+
-
-Create router R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-create R1
- +-----------------------+--------------------------------------+
- | Field | Value |
- +-----------------------+--------------------------------------+
- | admin_state_up | True |
- | created_at | 2017-01-14T02:12:15Z |
- | description | |
- | external_gateway_info | |
- | id | 4c4c164d-2cfa-4d2b-ba81-3711f44a6962 |
- | name | R1 |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | revision_number | 1 |
- | status | ACTIVE |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- | updated_at | 2017-01-14T02:12:15Z |
- +-----------------------+--------------------------------------+
-
-Set the router gateway to ext-net1 for R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-gateway-set R1 ext-net1
- Set gateway for router R1
-
-Create local network net1 which will reside in RegionOne, so you use RegionOne
-as the value of availability-zone-hint.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --availability-zone-hint RegionOne net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionOne |
- | id | dde37c9b-7fe6-4ca9-be1a-0abb9ba1eddf |
- | name | net1 |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | provider:network_type | local |
- | provider:physical_network | |
- | provider:segmentation_id | |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- +---------------------------+--------------------------------------+
-
-Create subnet in net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create net1 10.0.1.0/24
- +-------------------+--------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------+
- | allocation_pools | {"start": "10.0.1.2", "end": "10.0.1.254"} |
- | cidr | 10.0.1.0/24 |
- | created_at | 2017-01-14T02:14:09Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.1.1 |
- | host_routes | |
- | id | 409f3b9e-3b14-4147-9443-51930eb9a882 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | |
- | network_id | dde37c9b-7fe6-4ca9-be1a-0abb9ba1eddf |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- | updated_at | 2017-01-14T02:14:09Z |
- +-------------------+--------------------------------------------+
-
-Add this subnet to router R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-interface-add R1 409f3b9e-3b14-4147-9443-51930eb9a882
- Added interface 92eaf94d-e345-489a-bc91-3d3645d27f8b to router R1.
-
-List the available images in RegionOne.
-
-.. code-block:: console
-
- $ glance --os-region-name=RegionOne image-list
- +--------------------------------------+---------------------------------+
- | ID | Name |
- +--------------------------------------+---------------------------------+
- | 570b5674-4d7d-4c17-9e8a-1caed6194ff1 | cirros-0.3.4-x86_64-uec |
- | 548cf82c-4353-407e-9aa2-3feac027c297 | cirros-0.3.4-x86_64-uec-kernel |
- | 1d40fb9f-1669-4b4d-82b8-4c3b9cde0c03 | cirros-0.3.4-x86_64-uec-ramdisk |
- +--------------------------------------+---------------------------------+
-
-List the available flavors in RegionOne.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne flavor-list
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
- | 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
- | 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
- | c1 | cirros256 | 256 | 0 | 0 | | 1 | 1.0 | True |
- | d1 | ds512M | 512 | 5 | 0 | | 1 | 1.0 | True |
- | d2 | ds1G | 1024 | 10 | 0 | | 1 | 1.0 | True |
- | d3 | ds2G | 2048 | 10 | 0 | | 2 | 1.0 | True |
- | d4 | ds4G | 4096 | 20 | 0 | | 4 | 1.0 | True |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
-
-
-Boot instance1 in RegionOne, and connect this instance to net1.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne boot --flavor 1 --image 570b5674-4d7d-4c17-9e8a-1caed6194ff1 --nic net-id=dde37c9b-7fe6-4ca9-be1a-0abb9ba1eddf instance1
- +--------------------------------------+----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance1 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | 548cf82c-4353-407e-9aa2-3feac027c297 |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | 1d40fb9f-1669-4b4d-82b8-4c3b9cde0c03 |
- | OS-EXT-SRV-ATTR:reservation_id | r-n0k0u15s |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | N9A9iArByrdt |
- | config_drive | |
- | created | 2017-01-14T02:17:05Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | e7206415-e497-4110-b644-a64272625cef |
- | image | cirros-0.3.4-x86_64-uec (570b5674-4d7d-4c17-9e8a-1caed6194ff1) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance1 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- | updated | 2017-01-14T02:17:05Z |
- | user_id | 8e84fae0a5b74464b3300a4576d090a4 |
- +--------------------------------------+----------------------------------------------------------------+
-
-Make sure instance1 is active in RegionOne.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionOne list
- +--------------------------------------+-----------+--------+------------+-------------+---------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+-----------+--------+------------+-------------+---------------+
- | e7206415-e497-4110-b644-a64272625cef | instance1 | ACTIVE | - | Running | net1=10.0.1.5 |
- +--------------------------------------+-----------+--------+------------+-------------+---------------+
-
-Create a floating IP for instance1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-create ext-net1
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | created_at | 2017-01-14T02:19:24Z |
- | description | |
- | fixed_ip_address | |
- | floating_ip_address | 163.3.124.7 |
- | floating_network_id | 494a1d2f-9a0f-4d0d-a5e9-f926fce912ac |
- | id | 04c18e73-675b-4273-a73a-afaf1e4f9811 |
- | port_id | |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | revision_number | 1 |
- | router_id | |
- | status | DOWN |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- | updated_at | 2017-01-14T02:19:24Z |
- +---------------------+--------------------------------------+
-
-List the port in net1 for instance1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion port-list
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | id | name | mac_address | fixed_ips |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | 37e9cfe5-d410-4625-963d- | | fa:16:3e:14:47:a8 | {"subnet_id": "409f3b9e- |
- | b7ea4347d72e | | | 3b14-4147-9443-51930eb9a882", |
- | | | | "ip_address": "10.0.1.5"} |
- | 92eaf94d-e345-489a- | | fa:16:3e:63:a9:08 | {"subnet_id": "409f3b9e- |
- | bc91-3d3645d27f8b | | | 3b14-4147-9443-51930eb9a882", |
- | | | | "ip_address": "10.0.1.1"} |
- | d3ca5e74-470e-4953-a280-309b5e8e11 | dhcp_port_409f3b9e- | fa:16:3e:7e:72:98 | {"subnet_id": "409f3b9e- |
- | 46 | 3b14-4147-9443-51930eb9a882 | | 3b14-4147-9443-51930eb9a882", |
- | | | | "ip_address": "10.0.1.2"} |
- | b4eef6a0-70e6-4a42-b0c5-f8f49cee25 | interface_RegionOne_409f3b9e- | fa:16:3e:00:e1:5b | {"subnet_id": "409f3b9e- |
- | c0 | 3b14-4147-9443-51930eb9a882 | | 3b14-4147-9443-51930eb9a882", |
- | | | | "ip_address": "10.0.1.7"} |
- | 65b52fe3-f765-4124-a97f- | bridge_port_640e791e767e49939d5c60 | fa:16:3e:df:7b:97 | {"subnet_id": "d637f4e5-4b9a-4237 |
- | f73a76e820e6 | 0fdb3f8431_daa08da0-c60e- | | -b3bc-ccfba45a5c37", "ip_address": |
- | | 42c8-bc30-1ed887111ecb | | "100.0.0.7"} |
- | e0755307-a498-473e- | | fa:16:3e:1c:70:b9 | {"subnet_id": "5485feab-f843-4ffe- |
- | 99e5-30cbede36b8e | | | abd5-6afe5319ad82", "ip_address": |
- | | | | "163.3.124.7"} |
- | 2404eb83-f2f4-4a36-b377-dbc8befee1 | | fa:16:3e:25:80:e6 | {"subnet_id": "5485feab-f843-4ffe- |
- | 93 | | | abd5-6afe5319ad82", "ip_address": |
- | | | | "163.3.124.9"} |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
-
-Associate the floating IP to instance1's IP in net1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-associate 04c18e73-675b-4273-a73a-afaf1e4f9811 37e9cfe5-d410-4625-963d-b7ea4347d72e
- Associated floating IP 04c18e73-675b-4273-a73a-afaf1e4f9811
-
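-You can confirm the association from RegionOne; the floating IP should now be
-listed next to instance1 in the Networks column (output omitted here):
-
-.. code-block:: console
-
-    $ nova --os-region-name=RegionOne list
-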
-Proceed with creating the network topology in RegionTwo.
-
-Create net2 in RegionTwo.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion net-create --availability-zone-hint RegionTwo net2
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | RegionTwo |
- | id | cfe622f9-1851-4033-a4ba-6718659a147c |
- | name | net2 |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | provider:network_type | local |
- | provider:physical_network | |
- | provider:segmentation_id | |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- +---------------------------+--------------------------------------+
-
-Create subnet in net2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion subnet-create net2 10.0.2.0/24
- +-------------------+--------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------+
- | allocation_pools | {"start": "10.0.2.2", "end": "10.0.2.254"} |
- | cidr | 10.0.2.0/24 |
- | created_at | 2017-01-14T02:36:03Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 10.0.2.1 |
- | host_routes | |
- | id | 4e3376f8-0bda-450d-b4fb-9eb77c4ef919 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | |
- | network_id | cfe622f9-1851-4033-a4ba-6718659a147c |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | revision_number | 2 |
- | subnetpool_id | |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- | updated_at | 2017-01-14T02:36:03Z |
- +-------------------+--------------------------------------------+
-
-Add a router interface for the subnet to R1.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion router-interface-add R1 4e3376f8-0bda-450d-b4fb-9eb77c4ef919
- Added interface d4b0e6d9-8bfb-4cd6-8824-92731c0226da to router R1.
-
-List available images in RegionTwo.
-
-.. code-block:: console
-
- $ glance --os-region-name=RegionTwo image-list
- +--------------------------------------+---------------------------------+
- | ID | Name |
- +--------------------------------------+---------------------------------+
- | 392aa24f-a1a8-4897-bced-70301e1c7e3b | cirros-0.3.4-x86_64-uec |
- | 41ac5372-764a-4e31-8c3a-66cdc5a6529e | cirros-0.3.4-x86_64-uec-kernel |
- | 55523513-719d-4949-b697-db98ab3e938e | cirros-0.3.4-x86_64-uec-ramdisk |
- +--------------------------------------+---------------------------------+
-
-List available flavors in RegionTwo.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo flavor-list
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
- | 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
- | 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
- | 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
- | 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
- | 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
- | c1 | cirros256 | 256 | 0 | 0 | | 1 | 1.0 | True |
- | d1 | ds512M | 512 | 5 | 0 | | 1 | 1.0 | True |
- | d2 | ds1G | 1024 | 10 | 0 | | 1 | 1.0 | True |
- | d3 | ds2G | 2048 | 10 | 0 | | 2 | 1.0 | True |
- | d4 | ds4G | 4096 | 20 | 0 | | 4 | 1.0 | True |
- +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
-
-Boot instance2 and connect it to net2.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo boot --flavor 1 --image 392aa24f-a1a8-4897-bced-70301e1c7e3b --nic net-id=cfe622f9-1851-4033-a4ba-6718659a147c instance2
- +--------------------------------------+----------------------------------------------------------------+
- | Property | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | - |
- | OS-EXT-SRV-ATTR:hostname | instance2 |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | - |
- | OS-EXT-SRV-ATTR:instance_name | |
- | OS-EXT-SRV-ATTR:kernel_id | 41ac5372-764a-4e31-8c3a-66cdc5a6529e |
- | OS-EXT-SRV-ATTR:launch_index | 0 |
- | OS-EXT-SRV-ATTR:ramdisk_id | 55523513-719d-4949-b697-db98ab3e938e |
- | OS-EXT-SRV-ATTR:reservation_id | r-3v42ltzp |
- | OS-EXT-SRV-ATTR:root_device_name | - |
- | OS-EXT-SRV-ATTR:user_data | - |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | o62QufgY2JAF |
- | config_drive | |
- | created | 2017-01-14T02:39:42Z |
- | description | - |
- | flavor | m1.tiny (1) |
- | hostId | |
- | host_status | |
- | id | e489ab4e-957d-4537-9870-fff87406aac5 |
- | image | cirros-0.3.4-x86_64-uec (392aa24f-a1a8-4897-bced-70301e1c7e3b) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | instance2 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tags | [] |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- | updated | 2017-01-14T02:39:42Z |
- | user_id | 8e84fae0a5b74464b3300a4576d090a4 |
- +--------------------------------------+----------------------------------------------------------------+
-
-Check to see if instance2 is active.
-
-.. code-block:: console
-
- $ nova --os-region-name=RegionTwo list
- +--------------------------------------+-----------+--------+------------+-------------+----------------+
- | ID | Name | Status | Task State | Power State | Networks |
- +--------------------------------------+-----------+--------+------------+-------------+----------------+
- | e489ab4e-957d-4537-9870-fff87406aac5 | instance2 | ACTIVE | - | Running | net2=10.0.2.10 |
- +--------------------------------------+-----------+--------+------------+-------------+----------------+
-
-You can now ping instance2 from instance1, and vice versa.
-
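-For example, you could log in to instance1 (e.g. via its console) and ping
-instance2's fixed IP address; the addresses shown here are taken from this
-example environment.
-
-.. code-block:: console
-
- $ ping -c 3 10.0.2.10
-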
-Create floating IP for instance2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-create ext-net1
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | created_at | 2017-01-14T02:40:55Z |
- | description | |
- | fixed_ip_address | |
- | floating_ip_address | 163.3.124.13 |
- | floating_network_id | 494a1d2f-9a0f-4d0d-a5e9-f926fce912ac |
- | id | f917dede-6e0d-4c5a-8d02-7d5774d094ba |
- | port_id | |
- | project_id | 640e791e767e49939d5c600fdb3f8431 |
- | revision_number | 1 |
- | router_id | |
- | status | DOWN |
- | tenant_id | 640e791e767e49939d5c600fdb3f8431 |
- | updated_at | 2017-01-14T02:40:55Z |
- +---------------------+--------------------------------------+
-
-List the ports to locate the port of instance2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion port-list
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | id | name | mac_address | fixed_ips |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
- | 37e9cfe5-d410-4625-963d- | | fa:16:3e:14:47:a8 | {"subnet_id": "409f3b9e- |
- | b7ea4347d72e | | | 3b14-4147-9443-51930eb9a882", |
- | | | | "ip_address": "10.0.1.5"} |
- | ed9bdc02-0f0d-4763-a993-e0972c6563 | | fa:16:3e:c1:10:a3 | {"subnet_id": "4e3376f8-0bda-450d- |
- | fa | | | b4fb-9eb77c4ef919", "ip_address": |
- | | | | "10.0.2.10"} |
- | 92eaf94d-e345-489a- | | fa:16:3e:63:a9:08 | {"subnet_id": "409f3b9e- |
- | bc91-3d3645d27f8b | | | 3b14-4147-9443-51930eb9a882", |
- | | | | "ip_address": "10.0.1.1"} |
- | f98ceee7-777b-4cff- | interface_RegionTwo_409f3b9e- | fa:16:3e:aa:cf:e2 | {"subnet_id": "409f3b9e- |
- | b5b9-c27b4277bb7f | 3b14-4147-9443-51930eb9a882 | | 3b14-4147-9443-51930eb9a882", |
- | | | | "ip_address": "10.0.1.12"} |
- | d3ca5e74-470e-4953-a280-309b5e8e11 | dhcp_port_409f3b9e- | fa:16:3e:7e:72:98 | {"subnet_id": "409f3b9e- |
- | 46 | 3b14-4147-9443-51930eb9a882 | | 3b14-4147-9443-51930eb9a882", |
- | | | | "ip_address": "10.0.1.2"} |
- | b4eef6a0-70e6-4a42-b0c5-f8f49cee25 | interface_RegionOne_409f3b9e- | fa:16:3e:00:e1:5b | {"subnet_id": "409f3b9e- |
- | c0 | 3b14-4147-9443-51930eb9a882 | | 3b14-4147-9443-51930eb9a882", |
- | | | | "ip_address": "10.0.1.7"} |
- | d4b0e6d9-8bfb- | | fa:16:3e:f9:5f:4e | {"subnet_id": "4e3376f8-0bda-450d- |
- | 4cd6-8824-92731c0226da | | | b4fb-9eb77c4ef919", "ip_address": |
- | | | | "10.0.2.1"} |
- | e54f0a40-837f- | interface_RegionTwo_4e3376f8-0bda- | fa:16:3e:fa:84:da | {"subnet_id": "4e3376f8-0bda-450d- |
- | 48e7-9397-55170300d06e | 450d-b4fb-9eb77c4ef919 | | b4fb-9eb77c4ef919", "ip_address": |
- | | | | "10.0.2.11"} |
- | d458644d-a401-4d98-bec3-9468fdd56d | dhcp_port_4e3376f8-0bda-450d-b4fb- | fa:16:3e:b2:a6:03 | {"subnet_id": "4e3376f8-0bda-450d- |
- | 1c | 9eb77c4ef919 | | b4fb-9eb77c4ef919", "ip_address": |
- | | | | "10.0.2.2"} |
- | 65b52fe3-f765-4124-a97f- | bridge_port_640e791e767e49939d5c60 | fa:16:3e:df:7b:97 | {"subnet_id": "d637f4e5-4b9a-4237 |
- | f73a76e820e6 | 0fdb3f8431_daa08da0-c60e- | | -b3bc-ccfba45a5c37", "ip_address": |
- | | 42c8-bc30-1ed887111ecb | | "100.0.0.7"} |
- | cee45aac- | bridge_port_640e791e767e49939d5c60 | fa:16:3e:d0:50:0d | {"subnet_id": "d637f4e5-4b9a-4237 |
- | fd07-4a2f-8008-02757875d1fe | 0fdb3f8431_b072000e-3cd1-4a1a- | | -b3bc-ccfba45a5c37", "ip_address": |
- | | aa60-9ffbca119b1a | | "100.0.0.8"} |
- | dd4707cc-fe2d-429c-8c2f- | | fa:16:3e:9e:85:62 | {"subnet_id": "5485feab-f843-4ffe- |
- | 084b525e1789 | | | abd5-6afe5319ad82", "ip_address": |
- | | | | "163.3.124.13"} |
- | e0755307-a498-473e- | | fa:16:3e:1c:70:b9 | {"subnet_id": "5485feab-f843-4ffe- |
- | 99e5-30cbede36b8e | | | abd5-6afe5319ad82", "ip_address": |
- | | | | "163.3.124.7"} |
- | 2404eb83-f2f4-4a36-b377-dbc8befee1 | | fa:16:3e:25:80:e6 | {"subnet_id": "5485feab-f843-4ffe- |
- | 93 | | | abd5-6afe5319ad82", "ip_address": |
- | | | | "163.3.124.9"} |
- +------------------------------------+------------------------------------+-------------------+--------------------------------------+
-
-Associate the floating IP with instance2's IP address in net2.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-associate f917dede-6e0d-4c5a-8d02-7d5774d094ba ed9bdc02-0f0d-4763-a993-e0972c6563fa
- Associated floating IP f917dede-6e0d-4c5a-8d02-7d5774d094ba
-
-Make sure the floating IP association works.
-
-.. code-block:: console
-
- $ neutron --os-region-name=CentralRegion floatingip-list
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | id | fixed_ip_address | floating_ip_address | port_id |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | 04c18e73-675b-4273-a73a-afaf1e4f9811 | 10.0.1.5 | 163.3.124.7 | 37e9cfe5-d410-4625-963d-b7ea4347d72e |
- | f917dede-6e0d-4c5a-8d02-7d5774d094ba | 10.0.2.10 | 163.3.124.13 | ed9bdc02-0f0d-4763-a993-e0972c6563fa |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
-
- $ neutron --os-region-name=RegionThree floatingip-list
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | id | fixed_ip_address | floating_ip_address | port_id |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
- | 3a220f53-fdfe-44e3-847a-b00464135416 | 10.0.1.5 | 163.3.124.7 | 37e9cfe5-d410-4625-963d-b7ea4347d72e |
- | fe15192f-04cb-48c8-8a90-7a7c016f40ae | 10.0.2.10 | 163.3.124.13 | ed9bdc02-0f0d-4763-a993-e0972c6563fa |
- +--------------------------------------+------------------+---------------------+--------------------------------------+
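-
-Provided that the external network has outside connectivity and the security
-group allows ICMP, instance2 should now also be reachable from outside via
-its floating IP; the address shown here is from this example environment.
-
-.. code-block:: console
-
- $ ping -c 3 163.3.124.13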
diff --git a/doc/source/networking/networking-guide.rst b/doc/source/networking/networking-guide.rst
deleted file mode 100644
index 55f99ee3..00000000
--- a/doc/source/networking/networking-guide.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-Networking Guide
-================
-
-The Tricircle provides networking automation across Neutron servers in a
-multi-region OpenStack deployment, and many cross-Neutron networking modes
-are supported. This guide describes how to use the CLI to set up the
-typical networking modes.
-
-
-.. include:: ./networking-terms.rst
-.. include:: ./networking-prerequisites.rst
-.. include:: ./networking-scenarios.rst
-.. include:: ./service-function-chaining-guide.rst
-.. include:: ./vlan-aware-vms-guide.rst
diff --git a/doc/source/networking/networking-prerequisites.rst b/doc/source/networking/networking-prerequisites.rst
deleted file mode 100644
index 7bedf812..00000000
--- a/doc/source/networking/networking-prerequisites.rst
+++ /dev/null
@@ -1,130 +0,0 @@
-=============
-Prerequisites
-=============
-One CentralRegion in which central Neutron and the Tricircle services are
-started, with central Neutron properly configured with the Tricircle Central
-Neutron plugin, and at least two regions (RegionOne, RegionTwo) in which
-local Neutron is properly configured with the Tricircle Local Neutron plugin.
-
-RegionOne is mapped to az1, and RegionTwo is mapped to az2, through pod
-management via the Tricircle Admin API.
-
-You can use az1 or RegionOne as the value of availability-zone-hint when
-creating a network. Although in this document each availability zone contains
-only one region, an availability zone can include more than one region in
-Tricircle pod management. If you specify az1 as the value, the network will
-reside in az1; since az1 is mapped to RegionOne, the network is created
-there, and if you later add more regions to az1, the network can spread into
-those regions too.
-
-Please refer to the installation guide and configuration guide for how to
-set up a multi-region environment with the Tricircle service enabled.
-
-If you set up the environment through DevStack, you can obtain the settings
-used in this document as follows:
-
-Suppose that each node has three interfaces: eth1 for the tenant VLAN
-network and eth2 for the external VLAN network. If you want to verify data
-plane connectivity, please make sure the bridges "br-vlan" and "br-ext" are
-connected to the corresponding interfaces. Use the following commands to
-connect each bridge to its physical Ethernet interface; as shown below,
-"br-vlan" is wired to eth1, and "br-ext" to eth2::
-
- sudo ovs-vsctl add-br br-vlan
- sudo ovs-vsctl add-port br-vlan eth1
- sudo ovs-vsctl add-br br-ext
- sudo ovs-vsctl add-port br-ext eth2
-
-Suppose the VLAN range for the tenant network is 101~150 and for the
-external network 151~200. On the node which will run central Neutron and
-the Tricircle services, configure local.conf like this::
-
- Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:101:150,extern:151:200)
- OVS_BRIDGE_MAPPINGS=bridge:br-vlan,extern:br-ext
-
- TRICIRCLE_START_SERVICES=True
- enable_plugin tricircle https://github.com/openstack/tricircle/
-
-On the node which will run local Neutron without the Tricircle services,
-configure local.conf like this::
-
- Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:101:150,extern:151:200)
- OVS_BRIDGE_MAPPINGS=bridge:br-vlan,extern:br-ext
-
- TRICIRCLE_START_SERVICES=False
- enable_plugin tricircle https://github.com/openstack/tricircle/
-
-You may have noticed that the only difference is whether
-TRICIRCLE_START_SERVICES is True or False. All examples in this document
-are based on these settings.
-
-If you also want to configure a VxLAN network, supposing the VxLAN range for
-the tenant network is 1001~2000, add the following to the above local.conf::
-
- Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=(vni_ranges=1001:2000)
-
-If you also want to configure a flat network, supposing you use the same
-physical network as the VLAN network, add the following to local.conf::
-
- Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS=(flat_networks=bridge,extern)
-
-In both RegionOne and RegionTwo external networks can be provisioned; the
-settings will look like this in /etc/neutron/plugins/ml2/ml2_conf.ini::
-
- network_vlan_ranges = bridge:101:150,extern:151:200
-
- vni_ranges = 1001:2000 (or the range that you configure)
-
- flat_networks = bridge,extern
-
- bridge_mappings = bridge:br-vlan,extern:br-ext
-
-Please be aware that the physical network name for the tenant VLAN network
-is "bridge", and the physical network name for the external network is
-"extern".
-
-In central Neutron's configuration file, the default settings look as
-follows::
-
- bridge_network_type = vxlan
- network_vlan_ranges = bridge:101:150,extern:151:200
- vni_ranges = 1001:2000
- flat_networks = bridge,extern
- tenant_network_types = vxlan,vlan,flat,local
- type_drivers = vxlan,vlan,flat,local
-
-If you want to create a local network, it is recommended that you specify
-availability_zone_hint as the region name when creating the network, instead
-of specifying the network type as "local". The "local" type has two
-drawbacks. One is that you cannot control the exact type of the network in
-local Neutron; it's up to your local Neutron's configuration. The other is
-that the segment ID of the network is allocated by local Neutron, so it may
-conflict with a segment ID that is allocated by central Neutron. Considering
-such problems, we plan to deprecate the "local" type.
-
-If you want to create an L2 network across multiple Neutron servers, then
-you have to specify --provider-network-type vlan in the network creation
-command for the VLAN network type, or --provider-network-type vxlan for the
-VxLAN network type. Both the VLAN and VxLAN network types can work as the
-bridge network. The default bridge network type is VxLAN.
-
-If you want to create a flat network, which is usually used as the external
-network type, then you have to specify --provider-network-type flat in the
-network creation command, as in the example below.
-
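-For example, with the configuration above, a cross Neutron VxLAN network and
-a flat external network could be created as follows (the names are only
-illustrative)::
-
- neutron --os-region-name=CentralRegion net-create --provider:network_type vxlan --availability-zone-hint az1 --availability-zone-hint az2 net1
- neutron --os-region-name=CentralRegion net-create --provider:network_type flat --provider:physical_network extern --router:external --availability-zone-hint RegionOne ext-net1
-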
-You can create L2 network for different purposes, and the supported network
-types for different purposes are summarized as follows.
-
-.. _supported_network_types:
-
-.. list-table::
- :header-rows: 1
-
- * - Networking purpose
- - Supported
- * - Local L2 network for instances
- - FLAT, VLAN, VxLAN
- * - Cross Neutron L2 network for instances
- - FLAT, VLAN, VxLAN
- * - Bridge network for routers
- - FLAT, VLAN, VxLAN
- * - External network
- - FLAT, VLAN
diff --git a/doc/source/networking/networking-scenarios.rst b/doc/source/networking/networking-scenarios.rst
deleted file mode 100644
index 68d9cecb..00000000
--- a/doc/source/networking/networking-scenarios.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-===================
-Networking Scenario
-===================
-
-.. toctree::
- :maxdepth: 4
-
- networking-guide-direct-provider-networks.rst
- networking-guide-multiple-external-networks.rst
- networking-guide-multiple-ns-with-ew-enabled.rst
- networking-guide-single-external-network.rst
- networking-guide-local-networking.rst
- networking-guide-newL3-using-segment.rst
diff --git a/doc/source/networking/networking-terms.rst b/doc/source/networking/networking-terms.rst
deleted file mode 100644
index 1be05c40..00000000
--- a/doc/source/networking/networking-terms.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-================
-Networking Terms
-================
-
-There are four important networking terms that will be used in networking
-automation across Neutron.
-
-Local Network
- - Local Network is a network which can only reside in one OpenStack cloud.
- - Network type could be VLAN, VxLAN, Flat.
- - If you specify a region name as the value of availability-zone-hint
- during network creation, then the network will be created as local
- network in that region.
- - If the default network type is configured to "local" in central
- Neutron, then no matter whether you specify availability-zone-hint or
- not, the network will be a local network as long as it was created
- without an explicitly given non-local provider network type.
- - An external network should be created as a local network, which means
- the external network explicitly exists in one specified region. Each
- region may provide multiple external networks; there is no limitation
- on how many external networks can be created.
- - For example, local network could be created as follows:
-
- .. code-block:: console
-
- openstack --os-region-name=CentralRegion network create --availability-zone-hint=RegionOne net1
-
-Local Router
- - Local Router is a logical router which can only reside in one OpenStack
- cloud.
- - If you specify a region name as the value of availability-zone-hint
- during router creation, then the router will be created as local
- router in that region.
- - For example, local router could be created as follows:
-
- .. code-block:: console
-
- neutron --os-region-name=CentralRegion router-create --availability-zone-hint RegionOne R1
-
-Cross Neutron L2 Network
- - Cross Neutron L2 Network is a network which can be stretched across
- more than one Neutron server; these Neutron servers may work in one
- OpenStack cloud or in multiple OpenStack clouds.
- - Network type could be VLAN, VxLAN, Flat.
- - During network creation, if availability-zone-hint is not specified, or
- is specified with an availability zone name, more than one region name,
- or more than one availability zone name, then the network will be
- created as a cross Neutron L2 network.
- - If the default network type is not configured to "local" in central
- Neutron, then the network will be a cross Neutron L2 network as long as
- it was created without a specified provider network type and without a
- single region name in availability-zone-hint.
- - For example, cross Neutron L2 network could be created as follows:
-
- .. code-block:: console
-
- neutron --os-region-name=CentralRegion net-create --provider:network_type vxlan --availability-zone-hint RegionOne --availability-zone-hint RegionTwo net1
-
-Non-Local Router
- - A Non-Local Router can reside in more than one OpenStack cloud and is
- internally inter-connected with a bridge network.
- - The bridge network used internally by a non-local router is a special
- cross Neutron L2 network.
- - Local networks or cross Neutron L2 networks can be attached to a local
- router or to non-local routers as long as the network can be presented
- in the region where the router resides.
- - During router creation, if availability-zone-hint is not specified, or
- is specified with an availability zone name, more than one region name,
- or more than one availability zone name, then the router will be created
- as a non-local router.
- - For example, non-local router could be created as follows:
-
- .. code-block:: console
-
- neutron --os-region-name=CentralRegion router-create --availability-zone-hint RegionOne --availability-zone-hint RegionTwo R3
-
-It's also important to understand that cross Neutron L2 networks, local
-routers and non-local routers can be combined for different
-north-south/east-west networking purposes.
-
-North-South and East-West Networking
- - Instances in different OpenStack clouds can be attached to a cross
- Neutron L2 network directly, so that they can communicate with each
- other no matter which OpenStack cloud they are in.
- - If L3 networking across OpenStack clouds is preferred, local networks
- attached to a non-local router can be created for instances to attach to.
- - A local router can be given a gateway to an external network to support
- north-south traffic handled locally.
- - A non-local router works only for cross Neutron east-west networking
- if no external network is set on the router.
- - A non-local router can serve as the centralized north-south traffic
- gateway if an external network is attached to it, and it can support
- east-west traffic at the same time (see the example below).
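- - For example, a local router could be given a gateway to a local
- external network for locally handled north-south traffic (the router
- and network names here are only illustrative):
-
- .. code-block:: console
-
- neutron --os-region-name=CentralRegion router-gateway-set R1 ext-net1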
diff --git a/doc/source/networking/service-function-chaining-guide.rst b/doc/source/networking/service-function-chaining-guide.rst
deleted file mode 100644
index ffe13816..00000000
--- a/doc/source/networking/service-function-chaining-guide.rst
+++ /dev/null
@@ -1,120 +0,0 @@
-===============================
-Service Function Chaining Guide
-===============================
-
-Service Function Chaining provides the ability to define an ordered list of
-network services (e.g. firewalls, load balancers). These services are then
-“stitched” together in the network to create a service chain.
-
-
-Installation
-^^^^^^^^^^^^
-
-After installing tricircle, please refer to
-https://docs.openstack.org/networking-sfc/latest/install/install.html
-to install networking-sfc.
-
-Configuration
-^^^^^^^^^^^^^
-
-- 1 Configure central Neutron server
-
- After installing the Tricircle and networking-sfc, enable the service
- plugins in the central Neutron server by adding them in ``neutron.conf.0``
- (typically found in ``/etc/neutron/``)::
-
- service_plugins=networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,tricircle.network.central_sfc_plugin.TricircleSfcPlugin
-
- In the same configuration file, specify the drivers to use for the plugins. ::
-
- [sfc]
- drivers = tricircle_sfc
-
- [flowclassifier]
- drivers = tricircle_fc
-
-- 2 Configure local Neutron
-
- Please refer to https://docs.openstack.org/networking-sfc/latest/install/configuration.html
- to configure local networking-sfc; a minimal sketch is given below.
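-
- As a rough sketch only (please follow the networking-sfc documentation
- above for the authoritative settings), the local Neutron server typically
- enables the flow classifier and SFC service plugins and their OVS drivers
- in its ``neutron.conf``::
-
- [DEFAULT]
- service_plugins = flow_classifier,sfc
-
- [sfc]
- drivers = ovs
-
- [flowclassifier]
- drivers = ovs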
-
-
-How to play
-^^^^^^^^^^^
-
-- 1 Create pods via Tricircle Admin API
-
-- 2 Create necessary resources in central Neutron server ::
-
- neutron --os-region-name=CentralRegion net-create --provider:network_type vxlan net1
- neutron --os-region-name=CentralRegion subnet-create net1 10.0.0.0/24
- neutron --os-region-name=CentralRegion port-create net1 --name p1
- neutron --os-region-name=CentralRegion port-create net1 --name p2
- neutron --os-region-name=CentralRegion port-create net1 --name p3
- neutron --os-region-name=CentralRegion port-create net1 --name p4
- neutron --os-region-name=CentralRegion port-create net1 --name p5
- neutron --os-region-name=CentralRegion port-create net1 --name p6
-
- Please note that network type must be vxlan.
-
-- 3 Get the image ID and flavor ID which will be used in VM booting. In the
- following step, the VMs will be booted in RegionOne and RegionTwo. ::
-
- glance --os-region-name=RegionOne image-list
- nova --os-region-name=RegionOne flavor-list
- glance --os-region-name=RegionTwo image-list
- nova --os-region-name=RegionTwo flavor-list
-
-- 4 Boot virtual machines ::
-
- openstack --os-region-name=RegionOne server create --flavor 1 --image $image1_id --nic port-id=$p1_id vm_src
- openstack --os-region-name=RegionOne server create --flavor 1 --image $image1_id --nic port-id=$p2_id --nic port-id=$p3_id vm_sfc1
- openstack --os-region-name=RegionTwo server create --flavor 1 --image $image2_id --nic port-id=$p4_id --nic port-id=$p5_id vm_sfc2
- openstack --os-region-name=RegionTwo server create --flavor 1 --image $image2_id --nic port-id=$p6_id vm_dst
-
-- 5 Create port pairs in central Neutron server ::
-
- neutron --os-region-name=CentralRegion port-pair-create --ingress p2 --egress p3 pp1
- neutron --os-region-name=CentralRegion port-pair-create --ingress p4 --egress p5 pp2
-
-- 6 Create port pair groups in central Neutron server ::
-
- neutron --os-region-name=CentralRegion port-pair-group-create --port-pair pp1 ppg1
- neutron --os-region-name=CentralRegion port-pair-group-create --port-pair pp2 ppg2
-
-- 7 Create flow classifier in central Neutron server ::
-
- neutron --os-region-name=CentralRegion flow-classifier-create --source-ip-prefix 10.0.0.0/24 --logical-source-port p1 fc1
-
-- 8 Create port chain in central Neutron server ::
-
- neutron --os-region-name=CentralRegion port-chain-create --flow-classifier fc1 --port-pair-group ppg1 --port-pair-group ppg2 pc1
-
-- 9 Show result in CentralRegion, RegionOne and RegionTwo ::
-
- neutron --os-region-name=CentralRegion port-chain-list
- neutron --os-region-name=RegionOne port-chain-list
- neutron --os-region-name=RegionTwo port-chain-list
-
- You will find the same port chain in each region.
-
-- 10 Check if the port chain is working
-
- In vm_dst, ping p1's IP address; it should fail.
-
- Enable the forwarding function in vm_sfc1 and vm_sfc2 ::
-
- sudo sh
- echo 1 > /proc/sys/net/ipv4/ip_forward
-
- Add the following route in vm_sfc1 and vm_sfc2 ::
-
- sudo ip route add $p6_ip_address dev eth1
-
- In vm_dst, ping p1's IP address again; it should succeed this time.
-
- .. note:: Not all images will bring up the second NIC, so you can ssh into
- the VM, use "ifconfig -a" to check whether all NICs are up, and bring up
- all NICs if necessary.
- In CirrOS you can type the following command to bring up one NIC. ::
-
- sudo cirros-dhcpc up $nic_name
diff --git a/doc/source/networking/vlan-aware-vms-guide.rst b/doc/source/networking/vlan-aware-vms-guide.rst
deleted file mode 100644
index 59694765..00000000
--- a/doc/source/networking/vlan-aware-vms-guide.rst
+++ /dev/null
@@ -1,79 +0,0 @@
-====================
-VLAN aware VMs Guide
-====================
-
-A VLAN aware VM is a VM that sends and receives VLAN tagged frames over its
-vNIC. The main point of this is to overcome the limitations of the current
-one-vNIC-per-network model. A VLAN (or other encapsulation) aware VM can
-differentiate between the traffic of many networks by different
-encapsulation types and IDs, instead of using many vNICs. This approach
-scales to a higher number of networks and enables dynamic handling of
-network attachments (without hotplugging vNICs).
-
-Installation
-^^^^^^^^^^^^
-
-No additional installation is required. Please refer to the Tricircle
-installation guide to install Tricircle, then configure the Neutron server
-to enable the trunk extension.
-
-Configuration
-^^^^^^^^^^^^^
-
-- 1 Configure central Neutron server
-
- Edit neutron.conf, add the following configuration and then restart the
- central Neutron server.
-
- .. csv-table::
- :header: "Option", "Description", "Example"
-
- [DEFAULT] service_plugins, "service plugin central Neutron server uses", tricircle.network.central_trunk_plugin.TricircleTrunkPlugin
-
-- 2 Configure local Neutron server
-
- Edit neutron.conf, add the following configuration and then restart the
- local Neutron server.
-
- .. csv-table::
- :header: "Option", "Description", "Example"
-
- [DEFAULT] service_plugins, "service plugin local Neutron server uses", trunk
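-
- For example, the resulting ``service_plugins`` settings might look like
- the following, with any other plugins you already use omitted here for
- brevity::
-
- # central Neutron server
- service_plugins = tricircle.network.central_trunk_plugin.TricircleTrunkPlugin
-
- # local Neutron server
- service_plugins = trunk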
-
-How to play
-^^^^^^^^^^^
-
-- 1 Create pods via Tricircle Admin API
-
-- 2 Create necessary resources in central Neutron server ::
-
- neutron --os-region-name=CentralRegion net-create --provider:network_type vlan net1
- neutron --os-region-name=CentralRegion subnet-create net1 10.0.1.0/24
- neutron --os-region-name=CentralRegion port-create net1 --name p1
- neutron --os-region-name=CentralRegion net-create --provider:network_type vlan net2
- neutron --os-region-name=CentralRegion subnet-create net2 10.0.2.0/24
- neutron --os-region-name=CentralRegion port-create net2 --name p2
-
- Please note that the network type must be vlan. The ports p1 and p2 and
- net2's provider segmentation_id will be used in later steps to create the
- trunk and boot the VM; one way to look up the segmentation_id is sketched
- below.
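-
- For example, net2's provider segmentation ID (referred to below as
- $net2_segment_id) could be retrieved like this::
-
- openstack --os-region-name=CentralRegion network show net2 -c provider:segmentation_id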
-
-- 3 Create trunk in central Neutron server ::
-
- openstack --os-region-name=CentralRegion network trunk create trunk1 --parent-port p1 --subport port=p2,segmentation-type=vlan,segmentation-id=$net2_segment_id
-
-- 4 Get the image ID and flavor ID which will be used in VM booting. In the
- following step, the trunk is to be used by the VM in RegionOne; you can
- replace RegionOne with another region's name if you want to boot the VLAN
- aware VM in another region. ::
-
- glance --os-region-name=RegionOne image-list
- nova --os-region-name=RegionOne flavor-list
-
-- 5 Boot virtual machines ::
-
- nova --os-region-name=RegionOne boot --flavor 1 --image $image1_id --nic port-id=$p1_id vm1
-
-- 6 Show result on CentralRegion and RegionOne ::
-
- openstack --os-region-name=CentralRegion network trunk show trunk1
- openstack --os-region-name=RegionOne network trunk show trunk1
-
- The result will be the same, except for the trunk id.
\ No newline at end of file
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
deleted file mode 100644
index 0c408000..00000000
--- a/doc/source/user/index.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-====================
-Tricircle User Guide
-====================
-
-.. toctree::
- :maxdepth: 3
-
- readme
- usage
diff --git a/doc/source/user/readme.rst b/doc/source/user/readme.rst
deleted file mode 100644
index 16aa8cde..00000000
--- a/doc/source/user/readme.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-.. include:: ../../../README.rst
- :start-line: 10
diff --git a/doc/source/user/usage.rst b/doc/source/user/usage.rst
deleted file mode 100644
index 36df369f..00000000
--- a/doc/source/user/usage.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-=====
-Usage
-=====
-
-To use tricircle in a project::
-
- import tricircle
diff --git a/etc/api-cfg-gen.conf b/etc/api-cfg-gen.conf
deleted file mode 100644
index 5070824c..00000000
--- a/etc/api-cfg-gen.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-[DEFAULT]
-output_file = etc/api.conf.sample
-wrap_width = 79
-namespace = tricircle.api
-namespace = tricircle.common
-namespace = tricircle.db
-namespace = oslo.log
-namespace = oslo.messaging
-namespace = oslo.policy
-namespace = oslo.service.periodic_task
-namespace = oslo.service.service
-namespace = oslo.service.sslutils
-namespace = oslo.db
-namespace = oslo.middleware
-namespace = oslo.concurrency
-namespace = keystonemiddleware.auth_token
diff --git a/etc/policy-generator.conf b/etc/policy-generator.conf
deleted file mode 100644
index 040ca211..00000000
--- a/etc/policy-generator.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-output_file = etc/tricircle-policy.yaml.sample
-namespace = tricircle
diff --git a/etc/xjob-cfg-gen.conf b/etc/xjob-cfg-gen.conf
deleted file mode 100644
index dc1ed8a5..00000000
--- a/etc/xjob-cfg-gen.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-[DEFAULT]
-output_file = etc/xjob.conf.sample
-wrap_width = 79
-namespace = tricircle.xjob
-namespace = tricircle.common
-namespace = oslo.log
-namespace = oslo.messaging
-namespace = oslo.policy
-namespace = oslo.service.periodic_task
-namespace = oslo.service.service
-namespace = oslo.service.sslutils
-namespace = oslo.db
-namespace = oslo.middleware
-namespace = oslo.concurrency
-namespace = keystonemiddleware.auth_token
diff --git a/index.rst b/index.rst
deleted file mode 100644
index 63038c86..00000000
--- a/index.rst
+++ /dev/null
@@ -1,44 +0,0 @@
-.. tricircle documentation master file, created by
- sphinx-quickstart on Wed Dec 2 17:00:36 2015.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
-=====================================
-Welcome to Tricircle's documentation!
-=====================================
-
-User Documentation
-==================
-.. toctree::
- :maxdepth: 3
-
- user/index
-
-Contributor Guide
-=================
-.. toctree::
- :maxdepth: 1
-
- contributor/index
-
-Admin Guide
-===========
-.. toctree::
- :maxdepth: 3
-
- admin/index
-
-Installation Guide
-==================
-.. toctree::
- :maxdepth: 3
-
- install/index
-
-Configuration Guide
-===================
-.. toctree::
- :maxdepth: 3
-
- configuration/index
-
diff --git a/lower-constraints.txt b/lower-constraints.txt
deleted file mode 100644
index e01e89b2..00000000
--- a/lower-constraints.txt
+++ /dev/null
@@ -1,154 +0,0 @@
-alabaster==0.7.10
-alembic==0.8.10
-amqp==2.1.1
-appdirs==1.3.0
-astroid==1.6.5
-Babel==2.3.4
-bandit==1.1.0
-bashate==0.5.1
-beautifulsoup4==4.6.0
-cachetools==2.0.0
-cliff==2.8.0
-cmd2==0.8.0
-contextlib2==0.4.0
-coverage==4.0
-ddt==1.0.1
-debtcollector==1.19.0
-decorator==3.4.0
-deprecation==1.0
-docutils==0.11
-dogpile.cache==0.6.2
-dulwich==0.15.0
-eventlet==0.18.2
-extras==1.0.0
-fasteners==0.7.0
-fixtures==3.0.0
-future==0.16.0
-futurist==1.2.0
-gitdb==0.6.4
-GitPython==1.0.1
-greenlet==0.4.10
-httplib2==0.9.1
-imagesize==0.7.1
-iso8601==0.1.11
-Jinja2==2.10
-jmespath==0.9.0
-jsonpatch==1.16
-jsonpointer==1.13
-jsonschema==2.6.0
-keystoneauth1==3.4.0;python_version<'3.3'
-keystoneauth1==3.14.0;python_version>'3.3'
-keystonemiddleware==4.17.0
-kombu==4.0.0
-linecache2==1.0.0
-logilab-common==1.4.1
-logutils==0.3.5
-Mako==0.4.0
-MarkupSafe==1.0
-mccabe==0.2.1
-mock==3.0.0
-monotonic==0.6;python_version<'3.3'
-mox3==0.20.0
-msgpack-python==0.4.0
-munch==2.1.0
-netaddr==0.7.18
-netifaces==0.10.4
-networking-sfc==8.0.0.0b1
-neutron-lib==1.25.0;python_version<'3.3'
-neutron-lib==1.29.1;python_version>'3.3'
-openstackdocstheme==1.30.0
-openstacksdk==0.31.2
-os-client-config==1.28.0
-os-service-types==1.7.0
-os-xenapi==0.3.1
-osc-lib==1.8.0
-oslo.cache==1.26.0
-oslo.concurrency==3.26.0
-oslo.config==5.2.0
-oslo.context==2.19.2
-oslo.db==4.37.0
-oslo.i18n==3.15.3
-oslo.log==3.36.0
-oslo.messaging==5.29.0
-oslo.middleware==3.31.0
-oslo.policy==1.30.0
-oslo.privsep==1.32.0
-oslo.reports==1.18.0
-oslo.rootwrap==5.8.0
-oslo.serialization==2.18.0
-oslo.service==1.24.0
-oslo.upgradecheck==0.1.1
-oslo.utils==3.33.0
-oslo.versionedobjects==1.35.1
-oslosphinx==4.7.0
-oslotest==3.2.0
-osprofiler==2.3.0
-os-testr==1.0.0
-ovs==2.8.0
-ovsdbapp==0.12.1
-Paste==2.0.2
-PasteDeploy==1.5.0
-pbr==4.0.0
-pecan==1.3.2
-pika-pool==0.1.3
-pika==0.10.0
-positional==1.2.1
-prettytable==0.7.2
-psutil==3.2.2
-pycadf==1.1.0
-pycodestyle==2.4.0
-pycparser==2.18
-Pygments==2.2.0
-pyinotify==0.9.6
-pylint==2.2.0
-PyMySQL==0.7.6
-pyparsing==2.1.0
-pyperclip==1.5.27
-pyroute2==0.5.3
-python-cinderclient==3.3.0
-python-dateutil==2.5.3
-python-designateclient==2.7.0
-python-editor==1.0.3
-python-glanceclient==2.8.0
-python-keystoneclient==3.8.0
-python-mimeparse==1.6.0
-python-neutronclient==6.7.0
-python-novaclient==9.1.0
-python-subunit==1.0.0
-pytz==2013.6
-PyYAML==3.12
-reno==2.5.0
-repoze.lru==0.7
-requests==2.14.2
-requests-mock==1.2.0
-requestsexceptions==1.2.0
-rfc3986==0.3.1
-Routes==2.3.1
-ryu==4.24
-simplejson==3.5.1
-six==1.10.0
-smmap==0.9.0
-snowballstemmer==1.2.1
-Sphinx==1.6.5
-sphinxcontrib-websupport==1.0.1
-sqlalchemy-migrate==0.11.0
-SQLAlchemy==1.2.0
-sqlparse==0.2.2
-statsd==3.2.1
-stestr==1.0.0
-stevedore==1.20.0
-Tempita==0.5.2
-tenacity==3.2.1
-testrepository==0.0.18
-testresources==2.0.0
-testscenarios==0.4
-testtools==2.2.0
-tinyrpc==0.6
-traceback2==1.4.0
-unittest2==1.1.0
-vine==1.1.4
-waitress==1.1.0
-weakrefmethod==1.0.2
-WebOb==1.8.2
-WebTest==2.0.27
-wrapt==1.7.0
diff --git a/playbooks/tricircle-dsvm-functional/post.yaml b/playbooks/tricircle-dsvm-functional/post.yaml
deleted file mode 100644
index dac87534..00000000
--- a/playbooks/tricircle-dsvm-functional/post.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/tricircle-dsvm-functional/run.yaml b/playbooks/tricircle-dsvm-functional/run.yaml
deleted file mode 100644
index d9a083be..00000000
--- a/playbooks/tricircle-dsvm-functional/run.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-- hosts: all
- name: Autoconverted job legacy-tricircle-dsvm-functional from old job gate-tricircle-dsvm-functional-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- https://opendev.org \
- openstack/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- cat << 'EOF' >>"/tmp/dg-local.conf"
- [[local|localrc]]
- enable_plugin tricircle https://opendev.org/openstack/tricircle
-
- EOF
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PYTHONUNBUFFERED=true
- export BRANCH_OVERRIDE=default
- export PROJECTS="openstack/tricircle openstack/neutron openstack/networking-sfc $PROJECTS"
- export LIBS_FROM_GIT="neutron,networking-sfc"
- export DEVSTACK_GATE_NEUTRON=1
- export DEVSTACK_GATE_TEMPEST=0
- export DEVSTACK_GATE_TEMPEST_ALL_PLUGINS=0
- export DEVSTACK_GATE_TEMPEST_REGEX="tricircle.tempestplugin"
-
- if [ "$BRANCH_OVERRIDE" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
-
- function pre_test_hook {
- cd /opt/stack/new/tricircle/tricircle/tempestplugin/
- ./pre_test_hook.sh
- }
- export -f pre_test_hook
-
- function post_test_hook {
- cd /opt/stack/new/tricircle/tricircle/tempestplugin/
- ./post_test_hook.sh
- }
- export -f post_test_hook
-
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/playbooks/tricircle-dsvm-multiregion/post.yaml b/playbooks/tricircle-dsvm-multiregion/post.yaml
deleted file mode 100644
index dac87534..00000000
--- a/playbooks/tricircle-dsvm-multiregion/post.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/tricircle-dsvm-multiregion/run.yaml b/playbooks/tricircle-dsvm-multiregion/run.yaml
deleted file mode 100644
index a766186e..00000000
--- a/playbooks/tricircle-dsvm-multiregion/run.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-- hosts: primary
- name: Autoconverted job legacy-tricircle-dsvm-multiregion from old job gate-tricircle-dsvm-multiregion-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- https://opendev.org \
- openstack/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PYTHONUNBUFFERED=true
- export PROJECTS="openstack/tricircle $PROJECTS"
- export PROJECTS="openstack/networking-sfc $PROJECTS"
- export DEVSTACK_GATE_CONFIGDRIVE=0
- export DEVSTACK_GATE_NEUTRON=1
- export DEVSTACK_GATE_USE_PYTHON3=True
- export DEVSTACK_GATE_TEMPEST=0
- export DEVSTACK_GATE_TEMPEST_ALL_PLUGINS=0
- export DEVSTACK_GATE_TEMPEST_REGEX="tricircle.tempestplugin"
-
- # Keep localrc to be able to set some vars in pre_test_hook
- export KEEP_LOCALRC=1
-
- # Enable multinode mode, so that the subnode(the second node)
- # will be configured to run as second region in pre_test_hook.sh
- export DEVSTACK_GATE_TOPOLOGY="multinode"
-
- export BRANCH_OVERRIDE=default
- if [ "$BRANCH_OVERRIDE" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
-
- function gate_hook {
- bash -xe $BASE/new/tricircle/tricircle/tempestplugin/gate_hook.sh
- }
- export -f gate_hook
-
- function post_test_hook {
- bash -xe $BASE/new/tricircle/tricircle/tempestplugin/post_test_hook.sh
- }
- export -f post_test_hook
-
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder
deleted file mode 100644
index e69de29b..00000000
diff --git a/releasenotes/notes/add-lbaas-ebb1009abd3da0dd.yaml b/releasenotes/notes/add-lbaas-ebb1009abd3da0dd.yaml
deleted file mode 100644
index a85c1a19..00000000
--- a/releasenotes/notes/add-lbaas-ebb1009abd3da0dd.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-features:
- - |
- Support LBaaS in the multi-region scenario. To enable adding instances as
- members with a VIP, the amphora routes the traffic sent from the VIP to
- its gateway. However, in Tricircle, the gateway obtained from central
- Neutron is not the real gateway in local Neutron. As a result, only a
- subnet without a gateway is supported as the member subnet. We will
- remove this limitation in the future, and LBaaS working together with
- Nova Cells V2 multi-cell deployments will also be supported in the future.
\ No newline at end of file
diff --git a/releasenotes/notes/add-qos-policy-rule-f8f1529d7ad5d888.yaml b/releasenotes/notes/add-qos-policy-rule-f8f1529d7ad5d888.yaml
deleted file mode 100644
index e734f74f..00000000
--- a/releasenotes/notes/add-qos-policy-rule-f8f1529d7ad5d888.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
- - Provide central Neutron QoS plugin and implement QoS driver. Support QoS
- policy creation, update and delete, QoS policy binding with network or
- port.
diff --git a/releasenotes/notes/add-service-function-chaining-fc2cf9a2e8610b91.yaml b/releasenotes/notes/add-service-function-chaining-fc2cf9a2e8610b91.yaml
deleted file mode 100644
index 8b76edf1..00000000
--- a/releasenotes/notes/add-service-function-chaining-fc2cf9a2e8610b91.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
- - |
- Support service function chaining creation and deletion based on networking-sfc,
- currently all the ports in the port chain need to be in the same network and the
- network type must be VxLAN.
diff --git a/releasenotes/notes/add-vlan-aware-vms-afa8c5a906f2ab49.yaml b/releasenotes/notes/add-vlan-aware-vms-afa8c5a906f2ab49.yaml
deleted file mode 100644
index 1fa8828d..00000000
--- a/releasenotes/notes/add-vlan-aware-vms-afa8c5a906f2ab49.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
- - |
- Support VLAN aware VMs
\ No newline at end of file
diff --git a/releasenotes/notes/asynchronous-job-management-api-c16acb43b495af7c.yaml b/releasenotes/notes/asynchronous-job-management-api-c16acb43b495af7c.yaml
deleted file mode 100644
index 7b277c7f..00000000
--- a/releasenotes/notes/asynchronous-job-management-api-c16acb43b495af7c.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-features:
- - |
- Asynchronous job management API allows administrator
- to perform CRUD operations on a job. For jobs in job
- log, only list and show operations are allowed.
-
- * Create a job
- * List jobs
- * Show job details
- * Delete a job
- * Redo a job
diff --git a/releasenotes/notes/combine-bridge-network-c137a03f067c49a7.yaml b/releasenotes/notes/combine-bridge-network-c137a03f067c49a7.yaml
deleted file mode 100644
index 864e4aee..00000000
--- a/releasenotes/notes/combine-bridge-network-c137a03f067c49a7.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
- - North-south bridge network and east-west bridge network are combined into
- one to bring better DVR and shared VxLAN network support.
diff --git a/releasenotes/notes/drop-py-2-7-afccd2f6bec9dfb5.yaml b/releasenotes/notes/drop-py-2-7-afccd2f6bec9dfb5.yaml
deleted file mode 100644
index 5fbccdf3..00000000
--- a/releasenotes/notes/drop-py-2-7-afccd2f6bec9dfb5.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-upgrade:
- - |
- Python 2.7 support has been dropped. Last release of Tricircle
- to support py2.7 is OpenStack Train. The minimum version of Python now
- supported by Tricircle is Python 3.6.
diff --git a/releasenotes/notes/enable-allowed-address-pairs-bca659413012b06c.yaml b/releasenotes/notes/enable-allowed-address-pairs-bca659413012b06c.yaml
deleted file mode 100644
index 8edf3a9c..00000000
--- a/releasenotes/notes/enable-allowed-address-pairs-bca659413012b06c.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
- - |
- Enable allowed-address-pairs in the central plugin.
diff --git a/releasenotes/notes/enable-router-az-and-simplify-net-topology-5ac8739b167e3e4a.yaml b/releasenotes/notes/enable-router-az-and-simplify-net-topology-5ac8739b167e3e4a.yaml
deleted file mode 100644
index d57c067f..00000000
--- a/releasenotes/notes/enable-router-az-and-simplify-net-topology-5ac8739b167e3e4a.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-features:
- - |
- Router
-
- * Support availability zone for router
- * A local router, which resides only inside one region, can be
- attached to an external network directly; no additional intermediate
- router is needed.
diff --git a/releasenotes/notes/enable-update-default-securitygroup-9bb426021926d3e8.yaml b/releasenotes/notes/enable-update-default-securitygroup-9bb426021926d3e8.yaml
deleted file mode 100644
index c122dd74..00000000
--- a/releasenotes/notes/enable-update-default-securitygroup-9bb426021926d3e8.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-features:
- - Support updating default security group using asynchronous methods.
diff --git a/releasenotes/notes/flat-network-8634686c1fede7b2.yaml b/releasenotes/notes/flat-network-8634686c1fede7b2.yaml
deleted file mode 100644
index 20878f2a..00000000
--- a/releasenotes/notes/flat-network-8634686c1fede7b2.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
- - |
- Support flat type of tenant network or external network
diff --git a/releasenotes/notes/initial-release-notes-bd28a4a4bf1f84d2.yaml b/releasenotes/notes/initial-release-notes-bd28a4a4bf1f84d2.yaml
deleted file mode 100644
index 6c7ae437..00000000
--- a/releasenotes/notes/initial-release-notes-bd28a4a4bf1f84d2.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
----
-prelude: >
- The Tricircle provides networking automation across Neutron
- in OpenStack multi-region deployments.
-features:
- - |
- Network
-
- * List networks
- * Create network
- * Show network details
- * Delete network
-
- - |
- Subnet
-
- * List subnets
- * Create subnet
- * Show subnet details
- * Delete subnet
-
- - |
- Port
-
- * List ports
- * Create port
- * Show port details
- * Delete port
-
- - |
- Router
-
- * List routers
- * Create router
- * Show router details
- * Delete router
- * Add interface to router
- * Delete interface from router
- * List floating IPs
- * Create floating IP
- * Show floating IP details
- * Update floating IP
- * Delete floating IP
-
- - |
- Security Group
-
- * List security groups
- * Create security group
- * Show security group details
- * List security group rules
- * Create security group rule
- * Delete security group rule
-
- - |
- Note for Networking
-
- * Only Local Network and VLAN network are supported.
- Local Network means the network is only present in one region;
- it could be a VxLAN or VLAN network.
- VLAN is the only L2 network type which supports cross
- Neutron L2 networking and the bridge network for L3 networking.
- * Pagination and sort are not supported at the same time for list
- operation.
- * For security group rule, remote group is not supported yet. Use IP
- prefix to create security group rule.
- * One availability zone can include more than one region through
- Tricircle pod management.
- * An availability zone or region name can be specified as the
- availability zone hint during network creation, which means this
- network will be presented in the specified list of availability zones
- or regions. If no availability zone hint is specified and the network
- is not a Local Network, then the network can be spread into all
- regions. For a Local Network created without an availability zone
- hint, the network will only be presented in the first region where a
- resource (VM, baremetal or container) is booted and plugged into this
- network.
- * One region name needs to be specified as the availability zone hint
- for external network creation, which means the external network will
- be located in the specified region.
-issues:
- - refer to https://bugs.launchpad.net/tricircle
diff --git a/releasenotes/notes/multi-gateway-ns-networking-fbd876c7659a55a9.yaml b/releasenotes/notes/multi-gateway-ns-networking-fbd876c7659a55a9.yaml
deleted file mode 100644
index 35921d6b..00000000
--- a/releasenotes/notes/multi-gateway-ns-networking-fbd876c7659a55a9.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
- Support the network topology in which each OpenStack cloud provides an
- external network for the tenant's north-south traffic, while east-west
- networking of tenant networks among OpenStack clouds is also enabled.
diff --git a/releasenotes/notes/network-subnet-update-baed5ded548f7269.yaml b/releasenotes/notes/network-subnet-update-baed5ded548f7269.yaml
deleted file mode 100644
index 7834dc8e..00000000
--- a/releasenotes/notes/network-subnet-update-baed5ded548f7269.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-features:
- - |
- Network
-
- * Update networks
-
- * qos-policy not supported
-
- - |
- Subnet
-
- * Update subnets
-
-issues:
- - |
- Updating a network or subnet may not lead to the expected result if an
- instance is being booted at the same time. You can redo the update
- operation later to make it take effect correctly.
-
-
diff --git a/releasenotes/notes/port-base-update-6668b76c2346633c.yaml b/releasenotes/notes/port-base-update-6668b76c2346633c.yaml
deleted file mode 100644
index 3a609d0c..00000000
--- a/releasenotes/notes/port-base-update-6668b76c2346633c.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-features:
- - |
- Port
-
- * Update port
-
- * name, description, admin_state_up, extra_dhcp_opts, device_owner,
- device_id, mac_address, security group attribute updates supported
-
-issues:
- - |
- Updating a port may not lead to the expected result if an instance is
- being booted at the same time. You can redo the update operation later
- to make it take effect correctly.
\ No newline at end of file
diff --git a/releasenotes/notes/resource-routing-operation-649eb810911312ec.yaml b/releasenotes/notes/resource-routing-operation-649eb810911312ec.yaml
deleted file mode 100644
index 164c240a..00000000
--- a/releasenotes/notes/resource-routing-operation-649eb810911312ec.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-features:
- - |
- Resource routing APIs add operations on the resource routing
- table. They make it possible for the cloud administrator to
- create, show, delete and update resource routing entries for
- maintenance and emergency fixes. However, updating or deleting
- entries generated by the Tricircle itself is not recommended,
- because without such a routing entry central Neutron may make a
- wrong judgement on whether the resource exists, and the related
- request cannot be forwarded to the proper local Neutron either.
- So even though the update and delete operations are provided,
- they had better not be used, to avoid unexpected problems.
-
- * List resource routings
- * Create resource routing
- * Show resource routing details
- * Delete resource routing
- * Update resource routing
diff --git a/releasenotes/notes/support-pagination-for-async-job-81728e9cb7aef731.yaml b/releasenotes/notes/support-pagination-for-async-job-81728e9cb7aef731.yaml
deleted file mode 100644
index 9d7e4e8e..00000000
--- a/releasenotes/notes/support-pagination-for-async-job-81728e9cb7aef731.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
- - |
- Support pagination for the asynchronous job list operation. Jobs in the
- job table will be shown ahead of those in the job log table. If the page
- size is not specified by the client, then the maximum pagination limit
- from the configuration will be used.
-
diff --git a/releasenotes/notes/support-pagination-for-resource-routing-list-13bcb0f1897dedf8.yaml b/releasenotes/notes/support-pagination-for-resource-routing-list-13bcb0f1897dedf8.yaml
deleted file mode 100644
index 672133f1..00000000
--- a/releasenotes/notes/support-pagination-for-resource-routing-list-13bcb0f1897dedf8.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
- - |
- Support pagination for the resource routing list operation. If the page
- size is not specified by the client, then the maximum pagination limit
- from the configuration will be used.
diff --git a/releasenotes/notes/support-wsgi-deployment-21eb19bcb04932f0.yaml b/releasenotes/notes/support-wsgi-deployment-21eb19bcb04932f0.yaml
deleted file mode 100644
index fa390352..00000000
--- a/releasenotes/notes/support-wsgi-deployment-21eb19bcb04932f0.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-prelude: >
- Tricircle Admin API now supports WSGI deployment. The endpoint of
- Tricircle Admin API could be accessed via the format of
- http://host/tricircle, and no need to expose special port, thus
- reduce the risk of port management.
diff --git a/releasenotes/notes/tricircle-status-upgrade-check-framework-13ee546a958d17f7.yaml b/releasenotes/notes/tricircle-status-upgrade-check-framework-13ee546a958d17f7.yaml
deleted file mode 100644
index 4ad407e1..00000000
--- a/releasenotes/notes/tricircle-status-upgrade-check-framework-13ee546a958d17f7.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-prelude: >
- Added new tool ``tricircle-status upgrade check``.
-features:
- - |
- A new framework for the ``tricircle-status upgrade check`` command is added.
- This framework allows adding various checks which can be run before a
- Tricircle upgrade to determine whether the upgrade can be performed safely.
-upgrade:
- - |
- Operators can now use the new CLI tool ``tricircle-status upgrade check``
- to check whether a Tricircle deployment can be safely upgraded from
- release N-1 to N.
diff --git a/releasenotes/notes/vxlan-network-2a21433b4b691f72.yaml b/releasenotes/notes/vxlan-network-2a21433b4b691f72.yaml
deleted file mode 100644
index 1d76e74e..00000000
--- a/releasenotes/notes/vxlan-network-2a21433b4b691f72.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
- - |
- Support the VxLAN network type so that tenant networks and bridge networks
- can be stretched across multiple OpenStack clouds.
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder
deleted file mode 100644
index e69de29b..00000000
diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder
deleted file mode 100644
index e69de29b..00000000
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
deleted file mode 100644
index 0f80729e..00000000
--- a/releasenotes/source/conf.py
+++ /dev/null
@@ -1,279 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Glance Release Notes documentation build configuration file, created by
-# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-# sys.path.insert(0, os.path.abspath('.'))
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
- 'openstackdocstheme',
- 'reno.sphinxext',
-]
-
-# openstackdocstheme options
-repository_name = 'openstack/tricircle'
-bug_project = 'tricircle'
-bug_tag = ''
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-# source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'The Tricircle Release Notes'
-copyright = u'2016, OpenStack Foundation'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-# The full version, including alpha/beta/rc tags.
-release = ''
-# The short X.Y version.
-version = ''
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-# language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-# today = ''
-# Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-# default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-# add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-# show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
-
-# If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False
-
-
-# -- Options for HTML output ----------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'openstackdocs'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-# html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# " v documentation".
-# html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-# html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-# html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-# html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# Add any extra paths that contain custom files (such as robots.txt or
-# .htaccess) here, relative to this directory. These files are copied
-# directly to the root of the documentation.
-# html_extra_path = []
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-# html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-# html_additional_pages = {}
-
-# If false, no module index is generated.
-# html_domain_indices = True
-
-# If false, no index is generated.
-# html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-# html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-# html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-# html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'GlanceReleaseNotesdoc'
-
-
-# -- Options for LaTeX output ---------------------------------------------
-
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- # 'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- # 'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- # 'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-# author, documentclass [howto, manual, or own class]).
-latex_documents = [
- ('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation',
- u'Glance Developers', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-# latex_use_parts = False
-
-# If true, show page references after internal links.
-# latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-# latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-# latex_appendices = []
-
-# If false, no module index is generated.
-# latex_domain_indices = True
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'glancereleasenotes', u'Glance Release Notes Documentation',
- [u'Glance Developers'], 1)
-]
-
-# If true, show URL addresses after external links.
-# man_show_urls = False
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- ('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation',
- u'Glance Developers', 'GlanceReleaseNotes',
- 'One line description of project.',
- 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-# texinfo_appendices = []
-
-# If false, no module index is generated.
-# texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-# texinfo_no_detailmenu = False
-
-# -- Options for Internationalization output ------------------------------
-locale_dirs = ['locale/']
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
deleted file mode 100644
index 7dd2c374..00000000
--- a/releasenotes/source/index.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-..
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-========================
- Tricircle Release Notes
-========================
-
-.. toctree::
- :maxdepth: 1
-
- unreleased
- ussuri
- train
- stein
- rocky
- queens
- pike
- ocata
diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst
deleted file mode 100644
index ebe62f42..00000000
--- a/releasenotes/source/ocata.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-===================================
- Ocata Series Release Notes
-===================================
-
-.. release-notes::
- :branch: origin/stable/ocata
diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst
deleted file mode 100644
index e43bfc0c..00000000
--- a/releasenotes/source/pike.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-===================================
- Pike Series Release Notes
-===================================
-
-.. release-notes::
- :branch: stable/pike
diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst
deleted file mode 100644
index 36ac6160..00000000
--- a/releasenotes/source/queens.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-===================================
- Queens Series Release Notes
-===================================
-
-.. release-notes::
- :branch: stable/queens
diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst
deleted file mode 100644
index 40dd517b..00000000
--- a/releasenotes/source/rocky.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-===================================
- Rocky Series Release Notes
-===================================
-
-.. release-notes::
- :branch: stable/rocky
diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst
deleted file mode 100644
index efaceb66..00000000
--- a/releasenotes/source/stein.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-===================================
- Stein Series Release Notes
-===================================
-
-.. release-notes::
- :branch: stable/stein
diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst
deleted file mode 100644
index 58390039..00000000
--- a/releasenotes/source/train.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-==========================
-Train Series Release Notes
-==========================
-
-.. release-notes::
- :branch: stable/train
diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst
deleted file mode 100644
index cd22aabc..00000000
--- a/releasenotes/source/unreleased.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-==============================
- Current Series Release Notes
-==============================
-
-.. release-notes::
diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst
deleted file mode 100644
index e21e50e0..00000000
--- a/releasenotes/source/ussuri.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-===========================
-Ussuri Series Release Notes
-===========================
-
-.. release-notes::
- :branch: stable/ussuri
diff --git a/reno.yaml b/reno.yaml
deleted file mode 100644
index 2ce04486..00000000
--- a/reno.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-closed_branch_tag_re: "(.+)"
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
--- a/requirements.txt
+++ /dev/null
-pbr!=2.1.0,>=4.0.0 # Apache-2.0
-Babel!=2.4.0,>=2.3.4 # BSD
-
-Paste>=2.0.2 # MIT
-PasteDeploy>=1.5.0 # MIT
-Routes>=2.3.1 # MIT
-debtcollector>=1.19.0 # Apache-2.0
-eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT
-pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.3.2 # BSD
-requests>=2.14.2 # Apache-2.0
-Jinja2>=2.10 # BSD License (3 clause)
-keystonemiddleware>=4.17.0 # Apache-2.0
-netaddr>=0.7.18 # BSD
-netifaces>=0.10.4 # MIT
-SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.2.0 # MIT
-WebOb>=1.8.2 # MIT
-python-cinderclient>=3.3.0 # Apache-2.0
-python-glanceclient>=2.8.0 # Apache-2.0
-python-keystoneclient>=3.8.0 # Apache-2.0
-python-neutronclient>=6.7.0 # Apache-2.0
-python-novaclient>=9.1.0 # Apache-2.0
-alembic>=0.8.10 # MIT
-six>=1.10.0 # MIT
-stevedore>=1.20.0 # Apache-2.0
-oslo.concurrency>=3.26.0 # Apache-2.0
-oslo.config>=5.2.0 # Apache-2.0
-oslo.context>=2.19.2 # Apache-2.0
-oslo.db>=4.37.0 # Apache-2.0
-oslo.i18n>=3.15.3 # Apache-2.0
-oslo.log>=3.36.0 # Apache-2.0
-oslo.messaging>=5.29.0 # Apache-2.0
-oslo.middleware>=3.31.0 # Apache-2.0
-oslo.policy>=1.30.0 # Apache-2.0
-oslo.rootwrap>=5.8.0 # Apache-2.0
-oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
-oslo.upgradecheck>=0.1.1 # Apache-2.0
-oslo.utils>=3.33.0 # Apache-2.0
-sqlalchemy-migrate>=0.11.0 # Apache-2.0
-
-# These repos are installed from git in OpenStack CI if the job
-# configures them as required-projects:
-#keystoneauth1>=3.4.0;python_version<'3.3' # Apache-2.0
-#neutron-lib>=1.29.1;python_version>'3.3' # Apache-2.0
-neutron>=12.0.0 # Apache-2.0
-networking-sfc>=8.0.0.0b1 # Apache-2.0
-
-# The comment below indicates this project repo is current with neutron-lib
-# and should receive neutron-lib consumption patches as they are released
-# in neutron-lib. It also implies the project will stay current with TC
-# and infra initiatives ensuring consumption patches can land.
-# neutron-lib-current
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index e48811db..00000000
--- a/setup.cfg
+++ /dev/null
@@ -1,74 +0,0 @@
-[metadata]
-name = tricircle
-summary = The Tricircle is to provide networking automation across Neutron in multi-region OpenStack deployments.
-description-file = README.rst
-author = OpenStack
-author-email = openstack-discuss@lists.openstack.org
-home-page = https://docs.openstack.org/tricircle/latest/
-classifier =
- Environment :: OpenStack
- Intended Audience :: Information Technology
- Intended Audience :: System Administrators
- License :: OSI Approved :: Apache Software License
- Operating System :: POSIX :: Linux
- Programming Language :: Python
- Programming Language :: Python :: 3
- Programming Language :: Python :: 3.6
- Programming Language :: Python :: 3.7
-
-[files]
-packages =
- tricircle
-
-[build_sphinx]
-source-dir = doc/source
-build-dir = doc/build
-all_files = 1
-warning-is-error = 1
-
-[upload_sphinx]
-upload-dir = doc/build/html
-
-[compile_catalog]
-directory = tricircle/locale
-domain = tricircle
-
-[update_catalog]
-domain = tricircle
-output_dir = tricircle/locale
-input_file = tricircle/locale/tricircle.pot
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = tricircle/locale/tricircle.pot
-
-[entry_points]
-console_scripts =
- tricircle-api = tricircle.cmd.api:main
- tricircle-db-manage = tricircle.cmd.manage:main
- tricircle-status = tricircle.cmd.status:main
- tricircle-xjob = tricircle.cmd.xjob:main
-wsgi_scripts =
- tricircle-api-wsgi = tricircle.api.wsgi:init_application
-oslo.config.opts =
- tricircle.api = tricircle.api.opts:list_opts
- tricircle.common = tricircle.common.opts:list_opts
- tricircle.db = tricircle.db.opts:list_opts
- tricircle.network = tricircle.network.opts:list_opts
- tricircle.xjob = tricircle.xjob.opts:list_opts
-oslo.policy.policies =
- tricircle = tricircle.common.policy:list_policies
-tricircle.network.type_drivers =
- local = tricircle.network.drivers.type_local:LocalTypeDriver
- vlan = tricircle.network.drivers.type_vlan:VLANTypeDriver
- vxlan = tricircle.network.drivers.type_vxlan:VxLANTypeDriver
- flat = tricircle.network.drivers.type_flat:FlatTypeDriver
-tricircle.network.extension_drivers =
- qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver
-networking_sfc.flowclassifier.drivers =
- tricircle_fc = tricircle.network.central_fc_driver:TricircleFcDriver
-networking_sfc.sfc.drivers =
- tricircle_sfc = tricircle.network.central_sfc_driver:TricircleSfcDriver
-networking_trunk.trunk.drivers =
- tricircle_tk = tricircle.network.central_trunk_driver:TricircleTrunkDriver
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 566d8443..00000000
--- a/setup.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
- import multiprocessing # noqa
-except ImportError:
- pass
-
-setuptools.setup(
- setup_requires=['pbr>=2.0.0'],
- pbr=True)
diff --git a/specs/newton/cross-neutron-l2-networking.rst b/specs/newton/cross-neutron-l2-networking.rst
deleted file mode 100644
index df393a73..00000000
--- a/specs/newton/cross-neutron-l2-networking.rst
+++ /dev/null
@@ -1,564 +0,0 @@
-========================================
-Cross Neutron L2 networking in Tricircle
-========================================
-
-Background
-==========
-The Tricircle provides a unified OpenStack API gateway and networking
-automation functionality. These capabilities allow cloud operators to manage
-multiple OpenStack instances, running in one site or multiple sites, as a
-single OpenStack cloud.
-
-Each bottom OpenStack instance which is managed by the Tricircle is also called
-a pod.
-
-The Tricircle has the following components:
-
-* Nova API-GW
-* Cinder API-GW
-* Neutron API Server with Neutron Tricircle plugin
-* Admin API
-* XJob
-* DB
-
-Nova API-GW triggers automatic networking creation when new VMs are being
-provisioned. The Neutron Tricircle plug-in creates cross Neutron L2/L3
-networking for new VMs. After the binding of tenant-id and pod is finished in
-the Tricircle, Cinder API-GW and Nova API-GW pass the Cinder or Nova API
-request to the appropriate bottom OpenStack instance.
-
-Please refer to the Tricircle design blueprint[1], especially
-'7. Stateless Architecture Proposal', for a detailed description of each
-component.
-
-
-Problem Description
-===================
-When a user creates a network in the Neutron API Server, the user can specify
-'availability_zone_hints' (AZ or az will be used as shorthand for availability
-zone) during network creation[5]. In the Tricircle, the 'az_hints' attribute
-indicates which AZs the network should be spread into, which is slightly
-different from its meaning in Neutron[5]. If no 'az_hints' is specified during
-network creation, the created network can be spread into any AZ. If a list of
-'az_hints' is given during network creation, the network should be able to be
-spread into the AZs suggested by that list.
-
-When a user creates VM or Volume, there is also one parameter called
-availability zone. The AZ parameter is used for Volume and VM co-location, so
-that the Volume and VM will be created into same bottom OpenStack instance.
-
-When a VM is being attached to a network, the Tricircle checks whether the
-VM's AZ is within the network's AZ scope. If it is not, the VM creation is
-rejected.
-
-Currently, the Tricircle only supports one pod in one AZ and only supports a
-network associated with one AZ. That means a tenant's network is presented in
-only one bottom OpenStack instance, so all VMs connected to the network are
-located in that one bottom OpenStack instance.
-If there is more than one pod in one AZ, refer to the dynamic pod binding[6].
-
-There are lots of use cases where a tenant needs a network being able to be
-spread out into multiple bottom OpenStack instances in one AZ or multiple AZs.
-
-* Capacity expansion: as tenants add more and more VMs, the capacity of one
- OpenStack instance may not be enough, so a new OpenStack instance has to be
- added to the cloud. But the tenant still wants to add new VMs into the same
- network.
-
-* Cross Neutron network service chaining. Service chaining is based on
- port-pairs. Leveraging the cross Neutron L2 networking capability provided
- by the Tricircle, the chaining could also be done across sites. For example,
- vRouter1 in pod1 and vRouter2 in pod2 could be chained.
-
-* Applications are often required to run in different availability zones to
- achieve high availability. An application needs to be designed as
- Active-Standby/Active-Active/N-Way to achieve high availability, and some
- components inside the application are designed to work as a distributed
- cluster. This design typically leads to state replication or heartbeat
- traffic among application components (directly, via replicated database
- services, or via a privately designed message format). When this kind of
- application is deployed across multiple OpenStack instances, cross Neutron
- L2 networking is needed to support the heartbeat or state replication.
-
-* When a tenant's VMs are provisioned in different OpenStack instances, there
- is E-W (East-West) traffic between these VMs. The E-W traffic should be
- visible only to the tenant, so isolation is needed. If the traffic goes
- N-S (North-South) via a tenant-level VPN, the overhead is too high, and the
- orchestration of multiple site-to-site VPN connections is also complicated.
- Therefore cross Neutron L2 networking that bridges the tenant's routers in
- different Neutron servers can provide more lightweight isolation.
-
-* In a hybrid cloud, there is a cross L2 networking requirement between the
- private OpenStack and the public OpenStack. Cross Neutron L2 networking
- helps VM migration in this case, since it's not necessary to change the
- IP/MAC/Security Group configuration during the migration.
-
-The dynamic pod binding spec[6] explains how one AZ can support more than one
-pod, and how to schedule a proper pod during VM or volume creation.
-
-This spec deals with the cross Neutron L2 networking automation in
-the Tricircle.
-
-The simplest way to spread L2 networking out to multiple OpenStack instances
-is to use the same VLAN. But there are a lot of limitations: (1) the number
-of VLAN segments is limited, (2) the VLAN network itself is not well suited
-to spanning multiple sites, although you can use some gateways to do so.
-
-So flexible tenant-level L2 networking across multiple Neutron servers in
-one site or in multiple sites is needed.
-
-Proposed Change
-===============
-
-Cross Neutron L2 networking can be divided into three categories,
-``VLAN``, ``Shared VxLAN`` and ``Mixed VLAN/VxLAN``.
-
-* VLAN
-
- Network in each bottom OpenStack is VLAN type and has the same VLAN ID.
- If we want VLAN L2 networking to work in multi-site scenario, i.e.,
- Multiple OpenStack instances in multiple sites, physical gateway needs to
- be manually configured to make one VLAN networking be extended to other
- sites.
-
- *Manual setup physical gateway is out of the scope of this spec*
-
-* Shared VxLAN
-
- Network in each bottom OpenStack instance is VxLAN type and has the same
- VxLAN ID.
-
- Leverage L2GW[2][3] to implement this type of L2 networking.
-
-* Mixed VLAN/VxLAN
-
- Network in each bottom OpenStack instance may have different types and/or
- have different segment IDs.
-
- Leverage L2GW[2][3] to implement this type of L2 networking.
-
-There is another network type called “Local Network”. A “Local Network” is
-presented in only one bottom OpenStack instance and will not be presented in
-other bottom OpenStack instances. If a VM in another pod tries to attach to
-the “Local Network”, the attachment will fail. This type is quite useful for
-the scenario in which cross Neutron L2 networking is not required and one AZ
-does not include more than one bottom OpenStack instance.
-
-Cross Neutron L2 networking can be established dynamically while a tenant's
-VM is being provisioned.
-
-There is assumption here that only one type of L2 networking will work in one
-cloud deployment.
-
-
-A Cross Neutron L2 Networking Creation
---------------------------------------
-
-A cross Neutron L2 network is created using the az_hint attribute of the
-network. If az_hint includes one or more AZs, the network is presented only
-in this AZ or these AZs; if az_hint contains no AZ, the network can be
-extended to any bottom OpenStack.
-
-There is a special use case for external network creation. For external
-network creation, you need to specify the pod_id rather than the AZ in the
-az_hint so that the external network is created only in the one specified
-pod per AZ.
-
- *Support of External network in multiple OpenStack instances in one AZ
- is out of scope of this spec.*
-
-Pluggable L2 networking framework is proposed to deal with three types of
-L2 cross Neutron networking, and it should be compatible with the
-``Local Network``.
-
-1. Type Driver under Tricircle Plugin in Neutron API server
-
-* Type driver to distinguish the different types of cross Neutron L2
- networking, so the Tricircle plugin needs to load type drivers according to
- the configuration. The Tricircle can reuse the ML2 type drivers with updates.
-
-* Type driver to allocate VLAN segment id for VLAN L2 networking.
-
-* Type driver to allocate VxLAN segment id for shared VxLAN L2 networking.
-
-* Type driver for mixed VLAN/VxLAN to allocate VxLAN segment id for the
- network connecting L2GWs[2][3].
-
-* Type driver for Local Network, which only updates ``network_type`` for the
- network in the Tricircle Neutron DB.
-
-When a network creation request is received in Neutron API Server in the
-Tricircle, the type driver will be called based on the configured network
-type.
-
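-As an illustration only, such a type driver for the shared VxLAN case could
-look roughly like the sketch below; the class name and the in-memory ID
-allocator are assumptions of this sketch, not the actual Tricircle code::
-
-    import itertools
-
-    # Hypothetical sketch of a Tricircle type driver, reusing the ML2 type
-    # driver idea: allocate one segment that all bottom pods will share.
-    _vxlan_ids = itertools.count(1000)  # stand-in for a real DB allocator
-
-    class SharedVxlanTypeDriver(object):
-        def get_type(self):
-            return 'vxlan'
-
-        def reserve_provider_segment(self, session, segment):
-            # A real driver would allocate from a segment table via `session`;
-            # a counter keeps the sketch self-contained.
-            vxlan_id = next(_vxlan_ids)
-            return {'network_type': 'vxlan',
-                    'physical_network': None,
-                    'segmentation_id': vxlan_id}
-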
-2. Nova API-GW to trigger the bottom networking automation
-
-Nova API-GW is aware that a new VM is being provisioned when it receives the
-boot VM API request, therefore Nova API-GW is responsible for the network
-creation in the bottom OpenStack instances.
-
-Nova API-GW needs to get the network type from the Neutron API server in the
-Tricircle, and handle the networking automation based on the network type:
-
-* VLAN
- Nova API-GW creates the network in the bottom OpenStack instance in which the
- VM will run, with the VLAN segment id, network name and type retrieved from
- the Neutron API server in the Tricircle.
-
-* Shared VxLAN
- Nova API-GW creates the network in the bottom OpenStack instance in which the
- VM will run, with the VxLAN segment id, network name and type retrieved from
- the Tricircle Neutron API server. After the network in the bottom OpenStack
- instance is created successfully, Nova API-GW needs to make this bottom
- network one of the segments of the network in the Tricircle.
-
-* Mixed VLAN/VxLAN
- Nova API-GW creates the network in each bottom OpenStack instance in which a
- VM will run, with the VLAN or VxLAN segment id respectively, and the network
- name and type retrieved from the Tricircle Neutron API server. After the
- network in the bottom OpenStack instances is created successfully, Nova
- API-GW needs to update the network in the Tricircle with the segmentation
- information of the bottom networks.
-
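-To make the VLAN and shared VxLAN cases above concrete, here is a minimal
-sketch of the bottom network creation using python-neutronclient; the helper
-function and the way the two clients are obtained are assumptions of this
-sketch, not the actual Nova API-GW code::
-
-    def create_bottom_network(top_neutron, bottom_neutron, net_id):
-        """Mirror a top network into one bottom pod (illustrative sketch)."""
-        top_net = top_neutron.show_network(net_id)['network']
-        body = {'network': {
-            'name': top_net['name'],
-            'provider:network_type': top_net['provider:network_type'],
-            'provider:segmentation_id': top_net['provider:segmentation_id'],
-        }}
-        return bottom_neutron.create_network(body)['network']
-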
-3. L2GW driver under Tricircle Plugin in Neutron API server
-
-Tricircle plugin needs to support multi-segment network extension[4].
-
-For Shared VxLAN or Mixed VLAN/VxLAN L2 network type, L2GW driver will utilize the
-multi-segment network extension in Neutron API server to build the L2 network in the
-Tricircle. Each network in the bottom OpenStack instance will be a segment for the
-whole cross Neutron L2 networking in the Tricircle.
-
-After the network in the bottom OpenStack instance is created successfully,
-Nova API-GW will call the Neutron server API to update the network in the
-Tricircle with a new segment from the network in the bottom OpenStack instance.
-
-If the network in the bottom OpenStack instance is removed successfully, Nova
-API-GW will call the Neutron server API to remove the corresponding segment
-from the network in the Tricircle.
-
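-For illustration, adding a bottom network as a new segment of the top network
-could look roughly like the sketch below; the helper name and the composite
-``physical_network`` value (see the data model section) are assumptions of
-this sketch::
-
-    def add_segment_to_top_network(top_neutron, net_id, existing_segments,
-                                   bottom_net, pod_id):
-        """Append the bottom network's segment to the top network (sketch)."""
-        new_segment = {
-            'provider:network_type': bottom_net['provider:network_type'],
-            'provider:physical_network': '%s#%s' % (
-                bottom_net.get('provider:physical_network') or '', pod_id),
-            'provider:segmentation_id': bottom_net['provider:segmentation_id'],
-        }
-        top_neutron.update_network(
-            net_id,
-            {'network': {'segments': existing_segments + [new_segment]}})
-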
-When L2GW driver under Tricircle plugin in Neutron API server receives the
-segment update request, L2GW driver will start async job to orchestrate L2GW API
-for L2 networking automation[2][3].
-
-
-Data model impact
------------------
-
-In the database, we are considering setting physical_network in the top
-OpenStack instance as ``bottom_physical_network#bottom_pod_id`` to distinguish
-segmentation information in different bottom OpenStack instances.
-
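-For illustration, composing and parsing that composite value is
-straightforward (a sketch only)::
-
-    def to_top_physical_network(bottom_physical_network, bottom_pod_id):
-        return '%s#%s' % (bottom_physical_network, bottom_pod_id)
-
-    def from_top_physical_network(value):
-        # e.g. 'physnet1#pod-uuid' -> ('physnet1', 'pod-uuid')
-        return tuple(value.split('#', 1))
-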
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-**Local Network Implementation**
-
-For Local Network, L2GW is not required. In this scenario, no cross Neutron L2/L3
-networking is required.
-
-A user creates network ``Net1`` with only AZ1 in az_hint; the Tricircle plugin
-checks the configuration, and if ``tenant_network_type`` equals
-``local_network``, it will invoke the Local Network type driver. The Local
-Network driver under the Tricircle plugin will update ``network_type`` in the
-database.
-
-For example, a user creates VM1 in AZ1, which has only one pod ``POD1``, and
-connects it to network ``Net1``. ``Nova API-GW`` will send the network creation
-request to ``POD1`` and the VM will be booted in AZ1 (there should be only one
-pod in AZ1).
-
-If a user wants to create VM2 in AZ2, or in ``POD2`` in AZ1, and connect it to
-network ``Net1`` in the Tricircle, the request will fail, because ``Net1`` is a
-local_network type network and is limited to being present in ``POD1`` in AZ1
-only.
-
-**VLAN Implementation**
-
-For VLAN, L2GW is not required. This is the simplest cross Neutron L2
-networking, suitable for limited scenarios. For example, with a small number
-of networks, all VLANs are extended through a physical gateway to support
-cross Neutron VLAN networking, or all Neutron servers are connected by the
-same core switch and share the VLAN ranges visible to and supported by that
-core switch.
-
-When a user creates a network called ``Net1``, the Tricircle plugin checks the
-configuration. If ``tenant_network_type`` equals ``vlan``, the Tricircle will
-invoke the VLAN type driver. The VLAN driver will create a ``segment``, assign
-``network_type`` as VLAN, and update ``segment``, ``network_type`` and
-``physical_network`` in the DB.
-
-A user creates VM1 in AZ1, and connects it to network Net1. If VM1 will be
-booted in ``POD1``, ``Nova API-GW`` needs to get the network information and
-send a network creation message to ``POD1``. The network creation message
-includes ``network_type``, ``segment`` and ``physical_network``.
-
-Then the user creates VM2 in AZ2, and connects it to network Net1. If VM2 will
-be booted in ``POD2``, ``Nova API-GW`` needs to get the network information and
-send a network creation message to ``POD2``. The network creation message
-includes ``network_type``, ``segment`` and ``physical_network``.
-
-**Shared VxLAN Implementation**
-
-A user creates network ``Net1``; the Tricircle plugin checks the configuration,
-and if ``tenant_network_type`` equals ``shared_vxlan``, it will invoke the
-shared VxLAN driver. The shared VxLAN driver will allocate a ``segment``,
-assign ``network_type`` as VxLAN, and update the network with ``segment`` and
-``network_type`` in the DB.
-
-A user creates VM1 in AZ1, and connects it to network ``Net1``. If VM1 will be
-booted in ``POD1``, ``Nova API-GW`` needs to get the network information and
-send a network creation message to ``POD1``; the network creation message
-includes ``network_type`` and ``segment``.
-
-``Nova API-GW`` should update ``Net1`` in the Tricircle with the segment
-information returned by ``POD1``.
-
-Then the user creates VM2 in AZ2, and connects it to network ``Net1``. If VM2
-will be booted in ``POD2``, ``Nova API-GW`` needs to get the network information
-and send a network creation message to ``POD2``; the network creation message
-includes ``network_type`` and ``segment``.
-
-``Nova API-GW`` should update ``Net1`` in the Tricircle with the segment
-information returned by ``POD2``.
-
-The Tricircle plugin detects that the network includes more than one segment,
-and calls the L2GW driver to start an async job for cross Neutron networking for
-``Net1``. The L2GW driver will create L2GW1 in ``POD1`` and L2GW2 in ``POD2``. In
-``POD1``, L2GW1 will connect the local ``Net1`` and create L2GW remote connection
-to L2GW2, then populate the information of MAC/IP which resides in L2GW1. In
-``POD2``, L2GW2 will connect the local ``Net1`` and create L2GW remote connection
-to L2GW1, then populate remote MAC/IP information which resides in ``POD1`` in L2GW2.
-
-The L2GW driver in the Tricircle will also detect new port creation/deletion
-API requests. If a port (MAC/IP) is created or deleted in ``POD1`` or ``POD2``,
-it needs to refresh the remote L2GW's MAC/IP information accordingly.
-
-Whether to populate the port (MAC/IP) information should be configurable
-according to the L2GW capability, and MAC/IP information should be populated
-only for ports that do not reside in the same pod.
-
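-The async job described above could be sketched roughly as follows; the
-``l2gw_api`` object and all of its method names are placeholders standing in
-for the L2GW APIs of [2][3], not the real client interface::
-
-    def configure_cross_pod_l2gw(pods, net_id, ports_by_pod, l2gw_api):
-        """Sketch: wire one L2GW per pod and exchange remote MAC/IP info."""
-        gateways = {}
-        for pod in pods:
-            # One gateway per pod, connected to the local segment of Net1.
-            gateways[pod] = l2gw_api.create_gateway_connection(pod, net_id)
-        for pod, gw in gateways.items():
-            for remote_pod, remote_gw in gateways.items():
-                if remote_pod == pod:
-                    continue
-                # Point each gateway at its peers and push the MAC/IP entries
-                # of ports living in the remote pod (remote ports only).
-                l2gw_api.add_remote_peer(gw, remote_gw)
-                l2gw_api.populate_remote_macs(gw, ports_by_pod[remote_pod])
-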
-**Mixed VLAN/VxLAN**
-
-To achieve cross Neutron L2 networking, L2GW will be used to connect L2 network
-in different Neutron servers, using L2GW should work for Shared VxLAN and Mixed VLAN/VxLAN
-scenario.
-
-When L2GW is connected with a local network in the same OpenStack instance,
-no matter whether it is VLAN, VxLAN or GRE, the L2GW should be able to connect
-to the local network, and because L2GW is an extension of Neutron, the network
-UUID alone should be enough for L2GW to connect the local network.
-
-When the admin user creates a network in the Tricircle, he/she specifies the
-network type as one of the network types discussed above. In the phase of
-creating the network in the Tricircle, only one record is saved in the
-database; no network is created in the bottom OpenStack.
-
-After the network in the bottom is created successfully, the Tricircle needs
-to retrieve the network information such as segment id, network name and
-network type, and make this network in the bottom pod one of the segments of
-the network in the Tricircle.
-
-In the Tricircle, a network can be created by a tenant or an admin. A tenant
-has no way to specify the network type and segment id, so the default network
-type is used instead. When the user uses the network to boot a VM, ``Nova
-API-GW`` checks the network type. For a Mixed VLAN/VxLAN network, ``Nova
-API-GW`` first creates the network in the bottom OpenStack without specifying
-the network type and segment ID, then updates the top network with the bottom
-network segmentation information returned by the bottom OpenStack.
-
-A user creates network ``Net1``; the plugin checks the configuration, and if
-``tenant_network_type`` equals ``mixed_vlan_vxlan``, it will invoke the mixed
-VLAN and VxLAN driver. The driver needs to do nothing since the segment is
-allocated in the bottom.
-
-A user creates VM1 in AZ1 and connects it to the network ``Net1``. The VM is
-booted in bottom ``POD1``, and ``Nova API-GW`` creates the network in ``POD1``,
-queries the network's detailed segmentation information (using the admin role),
-gets the network type and segment id, then updates ``Net1`` in the Tricircle
-``Neutron API Server`` with this new segment.
-
-Then the user creates another VM2 with AZ info AZ2, so the VM will be booted
-in bottom ``POD2`` which is located in AZ2. When VM2 is to be booted in AZ2,
-``Nova API-GW`` also creates a network in ``POD2``, queries the network
-information including segment and network type, and updates ``Net1`` in the
-Tricircle ``Neutron API Server`` with this new segment.
-
-The Tricircle plugin detects that ``Net1`` includes more than one network
-segment, and calls the L2GW driver to start an async job for cross Neutron networking for
-``Net1``. The L2GW driver will create L2GW1 in ``POD1`` and L2GW2 in ``POD2``. In
-``POD1``, L2GW1 will connect the local ``Net1`` and create L2GW remote connection
-to L2GW2, then populate information of MAC/IP which resides in ``POD2`` in L2GW1.
-In ``POD2``, L2GW2 will connect the local ``Net1`` and create L2GW remote connection
-to L2GW1, then populate remote MAC/IP information which resides in ``POD1`` in L2GW2.
-
-The L2GW driver in the Tricircle will also detect new port creation/deletion
-API calls. If a port (MAC/IP) is created or deleted in ``POD1``, it needs to
-refresh the L2GW2 MAC/IP information; if a port (MAC/IP) is created or deleted
-in ``POD2``, it needs to refresh the L2GW1 MAC/IP information.
-
-Whether to populate MAC/IP information should be configurable according to
-the L2GW capability, and MAC/IP information should be populated only for
-ports that do not reside in the same pod.
-
-**L3 bridge network**
-
-Current implementation without cross Neutron L2 networking.
-
-* A special bridge network is created and connected to the routers in
- different bottom OpenStack instances. We configure the extra routes of the routers
- to route the packets from one OpenStack to another. In current
- implementation, we create this special bridge network in each bottom
- OpenStack with the same ``VLAN ID``, so we have an L2 network to connect
- the routers.
-
-Differences between L2 networking for a tenant's VMs and for the L3 bridge network:
-
-* The creation of bridge network is triggered during attaching router
- interface and adding router external gateway.
-
-* The L2 network for a VM is triggered by ``Nova API-GW`` when a VM is to be
- created in one pod and it finds that there is no such network yet; the
- network is then created before the VM is booted, since a network or port
- parameter is required to boot a VM. The IP/MAC for the VM is allocated in
- the ``Tricircle`` top layer to avoid IP/MAC collisions which could occur if
- they were allocated separately in the bottom pods.
-
-After cross Neutron L2 networking is introduced, the L3 bridge network should
-be updated too.
-
-L3 bridge network N-S (North-South):
-
-* For each tenant, one cross Neutron N-S bridge network should be created for
- router N-S inter-connection. Just replace the current VLAN N-S bridge network
- with the corresponding Shared VxLAN or Mixed VLAN/VxLAN network.
-
-L3 bridge network E-W (East-West):
-
-* When a router interface is attached, for VLAN, the current process of
- establishing the E-W bridge network is kept. For Shared VxLAN and Mixed
- VLAN/VxLAN, if an L2 network is able to expand to the current pod, then just
- expand the L2 network to the pod; all E-W traffic will go out from the local
- L2 network, so no bridge network is needed.
-
-* For example, with (Net1, Router1) in ``Pod1`` and (Net2, Router1) in
- ``Pod2``, if ``Net1`` is a cross Neutron L2 network and can be expanded to
- ``Pod2``, then ``Net1`` is simply expanded to ``Pod2``. After the ``Net1``
- expansion (just like cross Neutron L2 networking spreading one network over
- multiple Neutron servers), it'll look like (Net1, Router1) in ``Pod1`` and
- (Net1, Net2, Router1) in ``Pod2``. In ``Pod2`` there is no VM in ``Net1``;
- it exists only for E-W traffic. Now the E-W traffic will look like this:
-
-from Net2 to Net1:
-
-Net2 in Pod2 -> Router1 in Pod2 -> Net1 in Pod2 -> L2GW in Pod2 ---> L2GW in
-Pod1 -> Net1 in Pod1.
-
-Note: The traffic from ``Net1`` in ``Pod2`` to ``Net1`` in ``Pod1`` can bypass
-the L2GW in ``Pod2``; that means outbound traffic can bypass the local L2GW if
-the remote L2GW's VTEP is known to the local compute node and the VxLAN
-encapsulated packet from the local compute node could be routed to the remote
-L2GW directly. This is up to the L2GW implementation. With the inbound traffic
-going through L2GW, the inbound traffic to the VM will not be impacted by VM
-migration from one host to another.
-
-If ``Net2`` is a cross Neutron L2 network and can be expanded to ``Pod1`` too,
-then ``Net2`` is simply expanded to ``Pod1``. After the ``Net2`` expansion
-(just like cross Neutron L2 networking spreading one network over multiple
-Neutron servers), it'll look like (Net2, Net1, Router1) in ``Pod1`` and
-(Net1, Net2, Router1) in ``Pod2``. In ``Pod1`` there is no VM in ``Net2``;
-it exists only for E-W traffic. Now the E-W traffic from ``Net1`` to ``Net2``
-will look like this:
-
-Net1 in Pod1 -> Router1 in Pod1 -> Net2 in Pod1 -> L2GW in Pod1 ---> L2GW in
-Pod2 -> Net2 in Pod2.
-
-To limit the complexity, a network's az_hint can only be specified at creation
-time and cannot be updated; if the az_hint needs to change, you have to delete
-the network and create it again.
-
-If the network can't be expanded, then E-W bridge network is needed. For
-example, Net1(AZ1, AZ2,AZ3), Router1; Net2(AZ4, AZ5, AZ6), Router1.
-Then a cross Neutron L2 bridge network has to be established:
-
-Net1(AZ1, AZ2, AZ3), Router1 --> E-W bridge network ---> Router1,
-Net2(AZ4, AZ5, AZ6).
-
-Assignee(s)
-------------
-
-Primary assignee:
-
-
-Other contributors:
-
-
-Work Items
-------------
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-None
-
-
-Documentation Impact
-====================
-
-None
-
-
-References
-==========
-[1] https://docs.google.com/document/d/18kZZ1snMOCD9IQvUKI5NVDzSASpw-QKj7l2zNqMEd3g/
-
-[2] https://review.opendev.org/#/c/270786/
-
-[3] https://github.com/openstack/networking-l2gw/blob/master/specs/kilo/l2-gateway-api.rst
-
-[4] https://docs.openstack.org/api-ref/network/v2/index.html#networks-multi-provider-ext
-
-[5] https://docs.openstack.org/mitaka/networking-guide/config-az.html
-
-[6] https://review.opendev.org/#/c/306224/
diff --git a/specs/newton/dynamic-pod-binding.rst b/specs/newton/dynamic-pod-binding.rst
deleted file mode 100644
index dc666142..00000000
--- a/specs/newton/dynamic-pod-binding.rst
+++ /dev/null
@@ -1,236 +0,0 @@
-=================================
-Dynamic Pod Binding in Tricircle
-=================================
-
-Background
-===========
-
-Most public cloud infrastructure is built with Availability Zones (AZs).
-Each AZ consists of one or more discrete data centers, each with high
-bandwidth and low latency network connections, separate power and facilities.
-These AZs offer cloud tenants the ability to operate production applications
-and databases; deployed into multiple AZs, they are more highly available,
-fault tolerant and scalable than in a single data center.
-
-In production clouds, each AZ is built from modularized OpenStack, and each
-OpenStack instance is one pod. Moreover, one AZ can include multiple pods.
-The pods are classified into different categories. For example, servers in
-one pod may be only for general purposes, while other pods may be built for
-heavy-load CAD modeling with GPUs. So pods in one AZ can be divided into
-different groups: different pod groups serve different purposes, and the
-VMs' cost and performance also differ.
-
-The concept "pod" is created for the Tricircle to facilitate managing
-OpenStack instances among AZs, which therefore is transparent to cloud
-tenants. The Tricircle maintains and manages a pod binding table which
-records the mapping relationship between a cloud tenant and pods. When the
-cloud tenant creates a VM or a volume, the Tricircle tries to assign a pod
-based on the pod binding table.
-
-Motivation
-===========
-
-In the resource allocation scenario, a tenant may create a VM in one pod and a
-new volume in another pod. If the tenant attempts to attach the volume to the
-VM, the operation will fail. In other words, the volume should be in the same
-pod as the VM, otherwise the volume and VM will not be able to finish the
-attachment. Hence, the Tricircle needs to ensure the pod binding so as to
-guarantee that VM and volume are created in one pod.
-
-In the capacity expansion scenario, when resources in one pod are exhausted,
-a new pod of the same type should be added into the AZ. Therefore, new
-resources of this type should be provisioned in the newly added pod, which
-requires a dynamic change of pod binding. The pod binding could be changed
-dynamically by the Tricircle, or by the admin through the Admin API for
-maintenance purposes. For example, during a maintenance (upgrade, repair)
-window, all new provisioning requests should be forwarded to the running pod,
-not the one under maintenance.
-
-Solution: dynamic pod binding
-==============================
-
-Capacity expansion inside one pod is quite a headache: you have to estimate,
-calculate, monitor, simulate, test, and do online grey expansion for
-controller nodes and network nodes whenever you add new machines to the pod.
-It is a big challenge as more and more resources are added to one pod, and
-eventually you will reach the limitation of one OpenStack instance. If this
-pod's resources are exhausted or reach the limit for provisioning new
-resources, the Tricircle needs to bind the tenant to a new pod instead of
-expanding the current pod without limit. The Tricircle needs to select a
-proper pod and keep the binding for a duration, during which VMs and volumes
-for one tenant will be created in the same pod.
-
-For example, suppose we have two groups of pods, and each group has 3 pods,
-i.e.,
-
-GroupA(Pod1, Pod2, Pod3) for general purpose VM,
-
-GroupB(Pod4, Pod5, Pod6) for CAD modeling.
-
-Tenant1 is bound to Pod1 and Pod4 during the first phase, which lasts several
-months. In the first phase, we can just add a weight to each pod, for example
-Pod1 with weight 1 and Pod2 with weight 2. This could be done by adding one
-new field in the pod table, or no field at all, just ordering the pods by the
-order they were created in the Tricircle. In this case, we use the pod
-creation time as the weight.
-
-If the tenant wants to allocate VM/volume for general VM, Pod1 should be
-selected. It can be implemented with flavor or volume type metadata. For
-general VM/Volume, there is no special tag in flavor or volume type metadata.
-
-If the tenant wants to allocate VM/volume for CAD modeling VM, Pod4 should be
-selected. For CAD modeling VM/Volume, a special tag "resource: CAD Modeling"
-in flavor or volume type metadata determines the binding.
-
-When it is detected that there are no more resources in Pod1 and Pod4, the
-Tricircle, based on the resource_affinity_tag, queries the pod table for
-available pods which provision the specific type of resources. The field
-resource_affinity is a key-value pair. A pod is selected when its key-value
-matches the flavor extra-spec or volume extra-spec. A tenant will be bound
-to one pod in a group of pods with the same resource_affinity_tag. In this
-case, the Tricircle obtains Pod2 and Pod3 for general purpose, as well as
-Pod5 and Pod6 for CAD purposes. The Tricircle then needs to change the
-binding; for example, Tenant1 needs to be bound to Pod2 and Pod5.
-
-Implementation
-===============
-
-Measurement
--------------
-
-To get the information of resource utilization of pods, the Tricircle needs to
-conduct some measurements on pods. The statistic task should be done in
-bottom pod.
-
-For resource usage, cells currently provide an interface to retrieve usage per
-cell [1]. OpenStack provides details of the capacity of a cell, including disk
-and RAM, via the API for showing cell capacities [1].
-
-If OpenStack is not running in cells mode, we can ask Nova to provide an
-interface to show the usage detail per AZ. Moreover, an API for usage query at
-the host level is provided for admins [3], through which we can obtain details
-of a host, including CPU, memory, disk, and so on.
-
-Cinder also provides an interface to retrieve the backend pool usage,
-including the update time, total capacity, free capacity and so on [2].
-
-The Tricircle needs a task to collect the usage from the bottom on a daily
-basis, to evaluate whether the threshold is reached or not. A threshold or
-headroom could be configured for each pod so that resources never reach 100%
-exhaustion.
-
-There should be no heavy processing on top, so aggregating the summary
-information from the bottom can be done in the Tricircle. After collecting the
-details, the Tricircle can judge whether a pod has reached its limit.
-
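-As an illustration of such a collection task, the sketch below uses the usual
-python-novaclient and python-cinderclient calls; shaping the result into a
-per-pod usage dict is an assumption of this sketch::
-
-    def collect_pod_usage(nova, cinder):
-        """Gather a coarse usage snapshot for one bottom pod (sketch)."""
-        hv = nova.hypervisors.statistics()        # aggregated hypervisor stats
-        pools = cinder.pools.list(detailed=True)  # per-backend-pool capacity
-        return {
-            'vcpus': {'used': hv.vcpus_used, 'total': hv.vcpus},
-            'memory_mb': {'used': hv.memory_mb_used, 'total': hv.memory_mb},
-            # capacity fields such as free_capacity_gb live in the pool
-            # capabilities reported by the Cinder scheduler stats API
-            'pools': [{'name': getattr(p, 'name', None),
-                       'capabilities': getattr(p, 'capabilities', {})}
-                      for p in pools],
-        }
-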
-Tricircle
-----------
-
-The Tricircle needs a framework to support different binding policy (filter).
-
-Each pod is one OpenStack instance, including controller nodes and compute
-nodes. E.g.,
-
-::
-
- +-> controller(s) - pod1 <--> compute nodes <---+
- |
- The tricircle +-> controller(s) - pod2 <--> compute nodes <---+ resource migration, if necessary
- (resource controller) .... |
- +-> controller(s) - pod{N} <--> compute nodes <-+
-
-
-The Tricircle selects a pod to decide where the requests should be forwarded
-to which controller. Then the controllers in the selected pod will do its own
-scheduling.
-
-The simplest binding filter is as follows: line up all available pods in a
-list and always select the first one. When all the resources in the first pod
-have been allocated, remove it from the list. This is quite like how a
-production cloud is built: at first, only a few pods are in the list, and then
-more and more pods are added when there are not enough resources in the
-current cloud. For example,
-
-List1 for general pool: Pod1 <- Pod2 <- Pod3
-List2 for CAD modeling pool: Pod4 <- Pod5 <- Pod6
-
-If Pod1's resources are exhausted, Pod1 is removed from List1, which changes
-to: Pod2 <- Pod3.
-If Pod4's resources are exhausted, Pod4 is removed from List2, which changes
-to: Pod5 <- Pod6.
-
-If the tenant wants to allocate resources for general VM, the Tricircle
-selects Pod2. If the tenant wants to allocate resources for CAD modeling VM,
-the Tricircle selects Pod5.
-
-Filtering
--------------
-
-For the strategy of selecting pods, we need a series of filters. Before
-dynamic pod binding is implemented, the binding criteria are hard coded to
-select the first pod in the AZ. Hence, we need to design a series of filter
-algorithms. Firstly, we plan to design an ALLPodsFilter which does no
-filtering and passes all the available pods. Secondly, we plan to design an
-AvailabilityZoneFilter which passes the pods matching the specified
-availability zone. Thirdly, we plan to design a ResourceAffinityFilter which
-passes the pods matching the specified resource type. Based on the
-resource_affinity_tag, the Tricircle can be aware of which type of resource
-the tenant wants to provision. In the future, we can add more filters, which
-requires adding more information to the pod table.
-
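-A minimal sketch of what these filters could look like is shown below; the
-method signature and the pod fields are assumptions of this sketch, with each
-pod treated as a dict-like row from the pod table::
-
-    class ALLPodsFilter(object):
-        """Pass every available pod through unchanged."""
-        def filter_pods(self, pods, request_spec):
-            return list(pods)
-
-    class AvailabilityZoneFilter(object):
-        """Keep only pods in the requested availability zone."""
-        def filter_pods(self, pods, request_spec):
-            az = request_spec.get('availability_zone')
-            return [p for p in pods if not az or p['az_name'] == az]
-
-    class ResourceAffinityFilter(object):
-        """Keep pods whose tag matches the flavor/volume extra-spec tag."""
-        def filter_pods(self, pods, request_spec):
-            tag = request_spec.get('resource_affinity_tag')
-            return [p for p in pods
-                    if not tag or p.get('resource_affinity_tag') == tag]
-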
-Weighting
--------------
-
-After filtering all the pods, the Tricircle obtains the available pods for a
-tenant. The Tricircle needs to select the most suitable pod for the tenant.
-Hence, we need to define a weight function to calculate the corresponding
-weight of each pod. Based on the weights, the Tricircle selects the pod which
-has the maximum weight value. When calculating the weight of a pod, we need
-to design a series of weighers. We first take the pod creation time into
-consideration when designing the weight function. The second factor is the
-idle capacity, to select the pod which has the most idle capacity. Other metrics
-will be added in the future, e.g., cost.
-
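-A sketch of such a weighting step, combining the two weighers mentioned above
-(the pod and usage field names are assumptions of this sketch)::
-
-    def select_pod(pods, usage_by_pod):
-        """Pick the pod with the highest weight (sketch).
-
-        Earlier-created pods weigh more; idle capacity breaks ties.
-        """
-        def weight(pod):
-            # Older pods should win, hence the negated creation timestamp.
-            created_weight = -pod['created_at'].timestamp()
-            idle_capacity = usage_by_pod.get(pod['pod_id'], {}).get('idle', 0)
-            return (created_weight, idle_capacity)
-
-        return max(pods, key=weight)
-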
-Data Model Impact
-==================
-
-Firstly, we need to add a column “resource_affinity_tag” to the pod table,
-which is used to store the key-value pair, to match flavor extra-spec and
-volume extra-spec.
-
-Secondly, in the pod binding table, we need to add fields of start binding
-time and end binding time, so the history of the binding relationship could
-be stored.
-
-Thirdly, we need a table to store the usage of each pod for Cinder/Nova.
-We plan to use a JSON object to store the usage information, so even if the
-usage structure is changed, we don't need to update the table. If the usage
-value is null, that means the usage has not been initialized yet.
-As mentioned above, the usage could be refreshed on a daily basis. If it is
-not initialized yet, that means there are still lots of resources available,
-and the pod can be scheduled as if it had not reached its usage threshold.
-
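-For illustration, the schema change could look roughly like the alembic
-snippet below; the table and column names follow the text above, and whether
-they match the real Tricircle schema is an assumption of this sketch::
-
-    from alembic import op
-    import sqlalchemy as sa
-
-    def upgrade():
-        # Key-value tag used to match flavor/volume extra-specs.
-        op.add_column('pods', sa.Column('resource_affinity_tag',
-                                        sa.String(255), nullable=True))
-        # Keep the binding history instead of a single current binding.
-        op.add_column('pod_binding', sa.Column('start_binding_time',
-                                               sa.DateTime, nullable=True))
-        op.add_column('pod_binding', sa.Column('end_binding_time',
-                                               sa.DateTime, nullable=True))
-        # Raw usage snapshot stored as JSON text; null means not collected yet.
-        op.add_column('pods', sa.Column('usage', sa.Text, nullable=True))
-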
-Dependencies
-=============
-
-None
-
-
-Testing
-========
-
-None
-
-
-Documentation Impact
-=====================
-
-None
-
-
-Reference
-==========
-
-[1] https://docs.openstack.org/api-ref/compute/#capacities
-
-[2] https://docs.openstack.org/api-ref/block-storage/v2/index.html#volumes-volumes
-
-[3] https://docs.openstack.org/api-ref/compute/#show-server-details
diff --git a/specs/ocata/enhance-xjob-reliability.rst b/specs/ocata/enhance-xjob-reliability.rst
deleted file mode 100644
index 45e64c51..00000000
--- a/specs/ocata/enhance-xjob-reliability.rst
+++ /dev/null
@@ -1,234 +0,0 @@
-=======================================
-Enhance Reliability of Asynchronous Job
-=======================================
-
-Background
-==========
-
-Currently we are using the cast method in our RPC client to trigger
-asynchronous jobs in the XJob daemon. After one of the worker threads receives
-the RPC message from the message broker, it registers the job in the database
-and starts to run the handle function. The registration guarantees that an
-asynchronous job will not be lost if it fails, since a failed job can be
-redone. The detailed discussion of the asynchronous job process in the XJob
-daemon is covered in our design document [1]_.
-
-Though asynchronous jobs are correctly saved after worker threads get the RPC
-message, we still risk losing jobs. With the cast method, it is only
-guaranteed that the message is received by the message broker, but there is no
-guarantee that the message will be received by the message consumer, i.e., the
-RPC server thread running in the XJob daemon. According to the RabbitMQ
-documentation, undelivered messages will be lost if the RabbitMQ server stops
-[2]_. Message persistence or publisher confirms [3]_ can be used to increase
-reliability, but they sacrifice performance. On the other hand, we cannot
-assume that message brokers other than RabbitMQ will provide similar
-persistence or confirmation functionality. Therefore, the Tricircle itself
-should handle the asynchronous job reliability problem as far as possible.
-Since we already have a framework to register, run and redo asynchronous jobs
-in the XJob daemon, we propose a cheaper way to improve reliability.
-
-Proposal
-========
-
-One straightforward way to make sure that the RPC server has received the RPC
-message is to use the call method. The RPC client is blocked until the RPC
-server replies to the message when the call method is used to send the RPC
-request. So if something goes wrong before the reply, the RPC client is aware
-of it. Of course we cannot make the RPC client wait too long, so RPC handlers
-on the RPC server side need to be simple and quick to run. Thanks to the
-asynchronous job framework we already have, migrating from the cast method to
-the call method is easy.
-
-Here is the flow of the current process::
-
- +--------+ +--------+ +---------+ +---------------+ +----------+
- | | | | | | | | | |
- | API | | RPC | | Message | | RPC Server | | Database |
- | Server | | client | | Broker | | Handle Worker | | |
- | | | | | | | | | |
- +---+----+ +---+----+ +----+----+ +-------+-------+ +----+-----+
- | | | | |
- | call RPC API | | | |
- +--------------> | | |
- | | send cast message | | |
- | +-------------------> | |
- | call return | | dispatch message | |
- <--------------+ +------------------> |
- | | | | register job |
- | | | +---------------->
- | | | | |
- | | | | obtain lock |
- | | | +---------------->
- | | | | |
- | | | | run job |
- | | | +----+ |
- | | | | | |
- | | | | | |
- | | | <----+ |
- | | | | |
- | | | | |
- + + + + +
-
-We can keep only the **register job** phase in the RPC handler and move the
-**obtain lock** and **run job** phases to a separate thread, so the RPC
-handler is simple enough to be invoked with the call method. Here is the
-proposed flow::
-
- +--------+ +--------+ +---------+ +---------------+ +----------+ +-------------+ +-------+
- | | | | | | | | | | | | | |
- | API | | RPC | | Message | | RPC Server | | Database | | RPC Server | | Job |
- | Server | | client | | Broker | | Handle Worker | | | | Loop Worker | | Queue |
- | | | | | | | | | | | | | |
- +---+----+ +---+----+ +----+----+ +-------+-------+ +----+-----+ +------+------+ +---+---+
- | | | | | | |
- | call RPC API | | | | | |
- +--------------> | | | | |
- | | send call message | | | | |
- | +--------------------> | | | |
- | | | dispatch message | | | |
- | | +------------------> | | |
- | | | | register job | | |
- | | | +----------------> | |
- | | | | | | |
- | | | | job enqueue | | |
- | | | +------------------------------------------------>
- | | | | | | |
- | | | reply message | | | job dequeue |
- | | <------------------+ | |-------------->
- | | send reply message | | | obtain lock | |
- | <--------------------+ | <----------------+ |
- | call return | | | | | |
- <--------------+ | | | run job | |
- | | | | | +----+ |
- | | | | | | | |
- | | | | | | | |
- | | | | | +----> |
- | | | | | | |
- | | | | | | |
- + + + + + + +
-
-In the above graph, **Loop Worker** is a newly introduced thread that does the
-actual work. **Job Queue** is an eventlet queue [4]_ used to coordinate
-**Handle Worker**, which produces job entries, and **Loop Worker**, which
-consumes them. When accessing an empty queue, **Loop Worker** blocks until
-some job entries are put into the queue. **Loop Worker** retrieves job entries
-from the job queue and then starts to run them. Similar to the original flow,
-since multiple workers may get the same type of job for the same resource at
-the same time, workers need to obtain the lock before they can run the job.
-One problem occurs whenever the XJob daemon stops before it finishes all the
-jobs in the job queue: all unfinished jobs are lost. To solve this, we change
-the original periodic task that redoes failed jobs so that it also handles
-jobs which have been registered for a certain time but haven't been started.
-So both failed jobs and "orphan" new jobs can be picked up and redone.
-
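-The coordination between **Handle Worker** and **Loop Worker** can be sketched
-with an eventlet queue as below; the helper names are illustrative
-placeholders rather than the actual XJob code::
-
-    import eventlet
-    from eventlet import queue
-
-    job_queue = queue.Queue()
-
-    def handle_rpc_message(job):
-        # RPC handler body: register the job, enqueue it and return
-        # immediately, so invoking it with the call method stays cheap.
-        print('registering %s' % job)   # stand-in for database registration
-        job_queue.put(job)
-
-    def loop_worker():
-        # Newly introduced thread: blocks on an empty queue, obtains the
-        # job lock (omitted here) and runs the job.
-        while True:
-            job = job_queue.get()
-            print('running %s' % job)
-
-    eventlet.spawn(loop_worker)
-    handle_rpc_message('job-1')
-    eventlet.sleep(0)  # yield so the green thread gets a chance to run
-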
-You can see that **Handle Worker** doesn't do much work: it just consumes RPC
-messages, registers jobs and then puts job items in the job queue. So one
-extreme solution is to register new jobs in the API server side and start
-worker threads to retrieve jobs from the database and run them. In this way,
-we can remove all the RPC processes and use the database to coordinate. The
-drawback of this solution is that we don't dispatch jobs. All the workers
-query jobs from the database, so there is a high probability that some of the
-workers obtain the same job and a race occurs. In the first solution, the
-message broker helps us dispatch messages, and thus dispatch jobs.
-
-Since job dispatch is important, we can make some changes to the second
-solution and move to a third one: we still register new jobs in the API server
-side, but keep using the cast method to trigger the asynchronous job in the
-XJob daemon. Since job registration is done in the API server side, we are not
-afraid that jobs will be lost if cast messages are lost. If the API server
-side fails to register the job, it returns a failure response; if the
-registration succeeds, the job will eventually be done by the XJob daemon. By
-using RPC, we dispatch jobs with the help of message brokers. One thing that
-makes the cast method better than the call method is that retrieving RPC
-messages and running job handles are done in the same thread, so if one XJob
-daemon is busy handling jobs, RPC messages will not be dispatched to it. With
-the call method, however, RPC messages are retrieved by one thread (the
-**Handle Worker**) and job handles are run by another thread (the **Loop
-Worker**), so an XJob daemon may accumulate many jobs in its queue while it is
-busy handling jobs. This solution shares one problem with the call method
-solution: if cast messages are lost, the new jobs are registered in the
-database but no XJob daemon is aware of these new jobs. The same fix applies:
-use the periodic task to pick up these "orphan" jobs. Here is the flow::
-
- +--------+ +--------+ +---------+ +---------------+ +----------+
- | | | | | | | | | |
- | API | | RPC | | Message | | RPC Server | | Database |
- | Server | | client | | Broker | | Handle Worker | | |
- | | | | | | | | | |
- +---+----+ +---+----+ +----+----+ +-------+-------+ +----+-----+
- | | | | |
- | call RPC API | | | |
- +--------------> | | |
- | | register job | | |
- | +------------------------------------------------------->
- | | | | |
- | | [if succeed to | | |
- | | register job] | | |
- | | send cast message | | |
- | +-------------------> | |
- | call return | | dispatch message | |
- <--------------+ +------------------> |
- | | | | obtain lock |
- | | | +---------------->
- | | | | |
- | | | | run job |
- | | | +----+ |
- | | | | | |
- | | | | | |
- | | | <----+ |
- | | | | |
- | | | | |
- + + + + +
-
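-Before comparing the solutions, here is a self-contained sketch of the
-register-then-cast pattern on the API server side; all names are illustrative
-stand-ins, not the actual Tricircle code::
-
-    def register_job(job_type, resource):
-        # Stand-in for the database registration done by the API server.
-        return {'type': job_type, 'resource': resource, 'status': 'NEW'}
-
-    def cast_to_xjob(job):
-        # Stand-in for the RPC cast; the message may be lost by the broker.
-        print('cast %s' % job['type'])
-
-    def trigger_async_job(job_type, resource):
-        # Register first so the job survives even if the cast message is
-        # lost; the periodic task later picks up registered jobs that no
-        # worker has started.
-        job = register_job(job_type, resource)
-        cast_to_xjob(job)
-        return job
-
-    trigger_async_job('port_delete', {'port_id': 'fake-port-id'})
-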
-Discussion
-==========
-
-In this section we discuss the pros and cons of the above three solutions.
-
-.. list-table:: **Solution Comparison**
- :header-rows: 1
-
- * - Solution
- - Pros
- - Cons
- * - API server uses call
- - no RPC message lost
- - downtime of unfinished jobs in the job queue when XJob daemon stops,
- job dispatch not based on XJob daemon workload
- * - API server register jobs + no RPC
- - no requirement on RPC(message broker), no downtime
- - no job dispatch, conflict costs time
- * - API server register jobs + uses cast
- - job dispatch based on XJob daemon workload
- - downtime of lost jobs due to cast messages lost
-
-Downtime means that after a job is dispatched to a worker, other workers need
-to wait for a certain time to determine that the job has expired and take it
-over.
-
-Conclusion
-==========
-
-We decide to implement the third solution (API server registers jobs + uses
-cast) since it improves the asynchronous job reliability and at the same time
-has better workload dispatch.
-
-Data Model Impact
-=================
-
-None
-
-Dependencies
-============
-
-None
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-
-.. [1] https://docs.google.com/document/d/1zcxwl8xMEpxVCqLTce2-dUOtB-ObmzJTbV1uSQ6qTsY
-.. [2] https://www.rabbitmq.com/tutorials/tutorial-two-python.html
-.. [3] https://www.rabbitmq.com/confirms.html
-.. [4] http://eventlet.net/doc/modules/queue.html
diff --git a/specs/ocata/l3-networking-combined-bridge-net.rst b/specs/ocata/l3-networking-combined-bridge-net.rst
deleted file mode 100644
index e73021c7..00000000
--- a/specs/ocata/l3-networking-combined-bridge-net.rst
+++ /dev/null
@@ -1,566 +0,0 @@
-==============================================
-Layer-3 Networking and Combined Bridge Network
-==============================================
-
-Background
-==========
-
-To achieve cross-Neutron layer-3 networking, we utilize a bridge network to
-connect networks in each Neutron server, as shown below:
-
-East-West networking::
-
- +-----------------------+ +-----------------------+
- | OpenStack1 | | OpenStack2 |
- | | | |
- | +------+ +---------+ | +------------+ | +---------+ +------+ |
- | | net1 | | ip1| | | bridge net | | |ip2 | | net2 | |
- | | +--+ R +---+ +---+ R +--+ | |
- | | | | | | | | | | | | | |
- | +------+ +---------+ | +------------+ | +---------+ +------+ |
- +-----------------------+ +-----------------------+
-
- Fig 1
-
-North-South networking::
-
- +---------------------+ +-------------------------------+
- | OpenStack1 | | OpenStack2 |
- | | | |
- | +------+ +-------+ | +--------------+ | +-------+ +----------------+ |
- | | net1 | | ip1| | | bridge net | | | ip2| | external net | |
- | | +--+ R1 +---+ +---+ R2 +--+ | |
- | | | | | | | 100.0.1.0/24 | | | | | 163.3.124.0/24 | |
- | +------+ +-------+ | +--------------+ | +-------+ +----------------+ |
- +---------------------+ +-------------------------------+
-
- Fig 2
-
-To support east-west networking, we configure extra routes in routers in each
-OpenStack cloud::
-
- In OpenStack1, destination: net2, nexthop: ip2
- In OpenStack2, destination: net1, nexthop: ip1
-
-To support north-south networking, we set the bridge network as the external
-network in OpenStack1 and as the internal network in OpenStack2. For an
-instance in net1 to access the external network, the packets are SNATed twice,
-first to ip1, then to ip2. For floating ip binding, an ip in net1 is first
-bound to an ip (like 100.0.1.5) in the bridge network (the bridge network is
-attached to R1 as an external network), then the ip (100.0.1.5) in the bridge
-network is bound to an ip (like 163.3.124.8) in the real external network (the
-bridge network is attached to R2 as an internal network).
-
-Problems
-========
-
-The idea of introducing a bridge network is good, but there are some problems
-in the current usage of the bridge network.
-
-Redundant Bridge Network
-------------------------
-
-We use two bridge networks to achieve layer-3 networking for each tenant. If
-VLAN is used as the bridge network type, the range of the VLAN tag limits us
-to only 2048 pairs of bridge networks, so the number of tenants supported is
-far from enough.
-
-Redundant SNAT
---------------
-
-In the current implementation, packets are SNATed twice for outbound traffic
-and DNATed twice for inbound traffic. The drawback is that outbound packets
-require extra NAT operations. Also, we need to maintain an extra floating ip
-pool for inbound traffic.
-
-DVR support
------------
-
-The bridge network is attached to the router as an internal network for
-east-west networking, and also for north-south networking when the real
-external network and the router are not located in the same OpenStack cloud.
-This is fine when the bridge network is VLAN type, since packets directly go
-out of the host and are exchanged by switches. But if we would like to support
-VxLAN as the bridge network type later, attaching the bridge network as an
-internal network in the DVR scenario will cause some trouble. DVR connects
-internal networks by routing packets locally in each host; if the destination
-is not in the local host, the packets are sent to the destination host via a
-VxLAN tunnel. Here comes the problem: if the bridge network is attached as an
-internal network, the router interfaces will exist in all the hosts where the
-router namespaces are created, so we need to maintain lots of VTEPs and VxLAN
-tunnels for the bridge network in the Tricircle. Ports in the bridge network
-are located in different OpenStack clouds, so a local Neutron server is not
-aware of ports in other OpenStack clouds and will not set up VxLAN tunnels
-for us.
-
-Proposal
-========
-
-To address the above problems, we propose to combine the bridge networks for
-east-west and north-south networking. The bridge network is always attached to
-routers as an external network. In the DVR scenario, different from router
-interfaces, the router gateway will only exist in the SNAT namespace in a
-specific host, which reduces the number of VTEPs and VxLAN tunnels the
-Tricircle needs to handle. By setting the "enable_snat" option to "False" when
-attaching the router gateway, packets will not be SNATed when they go through
-the router gateway, so packets are only SNATed and DNATed once at the real
-external gateway. However, since one router can only be attached to one
-external network, in the OpenStack cloud where the real external network is
-located we need to add one more router to connect the bridge network with the
-real external network. The network topology is shown below::
-
- +-------------------------+ +-------------------------+
- |OpenStack1 | |OpenStack2 |
- | +------+ +--------+ | +------------+ | +--------+ +------+ |
- | | | | IP1| | | | | |IP2 | | | |
- | | net1 +---+ R1 XXXXXXX bridge net XXXXXXX R2 +---+ net2 | |
- | | | | | | | | | | | | | |
- | +------+ +--------+ | +---X----+---+ | +--------+ +------+ |
- | | X | | |
- +-------------------------+ X | +-------------------------+
- X |
- X |
- +--------------------------------X----|-----------------------------------+
- |OpenStack3 X | |
- | X | |
- | +------+ +--------+ X | +--------+ +--------------+ |
- | | | | IP3| X | |IP4 | | | |
- | | net3 +----+ R3 XXXXXXXXXX +---+ R4 XXXXXX external net | |
- | | | | | | | | | |
- | +------+ +--------+ +--------+ +--------------+ |
- | |
- +-------------------------------------------------------------------------+
-
- router interface: -----
- router gateway: XXXXX
- IPn: router gateway ip or router interface ip
-
- Fig 3
-
-Extra routes and gateway ip are configured to build the connection::
-
- routes of R1: net2 via IP2
- net3 via IP3
- external gateway ip of R1: IP4
- (IP2 and IP3 are from bridge net, so routes will only be created in
- SNAT namespace)
-
- routes of R2: net1 via IP1
- net3 via IP3
- external gateway ip of R2: IP4
- (IP1 and IP3 are from bridge net, so routes will only be created in
- SNAT namespace)
-
- routes of R3: net1 via IP1
- net2 via IP2
- external gateway ip of R3: IP4
- (IP1 and IP2 are from bridge net, so routes will only be created in
- SNAT namespace)
-
- routes of R4: net1 via IP1
- net2 via IP2
- net3 via IP3
-    external gateway ip of R4: real-external-gateway-ip
- disable DVR mode
-
-An alternative solution which avoids the extra router is that, for the router
-located in the same OpenStack cloud as the real external network, we attach
-the bridge network as an internal network, so the real external network can be
-attached to the same router. Here is the topology::
-
- +-------------------------+ +-------------------------+
- |OpenStack1 | |OpenStack2 |
- | +------+ +--------+ | +------------+ | +--------+ +------+ |
- | | | | IP1| | | | | |IP2 | | | |
- | | net1 +---+ R1 XXXXXXX bridge net XXXXXXX R2 +---+ net2 | |
- | | | | | | | | | | | | | |
- | +------+ +--------+ | +-----+------+ | +--------+ +------+ |
- | | | | |
- +-------------------------+ | +-------------------------+
- |
- |
- +----------------------|---------------------------------+
- |OpenStack3 | |
- | | |
- | +------+ +---+----+ +--------------+ |
- | | | | IP3 | | | |
- | | net3 +----+ R3 XXXXXXXX external net | |
- | | | | | | | |
- | +------+ +--------+ +--------------+ |
- | |
- +--------------------------------------------------------+
-
- router interface: -----
- router gateway: XXXXX
- IPn: router gateway ip or router interface ip
-
- Fig 4
-
-The limitation of this solution is that R3 needs to be set as non-DVR mode.
-As discussed above, for a network attached to a DVR mode router, the router
-interfaces of this network will be created in all the hosts where the router
-namespaces are created. Since these interfaces all have the same IP and MAC,
-packets sent between instances (virtual machines, containers or bare metals)
-can't be directly wrapped in VxLAN packets, otherwise packets sent from
-different hosts would have the same MAC. Neutron solves this problem by
-introducing DVR MACs, which are allocated by the Neutron server and assigned
-to each host hosting a DVR mode router. Before the packets are wrapped in
-VxLAN packets, their source MAC is replaced by the DVR MAC of the host. If R3
-is DVR mode, the source MAC of packets sent from net3 to the bridge network
-will be changed, but after the packets reach R1 or R2, R1 and R2 don't
-recognize the DVR MAC, so the packets are dropped.
-
-Likewise, extra routes and gateway ip are configured to build the connection::
-
- routes of R1: net2 via IP2
- net3 via IP3
- external gateway ip of R1: IP3
- (IP2 and IP3 are from bridge net, so routes will only be created in
- SNAT namespace)
-
- routes of R2: net1 via IP1
- net3 via IP3
-    external gateway ip of R2: IP3
- (IP1 and IP3 are from bridge net, so routes will only be created in
- SNAT namespace)
-
- routes of R3: net1 via IP1
- net2 via IP2
- external gateway ip of R3: real-external-gateway-ip
- (non-DVR mode, routes will all be created in the router namespace)
-
-The real external network can be deployed in one dedicated OpenStack cloud. In
-that case, there is no need to run services like Nova and Cinder in that
-cloud; instances and volumes will not be provisioned there and only the
-Neutron service is required. Then the above two topologies transform into the
-same one::
-
- +-------------------------+ +-------------------------+
- |OpenStack1 | |OpenStack2 |
- | +------+ +--------+ | +------------+ | +--------+ +------+ |
- | | | | IP1| | | | | |IP2 | | | |
- | | net1 +---+ R1 XXXXXXX bridge net XXXXXXX R2 +---+ net2 | |
- | | | | | | | | | | | | | |
- | +------+ +--------+ | +-----+------+ | +--------+ +------+ |
- | | | | |
- +-------------------------+ | +-------------------------+
- |
- |
- +-----------|-----------------------------------+
- |OpenStack3 | |
- | | |
- | | +--------+ +--------------+ |
- | | |IP3 | | | |
- | +---+ R3 XXXXXX external net | |
- | | | | | |
- | +--------+ +--------------+ |
- | |
- +-----------------------------------------------+
-
- Fig 5
-
-The motivation of putting the real external network in a dedicated OpenStack
-cloud is to simplify the real external network management, and also to separate
-the real external network and the internal networking area, for better security
-control.
-
-Discussion
-==========
-
-The implementation of DVR does bring some restrictions to our cross-Neutron
-layer-2 and layer-3 networking, resulting in the limitations of the above two
-proposals. In the first proposal, if the real external network is deployed
-with internal networks in the same OpenStack cloud, one extra router is needed
-in that cloud. Also, since one of the routers is DVR mode and the other is
-non-DVR mode, we need to deploy at least two l3 agents, one in dvr_snat mode
-and the other in legacy mode. The limitation of the second proposal is that
-the router is non-DVR mode, so east-west and north-south traffic all go
-through the router namespace in the network node.
-
-Also, cross-Neutron layer-2 networking can not work with DVR because of
-source MAC replacement. Considering the following topology::
-
- +----------------------------------------------+ +-------------------------------+
- |OpenStack1 | |OpenStack2 |
- | +-----------+ +--------+ +-----------+ | | +--------+ +------------+ |
- | | | | | | | | | | | | | |
- | | net1 +---+ R1 +---+ net2 | | | | R2 +---+ net2 | |
- | | Instance1 | | | | Instance2 | | | | | | Instance3 | |
- | +-----------+ +--------+ +-----------+ | | +--------+ +------------+ |
- | | | |
- +----------------------------------------------+ +-------------------------------+
-
- Fig 6
-
-net2 supports cross-Neutron layer-2 networking, so instances in net2 can be
-created in both OpenStack clouds. If the router that net1 and net2 are
-connected to is DVR mode, when Instance1 pings Instance2, the packets are
-routed locally and exchanged via a VxLAN tunnel. Source MAC replacement is
-correctly handled inside OpenStack1. But when Instance1 tries to ping
-Instance3, OpenStack2 does not recognize the DVR MAC from OpenStack1, so the
-connection fails. Therefore, only local type networks can be attached to a DVR
-mode router.
-
-Cross-Neutron layer-2 networking and DVR may co-exist after we address the
-DVR MAC recognition problem (we will raise a discussion about this problem in
-the Neutron community) or introduce an l2 gateway. Actually this bridge
-network approach is just one possible implementation; we are considering
-providing a mechanism in the near future to let an SDN controller plug in, in
-which case DVR and the bridge network may not be needed.
-
-Given the above limitations, can our proposal support the major user
-scenarios? Considering whether the tenant network and router are local or
-across Neutron servers, we divide the user scenarios into four categories. For
-the cross-Neutron router scenario, we use the proposal shown in Fig 3 in our
-discussion.
-
-Local Network and Local Router
-------------------------------
-
-Topology::
-
- +-----------------+ +-----------------+
- |OpenStack1 | |OpenStack2 |
- | | | |
- | ext net1 | | ext net2 |
- | +-----+-----+ | | +-----+-----+ |
- | | | | | |
- | | | | | |
- | +--+--+ | | +--+--+ |
- | | | | | | | |
- | | R1 | | | | R2 | |
- | | | | | | | |
- | +--+--+ | | +--+--+ |
- | | | | | |
- | | | | | |
- | +---+---+ | | +---+---+ |
- | net1 | | net2 |
- | | | |
- +-----------------+ +-----------------+
-
- Fig 7
-
-Each OpenStack cloud has its own external network, and instances in each local
-network access the external network via the local router. If east-west
-networking is not required, this scenario has no requirement on cross-Neutron
-layer-2 and layer-3 networking functionality. Both the central Neutron server
-and the local Neutron server can process network resource management requests.
-If east-west networking is needed, however, we have two choices to extend the
-above topology::
-
- *
- +-----------------+ +-----------------+ * +-----------------+ +-----------------+
- |OpenStack1 | |OpenStack2 | * |OpenStack1 | |OpenStack2 |
- | | | | * | | | |
- | ext net1 | | ext net2 | * | ext net1 | | ext net2 |
- | +-----+-----+ | | +-----+-----+ | * | +-----+-----+ | | +-----+-----+ |
- | | | | | | * | | | | | |
- | | | | | | * | | | | | |
- | +--+--+ | | +--+--+ | * | +--+--+ | | +--+--+ |
- | | | | | | | | * | | | | | | | |
- | | R1 | | | | R2 | | * | | R1 +--+ | | +---+ R2 | |
- | | | | | | | | * | | | | | | | | | |
- | +--+--+ | | +--+--+ | * | +--+--+ | | | | +--+--+ |
- | | | | | | * | | | | | | | |
- | | | | | | * | | | | | | | |
- | +---+-+-+ | | +---+-+-+ | * | +---+---+ | | | | +---+---+ |
- | net1 | | | net2 | | * | net1 | | | | net2 |
- | | | | | | * | | | | | |
- | +--------+--+ | | +--------+--+ | * | | | net3 | | |
- | | Instance1 | | | | Instance2 | | * | +------------+------------+-----------+ |
- | +-----------+ | | +-----------+ | * | | | |
- | | | | | | * +-----------------+ +-----------------+
- | | | net3 | | | *
- | +------+-------------------------+----+ | * Fig 8.2
- | | | | *
- +-----------------+ +-----------------+ *
- *
- Fig 8.1
-
-In the left topology, two instances are connected by a shared VxLAN network;
-only local networks are attached to the local routers, so the routers can be
-either legacy or DVR mode. In the right topology, two local routers are
-connected by a shared VxLAN network, so they can only be legacy mode.
-
-Cross-Neutron Network and Local Router
---------------------------------------
-
-Topology::
-
- +-----------------+ +-----------------+
- |OpenStack1 | |OpenStack2 |
- | | | |
- | ext net1 | | ext net2 |
- | +-----+-----+ | | +-----+-----+ |
- | | | | | |
- | | | | | |
- | +--+--+ | | +--+--+ |
- | | | | | | | |
- | | R1 | | | | R2 | |
- | | | | | | | |
- | +--+--+ | | +--+--+ |
- | | | | | |
- | net1 | | | | |
- | +--+---+---------------------+---+---+ |
- | | | | | |
- | | | | | |
- | +--+--------+ | | +--+--------+ |
- | | Instance1 | | | | Instance2 | |
- | +-----------+ | | +-----------+ |
- | | | |
- +-----------------+ +-----------------+
-
- Fig 9
-
-From the Neutron API point of view, attaching a network to different routers,
-each of which has its own external gateway, is allowed, but packets can only
-get out via one of the external networks because there is only one gateway ip
-in one subnet. In the Tricircle, however, we allocate one gateway ip for the
-network in each OpenStack cloud, so instances can access a specific external
-network via a specific gateway according to which OpenStack cloud they are
-located in.
-
-We can see this topology as a simplification of the topology shown in Fig 8.1
-in that it doesn't require an extra network interface for instances. And if no
-other networks except net1 are attached to R1 and R2, R1 and R2 can be DVR
-mode.
-
-In the NFV scenario, the instance itself usually acts as a router, so there's
-no need to create a Neutron router; we directly attach the instance to the
-provider network and access the real external network via the provider
-network. In that case, when creating the Neutron network, the
-"router:external" attribute should be set to "False". See Fig 10::
-
- +-----------------+ +-----------------+
- |OpenStack1 | |OpenStack2 |
- | | | |
- | provider net1 | | provider net2 |
- | +--+---------+ | | +--+---------+ |
- | | | | | |
- | | | | | |
- | +--+--------+ | | +--+--------+ |
- | | VNF | | | | VNF | |
- | | Instance1 | | | | Instance2 | |
- | +------+----+ | | +------+----+ |
- | | | | | |
- | | | | | |
- | net1 | | | | |
- | +------+-------------------------+---+ |
- | | | |
- +-----------------+ +-----------------+
-
- Fig 10
-
-Local Network and Cross-Neutron Router
---------------------------------------
-
-Topology::
-
- +-----------------+ +-----------------+
- |OpenStack1 | |OpenStack2 |
- | | | |
- | | | ext net |
- | | | +-------+---+ |
- | bridge net | | | |
- | +-----+-----------------+-+-+ | |
- | | | | | | +--+--+ |
- | | | | | | | | |
- | +--+--+ | | | +----+ R | |
- | | | | | | | | |
- | | R | | | | +-----+ |
- | | | | | | |
- | +--+--+ | | | +-----+ |
- | | | | | | | |
- | | | | +---+ R | |
- | +---+---+ | | | | |
- | net1 | | +--+--+ |
- | | | | |
- | | | | |
- | | | +---+---+ |
- | | | net2 |
- | | | |
- +-----------------+ +-----------------+
-
- Fig 11
-
-Since the router is cross-Neutron type, the Tricircle automatically creates a
-bridge network to connect the router instances inside the two Neutron servers
-and to connect the router instance to the real external network. Networks
-attached to the router are local type, so the router can be either legacy or
-DVR mode.
-
-Cross-Neutron Network and Cross-Neutron Router
-----------------------------------------------
-
-Topology::
-
- *
- +-----------------+ +-----------------+ * +-----------------+ +-----------------+
- |OpenStack1 | |OpenStack2 | * |OpenStack1 | |OpenStack2 |
- | | | | * | | | |
- | | | ext net | * | | | ext net |
- | | | +-------+---+ | * | | | +-------+---+ |
- | bridge net | | | | * | bridge net | | | |
- | +-----+-----------------+-+-+ | | * | +-----+-----------------+-+-+ | |
- | | | | | | +--+--+ | * | | | | | | +--+--+ |
- | | | | | | | | | * | | | | | | | | |
- | | | | | +----+ R | | * | | | | | +----+ R | |
- | | | | | | | | * | | | | | | | |
- | +--+--+ | | | +-----+ | * | +--+--+ | | | +-----+ |
- | | | | | | | * | | | | | | |
- | | R | | | | +-----+ | * | +--+ R | | | | +-----+ |
- | | | | | | | | | * | | | | | | | | | |
- | +--+--+ | | +---+ R | | * | | +--+--+ | | +---+ R +--+ |
- | | | | | | | * | | | | | | | | |
- | | | | +--+--+ | * | | | | | +--+--+ | |
- | | | | | | * | | | | | | | |
- | | | | | | * | | | | | | | |
- | +---+------------------------+---+ | * | | +---+------------------------+---+ | |
- | net1 | | | * | | net1 | | | |
- | | | | * | | | | | |
- +-----------------+ +-----------------+ * | | | | | |
- * | +-+------------------------------------++ |
- Fig 12.1 * | net2 | | |
- * | | | |
- * +-----------------+ +-----------------+
- *
- Fig 12.2
-
-In Fig 12.1, the router can only be legacy mode since net1, which is attached
-to the router, is shared VxLAN type. Actually in this case the bridge network
-is not needed for east-west networking. In Fig 12.2, both net1 and net2 are
-shared VxLAN type and are attached to the router (again, this router can only
-be legacy mode), so packets between net1 and net2 are routed in the router of
-the local OpenStack cloud and then sent to the target. Extra routes will be
-cleared so no packets go through the bridge network. This is how the Tricircle
-currently supports VLAN networks.
-
-Recommended Layer-3 Networking Mode
------------------------------------
-
-Let's summarize the above discussion. Assuming that DVR mode is a must, the
-recommended layer-3 topology for each scenario is listed below.
-
-+----------------------------+---------------------+------------------+
-| north-south networking via | isolated east-west | Fig 7 |
-| multiple external networks | networking | |
-| +---------------------+------------------+
-| | connected east-west | Fig 8.1 or Fig 9 |
-| | networking | |
-+----------------------------+---------------------+------------------+
-| north-south networking via | Fig 11 |
-| single external network | |
-+----------------------------+---------------------+------------------+
-| north-south networking via | Fig 10 |
-| direct provider network | |
-+--------------------------------------------------+------------------+
-
-Data Model Impact
-=================
-
-None
-
-Dependencies
-============
-
-None
-
-Documentation Impact
-====================
-
-Guide of multi-node DevStack installation needs to be updated to introduce
-the new bridge network solution.
diff --git a/specs/ocata/legacy_tables_clean.rst b/specs/ocata/legacy_tables_clean.rst
deleted file mode 100644
index 715dd39e..00000000
--- a/specs/ocata/legacy_tables_clean.rst
+++ /dev/null
@@ -1,111 +0,0 @@
-=====================================
-Tricircle Table Clean After Splitting
-=====================================
-
-Background
-==========
-Originally the Tricircle provided unified OpenStack API gateway and networking
-automation functionality. But now the Tricircle has narrowed its scope to
-networking automation across Neutron servers; the OpenStack API gateway
-functionality is developed in another project called Trio2o [1].
-
-Problem Description
-===================
-After this splitting, many tables are no longer used, including the quota,
-volume, aggregate and pod binding tables, etc. Their data models, tables and
-APIs should be removed. As for the rest of the tables that are still in use
-in the Tricircle, they should be renamed for better understanding.
-
-Apart from the table cleaning work and table renaming work, a new feature
-will be developed to remove the dependency on the old table. During external
-network creation, 'availability_zone_hints' (AZ or az is used as shorthand
-for availability zone) is taken as a parameter. Previously az_hints was
-searched in the pod binding table by az_name and tenant_id; now the pod
-binding table is deprecated and a new search strategy is needed to fix the
-problem [2]. A function named find_pod_by_az will be developed to find the
-az_hints by az_name in the pod table. Given a non-empty az_name, we first
-match it against region_name in the pod table; if a pod with the same
-region_name is found, it is returned and the search is complete. If no pod is
-found by region_name, we then try to match it against az_name in the pod
-table: if multiple pods are found, an exception is raised; if exactly one pod
-is found, it is returned; if no pod matches at the end of this search
-procedure, an exception is raised. However, if the az_name is empty, we return
-None and a new configuration item "default_region_for_external_network" is
-used instead.
-
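-The find_pod_by_az lookup described above can be sketched as follows; this is
-an illustration of the algorithm only, not the exact Tricircle db api code::
-
-    def find_pod_by_az(pods, az_name):
-        # ``pods`` is assumed to be a list of dicts with ``region_name``
-        # and ``az_name`` keys.
-        if not az_name:
-            # Empty az_name: the caller falls back to the new
-            # default_region_for_external_network configuration item.
-            return None
-        # First try to match az_name against region_name.
-        for pod in pods:
-            if pod['region_name'] == az_name:
-                return pod
-        # Then try to match it against az_name in the pod table.
-        matched = [pod for pod in pods if pod['az_name'] == az_name]
-        if len(matched) > 1:
-            raise Exception('multiple pods match az %s' % az_name)
-        if not matched:
-            raise Exception('no pod matches az %s' % az_name)
-        return matched[0]
-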
-Proposed Change
-===============
-
-All tables that need to be changed can be divided into two categories,
-``Table to be removed``, ``Table to be renamed``.
-
-Table to be removed:
-
-- quality_of_service_specs
-
-- quota_classes
-
-- quota_usages
-
-- quotas
-
-- reservations
-
-- volume_type_extra_specs
-
-- volume_type_projects
-
-- volume_types
-
-- aggregates
-
-- aggregate_metadata
-
-- instance_types
-
-- instance_type_projects
-
-- instance_type_extra_specs
-
-- key_pairs
-
-- pod_binding
-
-Table to be renamed:
-
-- cascaded_pod_service_configuration(new name: cached_endpoints)
-
-- cascaded_pods(new name: pods)
-
-- cascaded_pods_resource_routing(new name: resource_routings)
-
-- job(new name: async_jobs)
-
-The deprecated tables will be removed from the repository directly, and the
-other tables, whose names carry outdated meanings, will be renamed for better
-understanding.
-
-After the deletion of the pod binding table, a new feature will be developed
-to look up the az in the pod table rather than the pod binding table.
-
-Data Model Impact
-=================
-
-In database, many tables are removed, other tables are renamed for better
-understanding.
-
-Documentation Impact
-====================
-
-After the pod binding table is removed, the explanation of the pod binding
-API in the doc/source/api_v1.rst will be removed as well.
-
-Dependencies
-============
-
-None
-
-References
-==========
-[1] https://github.com/openstack/trio2o
-
-[2] https://review.opendev.org/#/c/412325/
diff --git a/specs/ocata/local-neutron-plugin.rst b/specs/ocata/local-neutron-plugin.rst
deleted file mode 100644
index ced5e73f..00000000
--- a/specs/ocata/local-neutron-plugin.rst
+++ /dev/null
@@ -1,214 +0,0 @@
-==============================
-Tricircle Local Neutron Plugin
-==============================
-
-Background
-==========
-
-One of the key values we would like to achieve via the Tricircle project is to
-provide networking automation functionality across several Neutron servers.
-Each OpenStack instance runs its own Nova and Neutron services but shares the
-same Keystone service or uses federated Keystone, which is a multi-region
-deployment mode. With networking automation, virtual machines or bare metals
-booted in different OpenStack instances can inter-communicate via layer2 or
-layer3 networks.
-
-Considering the cross Neutron layer2 network case, if the Neutron service in
-each OpenStack instance allocates ip addresses independently, the same ip
-address could be assigned to virtual machines in different OpenStack
-instances, so ip address conflicts could occur. One straightforward solution
-to this problem is to divide the ip allocation pool into several parts, one
-for each OpenStack instance. The drawback is that since virtual machines are
-not distributed evenly across OpenStack instances, we may see some OpenStack
-instances use up their ip addresses while other OpenStack instances still have
-unallocated ip addresses. What's worse, dividing the ip allocation pool makes
-it impossible for us to process virtual machine migration from one OpenStack
-instance to another.
-
-Thanks to Neutron's flexible plugin framework, by writing a new plugin and
-configuring Neutron server to use it, developers can define what Neutron server
-should do after receiving a network resources operation request. So for the
-ip address conflict issue discussed above, we decide to run one central Neutron
-server with the Tricircle central Neutron plugin(abbr: "central plugin") to
-manage ip allocation pool centrally.
-
-Besides central plugin, we need a bridge to connect central and local Neutron
-servers since each OpenStack instance has its own local Nova and Neutron server
-but these two services are not aware of the central Neutron server. This bridge
-should validate requested network data via the central Neutron server, then
-create necessary network resources in the target OpenStack instance with the
-data retrieved from the central Neutron server.
-
-Local Plugin
-============
-
-For connecting central and local Neutron servers, Neutron plugin is again a
-good place for us to build the bridge. We can write our own plugin, the
-Tricircle local Neutron plugin(abbr: "local plugin") to trigger the cross
-Neutron networking automation in local Neutron server. During virtual machine
-booting, local Nova server will interact with local Neutron server to query
-network or create port, which will trigger local plugin to retrieve data from
-central Neutron server and create necessary network resources according to the
-data. To support different core plugins, we will introduce a new option
-"real_core_plugin" in the "tricircle" configuration group. During
-initialization, local plugin will load the plugin specified by
-"real_core_plugin". Local plugin only adds logic to interact with central
-Neutron server, but invokes the real core plugin to finish the CRUD operations
-of local network resources. The following graph shows the relation between user
-and Nova and Neutron servers: ::
-
- +------+
- | user |
- +-+--+-+
- | |
- +-----------+ +----------------------+
- | boot vm create and query |
- | network resource |
- v |
- +----+-------+ |
- | local Nova | xxxxxxxxxxxxxxx |
- +----+-------+ xxx xxx |
- | xx xx |
- +---+ xxx +--------+ xxx |
- | x | | x |
- | x | | x |
- v V | v x v
- +--------+---------+ | +----+----------+----+
- | local Neutron | | | central Neutron |
- | +--------------+ | | | +----------------+ |
- | | local plugin | | | | | central plugin | |
- | +--------------+ | | | +----------------+ |
- +------------------+ | +--------------------+
- | |
- +-------------+
-
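-A rough sketch of how the local plugin could wrap the plugin named by
-"real_core_plugin" is shown below; the option comes from this spec, while the
-surrounding class and method bodies are illustrative assumptions only::
-
-    from oslo_config import cfg
-    from oslo_utils import importutils
-
-    tricircle_opts = [
-        cfg.StrOpt('real_core_plugin',
-                   help='The core plugin the local plugin delegates to'),
-    ]
-    cfg.CONF.register_opts(tricircle_opts, group='tricircle')
-
-    class TricircleLocalPlugin(object):
-        def __init__(self):
-            # Load and instantiate the real core plugin, e.g. the ML2 plugin.
-            plugin_class = importutils.import_class(
-                cfg.CONF.tricircle.real_core_plugin)
-            self.core_plugin = plugin_class()
-
-        def get_network(self, context, network_id, fields=None):
-            # Look up the network locally first; if it is missing, the local
-            # plugin fetches it from the central Neutron server and creates
-            # it locally (omitted here), then delegates the query.
-            return self.core_plugin.get_network(context, network_id, fields)
-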
-Next, we use the virtual machine booting procedure to elaborate how the local
-plugin works. To begin with, a user creates a network and subnet via the
-central Neutron server. Then this user passes the network id as the requested
-network information to the local Nova server to boot a virtual machine. During
-parameter validation, the local Nova server queries the local Neutron server
-to ensure the passed-in network id is valid, which is a "network-get" request.
-In the "network-get" handle function, the local plugin first checks if local
-Neutron already has a network with that id. If not, the local plugin retrieves
-the network and subnet information from the central Neutron server, then
-creates the network and subnet based on this information. The user may pass an
-invalid network id by mistake; in this case, the local plugin receives a 404
-response from the central Neutron server and simply returns a 404 response to
-the local Nova server.
-
-After the network id validation passes, the local Nova server continues to
-schedule a host, so the compute manager running on that host will do the
-remaining work. The compute manager creates a port in the requested network
-via the local Neutron server, which is a "port-create" request. In the
-"port-create" handle function, the local plugin sends the same request to the
-central Neutron server to create a port, and uses the returned port
-information to create a local port. With the local plugin, we ensure all ip
-addresses are allocated by the central Neutron server.
-
-At the end of the network setup of the virtual machine, the compute manager
-issues a "port-update" request to the local Neutron server to associate the
-host with the port. In the "port-update" handle function, the local plugin
-recognizes that this request is sent from the local Nova server because the
-request body contains host information, so it sends a "port-update" request to
-the central Neutron server with the region name in the request body. In
-Keystone, we register the services inside one OpenStack instance as one unique
-region, so we can use the region name to identify one OpenStack instance.
-After receiving the request, the central Neutron server is informed that one
-virtual machine port is correctly set up in one OpenStack instance, so it
-starts the cross Neutron networking automation process, like security group
-rule population, tunnel setup for layer2 communication and route setup for
-layer3 communication, which are done by making Neutron API calls to each local
-Neutron server.
-
-
-Implementation
-==============
-
-Implementation details of the local plugin is discussed in this section.
-
-Resource Id
------------
-
-The local plugin always retrieves the data of network resources from the
-central Neutron server and uses this data to create network resources in the
-local Neutron server. During the creation of these network resources, we need
-to guarantee that resource ids in the central and local servers are the same.
-Consider the scenario where a user creates a port via the central Neutron
-server and then uses this port to boot a virtual machine. After the local Nova
-server receives the request, it will use the port id to create a tap device
-for the virtual machine. If the port ids in the central and local Neutron
-servers are different, the OVS agent can't correctly recognize the tap device
-and configure it. As a result, the virtual machine fails to connect to the
-network. Fortunately, the database access module in Neutron allows us to
-specify the id before creating the resource record, so in the local plugin we
-simply reuse the central resource's id to create the local resource.
-
-Network Type Adaption
----------------------
-
-Two network types are supported currently in central plugin, which are local
-and vlan type. Before creating network based on information retrieved
-from central Neutron server, local plugin needs to adapt network type. For
-local type, local plugin creates the network without specifying the network
-type, so the default tenant network type is used. For vlan type, local plugin
-keeps the network type, segmentation id and physical network parameter.
-
-We plan to support another two network types later. They are shared_vxlan and
-mixed network type. For shared_vxlan type, local plugin changes the network
-type parameter from "shared_vxlan" to "vxlan", but keeps the segmentation id
-parameter(vxlan type doesn't need physical network parameter). For mixed type,
-like local type, local plugin uses the default tenant network type to create
-the network, but it needs to do one more thing, that is to save the segment
-information in the central Neutron server. Neutron has an extension which
-allows one network to carry multiple segments information [1], so the segment
-information of each local network can all be saved in the central network.
-
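-The adaptation logic can be sketched as follows; this is a simplified
-illustration, not the exact local plugin code::
-
-    def adapt_network_body(central_network):
-        # Map the network type retrieved from the central Neutron server to
-        # what the local Neutron server expects.
-        net_type = central_network.get('provider:network_type')
-        body = {'name': central_network['name']}
-        if net_type == 'local':
-            # Use the default tenant network type: omit provider attributes.
-            pass
-        elif net_type == 'vlan':
-            body.update({
-                'provider:network_type': 'vlan',
-                'provider:segmentation_id':
-                    central_network['provider:segmentation_id'],
-                'provider:physical_network':
-                    central_network['provider:physical_network'],
-            })
-        elif net_type == 'shared_vxlan':
-            # Planned: translate to plain vxlan and keep the segmentation
-            # id; vxlan needs no physical network parameter.
-            body.update({
-                'provider:network_type': 'vxlan',
-                'provider:segmentation_id':
-                    central_network['provider:segmentation_id'],
-            })
-        return body
-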
-Dhcp Port Handle
-----------------
-
-After local subnet creation, local Neutron server will schedule one dhcp agent
-for that subnet, and dhcp agent will automatically create a dhcp port. The ip
-address of this dhcp port is not allocated by central Neutron server, so we may
-encounter ip address conflict. We need to address this problem to ensure all ip
-addresses are allocated by central Neutron server.
-
-Here is the approach. After the central Neutron server receives a subnet
-creation request, the central plugin not only creates the requested subnet,
-but also creates a port to pre-allocate an ip address for the dhcp port. So
-during the creation of the local subnet, the local plugin will query the
-central Neutron server to retrieve the data of the pre-created port and use
-its ip address to create a local dhcp port. The "device_id" of the dhcp port
-is set to "reserved_dhcp_port" so after one dhcp agent is scheduled, it will
-use this port instead of creating a new one.
-
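-For illustration, the body of the pre-created dhcp port could look roughly
-like the sketch below; the exact fields used by the central plugin may
-differ::
-
-    def build_reserved_dhcp_port_body(subnet):
-        # Pre-allocate an ip for the dhcp port in the central Neutron
-        # server; the scheduled dhcp agent later reuses this port because
-        # of the reserved device_id.
-        return {'port': {
-            'name': 'dhcp_port_%s' % subnet['id'],
-            'network_id': subnet['network_id'],
-            'fixed_ips': [{'subnet_id': subnet['id']}],
-            'device_id': 'reserved_dhcp_port',
-            'device_owner': 'network:dhcp',
-        }}
-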
-Gateway Port Handle
--------------------
-
-If cross Neutron layer2 networking is enabled in one network, we need to
-allocate one gateway ip for that network in each OpenStack instance. The reason
-is that we want layer3 routing to be finished locally in each OpenStack
-instance. If all the OpenStack instances have the same gateway ip, packets sent
-to the gateway may reach the remote one, so the path is not the best and not
-predictable.
-
-How we address this problem in the local plugin is that before creating the
-local subnet, the local plugin sends a request to the central Neutron server
-to create a "gateway port", then uses the ip of this port as the gateway ip of
-the local subnet. The name of the gateway port includes the region name of the
-OpenStack instance and the id of the subnet, so each OpenStack instance can
-have its own gateway port and gateway ip for one specific subnet.
-
-Data Model Impact
-=================
-
-None
-
-Dependencies
-============
-
-None
-
-Documentation Impact
-====================
-
-Installation guide needs to be updated to introduce the configuration of
-central and local plugin.
-
-References
-==========
-[1] https://blueprints.launchpad.net/neutron/+spec/ml2-multi-segment-api
diff --git a/specs/pike/async_job_management.rst b/specs/pike/async_job_management.rst
deleted file mode 100644
index 896211ed..00000000
--- a/specs/pike/async_job_management.rst
+++ /dev/null
@@ -1,276 +0,0 @@
-=========================================
-Tricircle Asynchronous Job Management API
-=========================================
-
-Background
-==========
-In the Tricircle, XJob provides OpenStack multi-region functionality. It
-receives and processes jobs from the Admin API or the Tricircle Central
-Neutron Plugin and handles them in an asynchronous way. For example, when
-booting an instance for the first time for a project, the router, security
-group rules, FIP and other resources may not have been created yet in the
-local Neutron(s); these resources can be created asynchronously to accelerate
-the response to the initial instance booting request, unlike the network,
-subnet and security group resources that must be created before an instance
-boots. Central Neutron can send such creation jobs to the local Neutron(s)
-through XJob and the local Neutron(s) then handle them at their own pace.
-
-Implementation
-==============
-The XJob server may go down occasionally, so tenants and cloud administrators
-need to know the job status and delete or redo failed jobs if necessary.
-Asynchronous job management APIs provide such functionality and they are
-listed as follows:
-
-* Create a job
-
- Create a job to synchronize resource if necessary.
-
- Create Job Request::
-
- POST /v1.0/jobs
- {
- "job": {
- "type": "port_delete",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- }
- }
-
- Response:
- {
- "job": {
- "id": "3f4ecf30-0213-4f1f-9cb0-0233bcedb767",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "port_delete",
- "timestamp": "2017-03-03 11:05:36",
- "status": "NEW",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- }
- }
-
- Normal Response Code: 202
-
-
-* Get a job
-
- Retrieve a job from the Tricircle database.
-
- The detailed information of the job will be shown. Otherwise
- it will return "Resource not found" exception.
-
- List Request::
-
- GET /v1.0/jobs/3f4ecf30-0213-4f1f-9cb0-0233bcedb767
-
- Response:
- {
- "job": {
- "id": "3f4ecf30-0213-4f1f-9cb0-0233bcedb767",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "port_delete",
- "timestamp": "2017-03-03 11:05:36",
- "status": "NEW",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- }
- }
-
- Normal Response Code: 200
-
-* Get all jobs
-
- Retrieve all of the jobs from the Tricircle database.
-
- List Request::
-
- GET /v1.0/jobs/detail
-
- Response:
- {
- "jobs":
- [
- {
- "id": "3f4ecf30-0213-4f1f-9cb0-0233bcedb767",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "port_delete",
- "timestamp": "2017-03-03 11:05:36",
- "status": "NEW",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- },
- {
- "id": "b01fe514-5211-4758-bbd1-9f32141a7ac2",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "seg_rule_setup",
- "timestamp": "2017-03-01 17:14:44",
- "status": "FAIL",
- "resource": {
- "project_id": "d01246bc5792477d9062a76332b7514a"
- }
- }
- ]
- }
-
- Normal Response Code: 200
-
-* Get all jobs with filter(s)
-
- Retrieve job(s) from the Tricircle database. We can filter them by
- project ID, job type and job status. If no filter is provided,
- GET /v1.0/jobs will return all jobs.
-
- The response contains a list of jobs. Using filters, a subset of jobs
- will be returned.
-
- List Request::
-
- GET /v1.0/jobs?project_id=d01246bc5792477d9062a76332b7514a
-
- Response:
- {
- "jobs":
- [
- {
- "id": "3f4ecf30-0213-4f1f-9cb0-0233bcedb767",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "port_delete",
- "timestamp": "2017-03-03 11:05:36",
- "status": "NEW",
- "resource": {
- "pod_id": "0eb59465-5132-4f57-af01-a9e306158b86",
- "port_id": "8498b903-9e18-4265-8d62-3c12e0ce4314"
- }
- },
- {
- "id": "b01fe514-5211-4758-bbd1-9f32141a7ac2",
- "project_id": "d01246bc5792477d9062a76332b7514a",
- "type": "seg_rule_setup",
- "timestamp": "2017-03-01 17:14:44",
- "status": "FAIL",
- "resource": {
- "project_id": "d01246bc5792477d9062a76332b7514a"
- }
- }
- ]
- }
-
- Normal Response Code: 200
-
-
-* Get all jobs' schemas
-
- Retrieve all jobs' schemas. User may want to know what the resources
- are needed for a specific job.
-
- List Request::
-
- GET /v1.0/jobs/schemas
-
- return all jobs' schemas.
- Response:
- {
- "schemas":
- [
- {
- "type": "configure_route",
- "resource": ["router_id"]
- },
- {
- "type": "router_setup",
- "resource": ["pod_id", "router_id", "network_id"]
- },
- {
- "type": "port_delete",
- "resource": ["pod_id", "port_id"]
- },
- {
- "type": "seg_rule_setup",
- "resource": ["project_id"]
- },
- {
- "type": "update_network",
- "resource": ["pod_id", "network_id"]
- },
- {
- "type": "subnet_update",
- "resource": ["pod_id", "subnet_id"]
- },
- {
- "type": "shadow_port_setup",
-                "resource": ["pod_id", "network_id"]
- }
- ]
- }
-
- Normal Response Code: 200
-
-
-* Delete a job
-
- Delete a failed or duplicated job from the Tricircle database.
- A pair of curly braces will be returned if succeeds, otherwise an
- exception will be thrown. What's more, we can list all jobs to verify
- whether it is deleted successfully or not.
-
- Delete Job Request::
-
- DELETE /v1.0/jobs/{id}
-
- Response:
- This operation does not return a response body.
-
- Normal Response Code: 200
-
-
-* Redo a job
-
- Redo a halted job brought by the XJob server corruption or network failures.
- The job handler will redo a failed job with time interval, but this Admin
- API will redo a job immediately. Nothing will be returned for this request,
- but we can monitor its status through the execution state.
-
- Redo Job Request::
-
- PUT /v1.0/jobs/{id}
-
- Response:
- This operation does not return a response body.
-
- Normal Response Code: 200
-
-
-Data Model Impact
-=================
-
-In order to manage the jobs for each tenant, we need to filter them by
-project ID. So project ID is going to be added to the AsyncJob model and
-AsyncJobLog model.
-
-Dependencies
-============
-
-None
-
-Documentation Impact
-====================
-
-- Add documentation for asynchronous job management API
-- Add release note for asynchronous job management API
-
-References
-==========
-
-None
-
diff --git a/specs/pike/cross-neutron-vxlan-networking.rst b/specs/pike/cross-neutron-vxlan-networking.rst
deleted file mode 100644
index d0417609..00000000
--- a/specs/pike/cross-neutron-vxlan-networking.rst
+++ /dev/null
@@ -1,233 +0,0 @@
-===========================================
-Cross Neutron VxLAN Networking in Tricircle
-===========================================
-
-Background
-==========
-
-Currently we only support VLAN as the cross-Neutron network type. For the VLAN
-network type, the central plugin in the Tricircle picks a physical network and
-allocates a VLAN tag (or uses what users specify); then, before the creation
-of the local network, the local plugin queries this provider network
-information and creates the network based on it. The Tricircle only guarantees
-that instance packets sent out of hosts in different pods belonging to the
-same VLAN network will be tagged with the same VLAN ID. Deployers need to
-carefully configure physical networks and switch ports to make sure that
-packets can be transported correctly between physical devices.
-
-For more flexible deployment, the VxLAN network type is a better choice.
-Compared to the 12-bit VLAN ID, the 24-bit VxLAN ID can support a much larger
-number of bridge networks and cross-Neutron L2 networks. With the MAC-in-UDP
-encapsulation of VxLAN, hosts in different pods only need to be IP routable to
-each other to transport instance packets.
-
-Proposal
-========
-
-There are some challenges to support cross-Neutron VxLAN network.
-
-1. How to keep VxLAN ID identical for the same VxLAN network across Neutron servers
-
-2. How to synchronize tunnel endpoint information between pods
-
-3. How to trigger L2 agents to build tunnels based on this information
-
-4. How to support different back-ends, like ODL, L2 gateway
-
-The first challenge can be solved in the same way as for VLAN networks: we
-allocate the VxLAN ID in the central plugin and the local plugin uses the same
-VxLAN ID to create the local network. For the second challenge, we introduce a
-new table called "shadow_agents" in the Tricircle database, so the central
-plugin can save the tunnel endpoint information collected from one local
-Neutron server in this table and use it to populate the information to other
-local Neutron servers when needed. Here is the schema of the table:
-
-.. csv-table:: Shadow Agent Table
- :header: Field, Type, Nullable, Key, Default
-
- id, string, no, primary, null
- pod_id, string, no, , null
- host, string, no, unique, null
- type, string, no, unique, null
- tunnel_ip, string, no, , null
-
-**How to collect tunnel endpoint information**
-
-When the host where a port will be located is determined, local Neutron server
-will receive a port-update request containing host ID in the body. During the
-process of this request, local plugin can query agent information that contains
-tunnel endpoint information from local Neutron database with host ID and port
-VIF type; then send tunnel endpoint information to central Neutron server by
-issuing a port-update request with this information in the binding profile.
-
-**How to populate tunnel endpoint information**
-
-When the tunnel endpoint information in one pod needs to be populated to
-other pods, XJob will issue port-create requests to the corresponding local
-Neutron servers with the tunnel endpoint information queried from the
-Tricircle database in the bodies. After receiving such a request, the local
-Neutron server will save the tunnel endpoint information by calling the real
-core plugin's "create_or_update_agent" method. This method comes from the
-neutron.db.agent_db.AgentDbMixin class; plugins that support the "agent"
-extension have this method. Actually there is no such agent daemon running in
-the target local Neutron server, but we insert a record for it in the database
-so the local Neutron server will assume the agent exists. That's why we call
-it a shadow agent.
-
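-For illustration, the shadow agent record saved via that method could look
-roughly like the following; the field values are made up and the exact body
-depends on the agent type::
-
-    shadow_agent_state = {
-        'agent_type': 'Open vSwitch agent',
-        'host': 'host2',                      # host in the remote pod
-        'topic': 'N/A',
-        'binary': 'neutron-openvswitch-agent',
-        'configurations': {
-            'tunneling_ip': '192.168.1.101',  # tunnel endpoint of the host
-            'tunnel_types': ['vxlan'],
-        },
-    }
-    # The local plugin would then hand this state to the real core plugin,
-    # e.g. core_plugin.create_or_update_agent(context, shadow_agent_state).
-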
-The proposed solution for the third challenge is based on the shadow agent and
-L2 population mechanism. In the original Neutron process, if the port status
-is updated to active, L2 population mechanism driver does two things. First,
-driver checks if the updated port is the first port in the target agent. If so,
-driver collects tunnel endpoint information of other ports in the same network,
-then sends the information to the target agent via RPC. Second, driver sends
-the tunnel endpoint information of the updated port to other agents where ports
-in the same network are located, also via RPC. L2 agents will build the tunnels
-based on the information they received. To trigger the above processes to build
-tunnels across Neutron servers, we further introduce shadow port.
-
-Let's say we have two instance ports, port1 is located in host1 in pod1 and
-port2 is located in host2 in pod2. To make L2 agent running in host1 build a
-tunnel to host2, we create a port with the same properties as port2 in pod1.
-As discussed above, local Neutron server will create shadow agent during the
-process of port-create request, so local Neutron server in pod1 won't complain
-that host2 doesn't exist. To trigger L2 population process, we then update the
-port status to active, so L2 agent in host1 will receive tunnel endpoint
-information of port2 and build the tunnel. Port status is a read-only property,
-so we can't directly update it via the RESTful API. Instead, we issue a
-port-update request with a special key in the binding profile. After the local
-Neutron server receives such a request, it pops the special key from the
-binding profile and updates the port status to active. The XJob daemon will
-take the job to create and update shadow ports.
-
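-A simplified sketch of the XJob side of this process is shown below; the
-special key name and the copied port attributes are illustrative assumptions::
-
-    # illustrative name of the special key that asks the local plugin to
-    # force the port status to active
-    PROFILE_FORCE_UP_KEY = 'force_up'
-
-
-    def setup_shadow_port(local_client, network_id, target_port):
-        """Create a shadow copy of a port from another pod and bring it up.
-
-        local_client is assumed to be a neutronclient instance pointing to
-        the local Neutron server where tunnels need to be built;
-        target_port is the port body queried from the other pod.
-        """
-        body = {'port': {
-            'network_id': network_id,
-            'name': 'shadow_' + target_port['id'],
-            'mac_address': target_port['mac_address'],
-            'fixed_ips': target_port['fixed_ips'],
-            'binding:host_id': target_port['binding:host_id'],
-        }}
-        shadow_port = local_client.create_port(body)['port']
-        # trigger L2 population: the local plugin pops the special key and
-        # updates the port status to active
-        local_client.update_port(
-            shadow_port['id'],
-            {'port': {'binding:profile': {PROFILE_FORCE_UP_KEY: True}}})
-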
-Here is the flow of shadow agent and shadow port process::
-
- +-------+ +---------+ +---------+
- | | | | +---------+ | |
- | Local | | Local | | | +----------+ +------+ | Local |
- | Nova | | Neutron | | Central | | | | | | Neutron |
- | Pod1 | | Pod1 | | Neutron | | Database | | XJob | | Pod2 |
- | | | | | | | | | | | |
- +---+---+ +---- ----+ +----+----+ +----+-----+ +--+---+ +----+----+
- | | | | | |
- | update port1 | | | | |
- | [host id] | | | | |
- +---------------> | | | |
- | | update port1 | | | |
- | | [agent info] | | | |
- | +----------------> | | |
- | | | save shadow | | |
- | | | agent info | | |
- | | +----------------> | |
- | | | | | |
- | | | trigger shadow | | |
- | | | port setup job | | |
- | | | for pod1 | | |
- | | +---------------------------------> |
- | | | | | query ports in |
- | | | | | the same network |
- | | | | +------------------>
- | | | | | |
- | | | | | return port2 |
- | | | | <------------------+
- | | | | query shadow | |
- | | | | agent info | |
- | | | | for port2 | |
- | | | <----------------+ |
- | | | | | |
- | | | | create shadow | |
- | | | | port for port2 | |
- | <--------------------------------------------------+ |
- | | | | | |
- | | create shadow | | | |
- | | agent and port | | | |
- | +-----+ | | | |
- | | | | | | |
- | | | | | | |
- | <-----+ | | | |
- | | | | update shadow | |
- | | | | port to active | |
- | <--------------------------------------------------+ |
- | | | | | |
- | | L2 population | | | trigger shadow |
- | +-----+ | | | port setup job |
- | | | | | | for pod2 |
- | | | | | +-----+ |
- | <-----+ | | | | |
- | | | | | | |
- | | | | <-----+ |
- | | | | | |
- | | | | | |
- + + + + + +
-
-Bridge networks can support the VxLAN network type in the same way; we just
-create shadow ports for the router interface and router gateway. In the above
-graph, the local Nova server updates the port with the host ID to trigger the
-whole process. The L3 agent will update the interface port and gateway port
-with the host ID, so a similar process will be triggered to create shadow
-ports for the router interface and router gateway.
-
-Currently the Neutron team is working on push notification [1]_: the Neutron
-server will send resource data to agents; agents cache this data and use it to
-do the real job like configuring openvswitch, updating iptables, configuring
-dnsmasq, etc. Agents don't need to retrieve resource data from the Neutron
-server via RPC any more. Based on push notification, if tunnel endpoint
-information is stored in the port object later, and this information supports
-updating via the RESTful API, we can simplify the solution for challenges 3
-and 4. We just need to create a shadow port containing tunnel endpoint
-information. This information will be pushed to agents and agents use it to
-create the necessary tunnels and flows.
-
-**How to support different back-ends besides ML2+OVS implementation**
-
-We consider two typical back-ends that can support cross-Neutron VxLAN
-networking: L2 gateway and an SDN controller like ODL. For L2 gateway, we only
-consider supporting static tunnel endpoint information at the first step.
-The shadow agent and shadow port process is almost the same as in the ML2+OVS
-implementation. The difference is that, for L2 gateway, the tunnel IP of the
-shadow agent is set to the tunnel endpoint of the L2 gateway. So after L2
-population, L2 agents will create tunnels to the tunnel endpoint of the L2
-gateway. For SDN controller, we assume that SDN controller has the ability to
-manage tunnel endpoint information across Neutron servers, so Tricircle only helps to
-allocate VxLAN ID and keep the VxLAN ID identical across Neutron servers for one network.
-Shadow agent and shadow port process will not be used in this case. However, if
-different SDN controllers are used in different pods, it will be hard for each
-SDN controller to connect hosts managed by other SDN controllers since each SDN
-controller has its own mechanism. This problem is discussed in this page [2]_.
-One possible solution in Tricircle is the same as what the L2 gateway mode
-does: we create shadow ports that contain the L2 gateway tunnel endpoint
-information so the SDN controller can build tunnels in its own way. We then
-configure the L2 gateway in each pod to forward the packets between L2
-gateways. L2 gateways discussed here
-are mostly hardware based, and can be controlled by SDN controller. SDN
-controller will use ML2 mechanism driver to receive the L2 network context and
-further control L2 gateways for the network.
-
-To distinguish different back-ends, we will add a new configuration option
-cross_pod_vxlan_mode whose valid values are "p2p", "l2gw" and "noop". Mode
-"p2p" works for the ML2+OVS scenario, in this mode, shadow ports and shadow
-agents containing host tunnel endpoint information are created; mode "l2gw"
-works for the L2 gateway scenario, in this mode, shadow ports and shadow agents
-containing L2 gateway tunnel endpoint information are created. For the SDN
-controller scenario, as discussed above, if SDN controller can manage tunnel
-endpoint information by itself, we only need to use "noop" mode, meaning that
-neither shadow ports nor shadow agents will be created; or if SDN controller
-can manage hardware L2 gateway, we can use "l2gw" mode.
-
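-A sketch of how this option could be registered with oslo.config is shown
-below; the option group and the default value are assumptions::
-
-    from oslo_config import cfg
-
-    tricircle_opts = [
-        cfg.StrOpt('cross_pod_vxlan_mode',
-                   default='p2p',
-                   choices=['p2p', 'l2gw', 'noop'],
-                   help='How to set up cross-pod VxLAN networking: "p2p" '
-                        'creates shadow ports and shadow agents with host '
-                        'tunnel endpoints, "l2gw" creates them with L2 '
-                        'gateway tunnel endpoints, "noop" creates neither.'),
-    ]
-
-    cfg.CONF.register_opts(tricircle_opts, group='tricircle')
-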
-Data Model Impact
-=================
-
-New table "shadow_agents" is added.
-
-Dependencies
-============
-
-None
-
-Documentation Impact
-====================
-
-- Update configuration guide to introduce options for VxLAN network
-- Update networking guide to discuss new scenarios with VxLAN network
-- Add release note about cross-Neutron VxLAN networking support
-
-References
-==========
-
-.. [1] https://blueprints.launchpad.net/neutron/+spec/push-notifications
-.. [2] https://etherealmind.com/help-wanted-stitching-a-federated-sdn-on-openstack-with-evpn/
diff --git a/specs/pike/l3-networking-multi-NS-with-EW-enabled.rst b/specs/pike/l3-networking-multi-NS-with-EW-enabled.rst
deleted file mode 100644
index d5c9edd4..00000000
--- a/specs/pike/l3-networking-multi-NS-with-EW-enabled.rst
+++ /dev/null
@@ -1,393 +0,0 @@
-===========================================
-Layer-3 Networking multi-NS-with-EW-enabled
-===========================================
-
-Problems
-========
-
-There are already several scenarios covered in Tricircle for north-south
-networking.
-
-The scenario "North South Networking via Multiple External Networks" [1]_ meets
-the demand for multiple external networks, but a local network cannot
-reach other local networks which are not in the same OpenStack cloud.
-
-The scenario "North South Networking via Single External Network" [3]_ can meet
-the east-west networking requirement of local networks, but the north-south
-traffic needs to go through a single gateway.
-
-In a multi-region cloud deployment, a common requirement is that each OpenStack
-cloud provides its own external network, so that north-south traffic is handled
-locally for the shortest path and/or multiple external networks ensure
-redundancy of application north-south traffic; at the same time, east-west
-networking of tenant networks between OpenStack clouds is also needed.
-
-Proposal
-========
-
-To address the above problems, the key limitation is the pattern for the
-router gateway: one router in Neutron can only be attached to one external
-network. As described in the spec of the combined bridge network [2]_, only an
-external network is suitable for working as the bridge network due to the DVR
-challenge.
-
-North-south traffic via the external network in the same region conflicts
-with using the external network as the bridge network.
-
-The proposal is to introduce a new networking mode for this scenario::
-
-
- +-----------------------+ +----------------------+
- | ext-net1 | | ext-net2 |
- | +---+---+ | | +--+---+ |
- |RegionOne | | | RegionTwo | |
- | +---+---+ | | +----+--+ |
- | | R1 | | | | R2 | |
- | +--+----+ | | +--+----+ |
- | | net1 | | net2 | |
- | +---+--+---+-+ | | ++-----+--+---+ |
- | | | | | | | |
- | +---------+-+ | | | | +--+--------+ |
- | | Instance1 | | | | | | Instance2 | |
- | +-----------+ | | | | +-----------+ |
- | +----+--+ | bridge-net | +-+-----+ |
- | | R3(1) +--------------------+ R3(2) | |
- | +-------+ | | +-------+ |
- +-----------------------+ +----------------------+
- Figure.1 Multiple external networks with east-west networking
-
-R1 is the router that connects to the external network ext-net1 directly
-in RegionOne. Net1's default gateway is R1, so all north-south traffic of net1
-will be forwarded by R1 by default. Similarly, north-south traffic of net2
-will be processed by R2 in RegionTwo. R1 and R2 are local routers, each of
-which is supposed to be present in only one region. The region name should be
-specified in the availability-zone-hint during router creation in central
-Neutron, for example::
-
- openstack --os-region-name=CentralRegion router create --availability-zone-hint=RegionOne R1
- openstack --os-region-name=CentralRegion router create --availability-zone-hint=RegionTwo R2
-
- openstack --os-region-name=CentralRegion router add subnet R1 <net1's subnet>
- openstack --os-region-name=CentralRegion router add subnet R2 <net2's subnet>
-
-In order to process the east-west traffic from net1 to net2, R3(1) and R3(2)
-will be introduced and inter-connected by bridge-net. Bridge-net could be a
-VLAN or VxLAN cross Neutron L2 network, and it's the "external network" for
-both R3(1) and R3(2). Please note that the bridge-net is not a real external
-network here, only an external network in the Neutron sense. R3(1) and
-R3(2) will only forward the east-west traffic across Neutron servers for local
-networks, so they don't need to work as DVRs; centralized routers are good
-enough.
-
-In central Neutron, we only need to create a logical router R3, called the
-east-west gateway, to handle the east-west traffic for local networks in
-different regions; it's a non-local router. The Tricircle central Neutron
-plugin will help to create R3(1) in RegionOne and R3(2) in RegionTwo, and use
-the bridge network to inter-connect R3(1) and R3(2). The logical topology in
-central Neutron looks as follows::
-
- ext-net1 ext-net2
- +-------+ +--+---+
- | |
- +---+---+ +----+--+
- | R1 | | R2 |
- +--+----+ +--+----+
- | net1 net2 |
- +---+--+---++ ++-----+--+---+
- | | | |
- +---------+-+ | | +--+--------+
- | Instance1 | | | | Instance2 |
- +-----------+ | | +-----------+
- +-+----+--+
- | R3 |
- +---------+
-
- Figure.2 Logical topology in central Neutron
-
-Tricircle central Neutron plugin will use logical router R3 to create R3(1)
-in RegionOne, and R3(2) in RegionTwo.
-
-Please note that R3(1) is not the default gateway of net1, and R3(2) is not
-the default gateway of net2 either. So the user has to explicitly create a
-port and use this port as the router interface between the router and the
-local network.
-
-In central Neutron, the topology could be created like this::
-
- openstack --os-region-name=CentralRegion port create --network=net1 net1-R3-interface
- openstack --os-region-name=CentralRegion router add port R3 net1-R3-interface
-
- openstack --os-region-name=CentralRegion port create --network=net2 net2-R3-interface
- openstack --os-region-name=CentralRegion router add port R3 net2-R3-interface
-
-Tricircle central Neutron plugin will automatically configure R3(1), R3(2) and
-bridge-network as follows:
-
-For net1, host route should be added::
-
- destination=net2's cidr, nexthop=net1-R3-interface's IP
-
-For net2, host route should be added::
-
- destination=net1's cidr, nexthop=net2-R3-interface's IP
-
-In R3(1), extra route will be configured::
-
- destination=net2's cidr, nexthop=R3(2)'s interface in bridge-net
-
-In R3(2), extra route will be configured::
-
- destination=net1's cidr, nexthop=R3(1)'s interface in bridge-net
-
-R3(1) and R3(2) will set the external gateway to bridge-net::
-
- router-gateway-set R3(1) bridge-net
- router-gateway-set R3(2) bridge-net
-
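-The following openstacksdk sketch illustrates how such host routes and extra
-routes could be configured; region names, resource names and the IP addresses
-are illustrative assumptions::
-
-    import openstack
-
-    region_one = openstack.connect(region_name='RegionOne')
-
-    net2_cidr = '10.0.2.0/24'        # assumed CIDR of net2
-    r3_if_ip_in_net1 = '10.0.1.2'    # assumed IP of net1-R3-interface
-    r3_2_bridge_ip = '100.0.0.3'     # assumed bridge-net IP of R3(2)
-
-    # host route so that instances in net1 send net2-bound traffic to R3(1)
-    net1_subnet = region_one.network.find_subnet('net1-subnet')
-    region_one.network.update_subnet(
-        net1_subnet,
-        host_routes=[{'destination': net2_cidr,
-                      'nexthop': r3_if_ip_in_net1}])
-
-    # extra route in R3(1): next hop is R3(2)'s interface in bridge-net
-    r3_1 = region_one.network.find_router('R3')
-    region_one.network.update_router(
-        r3_1,
-        routes=[{'destination': net2_cidr,
-                 'nexthop': r3_2_bridge_ip}])
-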
-Now, north-south traffic of Instance1 and Instance2 works as follows::
-
- Instance1 -> net1 -> R1 -> ext-net1
- Instance2 -> net2 -> R2 -> ext-net2
-
-Only one hop for north-south traffic.
-
-East-west traffic between Instance1 and Instance2 works as follows::
-
- Instance1 <-> net1 <-> R3(1) <-> bridge-net <-> R3(2) <-> net2 <-> Instance2
-
-Two hops for cross Neutron east-west traffic.
-
-The topology will be more complex if there are cross Neutron L2 networks
-in addition to local networks::
-
- +-----------------------+ +----------------------+
- | ext-net1 | | ext-net2 |
- | +-------+ | | +--+---+ |
- |RegionOne | | | RegionTwo | |
- | +---+----------+ | | +-------------+--+ |
- | | R1 | | | | R2 | |
- | +--+--+---+--+-+ | | ++-+----+---+----+ |
- | net1 | | | | | | | | | | net2 |
- | ++--++ | | | | | | | | +-+---+ |
- | | net3| | | | | | | |net4| |
- | | ++---+ | | | | | | ++---+ | |
- | | | | | | net5 | | | | | |
- | | | +++-------------------------+-++| | |
- | | | | | | net6 | | | | | |
- | | | |++-+--------------------+++ | | | |
- | | | | | | | | | | | |
- | | | | | | | | | | | |
- | | | | | | | | | | | |
- | | | | | | | | | | | |
- | +----+---+----+-+-+ | bridge-net | ++--+-+-----+-----+ |
- | | R3(1) +--------------------+ R3(2) | |
- | +-----------------+ | | +-----------------+ |
- +-----------------------+ +----------------------+
-
- Figure.3 Multi-NS and cross Neutron L2 networks
-
-The logical topology in central Neutron for Figure.3 looks as follows::
-
- ext-net1 ext-net2
- +-------+ +--+---+
- | |
- +---+----------+ +-------------+--+
- | R1 | | R2 |
- +--+--+---+--+-+ ++-+----+---+----+
- net1 | | | | | | | | net2
- ++--++ | | | | | | +-+---+
- | net3| | | | | |net4|
- | ++---+ | | | | ++---+ |
- | | | | net5 | | | |
- | | +------+------------------+ | |
- | | | | net6 | | |
- | | +-------------+------+ | |
- | | | | | |
- | | | | | |
- | | | | | |
- | | | | | |
- +-+---+------------+---------+------------+-----+-+
- | R3 |
- +-------------------------------------------------+
- Figure.4 Logical topology in central Neutron with cross Neutron L2 network
-
-East-west traffic inside one region will be processed locally through the
-default gateway. For example, in RegionOne, R1 has router interfaces in net1,
-net3, net5 and net6, so the east-west traffic between these networks works as
-follows::
-
- net1 <-> R1 <-> net3
- net1 <-> R1 <-> net5
- net1 <-> R1 <-> net6
- net3 <-> R1 <-> net5
- net3 <-> R1 <-> net6
- net5 <-> R1 <-> net6
-
-There is nothing special for east-west traffic between local networks
-in different OpenStack regions.
-
-Net5 and net6 are cross Neutron L2 networks: instances can be attached to such
-a network from different regions, and instances are reachable in a remote
-region via the cross Neutron L2 network itself. There is no need to add host
-routes for a cross Neutron L2 network, for it's routable in the same region
-from other local networks or cross Neutron L2 networks; the default route is
-enough for east-west traffic.
-
-It's needed to address how one cross Neutron L2 network is attached to
-different local routers: different gateway IP addresses will be used.
-For example, in central Neutron, net5's default gateway IP is 192.168.0.1
-in R1. The user needs to explicitly create a gateway port for local router R2
-and net5, for example 192.168.0.2, then net5 will be attached to R2 using this
-gateway port 192.168.0.2. The Tricircle central Neutron plugin will make this
-port's IP 192.168.0.2 the default gateway IP for net5 in RegionTwo.
-
-Besides the gateway port creation for local router R2, it's also needed to
-create a gateway port for R3 and net5, which is used for east-west traffic.
-Because R3 will be spread into RegionOne and RegionTwo, net5 will have
-different gateway ports in RegionOne and RegionTwo. The Tricircle central
-Neutron plugin needs to reserve the gateway ports in central Neutron, and
-create these gateway ports in RegionOne and RegionTwo for net5 on R3. Because
-R3 is the east-west gateway router for net5, these gateway ports are not the
-default gateway port. Then host routes in net5 should be updated for local
-networks which are not in the same region:
-
-For net5 in RegionOne, host route should be added::
-
- destination=net2's cidr, nexthop=R3(1)'s gateway port IP in net5
- destination=net4's cidr, nexthop=R3(1)'s gateway port IP in net5
-
-For net5 in RegionTwo, host route should be added::
-
- destination=net1's cidr, nexthop=R3(2)'s gateway port IP in net5
- destination=net3's cidr, nexthop=R3(2)'s gateway port IP in net5
-
-Similar operation for net6 in RegionOne and RegionTwo.
-
-If R1 and R2 are centralized routers, the cross Neutron L2 network will
-work, but if R1 and R2 are DVRs, then the DVR MAC issue mentioned in the
-spec "l3-networking-combined-bridge-net" [2]_ should be fixed first.
-
-In order to keep the topology from becoming too complex, the following use
-case will not be supported: a cross Neutron L2 network that is not stretched
-into some region where there are local networks, as shown in Figure 5. This
-use case is not very useful and would make the east-west traffic even more
-complex::
-
- +-----------------------+ +----------+ +-----------------+
- | ext-net1 | | ext-net2 | | ext-net4 |
- | +-------+ | | +------+ | | +--+---+ |
- |RegionOne | | | RegionTwo| | Region4 | |
- | +---+----------+ | | +------+ | | +-------+--+ |
- | | R1 | | | | R2 | | | | R4 | |
- | +--+--+---+--+-+ | | ++-+---+ | | +-+---+----+ |
- | net1 | | | | | | | | | | | | net2 |
- | ++--++ | | | | | | | | | | +-+---+ |
- | | net3| | | | | | | | | |net4| |
- | | ++---+ | | | | | | | | ++---+ | |
- | | | | | | net5 | | | | | | | |
- | | | +-+-------------------------+-+ | | | | |
- | | | | | net6 | | | | | | |
- | | | +-+--------------------+ | | | | |
- | | | | | | | | | |
- | | | | | | | | | |
- | | | | | | | | | |
- | | | | | | | | | |
- | +----+---+--------+ | | +-----+ | | +-+-----+-----+ |
- | | R3(1) | | | |R3(2)| | | | R3(3) | |
- | +-----------+-----+ | | +-+---+ | | +-----+-------+ |
- | | | | | | | | |
- +-----------------------+ +----------+ +-----------------+
- | bridge-net | |
- +----------------------------+-------------------+
-
- Figure.5 Cross Neutron L2 network not able to be stretched into some region
-
-
-Implementation
---------------
-
-Local router: a router which is created with a region name specified in the
-availability zone hint; it will be present only in that specific region.
-
-East-west gateway router: a router which will be spread into multiple
-regions and handles the east-west traffic of the attached local networks.
-
-The following description of the implementation is not pseudo code; it's the
-logical judgement for different combinations of conditions.
-
-Adding router interface to east-west gateway router::
-
-    if IP of the router interface is the subnet default gateway IP
-        # north-south traffic and east-west traffic will
-        # go through this router
-        # router is the default router gateway, it's the
-        # single north-south external network mode
-        if the network is cross Neutron L2 network
-            reserve gateway port in different region
-            add router interface in each region using reserved gateway port IP
-            make sure the gateway port IP is the default route
-        else # local network
-            add router interface using the default gateway port or the port
-            specified in request
-    else # not the default gateway IP in this subnet
-        if the network is cross Neutron L2 network
-            reserve gateway port in different region
-            add router interface in each region using reserved gateway port IP
-            update host route in each connected local network in each region,
-            next hop is the reserved gateway port IP
-        else # local network
-            create router in the region as needed
-            add router interface using the port specified in request
-            if there are more than one interfaces on this router
-                update host route in each connected local network in each
-                region, next hop is port IP on this router.
-
-    Configure extra route to the router in each region for EW traffic
-
-Adding a router interface to a local router for a cross Neutron L2 network will
-make the local router the default gateway router in this region::
-
- # default north-south traffic will go through this router
- add router interface using the default gateway port or the port
- specified in request
- make sure this local router in the region is the default gateway
-
-If an external network is attached to the east-west gateway router, and a
-network's default gateway is the east-west gateway router, then the router
-will be upgraded to the north-south networking via single external network
-mode.
-
-Constraints:
- Network can only be attached to one local router in one region.
-
- If a network has already been attached to an east-west gateway router,
- and the east-west gateway router is the default gateway of this network,
- then the network can't be attached to another local router.
-
-.. note:: Host route updates in a subnet will only take effect at the next
-   dhcp request. It may take dhcp_lease_duration for VMs in the subnet
-   to update the host route. It's better to compose the networking
-   topology before attaching VMs to the network. dhcp_lease_duration is
-   configured by the cloud operator. If the tenant wants the host route
-   to work immediately, a dhcp request can be sent directly from the VMs.
-
-
-Data Model Impact
-=================
-
-None
-
-Dependencies
-============
-
-None
-
-Documentation Impact
-====================
-
-1. Add new guide for North South Networking via Multiple External Networks
- with east-west enabled.
-2. Release notes.
-
-Reference
-=========
-
-.. [1] North South Networking via Multiple External Networks: https://docs.openstack.org/tricircle/latest/networking/networking-guide-multiple-external-networks.html
-.. [2] l3-networking-combined-bridge-net: https://github.com/openstack/tricircle/blob/master/specs/ocata/l3-networking-combined-bridge-net.rst
-.. [3] North South Networking via Single External Network: https://docs.openstack.org/tricircle/latest/networking/networking-guide-single-external-network.html
diff --git a/specs/pike/quality-of-service.rst b/specs/pike/quality-of-service.rst
deleted file mode 100644
index 246c25dc..00000000
--- a/specs/pike/quality-of-service.rst
+++ /dev/null
@@ -1,247 +0,0 @@
-=============================
-Tricircle Quality of Service
-=============================
-
-Background
-==========
-
-QoS is defined as the ability to guarantee certain network requirements
-like bandwidth, latency, jitter and reliability in order to satisfy a
-Service Level Agreement (SLA) between an application provider and end
-tenants. In the Tricircle, each OpenStack instance runs its own Nova and
-Neutron services but shares the same Keystone service or uses federated
-Keystones, which is a multi-region deployment mode. With networking automation,
-networks or ports created in different OpenStack clouds should be able to be
-associated with QoS policies.
-
-Proposal
-========
-
-As networking automation across Neutron servers can be done through the
-Tricircle, QoS automation should be able to work based on tenants' needs too.
-When a tenant wants to apply QoS to a network or port from the central
-Neutron, the QoS policy can't be created in the local Neutron server in the
-bottom pod directly, since it's still unclear whether the network will be
-presented in this pod or not.
-
-In order to achieve QoS automation, QoS can't be created in the local Neutron
-server directly until there are some existing networks/ports in the bottom
-pod. The Tricircle central Neutron plugin (abbr: "central plugin") will
-operate QoS information in the local Neutron server. A QoS policy isn't like a
-network/port that needs to be created during VM booting, so in order to speed
-up local VM booting and reduce the delay caused by synchronization between
-central Neutron and local Neutron, the Tricircle central plugin should use an
-asynchronous method to associate QoS with the local network/port, or remove
-the QoS association in each local Neutron if needed.
-
-Implementation
-==============
-
-Case 1, QoS policy creation
-----------------------------
-
-In this case, we only create QoS in the central Neutron.
-
-Case 2, QoS policy association without local network/port in place
----------------------------------------------------------------------
-
-QoS has been created in the central Neutron but local network/port has not
-yet been created.
-
-In this case, we just need to update network/port with QoS policy id in the
-central Neutron.
-
-Case 3, QoS policy association with local network/port in place
----------------------------------------------------------------
-
-After QoS has been created in the central Neutron and local network/port
-also has been created, associate QoS with network/port in the central Neutron.
-
-In this case, network/port has been created in the local Neutron. After
-network/port is updated with the QoS policy id in the central Neutron, we also
-need to do a similar association in the local Neutron. Central Neutron uses the
-"create_qos_policy" job to create the local QoS policy first, then updates the
-network/port QoS association asynchronously in the local Neutron through the
-network/port routing information and adds the QoS routing information to the
-routing table. XJob will interact with the local Neutron to update the QoS
-policy id for the network/port in the local Neutron.
-
-Case 4, provision VM with QoS policy associated central port/network
----------------------------------------------------------------------
-
-QoS has been associated to the central port/network first, and the local
-network/port is created later during VM provisioning.
-
-In this case, QoS has been associated to the central network/port and at this
-point the local network/port does not exist. Since QoS has not been created in
-the local Neutron but central Neutron has finished the association, the local
-Neutron needs to trigger central Neutron to finish the local network/port
-QoS association when a VM boots in that pod. When a VM boots in the bottom
-pod, the local Neutron sends a port update request with port information to
-central Neutron, and if the QoS id field exists in the network/port, central
-Neutron will be triggered to use XJob to create a QoS policy creation job in
-the local Neutron (this also speeds up VM booting) and to add the QoS routing
-information to the routing table.
-
-Case 5, QoS policy updating
-----------------------------
-
-In this case, if local network/port isn't associated with QoS, we only update
-QoS in the central Neutron.
-
-If QoS policy has been associated with local network/port in place, after
-central Neutron updates QoS, central Neutron will use XJob to create a QoS
-asynchronous updating job through the network/port routing information.
-XJob will asynchronously update QoS in the local Neutron.
-
-Case 6, QoS policy disassociation
------------------------------------
-
-For QoS policy disassociation, we just need to set the "qos_policy_id"
-parameter to None when updating the network/port in the central Neutron, and
-the network/port will be disassociated.
-
-In this case, if network/port in local Neutron isn't associated with QoS, we
-only disassociate network/port in the central Neutron.
-
-If QoS policy has been associated with network/port in local Neutron, after
-central Neutron disassociates network, central Neutron will use XJob to
-create a network update job to disassociate the network with the QoS policy;
-for port, central Neutron will synchronously update the port to disassociate
-it with the QoS policy in the local Neutron.
-
-Case 7, QoS policy deletion
-----------------------------
-
-QoS policy can only be deleted if there is no association in central
-Neutron. In this case, if local network/port isn't associated with QoS, we
-only delete QoS in the central Neutron.
-
-If there is QoS policy routing info, after central Neutron deletes QoS,
-central Neutron will use XJob to create a QoS asynchronous deletion job
-through the network/port routing information. XJob will asynchronously
-delete QoS in the local Neutron.
-
-Case 8, QoS rule creation
---------------------------
-
-In this case, if local network/port isn't associated with QoS, we only create
-QoS rule in the central Neutron.
-
-If QoS policy has been associated with local network/port in place, after central
-Neutron creates QoS rules, central Neutron will use XJob to create a QoS rules
-syncing job through the network/port routing information, then asynchronously
-creates QoS rules in the local Neutron.
-
-Case 9, QoS rule updating
---------------------------
-
-In this case, if local network/port isn't associated with QoS, we only update
-QoS rule in the central Neutron. If QoS policy has been associated with local
-network/port in place, after central Neutron updates QoS rule, central Neutron
-will trigger XJob to create a QoS rules syncing job in the local Neutron
-through the network/port routing information. XJob will asynchronously update
-QoS rule in the local Neutron.
-
-Case 10, QoS rule deletion
-----------------------------
-
-In this case, if local network/port isn't associated with QoS, we only delete
-QoS rule in the central Neutron.
-
-If QoS policy has been associated with local network/port in place, after
-central Neutron deletes QoS rule, central Neutron will use XJob to create a QoS
-rules syncing job through the network/port routing information. XJob will
-asynchronously delete QoS rule in the local Neutron.
-
-QoS XJob jobs list
--------------------
-
-- **1: create_qos_policy(self, ctxt, policy_id, pod_id, res_type, res_id=None)**
-
-Asynchronously creates the QoS policy in the pod whose id equals "pod_id"; the
-network or port is specified through the parameters res_type and res_id. If
-res_type is RT_NETWORK, then res_id is the network's uuid; if res_type is
-RT_PORT, then res_id is the port's uuid.
-
-**Triggering condition:**
-
-When associating a network/port in the central Neutron, if this network/port
-exists in the local Neutron, this asynchronous job is triggered to complete
-the local association.
-
-When the central plugin processes a port update request sent by the local
-plugin and finds the port is associated with QoS.
-
-If pod_id is POD_NOT_SPECIFIED, the async job will process all related
-pods, so the create_qos_policy(self, ctxt, policy_id, pod_id) job will deal
-with not only a single pod's QoS association.
-
-If the res_type is RT_NETWORK/RT_PORT, after creating the QoS policy in the
-pod, the async job will bind the newly created QoS policy to the network/port
-specified by the res_id parameter.
-
-- **2: update_qos_policy(self, ctxt, policy_id, pod_id)**
-
-Asynchronously updates the QoS policy in the pod whose id equals "pod_id".
-
-**Triggering condition:**
-
-When updating a QoS policy in the central Neutron, if it also exists in the
-local Neutron, this asynchronous job is triggered to complete the local QoS
-update.
-
-If pod_id is POD_NOT_SPECIFIED, the async job will process all related
-pods, so the update_qos_policy(self, ctxt, policy_id, pod_id) job will deal
-with not only a single pod's QoS association.
-
-- **3: delete_qos_policy(self, ctxt, policy_id, pod_id)**
-
-Asynchronously deletes the QoS policy in the pod whose id equals "pod_id".
-
-**Triggering condition:**
-
-When deleting a QoS policy in the central Neutron, if this QoS policy exists in
-the local Neutron, this asynchronous job is triggered to complete the local QoS
-deletion.
-(Warning: the deleted QoS policy must be disassociated first.)
-
-If pod_id is POD_NOT_SPECIFIED, the async job will process all related
-pods, so the delete_qos_policy(self, ctxt, policy_id, pod_id) job will deal
-with not only a single pod's QoS association.
-
-- **4: sync_qos_policy_rules(self, ctxt, policy_id)**
-
-Asynchronously syncs the rules of one QoS policy for the specified project.
-There are two trigger conditions. One is that central Neutron
-creates/updates/deletes QoS rules after the QoS policy has been associated
-with a local network/port. The other is that the central plugin processes a
-port update request sent by the local plugin and finds the port is associated
-with a QoS policy.
-
-If the rule exists in both the central Neutron and the local Neutron, but with
-inconsistent content, this QoS rule is just asynchronously updated in the local
-Neutron.
-
-If the rule exists in the central Neutron, but does not exist in the local
-Neutron, this QoS rule is just asynchronously created in the local Neutron.
-
-If the rule exists in the local Neutron, but does not exist in the central
-Neutron, this QoS rule is just asynchronously deleted in the local Neutron.
-
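-A simplified sketch of this rule reconciliation logic is shown below; the
-client helper methods are illustrative assumptions, not the real XJob code::
-
-    def sync_qos_rules(central_rules, local_rules, local_client, policy_id):
-        """Make the local rules of one QoS policy match the central ones.
-
-        central_rules and local_rules are assumed to be lists of rule dicts
-        keyed by 'id'; local_client is assumed to expose create/update/delete
-        helpers for QoS rules.
-        """
-        central = {rule['id']: rule for rule in central_rules}
-        local = {rule['id']: rule for rule in local_rules}
-
-        for rule_id, rule in central.items():
-            if rule_id not in local:
-                # rule only exists centrally: create it locally
-                local_client.create_rule(policy_id, rule)
-            elif rule != local[rule_id]:
-                # rule exists in both but content differs: update it locally
-                local_client.update_rule(policy_id, rule_id, rule)
-
-        for rule_id in local:
-            if rule_id not in central:
-                # rule only exists locally: delete it
-                local_client.delete_rule(policy_id, rule_id)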
-
-Data Model Impact
-=================
-
-None
-
-Dependencies
-============
-
-None
-
-Documentation Impact
-====================
-
-Release notes
-
diff --git a/specs/pike/smoke-test-engine.rst b/specs/pike/smoke-test-engine.rst
deleted file mode 100644
index feeb07e2..00000000
--- a/specs/pike/smoke-test-engine.rst
+++ /dev/null
@@ -1,219 +0,0 @@
-=================
-Smoke Test Engine
-=================
-
-Problems
-========
-Currently we are running a simple smoke test in the CI job. Several resources
-are created to build a simple topology, then we query to check whether the
-resources are also created in local Neutron servers as expected. The existing
-problems are:
-
-- 1 Bash scripts are used to invoke the client to send API requests while
-  python scripts are used to check the results. Mixing bash and python makes
-  it hard to write new tests.
-- 2 Resources are not cleaned up at the end of the test so we can't run other
-  tests in the same environment.
-
-Proposal
-========
-Using bash scripts to do both API requests and result validation is tricky and
-hard to read; working with python is a better choice. We have several python
-libraries that can help us send API requests: openstackclient, neutronclient
-and openstacksdk. The main goal of the first two libraries is providing a
-command line interface (CLI), so they don't expose methods for us to send API
-requests, but we can still use them by calling internal functions that are
-used by their CLI instances. The drawback of using internal functions is that
-they are undocumented and may be changed or removed someday.
-Compared to openstackclient and neutronclient, openstacksdk is a library that
-aims at application building and is well-documented. Actually openstackclient
-uses openstacksdk for some of its commands' implementation. The limitation of
-openstacksdk is that some service extensions like trunk and service function
-chaining have not been supported yet, but it's easy to extend on our own.
-
-Before starting to write python code to prepare, validate and finally clean up
-resources for each test scenario, let's hold on and move one step forward. Heat
-uses templates to define resources and networking topologies that need to be
-created; we can also use a YAML file to describe our test tasks.
-
-Schema
-------
-
-A task can be defined as a dict that has the following basic fields:
-
-.. csv-table::
- :header: Field, Type, Description, Required or not
- :widths: 10, 10, 40, 10
-
- task_id, string, user specified task ID, required
- region, string, keystone region to send API, required
- type, string, resource type, required
- depend, list, task IDs the current task depends on, optional
- params, dict, "parameters to run the task, usage differs in different task types", optional
-
-Currently four types of tasks are defined. The usage of the "params" field for
-each type of task is listed below:
-
-.. csv-table::
- :header: Task type, Usage of "params" field
- :widths: 10, 50
-
- create, used as the post body of the create request
- query, used as the query filter
- action, used as the put body of the action request
- validate, used as the filter to query resources that need to be validated
-
-A task doesn't have a "task type" field, but it can have an extra dict-type
-field that includes extra information needed for that task. This extra field
-differs between task types. A "create" task doesn't have an extra field.
-
-.. list-table::
-   :widths: 15, 10, 10, 40, 10
-   :header-rows: 1
-
-   * - Extra field
-     - Sub field
-     - Type
-     - Description
-     - Required or not
-   * - query(for query task)
-     - get_one
-     - bool
-     - whether to return an element or a list
-     - required
-   * - action(for action task)
-     - target
-     - string
-     - target resource ID
-     - required
-   * -
-     - method
-     - string
-     - action method, "update" and "delete" are also included
-     - required
-   * -
-     - retries
-     - int
-     - times to retry the current task
-     - optional
-   * - validate(for validate task)
-     - predicate
-     - string
-     - value should be "any" or "all", "any" means that for each condition,
-       there exists a resource satisfying that condition; "all" means that
-       every condition is satisfied by all the resources
-     - required
-   * -
-     - condition
-     - list
-     - each condition is a dict, key of the dict is the field of the resource,
-       value of the dict is the expected value of the resource field
-     - required
-   * -
-     - retries
-     - int
-     - times to retry the current task
-     - optional
-
-Several related tasks can be grouped to form a task set. A task set is a dict
-with the following fields:
-
-.. csv-table::
- :header: Field, Type, Description, Required or not
- :widths: 10, 10, 40, 10
-
- task_set_id, string, user specified task set ID, required
- depend, list, task set IDs the current task set depends on, optional
- tasks, list, task dicts of the task set, required
-
-So the YAML file contains a list of task sets.
-
-Result and Reference
---------------------
-
-"Create" and "query" type tasks will return results, which can be used in the
-definition of other tasks that depend on them. Use ``task_id@resource_field``
-to refer to "resource_field" of the resource returned by "task_id". If the task
-relied on belongs to other task set, use ``task_set_id@task_id@resource_field``
-to specify the task set ID. The reference can be used in the "params", "action
-target" and "validate condition" field. If reference is used, task_id needs to
-be in the list of task's "depend" field, and task_set_id needs to be in the
-list of task set's "depend" field. For the "query" type task which is depended
-on, "get_one" field needs to be true.
-
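-A small sketch of how such references could be resolved at run time is shown
-below; the layout of the result store is an assumption::
-
-    def resolve(value, results, current_task_set):
-        """Resolve "task_id@field" or "task_set_id@task_id@field" references.
-
-        results is assumed to be a dict mapping
-        (task_set_id, task_id) -> resource dict returned by that task.
-        """
-        if not isinstance(value, str) or '@' not in value:
-            return value  # plain value, nothing to resolve
-        parts = value.split('@')
-        if len(parts) == 2:
-            task_id, field = parts
-            task_set_id = current_task_set
-        else:
-            task_set_id, task_id, field = parts
-        return results[(task_set_id, task_id)][field]
-
-For example, ``resolve('net1@id', results, 'preparation')`` would return the
-ID of the network created by the "net1" task in the "preparation" task set.
-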
-Example
--------
-
-Give an example to show how to use the above schema to define tasks::
-
- - task_set_id: preparation
-   tasks:
-     - task_id: image1
-       region: region1
-       type: image
-       query:
-         get_one: true
-     - task_id: net1
-       region: central
-       type: network
-       params:
-         name: net1
-     - task_id: subnet1
-       region: central
-       type: subnet
-       depend: [net1]
-       params:
-         name: subnet1
-         ip_version: 4
-         cidr: 10.0.1.0/24
-         network_id: net1@id
-     - task_id: vm1
-       region: region1
-       type: server
-       depend:
-         - net1
-         - subnet1
-         - image1
-       params:
-         flavor_id: 1
-         image_id: image1@id
-         name: vm1
-         networks:
-           - uuid: net1@id
- - task_set_id: wait-for-job
-   tasks:
-     - task_id: check-job
-       region: central
-       type: job
-       validate:
-         predicate: all
-         retries: 10
-         condition:
-           - status: SUCCESS
- - task_set_id: check
-   depend: [preparation]
-   tasks:
-     - task_id: check-servers1
-       region: region1
-       type: server
-       validate:
-         predicate: any
-         condition:
-           - status: ACTIVE
-             name: vm1
-
-The above YAML content defines three task sets. The "preparation" task set
-creates a network, subnet and server, then the "wait-for-job" task set waits
-for asynchronous jobs to finish, and finally the "check" task set checks
-whether the server is active.
-
-Implementation
-==============
-
-A task engine needs to be implemented to parse the YAML file, analyse the task
-and task set dependencies and then run the tasks. A runner based on
-openstacksdk will also be implemented.
-
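-A minimal sketch of such a runner is shown below, assuming the tasks have
-already been ordered by their dependencies and references have been resolved;
-only a few resource types are handled for illustration::
-
-    import openstack
-
-
-    def run_create_task(task):
-        """Run one "create" type task with openstacksdk."""
-        conn = openstack.connect(region_name=task['region'])
-        params = task.get('params', {})
-        if task['type'] == 'network':
-            return conn.network.create_network(**params)
-        if task['type'] == 'subnet':
-            return conn.network.create_subnet(**params)
-        if task['type'] == 'server':
-            return conn.compute.create_server(**params)
-        raise ValueError('unsupported resource type: %s' % task['type'])
-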
-Dependencies
-============
-
-None
diff --git a/specs/queens/lbaas.rst b/specs/queens/lbaas.rst
deleted file mode 100644
index 033f290c..00000000
--- a/specs/queens/lbaas.rst
+++ /dev/null
@@ -1,185 +0,0 @@
-==========================================
-Distributed LBaaS in Multi-Region Scenario
-==========================================
-
-Background
-==========
-
-Currently, LBaaS (Load-Balancing-as-a-Service) is not supported in the
-Tricircle. This spec is to describe how LBaaS will be implemented in
-the Tricircle. LBaaS is an advanced service of Neutron, which allows for
-proprietary and open-source load balancing technologies to drive the actual
-load balancing of requests. Based on the networking guide of Ocata release,
-LBaaS can be configured with an agent or Octavia. Given that the OpenStack
-community takes Octavia as the reference implementation of LBaaS, we
-only enable LBaaS based on Octavia in the Tricircle.
-
-Different from existing LBaaS implementation, Octavia accomplishes its
-delivery of load balancing services by managing a fleet of virtual machines,
-containers, or bare metal servers, collectively known as amphorae, which it
-spins up on demand. This spec file is dedicated to how to implement LBaaS
-in multiple regions with the Tricircle.
-
-Overall Implementation
-======================
-
-The Tricircle is designed in a central-local fashion, where all the local
-neutrons are managed by the central neutron. As a result, in order to adapt to
-the central-local design and the amphorae mechanism of
-Octavia, we plan to deploy LBaaS as follows. ::
-
- +---------------------------+
- | |
- | Central Neutron |
- | |
- +---------------------------+
- Central Region
-
- +----------------------------+ +-----------------------------+
- | +----------------+ | | +----------------+ |
- | | LBaaS Octavia | | | | LBaaS Octavia | |
- | +----------------+ | | +----------------+ |
- | +------+ +---------------+ | | +-------+ +---------------+ |
- | | Nova | | Local Neutron | | | | Nova | | Local Neutron | |
- | +------+ +---------------+ | | +-------+ +---------------+ |
- +----------------------------+ +-----------------------------+
- Region One Region Two
-
-As demonstrated in the figure above, for each region where a local neutron
-is installed, admins can optionally choose to configure and install Octavia.
-Typically, Octavia leverages the nova installed in its region to spin up
-amphorae. By employing load balancing software (e.g. haproxy) installed in the
-amphorae and the Virtual Router Redundancy Protocol (VRRP), a load balancer,
-which consists of a VIP and an amphora, can balance load across members with
-high availability. However, under the central-local scenario, we plan to let
-Octavia employ the central neutron in Central Region to manage networking
-resources, while still employing services in its own region to manage amphorae.
-Hence, the workflow of networking resource management in the Tricircle can be
-described as follows.
-
-Tenant-->local neutron API-->neutron-LBaaS--->local Octavia--->central neutron
-
-Specifically, when a tenant attempts to create a load balancer, he/she needs to
-send a request to the local neutron-lbaas service. The service plugin of
-neutron-lbaas then prepares for creating the load balancer, including
-creating a port via the local plugin, inserting the info of the port into the
-database, and so on. Next the service plugin triggers the creation function
-of the corresponding driver of Octavia, i.e.,
-Octavia.network.drivers.neutron.AllowedAddressPairsDriver, to create the
-amphora. During the creation, Octavia employs the central neutron to
-complete a series of operations, for instance, allocating the VIP, plugging
-in the VIP, and updating databases. Given that the main features of managing
-networking resources are already implemented, we hence need to adapt the
-mechanism of Octavia and neutron-lbaas by improving the functionalities of the
-local and central plugins.
-
-Considering the Tricircle is dedicated to enabling networking automation
-across Neutrons, the implementation can be divided into two parts,
-i.e., LBaaS members in one OpenStack instance, and LBaaS members in
-multiple OpenStack instances.
-
-LBaaS members in single region
-==============================
-
-For LBaaS in one region, after installing octavia, cloud tenants should
-build a management network and two security groups for amphorae manually
-in the central neutron. Next, tenants need to create an interface for health
-management. Then, tenants need to configure the newly created networking
-resources for octavia and let octavia employ central neutron to create
-resources. Finally, tenants can create load balancers, listeners, pools,
-and members in the local neutron. In this case, all the members of a
-loadbalancer are in one region, regardless of whether the members reside
-in the same subnet or not.
-
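-The following openstacksdk sketch illustrates these preparation steps; all
-names and the CIDR are assumptions, and the created IDs would still need to be
-written into the Octavia configuration of that region::
-
-    import openstack
-
-    # management resources are created via the central Neutron
-    central = openstack.connect(region_name='CentralRegion')
-
-    lb_mgmt_net = central.network.create_network(name='lb-mgmt-net')
-    lb_mgmt_subnet = central.network.create_subnet(
-        network_id=lb_mgmt_net.id, ip_version=4,
-        cidr='192.168.0.0/24', name='lb-mgmt-subnet')
-
-    mgmt_sec_grp = central.network.create_security_group(
-        name='lb-mgmt-sec-grp')
-    health_sec_grp = central.network.create_security_group(
-        name='lb-health-mgr-sec-grp')
-
-    # port used by the Octavia health manager in this region
-    health_port = central.network.create_port(
-        network_id=lb_mgmt_net.id,
-        security_group_ids=[health_sec_grp.id],
-        name='octavia-health-manager-listen-port')
-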
-LBaaS members in multiple regions
-=================================
-
-1. members in the same subnet yet located in different regions
----------------------------------------------------------------
-As shown below. ::
-
- +-------------------------------+ +-----------------------+
- | +---------------------------+ | | |
- | | Amphora | | | |
- | | | | | |
- | | +-------+ +---------+ | | | |
- | +--+ mgmt +--+ subnet1 +---+ | | |
- | +-------+ +---------+ | | |
- | | | |
- | +--------------------------+ | | +-------------------+ |
- | | +---------+ +---------+ | | | | +---------+ | |
- | | | member1 | | member2 | | | | | | member3 | | |
- | | +---------+ +---------+ | | | | +---------+ | |
- | +--------------------------+ | | +-------------------+ |
- | network1(subnet1) | | network1(subnet1) |
- +-------------------------------+ +-----------------------+
- Region One Region Two
- Fig. 1. The scenario of balancing load across instances of one subnet which
- reside in different regions.
-
-As shown in Fig. 1, suppose that a load balancer is created in Region One,
-along with a listener, a pool, and two members in subnet1. When adding an
-instance in Region Two to the pool as a member, the local neutron creates
-the network in Region Two. Members that are located in different regions yet
-reside in the same subnet form a shared VLAN/VxLAN network. As a result,
-the Tricircle supports adding members that are located in different regions
-to a pool.
-
-2. members residing in different subnets and regions
-----------------------------------------------------
-As shown below. ::
-
- +---------------------------------------+ +-----------------------+
- | +-----------------------------------+ | | |
- | | Amphora | | | |
- | | | | | |
- | | +---------+ +------+ +---------+ | | | |
- | +-+ subnet2 +--+ mgmt +-+ subnet1 +-+ | | |
- | +---------+ +------+ +---------+ | | |
- | | | |
- | +----------------------------------+ | | +-------------------+ |
- | | | | | | | |
- | | +---------+ +---------+ | | | | +---------+ | |
- | | | member1 | | member2 | | | | | | member3 | | |
- | | +---------+ +---------+ | | | | +---------+ | |
- | | | | | | | |
- | +----------------------------------+ | | +-------------------+ |
- | network1(subnet1) | | network2(subnet2) |
- +---------------------------------------+ +-----------------------+
- Region One Region Two
- Fig. 2. The scenario of balancing load across instances of different subnets
- which reside in different regions as well.
-
-As shown in Fig. 2, suppose that a load balancer is created in Region One, as
-well as a listener, a pool, and two members in subnet1. When adding an instance
-of subnet2 located in Region Two, the local neutron-lbaas queries the central
-neutron whether subnet2 exists or not. If subnet2 exists, the local
-neutron-lbaas employs octavia to plug a port of subnet2 into the amphora. This
-triggers the cross-region vxlan networking process, then the amphora can reach
-the members. As a result, LBaaS in multiple regions works.
-
-Please note that LBaaS in multiple regions should not be applied to the local
-network case. When adding a member in a local network which resides in another
-region, the 'get_subnet' call issued by neutron-lbaas will fail and return
-"network not located in current region".
-
-Data Model Impact
-=================
-
-None
-
-Dependencies
-============
-
-None
-
-Documentation Impact
-====================
-
-Configuration guide needs to be updated to introduce the configuration of
-Octavia, local neutron, and central neutron.
-
-References
-==========
-
-None
diff --git a/specs/queens/resource_deleting.rst b/specs/queens/resource_deleting.rst
deleted file mode 100644
index 695aa9a6..00000000
--- a/specs/queens/resource_deleting.rst
+++ /dev/null
@@ -1,66 +0,0 @@
-========================================
-Reliable resource deleting in Tricircle
-========================================
-
-Background
-==========
-The deletion of resources which are mapped to several local Neutron servers
-may bring conflicting operations. Take deleting a network in central Neutron
-which also resides in several local Neutron servers as an example. A
-network-get request will trigger a local Neutron server to query central
-Neutron and create the network locally, and we delete local networks before
-deleting the central network. So when a network-get request comes to a local
-Neutron server after the local network is completely deleted in that region,
-while the network in central Neutron still exists (assuming it takes a certain
-time to delete all local networks), the local Neutron server will still
-retrieve the network from central Neutron and the deleted local network will
-be recreated. This issue also applies to the deletion of other resource types.
-
-Proposed Solution
-=================
-Recently, Tricircle added a feature to distinguish the source of requests [1],
-so we can tell whether a deletion request comes from 'Central Neutron' or
-'Local Neutron'. In order to avoid the conflict mentioned above, we introduce a
-new table called "deleting_resource" in the Tricircle database, so the central
-plugin can record the resource deletion information when it receives a
-deletion request. Here is the schema of the table:
-
-.. csv-table:: Resource deleting table
- :header: Field, Type, Nullable, pk/fk/uk, Description
-
- resource_id, string, False, uk, resource id in central Neutron
- resource_type, string, False, uk, resource_type denotes one of the available resource types
- deleted_at, timestamp, False, n/a, deletion timestamp
-
-**How to delete the resource without conflict operation**
-
-Let's take network deletion as an example.
-
-At the beginning of the network-delete handling, the central Neutron server
-records the information of the deleted network in the "deleting_resource"
-table.
-
-At this point, if a get-request from a local Neutron server comes, the central
-Neutron server will check the "deleting_resource" table to see whether the
-associated resource has been recorded, and return 404 to the local Neutron
-server if the associated resource is being deleted.
-
-At this point, if a deletion request comes from central Neutron, the central
-Neutron server will check the "deleting_resource" table to see whether the
-associated resource has been recorded, and it will return 204 to the user if
-the associated resource is being deleted.
-
-For a get-request from the user, the central Neutron server will query the
-related network information in the "deleting_resource" table and will return
-the deleting resource to the user if the network which the user queries exists
-in the table. When the user re-deletes the network after something wrong
-happens, central Neutron will return 204 to the user.
-
-At the end of the network-delete handling, when all the mapped local networks
-have been deleted, the central Neutron server will remove the deleting
-resource record and then remove the network itself.
-
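-A rough sketch of the check performed on a get request is shown below; the
-plugin helpers and the request-source check are illustrative assumptions::
-
-    from neutron_lib import exceptions
-
-
-    def get_network_with_deleting_check(plugin, context, network_id):
-        """Hide a network that is in the middle of being deleted.
-
-        plugin is the central plugin; query_deleting_resource,
-        request_from_local_neutron and get_deleting_resource_body are
-        illustrative helpers, not the real implementation.
-        """
-        record = plugin.query_deleting_resource(context, network_id, 'network')
-        if record:
-            if plugin.request_from_local_neutron(context):
-                # the local Neutron server must not recreate the network,
-                # so pretend the resource no longer exists
-                raise exceptions.NotFound()
-            # user get-request: still return the resource, marked as deleting
-            return plugin.get_deleting_resource_body(context, record)
-        return plugin.get_network(context, network_id)
-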
-In addition, there is a timestamp in the table so that the cloud administrator
-is able to delete a resource which has stayed in deleting status for too long
-(too long to delete, or in an abnormal status).
-
-[1] https://review.opendev.org/#/c/518421/
diff --git a/specs/stein/new-l3-networking-mulit-NS-with-EW.rst b/specs/stein/new-l3-networking-mulit-NS-with-EW.rst
deleted file mode 100644
index 75210e22..00000000
--- a/specs/stein/new-l3-networking-mulit-NS-with-EW.rst
+++ /dev/null
@@ -1,327 +0,0 @@
-=================================================
-A New Layer-3 Networking multi-NS-with-EW-enabled
-=================================================
-
-Problems
-========
-Based on the spec for l3 networking [1], an l3 networking model which enables
-multiple NS traffic paths along with EW traffic is demonstrated. However, in
-the aforementioned l3 networking model, a host route only becomes valid after
-the DHCP lease time has expired and been renewed. It may take
-dhcp_lease_duration for VMs in the subnet to update the host route after a new
-pod with an external network is added to Tricircle. To solve this problem,
-this spec introduces a new l3 networking model.
-
-Proposal
-========
-For the networking model in [1], a tenant network is attached to two
-routers, one for NS traffic, the other for EW traffic. In the new networking
-model, inspired by combined bridge network [2], we propose to attach the
-tenant network to one router, and the router takes charge of routing NS
-and EW traffic. The new networking model is plotted in Fig. 1. ::
-
- +-----------------------+ +----------------------+
- | ext-net1 | | ext-net2 |
- | +---+---+ | | +--+---+ |
- |RegionOne | | | RegionTwo | |
- | +---+---+ | | +----+--+ |
- | | R1 +------+ | | +--------+ R2 | |
- | +-------+ | | | | +-------+ |
- | net1 | | | | net2 |
- | +------+---+-+ | | | | +-+----+------+ |
- | | | | | | | | | |
- | +---------+-+ | | | | | | +--+--------+ |
- | | Instance1 | | | | | | | | Instance2 | |
- | +-----------+ | | | | | | +-----------+ |
- | +----+--+ | | | | ++------+ |
- | | R3(1) +-+-----------------+--+ R3(2) | |
- | +-------+ | bridge net | +-------+ |
- +-----------------------+ +----------------------+
-
- Figure 1 Multiple external networks with east-west networking
-
-As shown in Fig. 1, R1 connects to the external network (i.e., ext-net1) and
-ext-net1 is the default gateway of R1. Meanwhile, net1 is attached to R3
-and R3's default gateway is the bridge net. Further, interfaces of the bridge
-net are only attached to R1 and R2, which are regarded as local routers.
-
-In such a scenario, all traffic (no matter NS or EW traffic) flows to R3.
-For EW traffic from net1 to net2, R3(1) forwards packets to the interface
-of net2 in the R3(2) router namespace. For NS traffic, R3 forwards
-packets to the interface of an available local router (i.e., R1 or R2)
-which is attached to the real external network. As a result, the bridge net is
-an internal net where NS and EW traffic is steered, rather than the real
-external network of R3.
-
-To create such a topology, we need to create a logical (non-local) router
-R3 in the central Neutron. The Tricircle central Neutron plugin then creates
-R3(1) in RegionOne and R3(2) in RegionTwo, as well as the bridge network
-to inter-connect R3(1) and R3(2). As such, the networking for EW traffic
-is ready for tenants. To enable NS traffic, real external networks are
-required to be attached to R3. When explicitly adding the gateway port
-of each external network to R3, Tricircle automatically creates a local
-router (e.g. R1) for the external network and sets the gateway to the local
-router. Then, to connect the local router (e.g. R1) and the non-local
-router (R3), two interfaces in bridge-net are also created and attached
-to the respective routers. The logical topology in central Neutron is plotted
-in Fig. 2. ::
-
- ext-net1 ext-net2
- +---+---+ +---+---+
- | |
- +---+---+ +---+---+
- | R1 | | R2 |
- +---+---+ +---+---+
- | |
- +---+--------------------+---+
- | bridge-net |
- +-------------+--------------+
- |
- |
- +-------------+--------------+
- | R3 |
- +---+--------------------+---+
- | net1 net2 |
- +---+-----+-+ +---+-+---+
- | |
- +---------+-+ +--+--------+
- | Instance1 | | Instance2 |
- +-----------+ +-----------+
-
- Figure 2 Logical topology in central Neutron
-
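-For reference, the non-local router R3 itself is created in central Neutron
-with an ordinary router create call (the name R3 simply follows the figures
-above): ::
-
- openstack --os-region-name=CentralRegion router create R3
-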
-To improve the logic of building the l3 networking, we introduce routed
-networks to manage external networks in central Neutron. In central Neutron,
-one routed network is created as a logical external network, and the real
-external networks are stored as segments of that routed network. As such,
-the local routers (e.g., R1 and R2 in Fig. 2) are transparent to users.
-Consequently, when a real external network is created, a local router is
-created and the external network's gateway is set to that router. Moreover,
-a port of bridge-net is created and added to the local router.
-
-The routed network is created as follows: ::
-
- openstack --os-region-name=CentralRegion network create --share --provider-physical-network extern --provider-network-type vlan --provider-segment 3005 ext-net
- openstack --os-region-name=CentralRegion network segment create --physical-network extern --network-type vlan --segment 3005 --network ext-net ext-sm-net1
- openstack --os-region-name=CentralRegion network segment create --physical-network extern --network-type vlan --segment 3005 --network ext-net ext-sm-net2
- openstack --os-region-name=CentralRegion subnet create --network ext-net --network-segment ext-sm-net1 --ip-version 4 --subnet-range 203.0.113.0/24 net1-subnet-v4
- openstack --os-region-name=CentralRegion subnet create --network ext-net --network-segment ext-sm-net2 --ip-version 4 --subnet-range 203.0.114.0/24 net2-subnet-v4
-
-The logical topology exposed to users is plotted in Fig. 3. ::
-
- ext-net (routed network)
- +---+---+
- |
- |
- +--------------+-------------+
- | R3 |
- +---+--------------------+---+
- | net1 net2 |
- +---+-----+-+ +---+-+---+
- | |
- +---------+-+ +--+--------+
- | Instance1 | | Instance2 |
- +-----------+ +-----------+
-
- Figure 3 Logical topology exposed to users in central Neutron
-
-For R3, the subnets of net1 and net2 should be attached to R3: ::
-
- openstack --os-region-name=CentralRegion router add subnet R3 <net1-subnet>
- openstack --os-region-name=CentralRegion router add subnet R3 <net2-subnet>
-
-The gateway of ext-net, i.e., the routed network, is set to R3: ::
-
- openstack --os-region-name=CentralRegion router set --external-gateway ext-net R3
-
-However, a routed network does not have a gateway of its own. Consequently,
-the command above fails because it tries to add the gateway of a routed
-network to the router, i.e., R3. To make the command work, we plan to create
-a gateway port for the routed network before setting the gateway to a
-router. This port is a blank port without an IP address, because a routed
-network is only a software entity composed of multiple segments (i.e.,
-subnets). To make sure the gateways of the real external networks can still
-be retrieved, we manage the gateway IPs in the "tags" field of the gateway
-port.
-
-The command above also creates a port on bridge-net and adds it to R3, as
-plotted in Fig. 2.
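-
-The exact implementation is left to the plugin, but a hand-run sketch of the
-workaround, assuming an illustrative port name (ext-net-gw-port) and the
-default .1 gateway addresses of the example subnets above, could look like
-this: ::
-
- openstack --os-region-name=CentralRegion port create --network ext-net --no-fixed-ip ext-net-gw-port
- openstack --os-region-name=CentralRegion port set --tag 203.0.113.1 --tag 203.0.114.1 ext-net-gw-port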
-
-The Tricircle central Neutron plugin will automatically configure R3(1),
-R3(2) and the bridge network as follows:
-
-For net1 and net2, no host route is needed, so in this l3 networking model
-users no longer have to wait for a DHCP renew to update host routes. All
-traffic is forwarded to R3 by default.
-
-In R3(1), extra routes will be configured: ::
-
- destination=net2's cidr, nexthop=R3(2)'s interface in bridge-net
- destination=ext-net1's cidr, nexthop=R1's interface in bridge-net
-
-In R3(2), extra routes will be configured: ::
-
- destination=net1's cidr, nexthop=R3(1)'s interface in bridge-net
- destination=ext-net2's cidr, nexthop=R2's interface in bridge-net
-
-R3(1) and R3(2) will set the external gateway to bridge-net: ::
-
- router-gateway-set R3(1) bridge-net
- router-gateway-set R3(2) bridge-net
-
-Now, north-south traffic of Instance1 and Instance2 works as follows: ::
-
- Instance1 -> net1 -> R3(1) -> R1 -> ext-net1
- Instance2 -> net2 -> R3(2) -> R2 -> ext-net2
-
-North-south traffic thus takes two router hops.
-
-East-west traffic between Instance1 and Instance2 works as follows: ::
-
- Instance1 <-> net1 <-> R3(1) <-> bridge-net <-> R3(2) <-> net2 <-> Instance2
-
-Cross-Neutron east-west traffic thus also takes two router hops.
-
-The topology with cross-Neutron L2 networks (i.e., L2 networks of types
-other than local) is illustrated in Fig. 4. ::
-
- +-----------------------+ +-----------------------+
- | ext-net1 | | ext-net2 |
- | +---+---+ | | +--+---+ |
- |RegionOne | | | RegionTwo | |
- | +---+------+ | | +----------+--+ |
- | | R1 +---+ | | +---+ R2 | |
- | +----------+ | | | | +-------------+ |
- | net1 | | | | net2 |
- | ++---+ | | | | +-----+ |
- | | net3 | | | | net4| |
- | | ++---+ | | | | +--+-+ | |
- | | | | | net5 | | | | |
- | | | +-+-----------------------------+-+ | | |
- | | | | | | net6 | | | | | |
- | | | | ++-----------------------++ | | | |
- | | | | | | | | | | | | | |
- | | | | | | | | | | | | | |
- | | | | | | | | | | | | | |
- | | | | | | | | | | | | | |
- | +----+---+---+--+-+ | | bridge-net | | ++--+---+---+-----+ |
- | | R3(1) +-+----------------+-+ R3(2) | |
- | +-----------------+ | | +-----------------+ |
- +-----------------------+ +-----------------------+
-
- Figure 4 Multi-NS and cross Neutron L2 networks
-
-The logical topology in central Neutron for Fig. 4 is plotted in Fig. 5. ::
-
- ext-net1 ext-net2
- +---+---+ +--+---+
- | |
- +--+-----------+ +---+------------+
- | R1 | | R2 |
- +----------+---+ +----+-----------+
- | |
- +----------+--------------------------+-----------+
- | bridge-net |
- +-----------------------+-------------------------+
- |
- +-----------------------+-------------------------+
- | R3 |
- +--+----+------+-----------------+---------+----+-+
- | | | | | |
- | | | | | |
- | | | | | |
- | | +-+--------------------+ | |
- | | net5 | | |
- | | +--------------+------+ | |
- | | net6 | |
- | +-+---+ +---+-+ |
- | net3 net2 |
- +-+---+ +---+-+
- net1 net4
-
- Figure 5 Logical topology in central Neutron with cross Neutron L2 network
-
-By attaching these networks to R3, EW traffic is routed by R3.
-
-For net5 in RegionOne, extra routes should be added to R3(1): ::
-
- destination=net1's cidr, nexthop=
- destination=net3's cidr, nexthop=
-
-For net5 in RegionTwo, extra routes should be added to R3(2): ::
-
- destination=net1's cidr, nexthop=
- destination=net3's cidr, nexthop=
-
-The east-west traffic between these networks will work as follows::
-
- net1 <-> R3 <-> net3
- net1 <-> R3 <-> net5
- net1 <-> R3 <-> net6
- net3 <-> R3 <-> net5
- net3 <-> R3 <-> net6
- net5 <-> R3 <-> net6
-
-For NS traffic, the routes to the external networks are already configured,
-so NS traffic is routed to R1 or R2.
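-
-For example, under the assumption that each network reaches the Internet
-through the external network of its own region, the NS paths for instances
-in net3 and net4 would be: ::
-
- Instance in net3 -> net3 -> R3(1) -> R1 -> ext-net1
- Instance in net4 -> net4 -> R3(2) -> R2 -> ext-net2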
-
-Implementation
-==============
-
-Part 0: add an option in local.conf to enable the new l3 networking model
-
-Add an option "ENABLE_HOST_ROUTE_INDEPENDENT_L3_NETWORKING", whose value is
-TRUE or FALSE, to indicate whether users want to adopt the new l3 networking
-model.
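-
-A minimal local.conf sketch (the option name comes from this spec; its
-placement in the localrc section is an assumption based on common DevStack
-usage): ::
-
- [[local|localrc]]
- ENABLE_HOST_ROUTE_INDEPENDENT_L3_NETWORKING=True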
-
-Part 1: enable external network creation with transparent (local) router
-
-This part mainly ensures that a real external network is created along with
-a local router, and that the gateway of the external network is set to that
-router. As shown in Fig. 2, when ext-net1 is created, R1 is created too, and
-the gateway of ext-net1 is set to R1. Moreover, the local router, e.g. R1,
-is transparent to users. In other words, users only create the external
-network, while Tricircle completes the creation of the local router. As a
-result, users are unaware of the local routers.
-
-Part 2: enable routed network and gateway setting process
-
-This part enables routed networks in the central Neutron. Meanwhile, this
-part also completes the process of setting the gateway of the routed
-network on the distributed router, e.g. R3 in Fig. 2. Since the routed
-network is only a software entity wrapping multiple real external networks,
-the gateway IP of the routed network is set to NULL, and the gateway IPs of
-the real external networks are planned to be stored in the tag field of the
-routed network. So this part mainly deals with the blank gateway IP of the
-routed network when setting the gateway on the router.
-
-Part 3: modify floating ip creation
-
-In the existing l3 networking, the external network and the tenant network
-are connected by a single router, so a floating ip only needs to be NATed
-once. However, in the new l3 networking model, as shown in Fig. 2, the
-external network and the tenant network are attached to two different
-routers, which are connected by the bridge network, so a floating ip needs
-to be NATed twice. This part mainly deals with that issue.
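-
-As an illustration (the addresses are hypothetical: fip is the floating ip
-allocated from ext-net1 and bridge-ip is an intermediate address on
-bridge-net), the two NAT steps would look like: ::
-
- ingress: fip (ext-net1) --R1--> bridge-ip (bridge-net) --R3--> Instance1's fixed ip
- egress:  Instance1's fixed ip --R3--> bridge-ip (bridge-net) --R1--> fip (ext-net1)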
-
-Data Model Impact
-=================
-
-None
-
-Dependencies
-============
-
-None
-
-Documentation Impact
-====================
-
-1. Add a new guide for North South Networking via Multiple External Networks
- with east-west enabled.
-2. Release notes.
-
-Reference
-=========
-
-.. [1] https://github.com/openstack/tricircle/blob/master/specs/pike/l3-networking-multi-NS-with-EW-enabled.rst
-.. [2] https://github.com/openstack/tricircle/blob/master/specs/ocata/l3-networking-combined-bridge-net.rst
diff --git a/specs/train/join-tricircle-and-trio2o.rst b/specs/train/join-tricircle-and-trio2o.rst
deleted file mode 100644
index 079d2c0e..00000000
--- a/specs/train/join-tricircle-and-trio2o.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-==================================
-Join Tricircle and Trio2o together
-==================================
-
-Blueprint
-=========
-
-https://blueprints.launchpad.net/tricircle/+spec/join-tricircle-and-trio2o-together
-
-At the very beginning, Tricircle and Trio2o came from the same PoC-stage
-project, the OpenStack Cascading Solution [1] by Huawei. After that, they
-were split into two independent projects in the community. Tricircle
-focuses on network automation across Neutron servers in multi-region
-OpenStack clouds, while Trio2o is the API gateway for forwarding Nova and
-Cinder REST API requests. Although they became independent, in many
-application areas such as NFV and multiple data centers they need to be
-reunified, because their design concept for multi-region OpenStack
-management has been the same since the beginning. This blueprint tries to
-reunite Tricircle and Trio2o as one unified and complete project dedicated
-to multi-region OpenStack cloud management.
-
-Problem Description
-===================
-Tricircle is an official community project and keeps pace with every
-release cycle; its newest version is Stein. Trio2o, in contrast, has been
-out of maintenance for a long time; its newest version is Mitaka. Another
-big problem is that Trio2o re-implements the Nova and Cinder REST APIs and
-CLI commands, and Trio2o has no python client. Overlaps may also exist in
-the DevStack install scripts of the two projects, etc.
-
-Implementation
-==============
-A unified common python client needs to be developed for the two projects,
-Tricircle and Trio2o, to unify and translate CLI commands and REST APIs.
-The two databases and their tables also need to be combined and redesigned.
-Source code and configuration files for the functions and APIs need to be
-planned in a unified way. Then the DevStack install scripts as well as the
-Zuul gate jobs need to be merged and verified together. Finally, the tempest
-and unit tests need to be checked against a unified path specification.
-
-References
-==========
-
-[1] https://wiki.openstack.org/wiki/OpenStack_cascading_solution
-
-[2] https://wiki.openstack.org/wiki/Tricircle
-
-[3] https://wiki.openstack.org/wiki/Trio2o
-
-[4] https://github.com/openstack/tricircle
-
-[5] https://github.com/openstack/python-tricircleclient
-
-[6] https://github.com/openstack/trio2o
-
-[7] https://docs.openstack.org/tricircle/latest/user/readme.html
diff --git a/specs/train/multi-region container management.rst b/specs/train/multi-region container management.rst
deleted file mode 100644
index 67732616..00000000
--- a/specs/train/multi-region container management.rst
+++ /dev/null
@@ -1,144 +0,0 @@
-=============================================
-Container Management in Multi-Region Scenario
-=============================================
-
-Background
-==========
-
-Currently, multi-region container management is not supported in Tricircle.
-This spec describes how container management will be implemented in the
-Tricircle multi-region scenario. OpenStack provides several components for
-container services, such as Zun, Kuryr and kuryr-libnetwork. Zun is the
-component that provides the container management service in OpenStack; it
-offers a unified OpenStack API for launching and managing containers and
-supports the Docker container technology. Kuryr is a component that connects
-a container network to a Neutron network. Kuryr-libnetwork is a Kuryr plugin
-running under the libnetwork framework that provides network services for
-containers. Zun integrates with Keystone, Neutron and Glance to implement
-container management: Keystone provides identity authentication, Neutron
-provides networking, and Glance provides images for containers. These
-OpenStack services work together to accomplish multi-region container
-management.
-
-Overall Implementation
-======================
-
-Tricircle is designed in a Central_Neutron-Local_Neutron fashion, where all
-the local Neutron servers are managed by the central Neutron. To adapt to
-the Central_Neutron-Local_Neutron design as well as the container network
-and image requirements, we plan to deploy Zun, Kuryr, kuryr-libnetwork and
-the raw Docker engine as follows. ::
-
- +--------------------------------------------------+ +--------------------------------------------------+
- | | Central Region | |
- | +--------+ +--×---------------------×--+ +--------+ |
- | +-----| Glance | User <---- | Keystone | ----> User | Glance |-----+ |
- | | +--------+ x------x +---------------------------+ x------x +--------+ | |
- | | | | Central Neutron | | | |
- | | +---------------+ | +--×----^-----------^----×--+ | +---------------+ | |
- | | | Zun API |<------------+ | | | | +------------>| Zun API | | |
- | | +---------------+ +---------------+ | | | | +---------------+ +---------------+ | |
- | | | | | | | | | | | | | | | |
- | +--+ Zun Compute +--------+ Docker Engine | | | | | | Docker Engine +--------+ Zun Compute +--+ |
- | | | | | | | | | | | | | |
- | +-------+-------+ +-------+-------+ | | | | +-------+-------+ +-------+-------+ |
- | | | | | | | | | |
- | | | | | | | | | |
- | +-------+-------+ +-------+-------+ | | | | +-------+-------+ +-------+-------+ |
- | | | | | | | | | | | | | |
- | | Local Neutron +--------+ Kuryr | | | | | | Kuryr <--------> Local Neutron | |
- | | | | libnetwork | | | | | | libnetwork | | | |
- | +-------+-------+ +---------------+ | | | | +---------------+ +-------+-------+ |
- | | | | | | | |
- | +------------------------------------×----+ +----×------------------------------------+ |
- | | | |
- +--------------------------------------------------+ +--------------------------------------------------+
- Region One Region Two
-
- Fig. 1 The multi-region container management architecture.
-
-As shown in Fig. 1 above, in Tricircle each region already has a local
-Neutron installed. To accomplish container management in Tricircle, admins
-need to install and configure Zun, Docker, Kuryr and kuryr-libnetwork.
-Under the Central_Neutron-Local_Neutron scenario, we plan to let Zun use
-the central Neutron in the Central Region to manage networking resources,
-while still using the Docker engine in its own region to manage Docker
-container instances. Kuryr/kuryr-libnetwork is then used to connect the
-container network to the Neutron network. Hence, the workflow of container
-creation in Tricircle can be described as follows. ::
-
- +-----------------------------------------------------------------------------------------------------------------------------------------------+
- | +---------------+ +---------------+ +-----------------+ +-------------------------+ |
- | +----------+ +-->| neutronClient | -->| Local Neutron | -->| Central Neutron | -->|Neutron network and port | |
- | +------->| Keystone | | +---------------+ +------^--------+ +--------+--------+ +-------------+-----------+ |
- | | +----------+ | | | | |
- | | | +------------------+ +----------------------+ +-----------------+-----------+ |
- | | +-->| kuryr/libnetwork | --------------------------------------->|Connect container to network | |
- | +--+---+ +---------+ +-------------+ | +------------------+ +-----------------+-----------+ |
- | | User | -->| Zun API | -->| Zun Compute | --+ | |
- | +------+ +---------+ +-------------+ | +--------------+ +--------------+ | |
- | +-->| glanceClient | -->| docker image | +=====+=====+ |
- | | +--------------+ +------+-------+ ‖ Container ‖ |
- | | | +=====+=====+ |
- | | +------------+ +--------V---------------+ | |
- | +-->| Docker API | -->| Create docker instance | ----------------------------------+ |
- | +------------+ +------------------------+ |
- +-----------------------------------------------------------------------------------------------------------------------------------------------+
- Fig. 2 The multi-region container creation workflow.
-
-Specifically, when a tenant attempts to create a container, he/she sends a
-request to the Zun API, which then calls the Zun compute driver to create
-the container in four sub-steps. Firstly, it calls the network_api
-(neutronClient) to process the Neutron network (using the
-Central_Neutron-Local_Neutron mechanism). Secondly, it calls the image_api
-(glanceClient) to provide the Docker image. Thirdly, it calls the Docker API
-to create the Docker instance. Finally, it uses Kuryr to connect the
-container to the Neutron network. At this point a container is successfully
-created in the Tricircle environment. Since Tricircle is dedicated to
-enabling networking automation across Neutron servers, we can also implement
-the interconnection among multiple containers in the multi-region scenario,
-as shown below. ::
-
- +------------------------+ +-------------------+ +------------------------+
- | net1 | | | | net1 |
- | +---------+--------------------------+-------------------------+----------+ |
- | | | | | | | | |
- | | | | | | | | |
- | +-----+------+ | | | | | +-----+------+ |
- | | Container1 | | | +----+----+ | | | Container2 | |
- | +------------+ | | | | | | +------------+ |
- | | | | Router | | | |
- | +-----+------+ | | | | | | +-----+------+ |
- | | Container3 | | | +----+----+ | | | Container4 | |
- | +-----+------+ | | | | | +-----+------+ |
- | | | | | | | | |
- | | | | | | | | |
- | +---------+--------------------------+-------------------------+----------+ |
- | net2 | | | | net2 |
- | | | | | |
- | +--------------------+ | | +---------------+ | | +--------------------+ |
- | | Local Neutron | | | |Central Neutron| | | | Local Neutron | |
- | +--------------------+ | | +---------------+ | | +--------------------+ |
- +------------------------+ +-------------------+ +------------------------+
- Region One Central Region Region Two
-
- Fig. 3 The container interconnection in multi-region scenario.
-
-Combined with Tricircle, we can likewise implement container deletion,
-container modification, container lookup and so on in the multi-region
-scenario. That means we can implement full container management in the
-multi-region scenario.
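-
-As an illustrative sketch (not part of this spec), launching a container in
-RegionOne on a network managed through the central Neutron might look like
-the following, where the network name net1 and the container name container1
-are assumptions: ::
-
- openstack --os-region-name=RegionOne appcontainer run --name container1 --net network=net1 cirros ping 8.8.8.8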
-
-
-Data Model Impact
------------------
-
-None
-
-Dependencies
-------------
-
-None
-
-Documentation Impact
---------------------
-
-None
-
-References
-----------
-
-None
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 04eb7ecb..00000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-hacking>=3.0,<3.1.0 # Apache-2.0
-coverage!=4.4,>=4.0 # Apache-2.0
-fixtures>=3.0.0 # Apache-2.0/BSD
-mock>=3.0.0 # BSD
-python-subunit>=1.0.0 # Apache-2.0/BSD
-requests-mock>=1.2.0 # Apache-2.0
-docutils>=0.11 # OSI-Approved Open Source, Public Domain
-sphinx!=1.6.6,!=1.6.7,>=1.6.5,<2.0.0;python_version=='2.7' # BSD
-sphinx!=1.6.6,!=1.6.7,>=1.6.5,!=2.1.0;python_version>='3.4' # BSD
-openstackdocstheme>=1.30.0 # Apache-2.0
-testrepository>=0.0.18 # Apache-2.0/BSD
-testtools>=2.2.0 # MIT
-testresources>=2.0.0 # Apache-2.0/BSD
-testscenarios>=0.4 # Apache-2.0/BSD
-oslotest>=3.2.0 # Apache-2.0
-os-testr>=1.0.0 # Apache-2.0
-ddt>=1.0.1 # MIT
-reno>=2.5.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 3f2015e5..00000000
--- a/tox.ini
+++ /dev/null
@@ -1,98 +0,0 @@
-[tox]
-minversion = 3.1.1
-envlist = py37,pypy,pep8
-skipsdist = True
-ignore_basepython_conflict = True
-
-[testenv]
-basepython = python3
-setenv = VIRTUAL_ENV={envdir}
- PYTHONWARNINGS=default::DeprecationWarning
-passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
-usedevelop = True
-deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-whitelist_externals =
- sh
- stestr
-commands =
- stestr run {posargs}
- stestr slowest
-
-[testenv:releasenotes]
-commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
-
-[testenv:pep8]
-deps =
- -r{toxinidir}/test-requirements.txt
-commands = flake8
-
-[testenv:venv]
-commands = {posargs}
-
-[testenv:cover]
-commands =
- python setup.py testr --coverage --testr-args='{posargs}'
- coverage report --fail-under=70 --skip-covered
-
-[testenv:genconfig]
-deps =
- -r{toxinidir}/test-requirements.txt
-commands = oslo-config-generator --config-file=etc/api-cfg-gen.conf
- oslo-config-generator --config-file=etc/xjob-cfg-gen.conf
-
-[testenv:genpolicy]
-deps =
- -r{toxinidir}/test-requirements.txt
-commands = oslopolicy-sample-generator --config-file=etc/policy-generator.conf
-
-[testenv:docs]
-deps =
- -r{toxinidir}/test-requirements.txt
-commands = python setup.py build_sphinx
-
-[testenv:debug]
-commands = oslo_debug_helper {posargs}
-
-[testenv:lower-constraints]
-deps =
- -c{toxinidir}/lower-constraints.txt
- -r{toxinidir}/test-requirements.txt
- -r{toxinidir}/requirements.txt
-
-[flake8]
-show-source = True
-builtins = _
-# H106: Don't put vim configuration in source files
-# H203: Use assertIs(Not)None to check for None
-enable-extensions=H106,H203
-# E123 closing bracket does not match indentation of opening bracket's line
-# E402 module level import not at top of file
-# W503 line break before binary operator
-# W504 line break after binary operator
-ignore = E123,E402,W503,W504
-exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
-
-[hacking]
-import_exceptions = tricircle.common.i18n
-
-[testenv:dev]
-# run locally (not in the gate) using editable mode
-# https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs
-# note that order is important to ensure dependencies don't override
-commands =
- pip install -q -e "git+https://opendev.org/openstack/networking-sfc#egg=networking_sfc"
- pip install -q -e "git+https://opendev.org/openstack/neutron#egg=neutron"
-
-[testenv:py3-dev]
-basepython = python3
-commands =
- {[testenv:dev]commands}
- {[testenv]commands}
-
-[testenv:pep8-dev]
-basepython = python3
-deps = {[testenv]deps}
- -r{toxinidir}/test-requirements.txt
-commands = flake8
diff --git a/tricircle/__init__.py b/tricircle/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/api/__init__.py b/tricircle/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/api/app.py b/tricircle/api/app.py
deleted file mode 100644
index a732714c..00000000
--- a/tricircle/api/app.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2015 Huawei, Tech. Co,. Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pecan
-
-from oslo_config import cfg
-
-from tricircle.common.i18n import _
-from tricircle.common import restapp
-
-
-common_opts = [
- cfg.IPOpt('bind_host', default='0.0.0.0',
- help=_("The host IP to bind to")),
- cfg.PortOpt('bind_port', default=19999,
- help=_("The port to bind to")),
- cfg.IntOpt('api_workers', default=1,
- help=_("number of api workers")),
- cfg.StrOpt('api_extensions_path', default="",
- help=_("The path for API extensions")),
- cfg.StrOpt('auth_strategy', default='keystone',
- help=_("The type of authentication to use")),
- cfg.BoolOpt('allow_bulk', default=True,
- help=_("Allow the usage of the bulk API")),
- cfg.BoolOpt('allow_pagination', default=False,
- help=_("Allow the usage of the pagination")),
- cfg.BoolOpt('allow_sorting', default=False,
- help=_("Allow the usage of the sorting")),
- cfg.IntOpt('pagination_max_limit', min=1, default=2000,
- help=_("The maximum number of items returned in a single "
- "response, value must be greater or equal to 1")),
-]
-
-
-def setup_app(*args, **kwargs):
- config = {
- 'server': {
- 'port': cfg.CONF.bind_port,
- 'host': cfg.CONF.bind_host
- },
- 'app': {
- 'root': 'tricircle.api.controllers.root.RootController',
- 'modules': ['tricircle.api'],
- 'errors': {
- 400: '/error',
- '__force_dict__': True
- }
- }
- }
- pecan_config = pecan.configuration.conf_from_dict(config)
-
- # app_hooks = [], hook collection will be put here later
-
- app = pecan.make_app(
- pecan_config.app.root,
- debug=False,
- wrap_app=restapp.auth_app,
- force_canonical=False,
- hooks=[],
- guess_content_type_from_ext=True
- )
-
- return app
diff --git a/tricircle/api/controllers/__init__.py b/tricircle/api/controllers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/api/controllers/job.py b/tricircle/api/controllers/job.py
deleted file mode 100755
index 062e23f2..00000000
--- a/tricircle/api/controllers/job.py
+++ /dev/null
@@ -1,432 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pecan
-from pecan import expose
-from pecan import rest
-import six
-
-from oslo_log import log as logging
-from oslo_utils import timeutils
-
-from tricircle.common import constants
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exc
-from tricircle.common.i18n import _
-from tricircle.common import policy
-from tricircle.common import utils
-from tricircle.common import xrpcapi
-from tricircle.db import api as db_api
-
-LOG = logging.getLogger(__name__)
-
-
-class AsyncJobController(rest.RestController):
- # with AsyncJobController, admin can create, show, delete and
- # redo asynchronous jobs
-
- def __init__(self):
- self.xjob_handler = xrpcapi.XJobAPI()
-
- @expose(generic=True, template='json')
- def post(self, **kw):
- context = t_context.extract_context_from_environ()
- job_resource_map = constants.job_resource_map
-
- if not policy.enforce(context, policy.ADMIN_API_JOB_CREATE):
- return utils.format_api_error(
- 403, _("Unauthorized to create a job"))
-
- if 'job' not in kw:
- return utils.format_api_error(
- 400, _("Request body not found"))
-
- job = kw['job']
-
- for field in ('type', 'project_id'):
- value = job.get(field)
- if value is None:
- return utils.format_api_error(
- 400, _("%(field)s isn't provided in request body") % {
- 'field': field})
- elif len(value.strip()) == 0:
- return utils.format_api_error(
- 400, _("%(field)s can't be empty") % {'field': field})
-
- if job['type'] not in job_resource_map.keys():
- return utils.format_api_error(
- 400, _('There is no such job type: %(job_type)s') % {
- 'job_type': job['type']})
-
- job_type = job['type']
- project_id = job['project_id']
-
- if 'resource' not in job:
- return utils.format_api_error(
- 400, _('Failed to create job, because the resource is not'
- ' specified'))
-
- # verify that all given resources are exactly needed
- request_fields = set(job['resource'].keys())
- require_fields = set([resource_id
- for resource_type, resource_id in
- job_resource_map[job_type]])
- missing_fields = require_fields - request_fields
- redundant_fields = request_fields - require_fields
-
- if missing_fields:
- return utils.format_api_error(
- 400, _('Some required fields are not specified:'
- ' %(field)s') % {'field': missing_fields})
- if redundant_fields:
- return utils.format_api_error(
- 400, _('Some fields are redundant: %(field)s') % {
- 'field': redundant_fields})
-
- # validate whether the project id is legal
- resource_type_1, resource_id_1 = (
- constants.job_primary_resource_map[job_type])
- if resource_type_1 is not None:
- filter = [{'key': 'project_id', 'comparator': 'eq',
- 'value': project_id},
- {'key': 'resource_type', 'comparator': 'eq',
- 'value': resource_type_1},
- {'key': 'top_id', 'comparator': 'eq',
- 'value': job['resource'][resource_id_1]}]
-
- routings = db_api.list_resource_routings(context, filter)
- if not routings:
- msg = (_("%(resource)s %(resource_id)s doesn't belong to the"
- " project %(project_id)s") %
- {'resource': resource_type_1,
- 'resource_id': job['resource'][resource_id_1],
- 'project_id': project_id})
- return utils.format_api_error(400, msg)
-
- # if job_type = seg_rule_setup, we should ensure the project id
- # is equal to the one from resource.
- if job_type in (constants.JT_SEG_RULE_SETUP,
- constants.JT_RESOURCE_RECYCLE):
- if job['project_id'] != job['resource']['project_id']:
- msg = (_("Specified project_id %(project_id_1)s and resource's"
- " project_id %(project_id_2)s are different") %
- {'project_id_1': job['project_id'],
- 'project_id_2': job['resource']['project_id']})
- return utils.format_api_error(400, msg)
-
- # combine uuid into target resource id
- resource_id = '#'.join([job['resource'][resource_id]
- for resource_type, resource_id
- in job_resource_map[job_type]])
-
- try:
- # create a job and put it into execution immediately
- self.xjob_handler.invoke_method(context, project_id,
- constants.job_handles[job_type],
- job_type, resource_id)
- except Exception as e:
- LOG.exception('Failed to create job: '
- '%(exception)s ', {'exception': e})
- return utils.format_api_error(
- 500, _('Failed to create a job'))
-
- new_job = db_api.get_latest_job(context, constants.JS_New, job_type,
- resource_id)
- return {'job': self._get_more_readable_job(new_job)}
-
- @expose(generic=True, template='json')
- def get_one(self, id, **kwargs):
- """the return value may vary according to the value of id
-
- :param id: 1) if id = 'schemas', return job schemas
- 2) if id = 'detail', return all jobs
- 3) if id = $job_id, return detailed single job info
- :return: return value is decided by id parameter
- """
- context = t_context.extract_context_from_environ()
- job_resource_map = constants.job_resource_map
-
- if not policy.enforce(context, policy.ADMIN_API_JOB_SCHEMA_LIST):
- return utils.format_api_error(
- 403, _('Unauthorized to show job information'))
-
- if id == 'schemas':
- job_schemas = []
- for job_type in job_resource_map.keys():
- job = {}
- resource = []
- for resource_type, resource_id in job_resource_map[job_type]:
- resource.append(resource_id)
-
- job['resource'] = resource
- job['type'] = job_type
- job_schemas.append(job)
-
- return {'schemas': job_schemas}
-
- if id == 'detail':
- return self.get_all(**kwargs)
-
- try:
- job = db_api.get_job(context, id)
- return {'job': self._get_more_readable_job(job)}
- except Exception:
- try:
- job = db_api.get_job_from_log(context, id)
- return {'job': self._get_more_readable_job(job)}
- except t_exc.ResourceNotFound:
- return utils.format_api_error(
- 404, _('Resource not found'))
-
- @expose(generic=True, template='json')
- def get_all(self, **kwargs):
- """Get all the jobs. Using filters, only get a subset of jobs.
-
- :param kwargs: job filters
- :return: a list of jobs
- """
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_JOB_LIST):
- return utils.format_api_error(
- 403, _('Unauthorized to show all jobs'))
-
- # check limit and marker, default value -1 means no pagination
- _limit = kwargs.pop('limit', -1)
-
- try:
- limit = int(_limit)
- limit = utils.get_pagination_limit(limit)
- except ValueError as e:
- LOG.exception('Failed to convert pagination limit to an integer: '
- '%(exception)s ', {'exception': e})
- msg = (_("Limit should be an integer or a valid literal "
- "for int() rather than '%s'") % _limit)
- return utils.format_api_error(400, msg)
-
- marker = kwargs.pop('marker', None)
-
- sorts = [('timestamp', 'desc'), ('id', 'desc')]
- is_valid_filter, filters = self._get_filters(kwargs)
-
- if not is_valid_filter:
- msg = (_('Unsupported filter type: %(filters)s') % {
- 'filters': ', '.join(
- [filter_name for filter_name in filters])
- })
- return utils.format_api_error(400, msg)
-
- # project ID from client should be equal to the one from
- # context, since only the project ID in which the user
- # is authorized will be used as the filter.
- filters['project_id'] = context.project_id
- filters = [{'key': key, 'comparator': 'eq', 'value': value}
- for key, value in six.iteritems(filters)]
-
- try:
- if marker is not None:
- try:
- # verify whether the marker is effective
- db_api.get_job(context, marker)
- jobs = db_api.list_jobs(context, filters,
- sorts, limit, marker)
- jobs_from_log = []
- if len(jobs) < limit:
- jobs_from_log = db_api.list_jobs_from_log(
- context, filters, sorts, limit - len(jobs), None)
- job_collection = jobs + jobs_from_log
- except t_exc.ResourceNotFound:
- try:
- db_api.get_job_from_log(context, marker)
- jobs_from_log = db_api.list_jobs_from_log(
- context, filters, sorts, limit, marker)
- job_collection = jobs_from_log
- except t_exc.ResourceNotFound:
- msg = (_('Invalid marker: %(marker)s')
- % {'marker': marker})
- return utils.format_api_error(400, msg)
- else:
- jobs = db_api.list_jobs(context, filters,
- sorts, limit, marker)
- jobs_from_log = []
- if len(jobs) < limit:
- jobs_from_log = db_api.list_jobs_from_log(
- context, filters, sorts, limit - len(jobs), None)
- job_collection = jobs + jobs_from_log
- # add link
- links = []
- if len(job_collection) >= limit:
- marker = job_collection[-1]['id']
- base = constants.JOB_PATH
- link = "%s?limit=%s&marker=%s" % (base, limit, marker)
- links.append({"rel": "next",
- "href": link})
-
- result = {'jobs': [self._get_more_readable_job(job)
- for job in job_collection]}
- if links:
- result['jobs_links'] = links
- return result
- except Exception as e:
- LOG.exception('Failed to show all asynchronous jobs: '
- '%(exception)s ', {'exception': e})
- return utils.format_api_error(
- 500, _('Failed to show all asynchronous jobs'))
-
- # make the job status and resource id more human readable. Split
- # resource id into several member uuid(s) to provide more detailed resource
- # information. If job entry is from job table, then remove resource id
- # and extra id from job attributes. If job entry is from job log table,
- # only remove resource id from job attributes.
- def _get_more_readable_job(self, job):
- job_resource_map = constants.job_resource_map
-
- if 'status' in job:
- job['status'] = constants.job_status_map[job['status']]
- else:
- job['status'] = constants.job_status_map[constants.JS_Success]
-
- job['resource'] = dict(zip([resource_id
- for resource_type, resource_id
- in job_resource_map[job['type']]],
- job['resource_id'].split('#')))
- job.pop('resource_id')
-
- if "extra_id" in job:
- job.pop('extra_id')
-
- return job
-
- def _get_filters(self, params):
- """Return a dictionary of query param filters from the request.
-
- :param params: the URI params coming from the wsgi layer
- :return: (flag, filters), flag indicates whether the filters are valid,
- and the filters denote a list of key-value pairs.
- """
- filters = {}
- unsupported_filters = {}
- for filter_name in params:
- if filter_name in constants.JOB_LIST_SUPPORTED_FILTERS:
- # map filter name
- if filter_name == 'status':
- job_status_in_db = self._get_job_status_in_db(
- params.get(filter_name))
- filters[filter_name] = job_status_in_db
- continue
- filters[filter_name] = params.get(filter_name)
- else:
- unsupported_filters[filter_name] = params.get(filter_name)
-
- if unsupported_filters:
- return False, unsupported_filters
- return True, filters
-
- # map user input job status to job status stored in database
- def _get_job_status_in_db(self, job_status):
- job_status_map = {
- 'fail': constants.JS_Fail,
- 'success': constants.JS_Success,
- 'running': constants.JS_Running,
- 'new': constants.JS_New
- }
- job_status_lower = job_status.lower()
- if job_status_lower in job_status_map:
- return job_status_map[job_status_lower]
- return job_status
-
- @expose(generic=True, template='json')
- def delete(self, job_id):
- # delete a job from the database. If the job is running, the delete
- # operation will fail. In other cases, job will be deleted directly.
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_JOB_DELETE):
- return utils.format_api_error(
- 403, _('Unauthorized to delete a job'))
-
- try:
- db_api.get_job_from_log(context, job_id)
- return utils.format_api_error(
- 400, _('Job %(job_id)s is from job log') % {'job_id': job_id})
- except Exception:
- try:
- job = db_api.get_job(context, job_id)
- except t_exc.ResourceNotFound:
- return utils.format_api_error(
- 404, _('Job %(job_id)s not found') % {'job_id': job_id})
- try:
- # if job status = RUNNING, notify user this new one, delete
- # operation fails.
- if job['status'] == constants.JS_Running:
- return utils.format_api_error(
- 400, (_('Failed to delete the running job %(job_id)s') %
- {"job_id": job_id}))
- # if job status = SUCCESS, move the job entry to job log table,
- # then delete it from job table.
- elif job['status'] == constants.JS_Success:
- db_api.finish_job(context, job_id, True, timeutils.utcnow())
- pecan.response.status = 200
- return {}
-
- db_api.delete_job(context, job_id)
- pecan.response.status = 200
- return {}
- except Exception as e:
- LOG.exception('Failed to delete the job: '
- '%(exception)s ', {'exception': e})
- return utils.format_api_error(
- 500, _('Failed to delete the job'))
-
- @expose(generic=True, template='json')
- def put(self, job_id):
- # we use HTTP/HTTPS PUT method to redo a job. Regularly PUT method
- # requires a request body, but considering the job redo operation
- # doesn't need more information other than job id, we will issue
- # this request without a request body.
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_JOB_REDO):
- return utils.format_api_error(
- 403, _('Unauthorized to redo a job'))
-
- try:
- db_api.get_job_from_log(context, job_id)
- return utils.format_api_error(
- 400, _('Job %(job_id)s is from job log') % {'job_id': job_id})
- except Exception:
- try:
- job = db_api.get_job(context, job_id)
- except t_exc.ResourceNotFound:
- return utils.format_api_error(
- 404, _('Job %(job_id)s not found') % {'job_id': job_id})
-
- try:
- # if status = RUNNING, notify user this new one and then exit
- if job['status'] == constants.JS_Running:
- return utils.format_api_error(
- 400, (_("Can't redo job %(job_id)s which is running") %
- {'job_id': job['id']}))
- # if status = SUCCESS, notify user this new one and then exit
- elif job['status'] == constants.JS_Success:
- msg = (_("Can't redo job %(job_id)s which had run successfully"
- ) % {'job_id': job['id']})
- return utils.format_api_error(400, msg)
- # if job status = FAIL or job status = NEW, redo it immediately
- self.xjob_handler.invoke_method(context, job['project_id'],
- constants.job_handles[job['type']],
- job['type'], job['resource_id'])
- except Exception as e:
- LOG.exception('Failed to redo the job: '
- '%(exception)s ', {'exception': e})
- return utils.format_api_error(
- 500, _('Failed to redo the job'))
diff --git a/tricircle/api/controllers/pod.py b/tricircle/api/controllers/pod.py
deleted file mode 100644
index 3273997b..00000000
--- a/tricircle/api/controllers/pod.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# Copyright (c) 2015 Huawei Tech. Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pecan
-from pecan import expose
-from pecan import Response
-from pecan import rest
-
-import oslo_db.exception as db_exc
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exceptions
-from tricircle.common.i18n import _
-from tricircle.common import policy
-
-from tricircle.db import api as db_api
-from tricircle.db import core
-from tricircle.db import models
-
-LOG = logging.getLogger(__name__)
-
-
-class PodsController(rest.RestController):
-
- def __init__(self):
- pass
-
- @expose(generic=True, template='json')
- def post(self, **kw):
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_PODS_CREATE):
- pecan.abort(401, _('Unauthorized to create pods'))
- return
-
- if 'pod' not in kw:
- pecan.abort(400, _('Request body pod not found'))
- return
-
- pod = kw['pod']
-
- # if az_name is null, and there is already one in db
- region_name = pod.get('region_name', '').strip()
- pod_az_name = pod.get('pod_az_name', '').strip()
- dc_name = pod.get('dc_name', '').strip()
- az_name = pod.get('az_name', '').strip()
- _uuid = uuidutils.generate_uuid()
- top_region_name = self._get_top_region(context)
-
- if az_name == '':
- if region_name == '':
- return Response(
- _('Valid region_name is required for top region'),
- 422)
-
- if top_region_name != '':
- return Response(_('Top region already exists'), 409)
- # to create the top region, make the pod_az_name to null value
- pod_az_name = ''
-
- if az_name != '':
- if region_name == '':
- return Response(
- _('Valid region_name is required for pod'), 422)
- # region_name != ''
- # then the pod region name should not be same as the top region
- if top_region_name == region_name:
- return Response(
- _('Pod region name duplicated with the top region name'),
- 409)
-
- try:
- with context.session.begin():
- new_pod = core.create_resource(
- context, models.Pod,
- {'pod_id': _uuid,
- 'region_name': region_name,
- 'pod_az_name': pod_az_name,
- 'dc_name': dc_name,
- 'az_name': az_name})
- except db_exc.DBDuplicateEntry as e1:
- LOG.exception('Record already exists on %(region_name)s: '
- '%(exception)s',
- {'region_name': region_name,
- 'exception': e1})
- return Response(_('Record already exists'), 409)
- except Exception as e2:
- LOG.exception('Failed to create pod: %(region_name)s,'
- 'pod_az_name: %(pod_az_name)s,'
- 'dc_name: %(dc_name)s,'
- 'az_name: %(az_name)s'
- '%(exception)s ',
- {'region_name': region_name,
- 'pod_az_name': pod_az_name,
- 'dc_name': dc_name,
- 'az_name': az_name,
- 'exception': e2})
- return Response(_('Failed to create pod'), 500)
-
- return {'pod': new_pod}
-
- @expose(generic=True, template='json')
- def get_one(self, _id):
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_PODS_SHOW):
- pecan.abort(401, _('Unauthorized to show pods'))
- return
-
- try:
- return {'pod': db_api.get_pod(context, _id)}
- except t_exceptions.ResourceNotFound:
- pecan.abort(404, _('Pod not found'))
- return
-
- @expose(generic=True, template='json')
- def get_all(self):
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_PODS_LIST):
- pecan.abort(401, _('Unauthorized to list pods'))
- return
-
- try:
- return {'pods': db_api.list_pods(context)}
- except Exception as e:
- LOG.exception('Failed to list all pods: %(exception)s ',
- {'exception': e})
-
- pecan.abort(500, _('Failed to list pods'))
- return
-
- @expose(generic=True, template='json')
- def delete(self, _id):
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_PODS_DELETE):
- pecan.abort(401, _('Unauthorized to delete pods'))
- return
-
- try:
- with context.session.begin():
- core.delete_resource(context, models.Pod, _id)
- pecan.response.status = 200
- return {}
- except t_exceptions.ResourceNotFound:
- return Response(_('Pod not found'), 404)
- except Exception as e:
- LOG.exception('Failed to delete pod: %(pod_id)s,'
- '%(exception)s',
- {'pod_id': _id,
- 'exception': e})
-
- return Response(_('Failed to delete pod'), 500)
-
- def _get_top_region(self, ctx):
- top_region_name = ''
- try:
- with ctx.session.begin():
- pods = core.query_resource(ctx,
- models.Pod, [], [])
- for pod in pods:
- if pod['az_name'] == '' and pod['region_name'] != '':
- return pod['region_name']
- except Exception as e:
- LOG.exception('Failed to get top region: %(exception)s ',
- {'exception': e})
-
- return top_region_name
-
- return top_region_name
diff --git a/tricircle/api/controllers/root.py b/tricircle/api/controllers/root.py
deleted file mode 100644
index e3b227ce..00000000
--- a/tricircle/api/controllers/root.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright (c) 2015 Huawei Tech. Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import pecan
-from pecan import request
-
-from tricircle.api.controllers import job
-from tricircle.api.controllers import pod
-from tricircle.api.controllers import routing
-import tricircle.common.context as t_context
-
-
-def expose(*args, **kwargs):
- kwargs.setdefault('content_type', 'application/json')
- kwargs.setdefault('template', 'json')
- return pecan.expose(*args, **kwargs)
-
-
-def when(index, *args, **kwargs):
- kwargs.setdefault('content_type', 'application/json')
- kwargs.setdefault('template', 'json')
- return index.when(*args, **kwargs)
-
-
-class RootController(object):
-
- @expose()
- def _lookup(self, version, *remainder):
- if version == 'v1.0':
- return V1Controller(), remainder
-
- @pecan.expose(generic=True, template='json')
- def index(self):
- return {
- "versions": [
- {
- "status": "CURRENT",
- "links": [
- {
- "rel": "self",
- "href": pecan.request.application_url + "/v1.0/"
- }
- ],
- "id": "v1.0",
- "updated": "2015-09-09"
- }
- ]
- }
-
- @index.when(method='POST')
- @index.when(method='PUT')
- @index.when(method='DELETE')
- @index.when(method='HEAD')
- @index.when(method='PATCH')
- def not_supported(self):
- pecan.abort(405)
-
-
-class V1Controller(object):
-
- def __init__(self):
-
- self.sub_controllers = {
- "pods": pod.PodsController(),
- "routings": routing.RoutingController(),
- "jobs": job.AsyncJobController()
- }
-
- for name, ctrl in self.sub_controllers.items():
- setattr(self, name, ctrl)
-
- @pecan.expose(generic=True, template='json')
- def index(self):
- return {
- "version": "1.0",
- "links": [
- {"rel": "self",
- "href": pecan.request.application_url + "/v1.0"}
- ] + [
- {"rel": name,
- "href": pecan.request.application_url + "/v1.0/" + name}
- for name in sorted(self.sub_controllers)
- ]
- }
-
- @index.when(method='POST')
- @index.when(method='PUT')
- @index.when(method='DELETE')
- @index.when(method='HEAD')
- @index.when(method='PATCH')
- def not_supported(self):
- pecan.abort(405)
-
-
-def _extract_context_from_environ(environ):
- context_paras = {'auth_token': 'HTTP_X_AUTH_TOKEN',
- 'user': 'HTTP_X_USER_ID',
- 'tenant': 'HTTP_X_TENANT_ID',
- 'user_name': 'HTTP_X_USER_NAME',
- 'tenant_name': 'HTTP_X_PROJECT_NAME',
- 'domain': 'HTTP_X_DOMAIN_ID',
- 'user_domain': 'HTTP_X_USER_DOMAIN_ID',
- 'project_domain': 'HTTP_X_PROJECT_DOMAIN_ID',
- 'request_id': 'openstack.request_id'}
- for key in context_paras:
- context_paras[key] = environ.get(context_paras[key])
- role = environ.get('HTTP_X_ROLE')
- # TODO(zhiyuan): replace with policy check
- context_paras['is_admin'] = 'admin' in role.split(',') if role else False
- return t_context.Context(**context_paras)
-
-
-def _get_environment():
- return request.environ
diff --git a/tricircle/api/controllers/routing.py b/tricircle/api/controllers/routing.py
deleted file mode 100644
index 2eba6518..00000000
--- a/tricircle/api/controllers/routing.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pecan
-from pecan import expose
-from pecan import rest
-
-from oslo_log import log as logging
-
-from tricircle.common import constants
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exceptions
-from tricircle.common.i18n import _
-from tricircle.common import policy
-from tricircle.common import utils
-
-from tricircle.db import api as db_api
-
-LOG = logging.getLogger(__name__)
-
-
-SUPPORTED_FILTERS = ['id', 'top_id', 'bottom_id', 'pod_id', 'project_id',
- 'resource_type', 'created_at', 'updated_at']
-
-
-class RoutingController(rest.RestController):
-
- def __init__(self):
- pass
-
- @expose(generic=True, template='json')
- def post(self, **kw):
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_ROUTINGS_CREATE):
- return utils.format_api_error(
- 403, _("Unauthorized to create resource routing"))
-
- if 'routing' not in kw:
- return utils.format_api_error(
- 400, _("Request body not found"))
-
- routing = kw['routing']
-
- for field in ('top_id', 'bottom_id', 'pod_id',
- 'project_id', 'resource_type'):
- value = routing.get(field)
- if value is None or len(value.strip()) == 0:
- return utils.format_api_error(
- 400, _("Field %(field)s can not be empty") % {
- 'field': field})
-
- # the resource type should be properly provisioned.
- resource_type = routing.get('resource_type').strip()
- if not constants.is_valid_resource_type(resource_type):
- return utils.format_api_error(
- 400, _('There is no such resource type'))
-
- try:
- top_id = routing.get('top_id').strip()
- bottom_id = routing.get('bottom_id').strip()
- pod_id = routing.get('pod_id').strip()
- project_id = routing.get('project_id').strip()
-
- routing = db_api.create_resource_mapping(context, top_id,
- bottom_id, pod_id,
- project_id,
- resource_type)
- if not routing:
- return utils.format_api_error(
- 409, _('Resource routing already exists'))
- except Exception as e:
- LOG.exception('Failed to create resource routing: '
- '%(exception)s ', {'exception': e})
- return utils.format_api_error(
- 500, _('Failed to create resource routing'))
-
- return {'routing': routing}
-
- def _get_filters(self, params):
- """Return a dictionary of query param filters from the request.
-
- :param params: the URI params coming from the wsgi layer
- :return: (flag, filters), flag indicates whether the filters are valid,
- and the filters denote a list of key-value pairs.
- """
- filters = {}
- unsupported_filters = {}
- for filter_name in params:
- if filter_name in SUPPORTED_FILTERS:
- # map filter name
- filters[filter_name] = params.get(filter_name)
- else:
- unsupported_filters[filter_name] = params.get(filter_name)
-
- if unsupported_filters:
- return False, unsupported_filters
- return True, filters
-
- @expose(generic=True, template='json')
- def get_all(self, **kwargs):
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_ROUTINGS_LIST):
- return utils.format_api_error(
- 403, _('Unauthorized to show all resource routings'))
-
- # default value -1 means no pagination, then maximum pagination
- # limit from configuration will be used.
- _limit = kwargs.pop('limit', -1)
-
- try:
- limit = int(_limit)
- limit = utils.get_pagination_limit(limit)
- except ValueError as e:
- LOG.exception('Failed to convert pagination limit to an integer: '
- '%(exception)s ', {'exception': e})
- msg = (_("Limit should be an integer or a valid literal "
- "for int() rather than '%s'") % _limit)
- return utils.format_api_error(400, msg)
-
- marker = kwargs.pop('marker', None)
- if marker is not None:
- try:
- marker = int(marker)
- try:
- # we throw an exception if a marker with
- # invalid ID is specified.
- db_api.get_resource_routing(context, marker)
- except t_exceptions.ResourceNotFound:
- return utils.format_api_error(
- 400, _('Marker %s is an invalid ID') % marker)
- except ValueError as e:
- LOG.exception('Failed to convert page marker to an integer: '
- '%(exception)s ', {'exception': e})
- msg = (_("Marker should be an integer or a valid literal "
- "for int() rather than '%s'") % marker)
- return utils.format_api_error(400, msg)
-
- is_valid_filter, filters = self._get_filters(kwargs)
-
- if not is_valid_filter:
- msg = (_('Unsupported filter type: %(filters)s') % {
- 'filters': ', '.join(
- [filter_name for filter_name in filters])
- })
- return utils.format_api_error(400, msg)
-
- if 'id' in filters:
- try:
- # resource routing id is an integer.
- filters['id'] = int(filters['id'])
- except ValueError as e:
- LOG.exception('Failed to convert routing id to an integer:'
- ' %(exception)s ', {'exception': e})
- msg = (_("Id should be an integer or a valid literal "
- "for int() rather than '%s'") % filters['id'])
- return utils.format_api_error(400, msg)
-
- # project ID from client should be equal to the one from
- # context, since only the project ID in which the user
- # is authorized will be used as the filter.
- filters['project_id'] = context.project_id
- expand_filters = [{'key': filter_name, 'comparator': 'eq',
- 'value': filters[filter_name]}
- for filter_name in filters]
- try:
- routings = db_api.list_resource_routings(context, expand_filters,
- limit, marker,
- sorts=[('id', 'desc')])
- links = []
- if len(routings) >= limit:
- marker = routings[-1]['id']
- # if we reach the first element, then no elements in next page,
- # so link to next page won't be provided.
- if marker != 1:
- base = constants.ROUTING_PATH
- link = "%s?limit=%s&marker=%s" % (base, limit, marker)
-
- links.append({"rel": "next",
- "href": link})
-
- result = {}
- result["routings"] = routings
- if links:
- result["routings_links"] = links
- return result
- except Exception as e:
- LOG.exception('Failed to show all resource routings: '
- '%(exception)s ', {'exception': e})
- return utils.format_api_error(
- 500, _('Failed to show all resource routings'))
-
- @expose(generic=True, template='json')
- def get_one(self, _id):
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_ROUTINGS_SHOW):
- return utils.format_api_error(
- 403, _('Unauthorized to show the resource routing'))
-
- try:
- return {'routing': db_api.get_resource_routing(context, _id)}
- except t_exceptions.ResourceNotFound:
- return utils.format_api_error(
- 404, _('Resource routing not found'))
-
- @expose(generic=True, template='json')
- def delete(self, _id):
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_ROUTINGS_DELETE):
- return utils.format_api_error(
- 403, _('Unauthorized to delete the resource routing'))
-
- try:
- db_api.get_resource_routing(context, _id)
- except t_exceptions.ResourceNotFound:
- return utils.format_api_error(404,
- _('Resource routing not found'))
- try:
- db_api.delete_resource_routing(context, _id)
- pecan.response.status = 200
- return pecan.response
- except Exception as e:
- LOG.exception('Failed to delete the resource routing: '
- '%(exception)s ', {'exception': e})
- return utils.format_api_error(
- 500, _('Failed to delete the resource routing'))
-
- @expose(generic=True, template='json')
- def put(self, _id, **kw):
- context = t_context.extract_context_from_environ()
-
- if not policy.enforce(context, policy.ADMIN_API_ROUTINGS_PUT):
- return utils.format_api_error(
- 403, _('Unauthorized to update resource routing'))
-
- try:
- db_api.get_resource_routing(context, _id)
- except t_exceptions.ResourceNotFound:
- return utils.format_api_error(404,
- _('Resource routing not found'))
-
- if 'routing' not in kw:
- return utils.format_api_error(
- 400, _('Request body not found'))
-
- update_dict = kw['routing']
-
- # values to be updated should not be empty
- for field in update_dict:
- value = update_dict.get(field)
- if value is None or len(value.strip()) == 0:
- return utils.format_api_error(
-                    400, _("Field %(field)s cannot be empty") % {
- 'field': field})
-
-        # the resource type should be a valid, supported resource type.
- if 'resource_type' in update_dict:
- if not constants.is_valid_resource_type(
- update_dict['resource_type']):
- return utils.format_api_error(
- 400, _('There is no such resource type'))
-
-        # the pod with the new pod_id should exist in the pod table
- if 'pod_id' in update_dict:
- new_pod_id = update_dict.get('pod_id')
- try:
- # find the pod through the pod_id and verify whether it exists
- db_api.get_pod(context, new_pod_id)
- except t_exceptions.ResourceNotFound:
- return utils.format_api_error(
- 400, _("The pod %(new_pod_id)s doesn't"
- " exist") % {'new_pod_id': new_pod_id})
- except Exception as e:
- LOG.exception('Failed to update resource routing: '
- '%(exception)s ', {'exception': e})
- return utils.format_api_error(
- 500, _('Failed to update resource routing'))
-
- try:
- routing_updated = db_api.update_resource_routing(
- context, _id, update_dict)
- return {'routing': routing_updated}
- except Exception as e:
- LOG.exception('Failed to update resource routing: '
- '%(exception)s ', {'exception': e})
- return utils.format_api_error(
- 500, _('Failed to update resource routing'))
diff --git a/tricircle/api/opts.py b/tricircle/api/opts.py
deleted file mode 100644
index 4621312b..00000000
--- a/tricircle/api/opts.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import tricircle.api.app
-
-
-def list_opts():
- return [
- ('DEFAULT', tricircle.api.app.common_opts),
- ]
diff --git a/tricircle/api/wsgi.py b/tricircle/api/wsgi.py
deleted file mode 100644
index f82c3aff..00000000
--- a/tricircle/api/wsgi.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) 2017 Huawei Tech. Co,. Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""WSGI script for Tricircle API
-
-WSGI handler for running Tricircle API under Apache2, nginx, gunicorn, etc.
-
-Community wide goal in Pike:
- https://governance.openstack.org/tc/goals/pike/deploy-api-in-wsgi.html
-"""
-
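-# Illustrative deployment example (an assumed command line, not part of the
-# original repository): any WSGI server able to call an application factory
-# can serve init_application, e.g.
-#
-#     gunicorn --workers 4 'tricircle.api.wsgi:init_application()'
-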
-import os
-import os.path
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from tricircle.api import app
-from tricircle.common import config
-
-CONFIG_FILE = 'api.conf'
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-def _get_config_file(env=None):
- if env is None:
- env = os.environ
-
- dir_name = env.get('TRICIRCLE_CONF_DIR', '/etc/tricircle').strip()
- return os.path.join(dir_name, CONFIG_FILE)
-
-
-def init_application():
-
- # initialize the config system
- conf_file = _get_config_file()
- # NOTE(hberaud): Call reset to ensure the ConfigOpts object doesn't
- # already contain registered options if the app is reloaded.
- CONF.reset()
- config.init(app.common_opts, ['--config-file', conf_file])
-
- LOG.info("Configuration:")
- CONF.log_opt_values(LOG, logging.INFO)
-
- # return WSGI app
- return app.setup_app()
diff --git a/tricircle/cmd/__init__.py b/tricircle/cmd/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/cmd/api.py b/tricircle/cmd/api.py
deleted file mode 100644
index b625764a..00000000
--- a/tricircle/cmd/api.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# Much of this module is based on the work of the Ironic team
-# see https://opendev.org/openstack/ironic/src/branch/master/ironic/cmd/api.py
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_service import wsgi
-
-from tricircle.api import app
-from tricircle.common import config
-from tricircle.common import restapp
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-def main():
- config.init(app.common_opts, sys.argv[1:])
- application = app.setup_app()
-
- host = CONF.bind_host
- port = CONF.bind_port
- workers = CONF.api_workers
-
- if workers < 1:
-        LOG.warning("Wrong worker number, worker = %(workers)s",
-                    {'workers': workers})
-        workers = 1
-
-    LOG.info("Admin API on http://%(host)s:%(port)s with %(workers)s workers",
- {'host': host, 'port': port, 'workers': workers})
-
- service = wsgi.Server(CONF, 'Tricircle Admin_API', application, host, port)
- restapp.serve(service, CONF, workers)
-
- LOG.info("Configuration:")
- CONF.log_opt_values(LOG, logging.INFO)
-
- restapp.wait()
-
-
-if __name__ == '__main__':
- main()
diff --git a/tricircle/cmd/manage.py b/tricircle/cmd/manage.py
deleted file mode 100644
index bef841d6..00000000
--- a/tricircle/cmd/manage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from tricircle.db import core
-from tricircle.db import migration_helpers
-
-import pbr.version
-
-CONF = cfg.CONF
-
-
-def do_db_version():
- print(migration_helpers.db_version())
-
-
-def do_db_sync():
- migration_helpers.sync_repo(CONF.command.version)
-
-
-def add_command_parsers(subparsers):
- parser = subparsers.add_parser('db_version')
- parser.set_defaults(func=do_db_version)
-
- parser = subparsers.add_parser('db_sync')
- parser.set_defaults(func=do_db_sync)
- parser.add_argument('version', nargs='?')
-
-
-command_opt = cfg.SubCommandOpt('command',
- title='Commands',
- help='Show available commands.',
- handler=add_command_parsers)
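-
-# Illustrative CLI usage (a sketch based on the sub-commands registered
-# above; the executable name matches the prog set in main() below):
-#     tricircle-db-manage db_sync
-#     tricircle-db-manage db_version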
-
-
-def main():
- core.initialize()
- logging.register_options(CONF)
- logging.setup(CONF, 'tricircle-db-manage')
- CONF.register_cli_opt(command_opt)
- version_info = pbr.version.VersionInfo('tricircle')
-
- try:
- CONF(sys.argv[1:], project='tricircle', prog='tricircle-db-manage',
- version=version_info.version_string())
- except RuntimeError as e:
- sys.exit("ERROR: %s" % e)
-
- try:
- CONF.command.func()
- except Exception as e:
- sys.exit("ERROR: %s" % e)
diff --git a/tricircle/cmd/status.py b/tricircle/cmd/status.py
deleted file mode 100644
index 10bad2a9..00000000
--- a/tricircle/cmd/status.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) 2018 NEC, Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_upgradecheck import upgradecheck
-
-from tricircle.common.i18n import _
-
-
-class Checks(upgradecheck.UpgradeCommands):
-
- """Upgrade checks for the tricircle-status upgrade check command
-
- Upgrade checks should be added as separate methods in this class
-    and added to the _upgrade_checks tuple.
- """
-
- def _check_placeholder(self):
-        # This is just a placeholder for upgrade checks; it should be
-        # removed when the actual checks are added
- return upgradecheck.Result(upgradecheck.Code.SUCCESS)
-
- # The format of the check functions is to return an
- # oslo_upgradecheck.upgradecheck.Result
- # object with the appropriate
- # oslo_upgradecheck.upgradecheck.Code and details set.
- # If the check hits warnings or failures then those should be stored
- # in the returned Result's "details" attribute. The
- # summary will be rolled up at the end of the check() method.
- _upgrade_checks = (
- # In the future there should be some real checks added here
- (_('Placeholder'), _check_placeholder),
- )
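-
-    # Illustrative sketch of a future check (hypothetical names, not part of
-    # the original module); a real check returns a WARNING or FAILURE Result
-    # with details:
-    #     def _check_something(self):
-    #         if problem_found:  # hypothetical condition
-    #             return upgradecheck.Result(
-    #                 upgradecheck.Code.WARNING, _('describe the problem'))
-    #         return upgradecheck.Result(upgradecheck.Code.SUCCESS)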
-
-
-def main():
- return upgradecheck.main(
- cfg.CONF, project='tricircle', upgrade_command=Checks())
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/tricircle/cmd/xjob.py b/tricircle/cmd/xjob.py
deleted file mode 100644
index 4523e349..00000000
--- a/tricircle/cmd/xjob.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# Much of this module is based on the work of the Ironic team
-# see https://opendev.org/openstack/ironic/src/branch/master/ironic/cmd/api.py
-
-import eventlet
-
-eventlet.monkey_patch()
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from tricircle.common import config
-
-from tricircle.xjob import xservice
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-def main():
- config.init(xservice.common_opts, sys.argv[1:])
-
- host = CONF.host
- workers = CONF.workers
-
- if workers < 1:
-        LOG.warning("Wrong worker number, worker = %(workers)s",
-                    {'workers': workers})
-        workers = 1
-
-    LOG.info("XJob server on %(host)s with %(workers)s workers",
- {'host': host, 'workers': workers})
-
- xservice.serve(xservice.create_service(), workers)
-
- LOG.info("Configuration:")
- CONF.log_opt_values(LOG, logging.INFO)
-
- xservice.wait()
-
-
-if __name__ == '__main__':
- main()
diff --git a/tricircle/common/__init__.py b/tricircle/common/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/common/baserpc.py b/tricircle/common/baserpc.py
deleted file mode 100644
index 15ec3dd6..00000000
--- a/tricircle/common/baserpc.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# copy and modify from OpenStack Nova
-
-"""
-Base RPC client and server common to all services.
-"""
-
-from oslo_config import cfg
-import oslo_messaging as messaging
-from oslo_serialization import jsonutils
-
-from tricircle.common import rpc
-
-
-CONF = cfg.CONF
-rpcapi_cap_opt = cfg.StrOpt('baseclientapi',
-                            help='Set a version cap for messages sent to the '
-                                 'base api in any service')
-CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
-
-_NAMESPACE = 'baseclientapi'
-
-
-class BaseClientAPI(object):
-
- """Client side of the base rpc API.
-
- API version history:
- 1.0 - Initial version.
- """
-
- VERSION_ALIASES = {
- # baseapi was added in the first version of Tricircle
- }
-
- def __init__(self, topic):
- super(BaseClientAPI, self).__init__()
- target = messaging.Target(topic=topic,
- namespace=_NAMESPACE,
- version='1.0')
- version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.baseapi,
- CONF.upgrade_levels.baseapi)
- self.client = rpc.get_client(target, version_cap=version_cap)
-
- def ping(self, context, arg, timeout=None):
- arg_p = jsonutils.to_primitive(arg)
- cctxt = self.client.prepare(timeout=timeout)
- return cctxt.call(context, 'ping', arg=arg_p)
-
-
-class BaseServerRPCAPI(object):
- """Server side of the base RPC API."""
-
- target = messaging.Target(namespace=_NAMESPACE, version='1.0')
-
- def __init__(self, service_name):
- self.service_name = service_name
-
- def ping(self, context, arg):
- resp = {'service': self.service_name, 'arg': arg}
- return jsonutils.to_primitive(resp)
diff --git a/tricircle/common/client.py b/tricircle/common/client.py
deleted file mode 100644
index 8eb0f867..00000000
--- a/tricircle/common/client.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import functools
-import inspect
-import six
-from six.moves import xrange
-
-import keystoneauth1.identity.generic as auth_identity
-from keystoneauth1 import session
-from keystoneclient.v3 import client as keystone_client
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-
-import tricircle.common.context as tricircle_context
-from tricircle.common import exceptions
-from tricircle.common import resource_handle
-from tricircle.db import api
-from tricircle.db import models
-
-
-client_opts = [
- cfg.StrOpt('auth_url',
- default='http://127.0.0.1/identity',
- help='keystone authorization url'),
- cfg.StrOpt('identity_url',
- default='http://127.0.0.1/identity/v3',
- help='keystone service url'),
- cfg.BoolOpt('auto_refresh_endpoint',
- default=False,
-                help='if set to True, endpoint will be automatically '
-                     'refreshed if a timeout occurs when accessing it'),
- cfg.StrOpt('top_region_name',
- help='name of top pod which client needs to access'),
- cfg.StrOpt('admin_username',
- help='username of admin account, needed when'
- ' auto_refresh_endpoint set to True'),
- cfg.StrOpt('admin_password',
- help='password of admin account, needed when'
- ' auto_refresh_endpoint set to True'),
- cfg.StrOpt('admin_tenant',
- help='tenant name of admin account, needed when'
- ' auto_refresh_endpoint set to True'),
- cfg.StrOpt('admin_user_domain_name',
- default='Default',
- help='user domain name of admin account, needed when'
- ' auto_refresh_endpoint set to True'),
- cfg.StrOpt('admin_tenant_domain_name',
- default='Default',
- help='tenant domain name of admin account, needed when'
- ' auto_refresh_endpoint set to True'),
- cfg.StrOpt('bridge_cidr',
- default='100.0.0.0/9',
- help='cidr pool of the bridge network'),
- cfg.StrOpt('cross_pod_vxlan_mode', default='p2p',
- choices=['p2p', 'l2gw', 'noop'],
- help='Cross-pod VxLAN networking support mode'),
- cfg.IntOpt('max_shadow_port_bulk_size', default=100,
- help='max bulk size to create shadow ports'),
- cfg.IntOpt('max_trunk_subports_bulk_size', default=100,
- help='max bulk size to create trunk subports')
-]
-client_opt_group = cfg.OptGroup('client')
-cfg.CONF.register_group(client_opt_group)
-cfg.CONF.register_opts(client_opts, group=client_opt_group)
-
-LOG = logging.getLogger(__name__)
-
-
-def _safe_operation(operation_name):
- def handle_func(func):
- @six.wraps(func)
- def handle_args(*args, **kwargs):
- instance, resource, context = args[:3]
- if resource not in instance.operation_resources_map[
- operation_name]:
- raise exceptions.ResourceNotSupported(resource, operation_name)
- retries = 1
- for i in xrange(retries + 1):
- try:
- service = instance.resource_service_map[resource]
- instance._ensure_endpoint_set(context, service)
- instance._ensure_token_for_admin(context)
- return func(*args, **kwargs)
- except exceptions.EndpointNotAvailable as e:
- instance._unset_endpoint(service)
- if i == retries:
- raise
- if cfg.CONF.client.auto_refresh_endpoint:
- LOG.warning('%(exception)s, '
- 'update endpoint and try again',
- {'exception': e.message})
- instance._update_endpoint_from_keystone(context, True)
- else:
- raise
- except exceptions.EndpointNotFound as e:
- # NOTE(zhiyuan) endpoints are not registered in Keystone
-                    # for the given pod and service, so we fall back to
-                    # the default behaviours of the handle functions
- if i < retries and cfg.CONF.client.auto_refresh_endpoint:
- LOG.warning('%(exception)s, '
- 'update endpoint and try again',
- {'exception': e.message})
- instance._update_endpoint_from_keystone(context, True)
- continue
- if operation_name == 'list':
- return []
- else:
- return None
- return handle_args
- return handle_func
-
-
-class Client(object):
- """Wrapper of all OpenStack service clients
-
- Client works as a wrapper of all OpenStack service clients so you can
- operate all kinds of resources by only interacting with Client. Client
- provides five methods to operate resources:
- create_resources
- delete_resources
- get_resources
- list_resources
- action_resources
-
- Take create_resources as an example to show how Client works. When
- create_resources is called, it gets the corresponding service handler
- according to the resource type. Service handlers are defined in
- resource_handle.py and each service has one. Each handler has the
- following methods:
- handle_create
- handle_delete
- handle_get
- handle_list
- handle_action
-    It's obvious that create_resources is mapped to handle_create (for port,
- handle_create in NeutronResourceHandle is called).
-
-    Not all kinds of resources support the above five operations (or they are
-    not yet supported by Tricircle), so each service handler has a
- support_resource field to specify the resources and operations it
- supports, like:
- 'port': LIST | CREATE | DELETE | GET
- This line means that NeutronResourceHandle supports list, create, delete
- and get operations for port resource. To support more resources or make a
- resource support more operations, register them in support_resource.
-
-    Dig into "handle_xxx" and you will find that it eventually calls methods
-    in the OpenStack service clients. Calling handle_create for port will
- result in calling create_port in neutronclient module.
-
-    The current "handle_xxx" implementation constructs the method name from
-    the resource and operation type and uses getattr to dynamically load the
-    method from the OpenStack service client, so it covers most cases.
-    Supporting a new kind of resource, or a new kind of operation for an
-    existing resource, is simply a matter of registering an entry in
-    support_resource as described above. But if some special cases occur
-    later, modifying "handle_xxx" is needed.
-
-    Also, pay attention to the action operation, since you need to check the
-    implementation of the OpenStack service client to know what the method
-    name of the action is and what parameters the method has. In the
-    docstring of action_resources you can find that for each action there is
-    one line describing the method name and parameters, like:
- aggregate -> add_host -> aggregate, host -> none
- This line means that for aggregate resource, novaclient module has an
-    add_host method which takes two positional parameters and no keyword
-    parameters. For simplicity, action name and method name are the same.
-
- One more thing to mention, Client registers a partial function
- (operation)_(resource)s for each operation and each resource. For example,
- you can call create_resources(self, resource, cxt, body) directly to create
- a network, or use create_networks(self, cxt, body) for short.
- """
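-
-    # Illustrative usage of the generated partial functions (a sketch; the
-    # region name and request body are assumptions, not taken from this
-    # module):
-    #     client = Client('RegionOne')
-    #     body = {'network': {'name': 'net1'}}
-    #     client.create_resources('network', cxt, body)
-    #     # the shorthand generated in __init__ below is equivalent:
-    #     client.create_networks(cxt, body)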
- def __init__(self, region_name=None):
- self.auth_url = cfg.CONF.client.auth_url
- self.resource_service_map = {}
- self.operation_resources_map = collections.defaultdict(set)
- self.service_handle_map = {}
- self.region_name = region_name
- if not self.region_name:
- self.region_name = cfg.CONF.client.top_region_name
- for _, handle_class in inspect.getmembers(resource_handle):
- if not inspect.isclass(handle_class):
- continue
- if not hasattr(handle_class, 'service_type'):
- continue
- handle_obj = handle_class(self.auth_url)
- self.service_handle_map[handle_obj.service_type] = handle_obj
- for resource in handle_obj.support_resource:
- self.resource_service_map[resource] = handle_obj.service_type
- self.operation_resources_map['client'].add(resource)
- for operation, index in six.iteritems(
- resource_handle.operation_index_map):
- # add parentheses to emphasize we mean to do bitwise and
- if (handle_obj.support_resource[resource] & index) == 0:
- continue
- self.operation_resources_map[operation].add(resource)
- if resource == 'qos_policy':
- setattr(self, '%s_qos_policies' % operation,
- functools.partial(
- getattr(self, '%s_resources' % operation),
- resource))
- else:
- setattr(self, '%s_%ss' % (operation, resource),
- functools.partial(
- getattr(self, '%s_resources' % operation),
- resource))
-
- @staticmethod
- def _get_keystone_session(project_id=None):
- return resource_handle.ResourceHandle.get_keystone_session(project_id)
-
- @staticmethod
- def get_admin_token(project_id=None):
- return Client._get_admin_token(project_id)
-
- @staticmethod
- def _get_admin_token(project_id=None):
- return Client._get_keystone_session(project_id).get_token()
-
- def _get_admin_project_id(self):
- return self._get_keystone_session().get_project_id()
-
- def _get_endpoint_from_keystone(self, cxt):
- auth = auth_identity.Token(cfg.CONF.client.auth_url,
- cxt.auth_token, tenant_id=cxt.tenant)
- sess = session.Session(auth=auth)
- cli = keystone_client.Client(session=sess)
-
- service_id_name_map = {}
- for service in cli.services.list():
- service_dict = service.to_dict()
- service_id_name_map[service_dict['id']] = service_dict['name']
-
- region_service_endpoint_map = {}
- for endpoint in cli.endpoints.list():
- endpoint_dict = endpoint.to_dict()
- if endpoint_dict['interface'] != 'public':
- continue
- region_id = endpoint_dict['region']
- service_id = endpoint_dict['service_id']
- url = endpoint_dict['url']
- service_name = service_id_name_map[service_id]
- if region_id not in region_service_endpoint_map:
- region_service_endpoint_map[region_id] = {}
- region_service_endpoint_map[region_id][service_name] = url
- return region_service_endpoint_map
-
- def _get_config_with_retry(self, cxt, filters, pod, service, retry):
- conf_list = api.list_cached_endpoints(cxt, filters)
- if len(conf_list) == 0:
- if not retry:
- raise exceptions.EndpointNotFound(pod, service)
- self._update_endpoint_from_keystone(cxt, True)
- return self._get_config_with_retry(cxt,
- filters, pod, service, False)
- return conf_list
-
- def _unset_endpoint(self, service):
- handle = self.service_handle_map[service]
- handle.clear_endpoint_url()
-
- def _ensure_endpoint_set(self, cxt, service):
- handle = self.service_handle_map[service]
- if not handle.is_endpoint_url_set():
- pod_filters = [{'key': 'region_name',
- 'comparator': 'eq',
- 'value': self.region_name}]
- pod_list = api.list_pods(cxt, pod_filters)
- if len(pod_list) == 0:
- raise exceptions.ResourceNotFound(models.Pod,
- self.region_name)
- # region_name is unique key, safe to get the first element
- pod_id = pod_list[0]['pod_id']
- config_filters = [
- {'key': 'pod_id', 'comparator': 'eq', 'value': pod_id},
- {'key': 'service_type', 'comparator': 'eq', 'value': service}]
- conf_list = self._get_config_with_retry(
- cxt, config_filters, pod_id, service,
- cfg.CONF.client.auto_refresh_endpoint)
- url = conf_list[0]['service_url']
- handle.update_endpoint_url(url)
-
- def _update_endpoint_from_keystone(self, cxt, is_internal):
- """Update the database by querying service endpoint url from Keystone
-
- :param cxt: context object
-        :param is_internal: if True, this method uses the pre-configured
-        admin username and password to apply for a new admin token; this
-        happens only when auto_refresh_endpoint is set to True. If False,
-        the token in cxt is used directly, so users should prepare an admin
-        token themselves
- :return: None
- """
- if is_internal:
- admin_context = tricircle_context.get_admin_context()
- self._ensure_token_for_admin(admin_context)
- endpoint_map = self._get_endpoint_from_keystone(admin_context)
- else:
- endpoint_map = self._get_endpoint_from_keystone(cxt)
-
- for region in endpoint_map:
- # use region name to query pod
- pod_filters = [{'key': 'region_name', 'comparator': 'eq',
- 'value': region}]
- pod_list = api.list_pods(cxt, pod_filters)
- # skip region/pod not registered in cascade service
- if len(pod_list) != 1:
- continue
- for service in endpoint_map[region]:
- pod_id = pod_list[0]['pod_id']
- config_filters = [{'key': 'pod_id', 'comparator': 'eq',
- 'value': pod_id},
- {'key': 'service_type', 'comparator': 'eq',
- 'value': service}]
- config_list = api.list_cached_endpoints(
- cxt, config_filters)
-
- if len(config_list) > 1:
- continue
- if len(config_list) == 1:
- config_id = config_list[0]['service_id']
- update_dict = {
- 'service_url': endpoint_map[region][service]}
- api.update_cached_endpoints(
- cxt, config_id, update_dict)
- else:
- config_dict = {
- 'service_id': uuidutils.generate_uuid(),
- 'pod_id': pod_id,
- 'service_type': service,
- 'service_url': endpoint_map[region][service]
- }
- api.create_cached_endpoints(
- cxt, config_dict)
-
- def get_endpoint(self, cxt, pod_id, service):
- """Get endpoint url of given pod and service
-
- :param cxt: context object
- :param pod_id: pod id
- :param service: service type
- :return: endpoint url for given pod and service
- :raises: EndpointNotUnique, EndpointNotFound
- """
- config_filters = [
- {'key': 'pod_id', 'comparator': 'eq', 'value': pod_id},
- {'key': 'service_type', 'comparator': 'eq', 'value': service}]
- conf_list = self._get_config_with_retry(
- cxt, config_filters, pod_id, service,
- cfg.CONF.client.auto_refresh_endpoint)
- return conf_list[0]['service_url']
-
- def update_endpoint_from_keystone(self, cxt):
- """Update the database by querying service endpoint url from Keystone
-
- Only admin should invoke this method since it requires admin token
-
- :param cxt: context object containing admin token
- :return: None
- """
- self._update_endpoint_from_keystone(cxt, False)
-
- def get_keystone_client_by_context(self, ctx):
- client_session = self._get_keystone_session()
- return keystone_client.Client(auth_url=cfg.CONF.client.auth_url,
- session=client_session)
-
- def _ensure_token_for_admin(self, cxt):
- if cxt.is_admin and not cxt.auth_token:
- if cxt.tenant:
- cxt.auth_token = self._get_admin_token(cxt.tenant)
- else:
- cxt.auth_token = self._get_admin_token()
- cxt.tenant = self._get_admin_project_id()
-
- @_safe_operation('client')
- def get_native_client(self, resource, cxt):
- """Get native python client instance
-
-        Use this function only for complex operations
-
- :param resource: resource type
-        :param cxt: context object
- :return: client instance
- """
- service = self.resource_service_map[resource]
- handle = self.service_handle_map[service]
- return handle._get_client(cxt)
-
- @_safe_operation('list')
- def list_resources(self, resource, cxt, filters=None):
- """Query resource in pod of top layer
-
- Directly invoke this method to query resources, or use
- list_(resource)s (self, cxt, filters=None), for example,
- list_servers (self, cxt, filters=None). These methods are
- automatically generated according to the supported resources
- of each ResourceHandle class.
-
- :param resource: resource type
-        :param cxt: context object
- :param filters: list of dict with key 'key', 'comparator', 'value'
- like {'key': 'name', 'comparator': 'eq', 'value': 'private'}, 'key'
- is the field name of resources
- :return: list of dict containing resources information
- :raises: EndpointNotAvailable
- """
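-        # Illustrative call (a sketch; the filter values are only examples):
-        #     nets = client.list_networks(cxt, filters=[
-        #         {'key': 'name', 'comparator': 'eq', 'value': 'private'}])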
- service = self.resource_service_map[resource]
- handle = self.service_handle_map[service]
- filters = filters or []
- return handle.handle_list(cxt, resource, filters)
-
- @_safe_operation('create')
- def create_resources(self, resource, cxt, *args, **kwargs):
- """Create resource in pod of top layer
-
- Directly invoke this method to create resources, or use
- create_(resource)s (self, cxt, *args, **kwargs). These methods are
- automatically generated according to the supported resources of each
- ResourceHandle class.
-
- :param resource: resource type
- :param cxt: context object
- :param args, kwargs: passed according to resource type
- --------------------------
- resource -> args -> kwargs
- --------------------------
- aggregate -> name, availability_zone_name -> none
- server -> name, image, flavor -> nics
- network -> body -> none
- subnet -> body -> none
- port -> body -> none
- floatingip -> body -> none
- --------------------------
- :return: a dict containing resource information
- :raises: EndpointNotAvailable
- """
- service = self.resource_service_map[resource]
- handle = self.service_handle_map[service]
- return handle.handle_create(cxt, resource, *args, **kwargs)
-
- @_safe_operation('update')
- def update_resources(self, resource, cxt, *args, **kwargs):
- """Update resource in pod of top layer
-
- Directly invoke this method to update resources, or use
- update_(resource)s (self, cxt, *args, **kwargs). These methods are
- automatically generated according to the supported resources of each
- ResourceHandle class.
-
- :param resource: resource type
- :param cxt: context object
- :param args, kwargs: passed according to resource type
- --------------------------
- resource -> args -> kwargs
- --------------------------
- router -> body -> none
- subnet -> body -> none
- --------------------------
- :return: a dict containing resource information
- :raises: EndpointNotAvailable
- """
- service = self.resource_service_map[resource]
- handle = self.service_handle_map[service]
- return handle.handle_update(cxt, resource, *args, **kwargs)
-
- @_safe_operation('delete')
- def delete_resources(self, resource, cxt, resource_id):
- """Delete resource in pod of top layer
-
- Directly invoke this method to delete resources, or use
- delete_(resource)s (self, cxt, obj_id). These methods are
- automatically generated according to the supported resources
- of each ResourceHandle class.
- :param resource: resource type
- :param cxt: context object
- :param resource_id: id of resource
- :return: None
- :raises: EndpointNotAvailable
- """
- service = self.resource_service_map[resource]
- handle = self.service_handle_map[service]
- return handle.handle_delete(cxt, resource, resource_id)
-
- @_safe_operation('get')
- def get_resources(self, resource, cxt, resource_id):
- """Get resource in pod of top layer
-
- Directly invoke this method to get resources, or use
- get_(resource)s (self, cxt, obj_id). These methods are
- automatically generated according to the supported resources
- of each ResourceHandle class.
- :param resource: resource type
- :param cxt: context object
- :param resource_id: id of resource
- :return: a dict containing resource information
- :raises: EndpointNotAvailable
- """
- service = self.resource_service_map[resource]
- handle = self.service_handle_map[service]
- return handle.handle_get(cxt, resource, resource_id)
-
- @_safe_operation('action')
- def action_resources(self, resource, cxt, action, *args, **kwargs):
- """Apply action on resource in pod of top layer
-
- Directly invoke this method to apply action, or use
- action_(resource)s (self, cxt, action, *args, **kwargs). These methods
- are automatically generated according to the supported resources of
- each ResourceHandle class.
-
- :param resource: resource type
- :param cxt: context object
- :param action: action applied on resource
- :param args, kwargs: passed according to resource type
- --------------------------
- resource -> action -> args -> kwargs
- --------------------------
- aggregate -> add_host -> aggregate, host -> none
- volume -> set_bootable -> volume, flag -> none
- router -> add_interface -> router, body -> none
- router -> add_gateway -> router, body -> none
- router -> remove_gateway -> router -> none
- server_volume -> create_server_volume
- -> server_id, volume_id, device=None
- -> none
- server -> start -> server_id -> none
- server -> stop -> server_id -> none
- --------------------------
- :return: None
- :raises: EndpointNotAvailable
- """
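-        # Illustrative call based on the table above (a sketch; t_router and
-        # body are assumed variables):
-        #     client.action_resources('router', cxt, 'add_interface',
-        #                             t_router, body)
-        # or, via the generated shorthand:
-        #     client.action_routers(cxt, 'add_interface', t_router, body)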
- service = self.resource_service_map[resource]
- handle = self.service_handle_map[service]
- return handle.handle_action(cxt, resource, action, *args, **kwargs)
diff --git a/tricircle/common/config.py b/tricircle/common/config.py
deleted file mode 100644
index a079bc99..00000000
--- a/tricircle/common/config.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Routines for configuring tricircle, largely copy from Neutron
-"""
-import sys
-
-from oslo_config import cfg
-import oslo_log.log as logging
-from oslo_policy import opts as policy_opts
-
-from tricircle.common import policy
-from tricircle.common import rpc
-from tricircle.common import version
-
-
-logging.register_options(cfg.CONF)
-LOG = logging.getLogger(__name__)
-
-policy_opts.set_defaults(cfg.CONF, 'policy.json')
-
-
-def init(opts, args, **kwargs):
- # Register the configuration options
- cfg.CONF.register_opts(opts)
-
- cfg.CONF(args=args, project='tricircle',
- version=version.version_info,
- **kwargs)
-
- _setup_logging()
- _setup_policy()
-
- rpc.init(cfg.CONF)
-
-
-def _setup_logging():
- """Sets up the logging options for a log with supplied name."""
- product_name = "tricircle"
- logging.setup(cfg.CONF, product_name)
- LOG.info("Logging enabled!")
- LOG.info("%(prog)s version %(version)s",
- {'prog': sys.argv[0],
- 'version': version.version_info})
- LOG.debug("command line: %s", " ".join(sys.argv))
-
-
-def _setup_policy():
-
- # if there is valid policy file, use policy file by oslo_policy
- # otherwise, use the default policy value in policy.py
- policy_file = cfg.CONF.oslo_policy.policy_file
- if policy_file and cfg.CONF.find_file(policy_file):
- # just return here, oslo_policy lib will use policy file by itself
- return
-
- policy.populate_default_rules()
-
-
-def reset_service():
- # Reset worker in case SIGHUP is called.
- # Note that this is called only in case a service is running in
- # daemon mode.
- _setup_logging()
-
- policy.reset()
- _setup_policy()
diff --git a/tricircle/common/constants.py b/tricircle/common/constants.py
deleted file mode 100644
index f25dc2ab..00000000
--- a/tricircle/common/constants.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import datetime
-
-
-# service type
-ST_NEUTRON = 'neutron'
-
-
-# resource_type
-RT_NETWORK = 'network'
-RT_SD_NETWORK = 'shadow_network'
-RT_SUBNET = 'subnet'
-RT_SD_SUBNET = 'shadow_subnet'
-RT_PORT = 'port'
-RT_TRUNK = 'trunk'
-RT_SD_PORT = 'shadow_port'
-RT_PORT_PAIR = 'port_pair'
-RT_PORT_PAIR_GROUP = 'port_pair_group'
-RT_FLOW_CLASSIFIER = 'flow_classifier'
-RT_PORT_CHAIN = 'port_chain'
-RT_ROUTER = 'router'
-RT_NS_ROUTER = 'ns_router'
-RT_SG = 'security_group'
-RT_FIP = 'floatingip'
-RT_QOS = 'qos_policy'
-
-REAL_SHADOW_TYPE_MAP = {
- RT_NETWORK: RT_SD_NETWORK,
- RT_SUBNET: RT_SD_SUBNET,
- RT_PORT: RT_SD_PORT
-}
-
-
-# check whether the resource type is a supported resource type.
-def is_valid_resource_type(resource_type):
- resource_type_table = [RT_NETWORK, RT_SUBNET, RT_PORT, RT_ROUTER, RT_SG,
- RT_TRUNK, RT_PORT_PAIR, RT_PORT_PAIR_GROUP,
- RT_FLOW_CLASSIFIER, RT_PORT_CHAIN, RT_QOS]
- return resource_type in resource_type_table
-
-
-# version list
-NEUTRON_VERSION_V2 = 'v2'
-
-# supported release
-R_LIBERTY = 'liberty'
-R_MITAKA = 'mitaka'
-
-# l3 bridge networking elements
-bridge_subnet_pool_name = 'bridge_subnet_pool'
-bridge_net_name = 'bridge_net_%s' # project_id
-bridge_subnet_name = 'bridge_subnet_%s' # project_id
-bridge_port_name = 'bridge_port_%s_%s' # project_id b_router_id
-
-# for external gateway port: project_id b_router_id None
-# for floating ip port: project_id None b_internal_port_id
-ns_bridge_port_name = 'ns_bridge_port_%s_%s_%s'
-ns_router_name = 'ns_router_%s'
-
-shadow_port_name = 'shadow_port_%s'
-dhcp_port_name = 'dhcp_port_%s' # subnet_id
-snat_port_name = 'snat_port_%s' # subnet_id
-interface_port_name = 'interface_%s_%s' # b_region_name t_subnet_id
-interface_port_device_id = 'reserved_gateway_port'
-
-MAX_INT = 0x7FFFFFFF
-DEFAULT_DESTINATION = '0.0.0.0/0'
-expire_time = datetime.datetime(2000, 1, 1)
-STR_IN_USE = 'in use'
-STR_USED_BY = 'used by'
-STR_CONFLICTS_WITH = 'conflicts with'
-
-# job status
-JS_New = '3_New'
-JS_Running = '2_Running'
-JS_Success = '1_Success'
-JS_Fail = '0_Fail'
-
-SP_EXTRA_ID = '00000000-0000-0000-0000-000000000000'
-TOP = 'top'
-POD_NOT_SPECIFIED = 'not_specified_pod'
-PROFILE_REGION = 'region'
-PROFILE_DEVICE = 'device'
-PROFILE_STATUS = 'status'
-PROFILE_HOST = 'host'
-PROFILE_AGENT_TYPE = 'type'
-PROFILE_TUNNEL_IP = 'tunnel_ip'
-PROFILE_FORCE_UP = 'force_up'
-PROFILE_LOCAL_TRUNK_ID = 'local_trunk_id'
-DEVICE_OWNER_SHADOW = 'compute:shadow'
-DEVICE_OWNER_NOVA = 'compute:nova'
-DEVICE_OWNER_SUBPORT = 'trunk:subport'
-
-# job type
-JT_CONFIGURE_ROUTE = 'configure_route'
-JT_ROUTER_SETUP = 'router_setup'
-JT_PORT_DELETE = 'port_delete'
-JT_SEG_RULE_SETUP = 'seg_rule_setup'
-JT_NETWORK_UPDATE = 'update_network'
-JT_SUBNET_UPDATE = 'subnet_update'
-JT_SHADOW_PORT_SETUP = 'shadow_port_setup'
-JT_TRUNK_SYNC = 'trunk_sync'
-JT_SFC_SYNC = 'sfc_sync'
-JT_RESOURCE_RECYCLE = 'resource_recycle'
-JT_QOS_CREATE = 'qos_create'
-JT_QOS_UPDATE = 'qos_update'
-JT_QOS_DELETE = 'qos_delete'
-JT_SYNC_QOS_RULE = 'sync_qos_rule'
-
-# network type
-NT_LOCAL = 'local'
-NT_VLAN = 'vlan'
-NT_VxLAN = 'vxlan'
-NT_FLAT = 'flat'
-
-# cross-pod VxLAN networking support mode
-NM_P2P = 'p2p'
-NM_L2GW = 'l2gw'
-NM_NOOP = 'noop'
-
-# map job type to its resources; each resource is denoted by
-# (resource_type, resource_id). For a field that is necessary to run
-# the job but resides outside of the job resource, we denote its type
-# by "None"
-job_resource_map = {
- JT_CONFIGURE_ROUTE: [(RT_ROUTER, "router_id")],
- JT_ROUTER_SETUP: [(None, "pod_id"),
- (RT_ROUTER, "router_id"),
- (RT_NETWORK, "network_id")],
- JT_PORT_DELETE: [(None, "pod_id"),
- (RT_PORT, "port_id")],
- JT_SEG_RULE_SETUP: [(None, "project_id")],
- JT_NETWORK_UPDATE: [(None, "pod_id"),
- (RT_NETWORK, "network_id")],
- JT_TRUNK_SYNC: [(None, "pod_id"),
- (RT_TRUNK, "trunk_id")],
- JT_SUBNET_UPDATE: [(None, "pod_id"),
- (RT_SUBNET, "subnet_id")],
- JT_SHADOW_PORT_SETUP: [(None, "pod_id"),
- (RT_NETWORK, "network_id")],
- JT_SFC_SYNC: [(None, "pod_id"),
- (RT_PORT_CHAIN, "portchain_id"),
- (RT_NETWORK, "network_id")],
- JT_RESOURCE_RECYCLE: [(None, "project_id")],
- JT_QOS_CREATE: [(None, "pod_id"),
- (RT_QOS, "policy_id"),
- (None, "res_type"),
- (None, "res_id")],
- JT_QOS_UPDATE: [(None, "pod_id"),
- (RT_QOS, "policy_id")],
- JT_QOS_DELETE: [(None, "pod_id"),
- (RT_QOS, "policy_id")],
- JT_SYNC_QOS_RULE: [(None, "rule_id"),
- (RT_QOS, "policy_id")]
-}
-
-# map raw job status to more human readable job status
-job_status_map = {
- JS_Fail: 'FAIL',
- JS_Success: 'SUCCESS',
- JS_Running: 'RUNNING',
- JS_New: 'NEW'
-}
-
-# filter jobs according to the job's attributes
-JOB_LIST_SUPPORTED_FILTERS = ['project_id', 'type', 'status']
-
-# map job type to corresponding job handler
-job_handles = {
- JT_CONFIGURE_ROUTE: "configure_route",
- JT_ROUTER_SETUP: "setup_bottom_router",
- JT_PORT_DELETE: "delete_server_port",
- JT_SEG_RULE_SETUP: "configure_security_group_rules",
- JT_NETWORK_UPDATE: "update_network",
- JT_SUBNET_UPDATE: "update_subnet",
- JT_TRUNK_SYNC: "sync_trunk",
- JT_SHADOW_PORT_SETUP: "setup_shadow_ports",
- JT_SFC_SYNC: "sync_service_function_chain",
- JT_RESOURCE_RECYCLE: "recycle_resources",
- JT_QOS_CREATE: "create_qos_policy",
- JT_QOS_UPDATE: "update_qos_policy",
- JT_QOS_DELETE: "delete_qos_policy",
- JT_SYNC_QOS_RULE: "sync_qos_policy_rules"
-}
-
-# map job type to its primary resource; only the project_id of that
-# resource is validated. For JT_SEG_RULE_SETUP, as it has only a
-# project_id parameter, there is no need to validate it.
-job_primary_resource_map = {
- JT_CONFIGURE_ROUTE: (RT_ROUTER, "router_id"),
- JT_ROUTER_SETUP: (RT_ROUTER, "router_id"),
- JT_PORT_DELETE: (RT_PORT, "port_id"),
- JT_SEG_RULE_SETUP: (None, "project_id"),
- JT_NETWORK_UPDATE: (RT_NETWORK, "network_id"),
- JT_SUBNET_UPDATE: (RT_SUBNET, "subnet_id"),
- JT_TRUNK_SYNC: (RT_TRUNK, "trunk_id"),
- JT_SHADOW_PORT_SETUP: (RT_NETWORK, "network_id"),
- JT_SFC_SYNC: (RT_PORT_CHAIN, "portchain_id"),
- JT_RESOURCE_RECYCLE: (None, "project_id"),
- JT_QOS_CREATE: (RT_QOS, "policy_id"),
- JT_QOS_UPDATE: (RT_QOS, "policy_id"),
- JT_QOS_DELETE: (RT_QOS, "policy_id"),
- JT_SYNC_QOS_RULE: (RT_QOS, "policy_id")
-}
-
-# admin API request path
-ROUTING_PATH = '/v1.0/routings'
-JOB_PATH = '/v1.0/jobs'
-
-USER_AGENT = 'User-Agent'
-# The name of the source flag when the request is from central Neutron
-CENTRAL = 'central-neutronclient'
-# The name of the source flag when the request is from local Neutron
-LOCAL = 'local-neutronclient'
-
-REQUEST_SOURCE_TYPE = set([CENTRAL, LOCAL])
-
-# for new L3 network model using routed network
-# prefix for the name of segment
-SEGMENT_NAME_PATTERN = 'newL3-(.*?)-(.*)'
-PREFIX_OF_SEGMENT_NAME = 'newL3-'
-PREFIX_OF_SEGMENT_NAME_DIVISION = '-'
diff --git a/tricircle/common/context.py b/tricircle/common/context.py
deleted file mode 100644
index 962395fb..00000000
--- a/tricircle/common/context.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from pecan import request
-
-import oslo_context.context as oslo_ctx
-
-
-from tricircle.common.i18n import _
-from tricircle.db import core
-
-
-def get_db_context():
- return Context()
-
-
-def get_admin_context():
- ctx = Context()
- ctx.is_admin = True
- return ctx
-
-
-def is_admin_context(ctx):
- return ctx.is_admin
-
-
-def extract_context_from_environ():
- context_paras = {'auth_token': 'HTTP_X_AUTH_TOKEN',
- 'user': 'HTTP_X_USER_ID',
- 'tenant': 'HTTP_X_TENANT_ID',
- 'user_name': 'HTTP_X_USER_NAME',
- 'tenant_name': 'HTTP_X_PROJECT_NAME',
- 'domain': 'HTTP_X_DOMAIN_ID',
- 'user_domain': 'HTTP_X_USER_DOMAIN_ID',
- 'project_domain': 'HTTP_X_PROJECT_DOMAIN_ID',
- 'request_id': 'openstack.request_id'}
-
- environ = request.environ
-
- for key in context_paras:
- context_paras[key] = environ.get(context_paras[key])
- role = environ.get('HTTP_X_ROLE')
-
- context_paras['is_admin'] = 'admin' in role.split(',') if role else False
- return Context(**context_paras)
-
-
-def get_context_from_neutron_context(context):
- ctx = Context()
- ctx.auth_token = context.auth_token
- ctx.user = context.user_id
- ctx.tenant = context.tenant_id
- ctx.tenant_name = context.tenant_name
- ctx.user_name = context.user_name
- ctx.resource_uuid = context.resource_uuid
- ctx.is_admin = context.is_admin
- return ctx
-
-
-class ContextBase(oslo_ctx.RequestContext):
- def __init__(self, auth_token=None, user_id=None, tenant_id=None,
- is_admin=False, read_deleted="no", request_id=None,
- overwrite=True, user_name=None, tenant_name=None,
- roles=None, **kwargs):
- """Initialize RequestContext.
-
- :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
- indicates deleted records are visible, 'only' indicates that
- *only* deleted records are visible.
-
- :param overwrite: Set to False to ensure that the greenthread local
- copy of the index is not overwritten.
- """
- super(ContextBase, self).__init__(
- auth_token=auth_token,
- user=user_id or kwargs.get('user', None),
- tenant=tenant_id or kwargs.get('tenant', None),
- domain=kwargs.get('domain', None),
- user_domain=kwargs.get('user_domain', None),
- project_domain=kwargs.get('project_domain', None),
- is_admin=is_admin,
- read_only=kwargs.get('read_only', False),
- show_deleted=kwargs.get('show_deleted', False),
- request_id=request_id,
- resource_uuid=kwargs.get('resource_uuid', None),
- overwrite=overwrite)
- self.user_name = user_name
- self.tenant_name = tenant_name
- self.read_deleted = read_deleted
- self.roles = roles or []
-
- def _get_read_deleted(self):
- return self._read_deleted
-
- def _set_read_deleted(self, read_deleted):
- if read_deleted not in ('no', 'yes', 'only'):
- raise ValueError(_("read_deleted can only be one of 'no', "
- "'yes' or 'only', not %r") % read_deleted)
- self._read_deleted = read_deleted
-
- def _del_read_deleted(self):
- del self._read_deleted
-
- read_deleted = property(_get_read_deleted, _set_read_deleted,
- _del_read_deleted)
-
- def to_dict(self):
- ctx_dict = super(ContextBase, self).to_dict()
- ctx_dict.update({
- 'user_name': self.user_name,
- 'tenant_name': self.tenant_name,
- 'tenant_id': self.tenant_id,
- 'project_id': self.project_id,
- 'roles': self.roles,
- })
- return ctx_dict
-
- @classmethod
- def from_dict(cls, values):
- return cls(**values)
-
- @property
- def project_id(self):
- return self.tenant
-
- @project_id.setter
- def project_id(self, value):
- self.tenant = value
-
- @property
- def tenant_id(self):
- return self.tenant
-
- @tenant_id.setter
- def tenant_id(self, value):
- self.tenant = value
-
- @property
- def user_id(self):
- return self.user
-
- @user_id.setter
- def user_id(self, value):
- self.user = value
-
-
-class Context(ContextBase):
- def __init__(self, **kwargs):
- super(Context, self).__init__(**kwargs)
- self._session = None
-
- @property
- def session(self):
- if not self._session:
- self._session = core.get_session()
- return self._session
-
- def elevated(self, read_deleted=None, overwrite=False):
- """Return a version of this context with admin flag set."""
- ctx = copy.copy(self)
- ctx.roles = copy.deepcopy(self.roles)
- ctx.is_admin = True
-
- if read_deleted is not None:
- ctx.read_deleted = read_deleted
-
- return ctx
diff --git a/tricircle/common/exceptions.py b/tricircle/common/exceptions.py
deleted file mode 100644
index 74be97be..00000000
--- a/tricircle/common/exceptions.py
+++ /dev/null
@@ -1,251 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tricircle base exception handling.
-"""
-
-import six
-
-from neutron_lib import exceptions
-from oslo_log import log as logging
-
-from tricircle.common.i18n import _
-
-
-LOG = logging.getLogger(__name__)
-
-
-class TricircleException(Exception):
- """Base Tricircle Exception.
-
- To correctly use this class, inherit from it and define
- a 'message' property. That message will get printf'd
- with the keyword arguments provided to the constructor.
- """
- message = _("An unknown exception occurred.")
- code = 500
- headers = {}
- safe = False
-
- def __init__(self, message=None, **kwargs):
-
- self.kwargs = kwargs
- self.kwargs['message'] = message
-
- if 'code' not in self.kwargs:
- self.kwargs['code'] = self.code
-
- for k, v in self.kwargs.items():
- if isinstance(v, Exception):
- self.kwargs[k] = six.text_type(v)
-
- if self._should_format():
- try:
- message = self.message % kwargs
- except Exception:
-
- # kwargs doesn't match a variable in the message
- # log the issue and the kwargs
- exc_info = _('Exception class %s in string '
- 'format operation') % type(self).__name__
- format_str = _('%(exception_info)s ; %(format_key)s : '
- '%(format_value)s')
- for name, value in kwargs.items():
- exc_info = format_str % {
- 'exception_info': exc_info,
- 'format_key': name,
- 'format_value': six.text_type(value)}
-
- exc_info = _('%(message)s ; %(exception_info)s') % {
- 'message': self.message, 'exception_info': exc_info}
- LOG.exception(exc_info)
-
-                # no reraise
- # exc_info = sys.exc_info()
- # if CONF.fatal_exception_format_errors:
- # six.reraise(*exc_info)
-
- # at least get the core message out if something happened
- message = self.message
-
- elif isinstance(message, Exception):
- message = six.text_type(message)
-
- self.msg = message
- super(TricircleException, self).__init__(message)
-
- def _should_format(self):
-
- if self.kwargs['message'] is None and '%(message)' in self.message:
- LOG.error(r'\%(message)s in message '
- 'but init parameter is None')
-
- return self.kwargs['message'] is None or '%(message)' in self.message
-
- def __unicode__(self):
- return six.text_type(self.msg)
-
-
-class BadRequest(TricircleException):
- message = _('Bad %(resource)s request: %(msg)s')
-
-
-class NotFound(TricircleException):
- message = _("Resource could not be found.")
- code = 404
- safe = True
-
-
-class Conflict(TricircleException):
- pass
-
-
-class NotAuthorized(TricircleException):
- message = _("Not authorized.")
-
-
-class ServiceUnavailable(TricircleException):
- message = _("The service is unavailable")
-
-
-class AdminRequired(NotAuthorized):
- message = _("User does not have admin privileges")
-
-
-class PolicyNotAuthorized(NotAuthorized):
- message = _("Policy doesn't allow this operation to be performed.")
-
-
-class InUse(TricircleException):
-    message = _("The resource is in use")
-
-
-class InvalidConfigurationOption(TricircleException):
- message = _("An invalid value was provided for %(opt_name)s: "
- "%(opt_value)s")
-
-
-class EndpointNotAvailable(TricircleException):
- message = "Endpoint %(url)s for %(service)s is not available"
-
- def __init__(self, service, url):
- super(EndpointNotAvailable, self).__init__(service=service, url=url)
-
-
-class EndpointNotUnique(TricircleException):
- message = "Endpoint for %(service)s in %(pod)s not unique"
-
- def __init__(self, pod, service):
- super(EndpointNotUnique, self).__init__(pod=pod, service=service)
-
-
-class EndpointNotFound(TricircleException):
- message = "Endpoint for %(service)s in %(pod)s not found"
-
- def __init__(self, pod, service):
- super(EndpointNotFound, self).__init__(pod=pod, service=service)
-
-
-class ResourceNotFound(TricircleException):
- message = "Could not find %(resource_type)s: %(unique_key)s"
-
- def __init__(self, model, unique_key):
- resource_type = model.__name__.lower()
- super(ResourceNotFound, self).__init__(resource_type=resource_type,
- unique_key=unique_key)
-
-
-class ResourceNotSupported(TricircleException):
- message = "%(method)s method not supported for %(resource)s"
-
- def __init__(self, resource, method):
- super(ResourceNotSupported, self).__init__(resource=resource,
- method=method)
-
-
-class Invalid(TricircleException):
- message = _("Unacceptable parameters.")
- code = 400
-
-
-class InvalidInput(Invalid):
- message = _("Invalid input received: %(reason)s")
-
-
-class ExternalNetPodNotSpecify(TricircleException):
- message = "Pod for external network not specified"
-
- def __init__(self):
- super(ExternalNetPodNotSpecify, self).__init__()
-
-
-class PodNotFound(NotFound):
- message = "Pod %(region_name)s could not be found."
-
- def __init__(self, region_name):
- super(PodNotFound, self).__init__(region_name=region_name)
-
-
-# parameter validation error
-class ValidationError(TricircleException):
- message = _("%(msg)s")
- code = 400
-
-
-# parameter validation error
-class HTTPForbiddenError(TricircleException):
- message = _("%(msg)s")
- code = 403
-
-
-class Duplicate(TricircleException):
- pass
-
-
-class ServerMappingsNotFound(NotFound):
- message = _('Instance %(server_id)s could not be found.')
-
-
-class VolumeMappingsNotFound(NotFound):
- message = _('Volume %(volume_id)s could not be found')
-
-
-class RoutingCreateFail(TricircleException):
-    message = _("Failed to create %(_type)s routing entry")
-
- def __init__(self, _type):
- super(RoutingCreateFail, self).__init__(_type=_type)
-
-
-class RoutingBindFail(TricircleException):
-    message = _("Failed to bind top and bottom %(_type)s")
-
- def __init__(self, _type):
- super(RoutingBindFail, self).__init__(_type=_type)
-
-
-class RouterNetworkLocationMismatch(exceptions.InvalidInput):
- message = _("router located in %(router_az_hint)s, but network located "
- "in %(net_az_hints)s, location mismatch.")
-
- def __init__(self, router_az_hints, net_az_hints):
- super(RouterNetworkLocationMismatch, self).__init__(
- router_az_hint=router_az_hints, net_az_hints=net_az_hints)
-
-
-class ResourceIsInDeleting(TricircleException):
-    message = 'The resource is currently being deleted'
- code = 204
diff --git a/tricircle/common/httpclient.py b/tricircle/common/httpclient.py
deleted file mode 100644
index 992416f8..00000000
--- a/tricircle/common/httpclient.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from six.moves.urllib import parse as urlparse
-
-import requests
-
-from oslo_log import log as logging
-
-from tricircle.common import client
-from tricircle.common import constants as cons
-from tricircle.db import api as db_api
-
-
-LOG = logging.getLogger(__name__)
-
-
-# The url could be an endpoint registered in Keystone, or a url sent to the
-# Tricircle service, which is stored in pecan.request.url.
-def get_version_from_url(url):
-
- components = urlparse.urlsplit(url)
-
- path = components.path
- pos = path.find('/')
-
- ver = ''
- if pos == 0:
- path = path[1:]
- i = path.find('/')
- if i >= 0:
- ver = path[:i]
- else:
- ver = path
- elif pos > 0:
- ver = path[:pos]
- else:
- ver = path
-
- return ver
-
-
-def get_bottom_url(t_ver, t_url, b_ver, b_endpoint):
- """get_bottom_url
-
-    Convert a url received by the Tricircle service to a bottom OpenStack
-    request url, using the endpoint configured in Keystone.
-
- :param t_ver: version of top service
- :param t_url: request url to the top service
- :param b_ver: version of bottom service
- :param b_endpoint: endpoint registered in keystone for bottom service
- :return: request url to bottom service
- """
- t_parse = urlparse.urlsplit(t_url)
-
- after_ver = t_parse.path
-
- remove_ver = '/' + t_ver + '/'
- pos = after_ver.find(remove_ver)
-
- if pos == 0:
- after_ver = after_ver[len(remove_ver):]
- else:
- remove_ver = t_ver + '/'
- pos = after_ver.find(remove_ver)
- if pos == 0:
- after_ver = after_ver[len(remove_ver):]
-
- if after_ver == t_parse.path:
- # wrong t_url
- return ''
-
- b_parse = urlparse.urlsplit(b_endpoint)
-
- scheme = b_parse.scheme
- netloc = b_parse.netloc
- path = '/' + b_ver + '/' + after_ver
- if b_ver == '':
- path = '/' + after_ver
-
- # Remove availability_zone filter since it is handled by VolumeController.
- # VolumeController will send GET request only to bottom pods whose AZ
- # is specified in availability_zone filter.
- query_filters = []
- for k, v in urlparse.parse_qsl(t_parse.query):
- if k == 'availability_zone':
- continue
- query_filters.append((k, v))
- query = urlparse.urlencode(query_filters)
-
- fragment = t_parse.fragment
-
- b_url = urlparse.urlunsplit((scheme,
- netloc,
- path,
- query,
- fragment))
- return b_url
-
-
-def get_pod_service_endpoint(context, region_name, st):
-
- pod = db_api.get_pod_by_name(context, region_name)
-
- if pod:
- c = client.Client()
- return c.get_endpoint(context, pod['pod_id'], st)
-
- return ''
-
-
-def get_pod_service_ctx(context, t_url, region_name, s_type=cons.ST_NEUTRON):
- t_ver = get_version_from_url(t_url)
- b_endpoint = get_pod_service_endpoint(context,
- region_name,
- s_type)
- b_ver = get_version_from_url(b_endpoint)
- b_url = ''
- if b_endpoint != '':
- b_url = get_bottom_url(t_ver, t_url, b_ver, b_endpoint)
-
- return {'t_ver': t_ver, 'b_ver': b_ver,
- 't_url': t_url, 'b_url': b_url}
-
-
-def forward_req(context, action, b_headers, b_url, b_body):
- s = requests.Session()
- req = requests.Request(action, b_url,
- data=b_body,
- headers=b_headers)
- prepped = req.prepare()
-
- # do something with prepped.body
- # do something with prepped.headers
- resp = s.send(prepped,
- timeout=60)
-
- return resp
-
-
-def get_res_routing_ref(context, _id, t_url, s_type):
- """Get the service context according to resource routing.
-
- :param _id: the top id of resource
- :param t_url: request url
- :param s_type: service type
- :returns: service context
- """
- pod = db_api.get_pod_by_top_id(context, _id)
-
- if not pod:
- return None
-
- region_name = pod['region_name']
-
- s_ctx = get_pod_service_ctx(context, t_url, region_name,
- s_type=s_type)
-
- if s_ctx['b_url'] == '':
-        LOG.error("bottom pod endpoint incorrect %s", region_name)
-
- return s_ctx
-
-
-def convert_header(from_release, to_release, header):
- b_header = {}
-
-    # Remove invalid header items: the requests lib strictly checks headers
-    # for security purposes, a non-string or non-bytes value will lead to an
-    # exception, and leading spaces are also removed by the
-    # requests.utils.check_header_validity function
- for k, v in header.items():
- if v:
- b_header[k] = v
-
- return b_header
-
-
-def convert_object(from_release, to_release, res_object,
- res_type=cons.RT_NETWORK):
- return res_object
diff --git a/tricircle/common/i18n.py b/tricircle/common/i18n.py
deleted file mode 100644
index ad33230c..00000000
--- a/tricircle/common/i18n.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import oslo_i18n
-
-_translators = oslo_i18n.TranslatorFactory(domain='tricircle')
-
-# The primary translation function using the well-known name "_"
-_ = _translators.primary
diff --git a/tricircle/common/lock_handle.py b/tricircle/common/lock_handle.py
deleted file mode 100644
index ddd7e6ba..00000000
--- a/tricircle/common/lock_handle.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import eventlet
-from six.moves import xrange
-
-import oslo_db.exception as db_exc
-
-from tricircle.common import exceptions
-from tricircle.db import core
-from tricircle.db import models
-
-
-ALL_DONE = 0 # both route and bottom resource exist
-RES_DONE = 1 # only bottom resource exists
-NONE_DONE = 2  # neither route nor bottom resource exists
-# The case where only the route exists is not considered; manual operations
-# on the bottom pod may result in this situation.
-
-
-def get_or_create_route(t_ctx, q_ctx,
- project_id, pod, ele, _type, list_ele_method):
- # use configuration option later
- route_expire_threshold = 30
-
- _id = ele['id']
- with t_ctx.session.begin():
- routes = core.query_resource(
- t_ctx, models.ResourceRouting,
- [{'key': 'top_id', 'comparator': 'eq', 'value': _id},
- {'key': 'pod_id', 'comparator': 'eq',
- 'value': pod['pod_id']},
- {'key': 'resource_type', 'comparator': 'eq',
- 'value': _type}], [])
- if routes:
- route = routes[0]
- if route['bottom_id']:
- return route, ALL_DONE
- else:
- route_time = route['updated_at'] or route['created_at']
- current_time = datetime.datetime.utcnow()
- delta = current_time - route_time
- if delta.seconds > route_expire_threshold:
-                    # NOTE(zhiyuan) we cannot directly remove the route; there
-                    # is a race here where another worker may be updating this
-                    # route, so we need to check whether the corresponding
-                    # element has been created by another worker
- eles = list_ele_method(t_ctx, q_ctx, pod, ele, _type)
- if eles:
- route['bottom_id'] = eles[0]['id']
- core.update_resource(t_ctx,
- models.ResourceRouting,
- route['id'], route)
- return route, RES_DONE
- try:
- core.delete_resource(t_ctx,
- models.ResourceRouting,
- route['id'])
- except db_exc.ResourceNotFound:
- pass
- try:
-        # NOTE(zhiyuan) a try/except block inside a with block will cause
-        # problems, so move them out of the block and handle the session
-        # context manually
- t_ctx.session.begin()
- route = core.create_resource(t_ctx, models.ResourceRouting,
- {'top_id': _id,
- 'pod_id': pod['pod_id'],
- 'project_id': project_id,
- 'resource_type': _type})
- t_ctx.session.commit()
- return route, NONE_DONE
- except db_exc.DBDuplicateEntry:
- t_ctx.session.rollback()
- return None, NONE_DONE
- finally:
- t_ctx.session.close()
-
-
-def get_or_create_element(t_ctx, q_ctx,
- project_id, pod, ele, _type, body,
- list_ele_method, create_ele_method):
- # use configuration option later
- max_tries = 5
- for _ in xrange(max_tries):
- route, status = get_or_create_route(
- t_ctx, q_ctx, project_id, pod, ele, _type, list_ele_method)
- if not route:
- eventlet.sleep(0)
- continue
- if status == RES_DONE or status == ALL_DONE:
- # in these cases, bottom_id must exist
- break
- if status == NONE_DONE:
- try:
- new_ele = create_ele_method(t_ctx, q_ctx, pod, body, _type)
- except Exception:
- with t_ctx.session.begin():
- try:
- core.delete_resource(t_ctx,
- models.ResourceRouting,
- route['id'])
- except db_exc.ColumnError:
-                        # NOTE(zhiyuan) this is a rare case where another
-                        # worker considers the route expired and deletes it
-                        # even though it was just created, possibly caused by
-                        # out-of-sync time
- raise
- with t_ctx.session.begin():
-                # NOTE(zhiyuan) it's safe to update the route: the bottom
-                # network has been successfully created, so other workers
-                # will not delete this route
- route['bottom_id'] = new_ele['id']
- core.update_resource(t_ctx, models.ResourceRouting,
- route['id'], route)
- break
- if not route:
- raise exceptions.RoutingCreateFail(_type)
- if not route['bottom_id']:
- raise exceptions.RoutingBindFail(_type)
-    # NOTE(zhiyuan) Status being ALL_DONE means that the routing entry is
-    # complete when we retrieve the resource, so we return False to indicate
-    # that we can directly use this resource safely. Status being RES_DONE or
-    # NONE_DONE means that the routing entry is not complete when we retrieve
-    # the resource but we manage to fill the entry in the end, so we return
-    # True to indicate that some work may be left to do.
- if status == ALL_DONE:
- return False, route['bottom_id']
- else:
- return True, route['bottom_id']
diff --git a/tricircle/common/opts.py b/tricircle/common/opts.py
deleted file mode 100644
index 0b9e9738..00000000
--- a/tricircle/common/opts.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import tricircle.common.client
-
-# TODO: add rpc cap negotiation configuration after the first release
-# import tricircle.common.xrpcapi
-
-
-def list_opts():
- return [
- ('client', tricircle.common.client.client_opts),
- # ('upgrade_levels', tricircle.common.xrpcapi.rpcapi_cap_opt),
- ]
diff --git a/tricircle/common/policy.py b/tricircle/common/policy.py
deleted file mode 100644
index c91a3056..00000000
--- a/tricircle/common/policy.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Policy Engine For Tricircle."""
-
-# Policy-controlled API access, mainly for the Tricircle Admin API. For the
-# Nova API-GW and Cinder API-GW, API access control should be done at the
-# bottom OpenStack as far as possible when the API request is forwarded
-# directly to the bottom OpenStack for further processing; only for APIs that
-# interact solely with the database (for example flavor and volume type),
-# whose processing terminates at the Tricircle layer, should policy control
-# be done by the Nova API-GW or Cinder API-GW. No work is required in the
-# Tricircle Neutron Plugin, since the Neutron API server is in place and will
-# be responsible for policy control.
-
-
-from oslo_config import cfg
-import oslo_log.log as logging
-from oslo_policy import policy
-
-from tricircle.common import exceptions as t_exec
-
-_ENFORCER = None
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-default_policies = [
- policy.RuleDefault('context_is_admin', 'role:admin'),
- policy.RuleDefault('admin_api', 'is_admin:True',
- description='cloud admin allowed'),
- policy.RuleDefault('admin_or_owner',
- 'is_admin:True or project_id:%(project_id)s',
- description='cloud admin or project owner allowed'),
- policy.RuleDefault('default', 'rule:admin_or_owner'),
-]
-
-ADMIN_API_PODS_CREATE = 'admin_api:pods:create'
-ADMIN_API_PODS_DELETE = 'admin_api:pods:delete'
-ADMIN_API_PODS_SHOW = 'admin_api:pods:show'
-ADMIN_API_PODS_LIST = 'admin_api:pods:list'
-
-ADMIN_API_ROUTINGS_CREATE = 'admin_api:routings:create'
-ADMIN_API_ROUTINGS_DELETE = 'admin_api:routings:delete'
-ADMIN_API_ROUTINGS_PUT = 'admin_api:routings:put'
-ADMIN_API_ROUTINGS_SHOW = 'admin_api:routings:show'
-ADMIN_API_ROUTINGS_LIST = 'admin_api:routings:list'
-
-ADMIN_API_JOB_CREATE = 'admin_api:jobs:create'
-ADMIN_API_JOB_LIST = 'admin_api:jobs:list'
-ADMIN_API_JOB_SCHEMA_LIST = 'admin_api:jobs:schema_list'
-ADMIN_API_JOB_REDO = 'admin_api:jobs:redo'
-ADMIN_API_JOB_DELETE = 'admin_api:jobs:delete'
-
-
-tricircle_admin_api_policies = [
- policy.DocumentedRuleDefault(ADMIN_API_PODS_CREATE,
- 'rule:admin_api',
- description='Create pod.',
- operations=[
- {
- 'path': '/pods',
- 'method': 'POST'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_PODS_DELETE,
- 'rule:admin_api',
- description='Delete specified pod.',
- operations=[
- {
- 'path': '/pods/{pod_id}',
- 'method': 'DELETE'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_PODS_SHOW,
- 'rule:admin_api',
- description='Show pod details.',
- operations=[
- {
- 'path': '/pods/{pod_id}',
- 'method': 'GET'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_PODS_LIST,
- 'rule:admin_api',
- description='List pods.',
- operations=[
- {
- 'path': '/pods',
- 'method': 'GET'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_ROUTINGS_CREATE,
- 'rule:admin_api',
- description='Create resource routing',
- operations=[
- {
- 'path': '/routings',
- 'method': 'POST'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_ROUTINGS_DELETE,
- 'rule:admin_api',
- description='Delete resource routing',
- operations=[
- {
- 'path': '/routings/{id}',
- 'method': 'DELETE'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_ROUTINGS_PUT,
- 'rule:admin_api',
- description='Update resource routing',
- operations=[
- {
- 'path': '/routings/{id}',
- 'method': 'PUT'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_ROUTINGS_SHOW,
- 'rule:admin_api',
- description='Show resource routing detail',
- operations=[
- {
- 'path': '/routings/{id}',
- 'method': 'GET'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_ROUTINGS_LIST,
- 'rule:admin_api',
- description='List resource routings',
- operations=[
- {
- 'path': '/routings',
- 'method': 'GET'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_JOB_CREATE,
- 'rule:admin_api',
- description='Create job',
- operations=[
- {
- 'path': '/jobs',
- 'method': 'POST'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_JOB_LIST,
- 'rule:admin_api',
- description='List jobs',
- operations=[
- {
- 'path': '/jobs',
- 'method': 'GET'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_JOB_SCHEMA_LIST,
- 'rule:admin_api',
- description='List job schemas',
- operations=[
- {
- 'path': '/jobs/schemas',
- 'method': 'GET'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_JOB_REDO,
- 'rule:admin_api',
- description='Redo job',
- operations=[
- {
- 'path': '/jobs/{id}',
- 'method': 'PUT'
- }
- ]),
- policy.DocumentedRuleDefault(ADMIN_API_JOB_DELETE,
- 'rule:admin_api',
- description='Delete job',
- operations=[
- {
- 'path': '/jobs/{id}',
- 'method': 'DELETE'
- }
- ])
-]
-
-
-def list_policies():
- policies = (default_policies +
- tricircle_admin_api_policies)
- return policies
-
-
-# We can get a policy enforcer via this init.
-# oslo.policy supports changing policy rules dynamically.
-# At present, policy.enforce will reload the policy rules when it detects
-# that the policy file has been touched.
-def init(policy_file=None, rules=None,
- default_rule=None, use_conf=True, overwrite=True):
- """Init an Enforcer class.
-
- :param policy_file: Custom policy file to use, if none is
- specified, ``conf.policy_file`` will be
- used.
- :param rules: Default dictionary / Rules to use. It will be
- considered just in the first instantiation. If
- :meth:`load_rules` with ``force_reload=True``,
- :meth:`clear` or :meth:`set_rules` with
- ``overwrite=True`` is called this will be overwritten.
- :param default_rule: Default rule to use, conf.default_rule will
- be used if none is specified.
- :param use_conf: Whether to load rules from cache or config file.
- :param overwrite: Whether to overwrite existing rules when reload rules
- from config file.
- """
- global _ENFORCER
- if not _ENFORCER:
- # https://docs.openstack.org/oslo.policy/latest/user/usage.html
- _ENFORCER = policy.Enforcer(CONF,
- policy_file=policy_file,
- rules=rules,
- default_rule=default_rule,
- use_conf=use_conf,
- overwrite=overwrite)
- _ENFORCER.register_defaults(list_policies())
- return _ENFORCER
-
-
-def set_rules(rules, overwrite=True, use_conf=False):
- """Set rules based on the provided dict of rules.
-
- :param rules: New rules to use. It should be an instance of dict.
- :param overwrite: Whether to overwrite current rules or update them
- with the new rules.
- :param use_conf: Whether to reload rules from config file.
- """
- init(use_conf=False)
- _ENFORCER.set_rules(rules, overwrite, use_conf)
-
-
-def populate_default_rules():
- reset()
- init(use_conf=False)
- dict_rules = {}
- for default in list_policies():
- dict_rules[default.name] = default.check_str
- rules = policy.Rules.from_dict(dict_rules)
- set_rules(rules)
-
-
-def reset():
- global _ENFORCER
- if _ENFORCER:
- _ENFORCER.clear()
- _ENFORCER = None
-
-
-def enforce(context, rule=None, target=None, *args, **kwargs):
- """Check authorization of a rule against the target and credentials.
-
- :param dict context: As much information about the user performing the
- action as possible.
- :param rule: The rule to evaluate.
- :param dict target: As much information about the object being operated
- on as possible.
- :return: ``True`` if the policy allows the action.
- ``False`` if the policy does not allow the action.
- """
- enforcer = init()
- credentials = context.to_dict()
- if target is None:
- target = {'project_id': context.project_id,
- 'user_id': context.user_id}
-
- exc = t_exec.PolicyNotAuthorized
-
- try:
- result = enforcer.enforce(rule, target, credentials,
- do_raise=True, exc=exc, *args, **kwargs)
-
- except t_exec.PolicyNotAuthorized as e:
- result = False
- LOG.exception("%(msg)s, %(rule)s, %(target)s",
- {'msg': str(e), 'rule': rule, 'target': target})
- return result
diff --git a/tricircle/common/request_source.py b/tricircle/common/request_source.py
deleted file mode 100644
index ef380f78..00000000
--- a/tricircle/common/request_source.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from oslo_middleware import base
-from tricircle.common import constants as cons
-import webob
-
-LOG = logging.getLogger(__name__)
-
-
-class RequestSource(base.ConfigurableMiddleware):
- """RequestSource Middleware
-
- This middleware distinguishes the source of the requests. It can find out
- which request is from central Neutron and which is from local Neutron.
-
-    This middleware updates the context with the request source extracted
-    from the headers.
-
-    In order to make RequestSource Middleware work, this middleware should
-    be placed after keystoneContext (in etc/neutron/api-paste.ini).
- """
-
- def distinguish_requests_source(self, req):
- source_header = req.headers.get(cons.USER_AGENT, "")
-
- if source_header in cons.REQUEST_SOURCE_TYPE:
- ctx = req.environ['neutron.context']
- ctx.USER_AGENT = source_header
- req.environ['neutron.context'] = ctx
-
- @webob.dec.wsgify
- def __call__(self, req):
- self.distinguish_requests_source(req)
-
- response = req.get_response(self.application)
- return response
diff --git a/tricircle/common/resource_handle.py b/tricircle/common/resource_handle.py
deleted file mode 100644
index 8656f557..00000000
--- a/tricircle/common/resource_handle.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import keystoneauth1.identity.generic as auth_identity
-from keystoneauth1 import session
-
-from neutronclient.common import exceptions as q_exceptions
-from neutronclient.neutron import client as q_client
-from oslo_config import cfg
-from oslo_log import log as logging
-from tricircle.common import constants as cons
-from tricircle.common import exceptions
-
-client_opts = [
- cfg.IntOpt('neutron_timeout',
- default=60,
- help='timeout for neutron client in seconds'),
-]
-cfg.CONF.register_opts(client_opts, group='client')
-
-
-LIST, CREATE, DELETE, GET, ACTION, UPDATE = 1, 2, 4, 8, 16, 32
-operation_index_map = {'list': LIST, 'create': CREATE, 'delete': DELETE,
- 'get': GET, 'action': ACTION, 'update': UPDATE}
-
-policy_rules = ('bandwidth_limit_rule', 'dscp_marking_rule',
- 'minimum_bandwidth_rule')
-
-LOG = logging.getLogger(__name__)
-
-
-def _transform_filters(filters):
- filter_dict = {}
- for query_filter in filters:
- # only eq filter supported at first
- if query_filter['comparator'] != 'eq':
- continue
- key = query_filter['key']
- value = query_filter['value']
- filter_dict[key] = value
- return filter_dict
-
-
-class ResourceHandle(object):
- def __init__(self, auth_url):
- self.auth_url = auth_url
- self.endpoint_url = None
-
- def is_endpoint_url_set(self):
- return self.endpoint_url is not None
-
- def clear_endpoint_url(self):
- self.endpoint_url = None
-
- def update_endpoint_url(self, url):
- self.endpoint_url = url
-
- @staticmethod
- def get_keystone_session(project_id=None):
- kwargs = {
- 'auth_url': cfg.CONF.client.auth_url,
- 'username': cfg.CONF.client.admin_username,
- 'password': cfg.CONF.client.admin_password,
- 'user_domain_name': cfg.CONF.client.admin_user_domain_name,
- 'project_domain_name': cfg.CONF.client.admin_tenant_domain_name}
- if not project_id:
- kwargs['project_name'] = cfg.CONF.client.admin_tenant
- else:
- kwargs['project_id'] = project_id
- auth = auth_identity.Password(**kwargs)
- return session.Session(auth=auth)
-
- @staticmethod
- def get_admin_token(project_id=None):
- return ResourceHandle.get_keystone_session(project_id).get_token()
-
-
-class NeutronResourceHandle(ResourceHandle):
- service_type = cons.ST_NEUTRON
- support_resource = {
- 'network': LIST | CREATE | DELETE | GET | UPDATE,
- 'subnet': LIST | CREATE | DELETE | GET | UPDATE,
- 'port': LIST | CREATE | DELETE | GET | UPDATE,
- 'router': LIST | CREATE | DELETE | ACTION | GET | UPDATE,
- 'security_group': LIST | CREATE | GET | DELETE,
- 'security_group_rule': LIST | CREATE | DELETE,
- 'floatingip': LIST | CREATE | UPDATE | DELETE,
- 'trunk': LIST | CREATE | UPDATE | GET | DELETE | ACTION,
- 'port_chain': LIST | CREATE | DELETE | GET | UPDATE,
- 'port_pair_group': LIST | CREATE | DELETE | GET | UPDATE,
- 'port_pair': LIST | CREATE | DELETE | GET | UPDATE,
- 'flow_classifier': LIST | CREATE | DELETE | GET | UPDATE,
- 'qos_policy': LIST | CREATE | DELETE | GET | UPDATE,
- 'bandwidth_limit_rule': LIST | CREATE | DELETE | GET | UPDATE,
- 'dscp_marking_rule': LIST | CREATE | DELETE | GET | UPDATE,
- 'minimum_bandwidth_rule': LIST | CREATE | DELETE | GET | UPDATE}
-
- def _get_client(self, cxt):
- token = cxt.auth_token
- if not token and cxt.is_admin:
- token = self.get_admin_token(cxt.tenant)
- return q_client.Client('2.0',
- token=token,
- auth_url=self.auth_url,
- endpoint_url=self.endpoint_url,
- timeout=cfg.CONF.client.neutron_timeout)
-
- def handle_list(self, cxt, resource, filters):
- try:
- client = self._get_client(cxt)
- if resource == 'qos_policy':
- collection = 'qos_policies'
- else:
- collection = '%ss' % resource
- search_opts = _transform_filters(filters)
- return [res for res in getattr(
- client, 'list_%s' % collection)(**search_opts)[collection]]
- except q_exceptions.ConnectionFailed:
- raise exceptions.EndpointNotAvailable(
- 'neutron', client.httpclient.endpoint_url)
-
- def handle_create(self, cxt, resource, *args, **kwargs):
- try:
- client = self._get_client(cxt)
- ret = getattr(client, 'create_%s' % resource)(
- *args, **kwargs)
-
- if resource == 'qos_policy':
- return ret['policy']
-
- if resource in ret:
- return ret[resource]
- else:
- return ret['%ss' % resource]
- except q_exceptions.ConnectionFailed:
- raise exceptions.EndpointNotAvailable(
- 'neutron', client.httpclient.endpoint_url)
-
- def handle_update(self, cxt, resource, *args, **kwargs):
- try:
- client = self._get_client(cxt)
- if resource == 'qos_policy':
- return getattr(client, 'update_%s' % resource)(
- *args, **kwargs)['policy']
- return getattr(client, 'update_%s' % resource)(
- *args, **kwargs)[resource]
- except q_exceptions.ConnectionFailed:
- raise exceptions.EndpointNotAvailable(
- 'neutron', client.httpclient.endpoint_url)
-
- def handle_get(self, cxt, resource, resource_id):
- try:
- client = self._get_client(cxt)
- if resource == 'qos_policy':
- return getattr(client, 'show_%s' % resource)(
- resource_id)['policy']
- if resource in policy_rules:
- (rule_id, policy_id) = resource_id.split('#')
- return getattr(client, 'show_%s' % resource)(
- rule_id, policy_id)[resource]
- return getattr(client, 'show_%s' % resource)(resource_id)[resource]
- except q_exceptions.ConnectionFailed:
- raise exceptions.EndpointNotAvailable(
- 'neutron', client.httpclient.endpoint_url)
- except q_exceptions.NotFound:
- LOG.debug("%(resource)s %(resource_id)s not found",
- {'resource': resource, 'resource_id': resource_id})
-
- def handle_delete(self, cxt, resource, resource_id):
- try:
- client = self._get_client(cxt)
- if resource in policy_rules:
- (rule_id, policy_id) = resource_id.split('#')
- return getattr(client, 'delete_%s' % resource)(
- rule_id, policy_id)
- return getattr(client, 'delete_%s' % resource)(resource_id)
- except q_exceptions.ConnectionFailed:
- raise exceptions.EndpointNotAvailable(
- 'neutron', client.httpclient.endpoint_url)
- except q_exceptions.NotFound:
-            LOG.debug("Delete %(resource)s %(resource_id)s which was not found",
- {'resource': resource, 'resource_id': resource_id})
-
- def handle_action(self, cxt, resource, action, *args, **kwargs):
- try:
- client = self._get_client(cxt)
- func_name = '%s_%s' % (action, resource)
- if not hasattr(client, func_name):
- func_name = '%s_%s' % (resource, action)
- return getattr(client, func_name)(*args, **kwargs)
- except q_exceptions.ConnectionFailed:
- raise exceptions.EndpointNotAvailable(
- 'neutron', client.httpclient.endpoint_url)
-
-
-def _convert_into_with_meta(item, resp):
- return resp, item
diff --git a/tricircle/common/restapp.py b/tricircle/common/restapp.py
deleted file mode 100644
index a5d07ab8..00000000
--- a/tricircle/common/restapp.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystonemiddleware import auth_token
-from oslo_config import cfg
-from oslo_middleware import request_id
-from oslo_service import service
-
-from tricircle.common import exceptions as t_exceptions
-from tricircle.common.i18n import _
-
-
-def auth_app(app):
- app = request_id.RequestId(app)
-
- if cfg.CONF.auth_strategy == 'noauth':
- pass
- elif cfg.CONF.auth_strategy == 'keystone':
- # NOTE(zhiyuan) pkg_resources will try to load tricircle to get module
- # version, passing "project" as empty string to bypass it
- app = auth_token.AuthProtocol(app, {'project': ''})
- else:
- raise t_exceptions.InvalidConfigurationOption(
- opt_name='auth_strategy', opt_value=cfg.CONF.auth_strategy)
-
- return app
-
-
-_launcher = None
-
-
-def serve(api_service, conf, workers=1):
- global _launcher
- if _launcher:
- raise RuntimeError(_('serve() can only be called once'))
-
- _launcher = service.ProcessLauncher(conf, restart_method='mutate')
- _launcher.launch_service(api_service, workers=workers)
-
-
-def wait():
- _launcher.wait()
diff --git a/tricircle/common/rpc.py b/tricircle/common/rpc.py
deleted file mode 100644
index 91500ed2..00000000
--- a/tricircle/common/rpc.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# copy and modify from Nova
-
-__all__ = [
- 'init',
- 'cleanup',
- 'set_defaults',
- 'add_extra_exmods',
- 'clear_extra_exmods',
- 'get_allowed_exmods',
- 'RequestContextSerializer',
- 'get_client',
- 'get_server',
- 'get_notifier',
-]
-
-from oslo_config import cfg
-import oslo_messaging as messaging
-from oslo_serialization import jsonutils
-
-from oslo_messaging.rpc import dispatcher
-
-import tricircle.common.context
-import tricircle.common.exceptions
-
-CONF = cfg.CONF
-TRANSPORT = None
-NOTIFIER = None
-
-ALLOWED_EXMODS = [
- tricircle.common.exceptions.__name__,
-]
-EXTRA_EXMODS = []
-
-
-def init(conf):
- global TRANSPORT, NOTIFIER
- exmods = get_allowed_exmods()
- TRANSPORT = messaging.get_rpc_transport(conf, allowed_remote_exmods=exmods)
- serializer = RequestContextSerializer(JsonPayloadSerializer())
- NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
-
-
-def cleanup():
- global TRANSPORT, NOTIFIER
- assert TRANSPORT is not None
- assert NOTIFIER is not None
- TRANSPORT.cleanup()
- TRANSPORT = NOTIFIER = None
-
-
-def set_defaults(control_exchange):
- messaging.set_transport_defaults(control_exchange)
-
-
-def add_extra_exmods(*args):
- EXTRA_EXMODS.extend(args)
-
-
-def clear_extra_exmods():
- del EXTRA_EXMODS[:]
-
-
-def get_allowed_exmods():
- return ALLOWED_EXMODS + EXTRA_EXMODS
-
-
-class JsonPayloadSerializer(messaging.NoOpSerializer):
- @staticmethod
- def serialize_entity(context, entity):
- return jsonutils.to_primitive(entity, convert_instances=True)
-
-
-class RequestContextSerializer(messaging.Serializer):
-
- def __init__(self, base):
- self._base = base
-
- def serialize_entity(self, context, entity):
- if not self._base:
- return entity
- return self._base.serialize_entity(context, entity)
-
- def deserialize_entity(self, context, entity):
- if not self._base:
- return entity
- return self._base.deserialize_entity(context, entity)
-
- def serialize_context(self, context):
- return context.to_dict()
-
- def deserialize_context(self, context):
- return tricircle.common.context.Context.from_dict(context)
-
-
-def get_transport_url(url_str=None):
- return messaging.TransportURL.parse(CONF, url_str)
-
-
-def get_client(target, version_cap=None, serializer=None):
- assert TRANSPORT is not None
- serializer = RequestContextSerializer(serializer)
- return messaging.RPCClient(TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer)
-
-
-def get_server(target, endpoints, serializer=None):
- assert TRANSPORT is not None
- access_policy = dispatcher.DefaultRPCAccessPolicy
- serializer = RequestContextSerializer(serializer)
- return messaging.get_rpc_server(TRANSPORT,
- target,
- endpoints,
- executor='eventlet',
- serializer=serializer,
- access_policy=access_policy)
-
-
-def get_notifier(service, host=None, publisher_id=None):
- assert NOTIFIER is not None
- if not publisher_id:
- publisher_id = "%s.%s" % (service, host or CONF.host)
- return NOTIFIER.prepare(publisher_id=publisher_id)
diff --git a/tricircle/common/serializer.py b/tricircle/common/serializer.py
deleted file mode 100644
index 2d40108d..00000000
--- a/tricircle/common/serializer.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import six
-
-from oslo_messaging import serializer
-
-ATTR_NOT_SPECIFIED = object()
-
-
-class Mapping(object):
- def __init__(self, mapping):
- self.direct_mapping = mapping
- self.reverse_mapping = {}
- for key, value in six.iteritems(mapping):
- self.reverse_mapping[value] = key
-
-
-_SINGLETON_MAPPING = Mapping({
- ATTR_NOT_SPECIFIED: "@@**ATTR_NOT_SPECIFIED**@@",
-})
-
-
-class TricircleSerializer(serializer.Serializer):
- def __init__(self, base=None):
- super(TricircleSerializer, self).__init__()
- self._base = base
-
- def serialize_entity(self, context, entity):
- if isinstance(entity, dict):
- for key, value in six.iteritems(entity):
- entity[key] = self.serialize_entity(context, value)
-
- elif isinstance(entity, list):
- for i, item in enumerate(entity):
- entity[i] = self.serialize_entity(context, item)
-
- elif entity in _SINGLETON_MAPPING.direct_mapping:
- entity = _SINGLETON_MAPPING.direct_mapping[entity]
-
- if self._base is not None:
- entity = self._base.serialize_entity(context, entity)
-
- return entity
-
- def deserialize_entity(self, context, entity):
- if isinstance(entity, dict):
- for key, value in six.iteritems(entity):
- entity[key] = self.deserialize_entity(context, value)
-
- elif isinstance(entity, list):
- for i, item in enumerate(entity):
- entity[i] = self.deserialize_entity(context, item)
-
- elif entity in _SINGLETON_MAPPING.reverse_mapping:
- entity = _SINGLETON_MAPPING.reverse_mapping[entity]
-
- if self._base is not None:
- entity = self._base.deserialize_entity(context, entity)
-
- return entity
-
- def serialize_context(self, context):
- if self._base is not None:
- context = self._base.serialize_context(context)
-
- return context
-
- def deserialize_context(self, context):
- if self._base is not None:
- context = self._base.deserialize_context(context)
-
- return context
diff --git a/tricircle/common/topics.py b/tricircle/common/topics.py
deleted file mode 100644
index afff6d09..00000000
--- a/tricircle/common/topics.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-CREATE = 'create'
-DELETE = 'delete'
-UPDATE = 'update'
-
-TOPIC_XJOB = 'xjob'
diff --git a/tricircle/common/utils.py b/tricircle/common/utils.py
deleted file mode 100644
index f8f71765..00000000
--- a/tricircle/common/utils.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import six
-
-import pecan
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from tricircle.common import constants as cons
-import tricircle.common.exceptions as t_exceptions
-from tricircle.common.i18n import _
-
-
-def get_import_path(cls):
- return cls.__module__ + "." + cls.__name__
-
-
-def get_ag_name(region_name):
- return 'ag_%s' % region_name
-
-
-def get_az_name(region_name):
- return 'az_%s' % region_name
-
-
-def get_node_name(region_name):
- return "cascade_%s" % region_name
-
-
-def validate_required_fields_set(body, fields):
- for field in fields:
- if field not in body:
- return False
- return True
-
-
-TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
-FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
-
-
-def is_valid_boolstr(val):
- """Check if the provided string is a valid bool string or not."""
- val = str(val).lower()
- return (val in TRUE_STRINGS) or (val in FALSE_STRINGS)
-
-
-def bool_from_string(subject, strict=False, default=False):
- """Interpret a string as a boolean.
-
- A case-insensitive match is performed such that strings matching 't',
- 'true', 'on', 'y', 'yes', or '1' are considered True and, when
- `strict=False`, anything else returns the value specified by 'default'.
- Useful for JSON-decoded stuff and config file parsing.
- If `strict=True`, unrecognized values, including None, will raise a
- ValueError which is useful when parsing values passed in from an API call.
- Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
- """
-
- if not isinstance(subject, six.string_types):
- subject = six.text_type(subject)
-
- lowered = subject.strip().lower()
-
- if lowered in TRUE_STRINGS:
- return True
- elif lowered in FALSE_STRINGS:
- return False
- elif strict:
- acceptable = ', '.join(
- "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
- msg = _("Unrecognized value '%(val)s', acceptable values are:"
- " %(acceptable)s") % {'val': subject,
- 'acceptable': acceptable}
- raise ValueError(msg)
- else:
- return default
-
-
-def check_string_length(value, name=None, min_len=0, max_len=None):
- """Check the length of specified string
-
- :param value: the value of the string
- :param name: the name of the string
- :param min_len: the minimum length of the string
- :param max_len: the maximum length of the string
-
- """
- if not isinstance(value, six.string_types):
- if name is None:
- msg = _("The input is not a string or unicode")
- else:
- msg = _("%s is not a string or unicode") % name
- raise t_exceptions.InvalidInput(message=msg)
-
- if name is None:
- name = value
-
- if len(value) < min_len:
- msg = _("%(name)s has a minimum character requirement of "
- "%(min_length)s.") % {'name': name, 'min_length': min_len}
- raise t_exceptions.InvalidInput(message=msg)
-
- if max_len and len(value) > max_len:
- msg = _("%(name)s has more than %(max_length)s "
- "characters.") % {'name': name, 'max_length': max_len}
- raise t_exceptions.InvalidInput(message=msg)
-
-
-def get_bottom_network_name(network):
- return '%s#%s' % (network['id'], network['name'])
-
-
-def get_id_from_name(_type, name):
- if _type == cons.RT_NETWORK:
- tokens = name.split('#')
- if len(tokens) == 2:
- id_candidate = tokens[1]
- else:
- id_candidate = tokens[0]
- else:
- id_candidate = name
- if uuidutils.is_uuid_like(id_candidate):
- return id_candidate
- else:
- return None
-
-
-def format_error(code, message, error_type=None):
- error_type_map = {400: 'badRequest',
- 403: 'forbidden',
- 404: 'itemNotFound',
- 409: 'conflictingRequest',
- 500: 'internalServerError'}
- pecan.response.status = code
- if not error_type:
- if code in error_type_map:
- error_type = error_type_map[code]
- else:
- error_type = 'Error'
- # format error message in this form so nova client can
- # correctly parse it
- return {error_type: {'message': message, 'code': code}}
-
-
-def format_api_error(code, message, error_type=None):
- return format_error(code, message, error_type)
-
-
-def format_nova_error(code, message, error_type=None):
- return format_error(code, message, error_type)
-
-
-def format_cinder_error(code, message, error_type=None):
- return format_error(code, message, error_type)
-
-
-def get_pagination_limit(_limit):
- """Return page size limitation.
-
- :param _limit: page size from the client.
-    :return limit: the page size to use. If the client requests a limit
-        beyond the maximum limit in the configuration or sets an invalid
-        value, the maximum limit will be used. If the client doesn't set a
-        page limit, the maximum pagination limit will be used to control
-        the page size.
- """
- max_limit = cfg.CONF.pagination_max_limit
- limit = min(_limit, max_limit) if _limit > 0 else max_limit
-
- return limit
diff --git a/tricircle/common/version.py b/tricircle/common/version.py
deleted file mode 100644
index cf4331cc..00000000
--- a/tricircle/common/version.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-version_info = "tricircle 1.0"
diff --git a/tricircle/common/xrpcapi.py b/tricircle/common/xrpcapi.py
deleted file mode 100644
index 8a8b8805..00000000
--- a/tricircle/common/xrpcapi.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Client side of the job daemon RPC API.
-"""
-
-from oslo_config import cfg
-import oslo_messaging as messaging
-
-from tricircle.common import constants
-from tricircle.common import rpc
-from tricircle.common import serializer as t_serializer
-from tricircle.common import topics
-import tricircle.db.api as db_api
-
-
-CONF = cfg.CONF
-
-rpcapi_cap_opt = cfg.StrOpt('xjobapi',
- default='1.0',
-                            help='Set a version cap for messages sent to the '
- 'xjob api in any service')
-CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
-
-
-class XJobAPI(object):
-
- """Client side of the xjob rpc API.
-
- API version history:
- * 1.0 - Initial version.
- """
-
- VERSION_ALIASES = {
- 'mitaka': '1.0',
- }
-
- def __init__(self):
- super(XJobAPI, self).__init__()
-
- rpc.init(CONF)
- target = messaging.Target(topic=topics.TOPIC_XJOB, version='1.0')
- upgrade_level = CONF.upgrade_levels.xjobapi
- version_cap = 1.0
- if upgrade_level == 'auto':
- version_cap = self._determine_version_cap(target)
- else:
- version_cap = self.VERSION_ALIASES.get(upgrade_level,
- upgrade_level)
- serializer = t_serializer.TricircleSerializer()
- self.client = rpc.get_client(target,
- version_cap=version_cap,
- serializer=serializer)
-
-    # to be used for version compatibility handling in the future
- def _determine_version_cap(self, target):
- version_cap = 1.0
- return version_cap
-
- def invoke_method(self, ctxt, project_id, method, _type, id):
- db_api.new_job(ctxt, project_id, _type, id)
- self.client.prepare(exchange='openstack').cast(
- ctxt, method, payload={_type: id})
-
- def setup_bottom_router(self, ctxt, project_id, net_id, router_id, pod_id):
- self.invoke_method(
- ctxt, project_id, constants.job_handles[constants.JT_ROUTER_SETUP],
- constants.JT_ROUTER_SETUP,
- '%s#%s#%s' % (pod_id, router_id, net_id))
-
- def configure_route(self, ctxt, project_id, router_id):
-        # NOTE(zhiyuan) this RPC is called by the plugin in the Neutron
-        # server, whose control exchange is "neutron"; however, we start
-        # xjob without specifying its control exchange, so the default value
-        # "openstack" is used, thus we need to pass exchange as "openstack"
-        # here.
- self.invoke_method(
- ctxt, project_id,
- constants.job_handles[constants.JT_CONFIGURE_ROUTE],
- constants.JT_CONFIGURE_ROUTE, router_id)
-
- def delete_server_port(self, ctxt, project_id, port_id, pod_id):
- self.invoke_method(
- ctxt, project_id, constants.job_handles[constants.JT_PORT_DELETE],
- constants.JT_PORT_DELETE,
- '%s#%s' % (pod_id, port_id))
-
- def configure_security_group_rules(self, ctxt, project_id):
- self.invoke_method(
- ctxt, project_id,
- constants.job_handles[constants.JT_SEG_RULE_SETUP],
- constants.JT_SEG_RULE_SETUP, project_id)
-
- def update_network(self, ctxt, project_id, network_id, pod_id):
- self.invoke_method(
- ctxt, project_id,
- constants.job_handles[constants.JT_NETWORK_UPDATE],
- constants.JT_NETWORK_UPDATE,
- '%s#%s' % (pod_id, network_id))
-
- def update_subnet(self, ctxt, project_id, subnet_id, pod_id):
- self.invoke_method(
- ctxt, project_id,
- constants.job_handles[constants.JT_SUBNET_UPDATE],
- constants.JT_SUBNET_UPDATE,
- '%s#%s' % (pod_id, subnet_id))
-
- def setup_shadow_ports(self, ctxt, project_id, pod_id, net_id):
- self.invoke_method(
- ctxt, project_id,
- constants.job_handles[constants.JT_SHADOW_PORT_SETUP],
- constants.JT_SHADOW_PORT_SETUP, '%s#%s' % (pod_id, net_id))
-
- def sync_trunk(self, t_ctx, project_id, trunk_id, pod_id):
- self.invoke_method(
- t_ctx, project_id, constants.job_handles[constants.JT_TRUNK_SYNC],
- constants.JT_TRUNK_SYNC, '%s#%s' % (pod_id, trunk_id))
-
- def sync_service_function_chain(self, ctxt, project_id, portchain_id,
- net_id, pod_id):
- self.invoke_method(
- ctxt, project_id,
- constants.job_handles[constants.JT_SFC_SYNC],
- constants.JT_SFC_SYNC,
- '%s#%s#%s' % (pod_id, portchain_id, net_id))
-
- def recycle_resources(self, ctxt, project_id):
- self.invoke_method(
- ctxt, project_id,
- constants.job_handles[constants.JT_RESOURCE_RECYCLE],
- constants.JT_RESOURCE_RECYCLE, project_id)
-
- def create_qos_policy(self, ctxt, project_id, policy_id, pod_id,
- res_type, res_id=None):
- self.invoke_method(
- ctxt, project_id, constants.job_handles[constants.JT_QOS_CREATE],
- constants.JT_QOS_CREATE, '%s#%s#%s#%s' % (pod_id, policy_id,
- res_type, res_id))
-
- def update_qos_policy(self, ctxt, project_id, policy_id, pod_id):
- self.invoke_method(
- ctxt, project_id, constants.job_handles[constants.JT_QOS_UPDATE],
- constants.JT_QOS_UPDATE, '%s#%s' % (pod_id, policy_id))
-
- def delete_qos_policy(self, ctxt, project_id, policy_id, pod_id):
- self.invoke_method(
- ctxt, project_id, constants.job_handles[constants.JT_QOS_DELETE],
- constants.JT_QOS_DELETE, '%s#%s' % (pod_id, policy_id))
-
- def sync_qos_policy_rules(self, ctxt, project_id, policy_id):
- self.invoke_method(
- ctxt, project_id,
- constants.job_handles[constants.JT_SYNC_QOS_RULE],
- constants.JT_SYNC_QOS_RULE, policy_id)
diff --git a/tricircle/db/__init__.py b/tricircle/db/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/db/api.py b/tricircle/db/api.py
deleted file mode 100644
index a57dedc5..00000000
--- a/tricircle/db/api.py
+++ /dev/null
@@ -1,753 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import functools
-import sqlalchemy as sql
-from sqlalchemy import or_
-import time
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
-
-from tricircle.common import constants
-from tricircle.common.context import is_admin_context as _is_admin_context
-from tricircle.common import exceptions
-from tricircle.common.i18n import _
-
-from tricircle.db import core
-from tricircle.db import models
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-def db_test_stub(*args):
- pass
-
-
-def create_pod(context, pod_dict):
- with context.session.begin():
- return core.create_resource(context, models.Pod, pod_dict)
-
-
-def delete_pod(context, pod_id):
- with context.session.begin():
- return core.delete_resource(context, models.Pod, pod_id)
-
-
-def get_pod(context, pod_id):
- with context.session.begin():
- return core.get_resource(context, models.Pod, pod_id)
-
-
-def list_pods(context, filters=None, sorts=None):
- return core.query_resource(context, models.Pod, filters or [],
- sorts or [])
-
-
-def update_pod(context, pod_id, update_dict):
- with context.session.begin():
- return core.update_resource(context, models.Pod, pod_id, update_dict)
-
-
-def create_cached_endpoints(context, config_dict):
- with context.session.begin():
- return core.create_resource(context, models.CachedEndpoint,
- config_dict)
-
-
-def delete_cached_endpoints(context, config_id):
- with context.session.begin():
- return core.delete_resource(context, models.CachedEndpoint,
- config_id)
-
-
-def get_cached_endpoints(context, config_id):
- with context.session.begin():
- return core.get_resource(context, models.CachedEndpoint,
- config_id)
-
-
-def list_cached_endpoints(context, filters=None, sorts=None):
- return core.query_resource(context, models.CachedEndpoint,
- filters or [], sorts or [])
-
-
-def update_cached_endpoints(context, config_id, update_dict):
- with context.session.begin():
- return core.update_resource(
- context, models.CachedEndpoint, config_id, update_dict)
-
-
-def create_resource_mapping(context, top_id, bottom_id, pod_id, project_id,
- resource_type):
- try:
- context.session.begin()
- route = core.create_resource(context, models.ResourceRouting,
- {'top_id': top_id,
- 'bottom_id': bottom_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': resource_type})
- context.session.commit()
- return route
- except db_exc.DBDuplicateEntry:
- # entry has already been created
- context.session.rollback()
- return None
- finally:
- context.session.close()
-
-
-def list_resource_routings(context, filters=None, limit=None, marker=None,
- sorts=None):
- """Return a list of limited number of resource routings
-
-    :param context: context object
-    :param filters: list of filter dicts with keys 'key', 'comparator',
-                    'value'
-    :param limit: an integer that limits the maximum number of items
-                  returned in a single response
-    :param marker: id of the last item in the previous list
-    :param sorts: a list of (sort_key, sort_dir) pairs,
-                  for example, [('id', 'desc')]
-    :return: a list with a limited number of items
- """
- with context.session.begin():
- return core.paginate_query(context, models.ResourceRouting,
- limit,
- models.ResourceRouting(
- id=marker) if marker else None,
- filters or [], sorts or [])
-
-
-def get_resource_routing(context, id):
- with context.session.begin():
- return core.get_resource(context, models.ResourceRouting, id)
-
-
-def delete_resource_routing(context, id):
- with context.session.begin():
- return core.delete_resource(context, models.ResourceRouting, id)
-
-
-def update_resource_routing(context, id, update_dict):
- with context.session.begin():
- return core.update_resource(context, models.ResourceRouting, id,
- update_dict)
-
-
-def get_bottom_mappings_by_top_id(context, top_id, resource_type):
- """Get resource id and pod name on bottom
-
- :param context: context object
- :param top_id: resource id on top
- :param resource_type: resource type
- :return: a list of tuple (pod dict, bottom_id)
- """
- route_filters = [{'key': 'top_id', 'comparator': 'eq', 'value': top_id},
- {'key': 'resource_type',
- 'comparator': 'eq',
- 'value': resource_type}]
- mappings = []
- with context.session.begin():
- routes = core.query_resource(
- context, models.ResourceRouting, route_filters, [])
- for route in routes:
- if not route['bottom_id']:
- continue
- pod = core.get_resource(context, models.Pod, route['pod_id'])
- mappings.append((pod, route['bottom_id']))
- return mappings
-
-
-def delete_pre_created_resource_mapping(context, name):
- with context.session.begin():
- entries = core.query_resource(
- context, models.ResourceRouting,
- filters=[{'key': 'top_id', 'comparator': 'eq',
- 'value': name}], sorts=[])
- if entries:
- core.delete_resources(
- context, models.ResourceRouting,
- filters=[{'key': 'top_id', 'comparator': 'eq',
- 'value': entries[0]['bottom_id']}])
- core.delete_resource(context, models.ResourceRouting,
- entries[0]['id'])
-
-
-def get_pod_by_top_id(context, _id):
- """Get pod resource from pod table by top id of resource
-
- :param context: context object
- :param _id: the top id of resource
- :returns: pod resource
- """
- route_filters = [{'key': 'top_id', 'comparator': 'eq', 'value': _id}]
- with context.session.begin():
- routes = core.query_resource(
- context, models.ResourceRouting, route_filters, [])
- if not routes or len(routes) != 1:
- return None
- route = routes[0]
- if not route['bottom_id']:
- return None
- return core.get_resource(context, models.Pod, route['pod_id'])
-
-
-def get_bottom_id_by_top_id_region_name(context, top_id,
- region_name, resource_type):
- """Get resource bottom id by top id and bottom pod name
-
- :param context: context object
- :param top_id: resource id on top
- :param region_name: name of bottom pod
- :param resource_type: resource type
- :return: bottom resource id, or None if no mapping is found
- """
- mappings = get_bottom_mappings_by_top_id(context, top_id, resource_type)
- for pod, bottom_id in mappings:
- if pod['region_name'] == region_name:
- return bottom_id
- return None
-
-
-def get_bottom_mappings_by_tenant_pod(context,
- tenant_id,
- pod_id,
- resource_type):
- """Get resource routing for specific tenant and pod
-
- :param context: context object
- :param tenant_id: tenant id to look up
- :param pod_id: pod to look up
- :param resource_type: resource type
- :return: a dict mapping top_id to its routing entry
- """
- route_filters = [{'key': 'pod_id',
- 'comparator': 'eq',
- 'value': pod_id},
- {'key': 'project_id',
- 'comparator': 'eq',
- 'value': tenant_id},
- {'key': 'resource_type',
- 'comparator': 'eq',
- 'value': resource_type}]
- routings = {}
- with context.session.begin():
- routes = core.query_resource(
- context, models.ResourceRouting, route_filters, [])
- for _route in routes:
- if not _route['bottom_id']:
- continue
- routings[_route['top_id']] = _route
- return routings
-
-
-def delete_mappings_by_top_id(context, top_id, pod_id=None):
- """Delete resource routing entry based on top resource ID
-
- If pod ID is also provided, only the entry in that pod will be deleted
-
- :param context: context object
- :param top_id: top resource ID
- :param pod_id: optional pod ID
- :return: None
- """
- filters = [{'key': 'top_id', 'comparator': 'eq', 'value': top_id}]
- if pod_id:
- filters.append({'key': 'pod_id', 'comparator': 'eq', 'value': pod_id})
- with context.session.begin():
- core.delete_resources(context, models.ResourceRouting, filters=filters)
-
-
-def delete_mappings_by_bottom_id(context, bottom_id):
- with context.session.begin():
- core.delete_resources(
- context, models.ResourceRouting,
- filters=[{'key': 'bottom_id', 'comparator': 'eq',
- 'value': bottom_id}])
-
-
-def get_next_bottom_pod(context, current_pod_id=None):
- pods = list_pods(context, sorts=[(models.Pod.pod_id, True)])
- # NOTE(zhiyuan) the number of pods is small, so just traverse the list,
- # skipping the top pod (whose az_name is empty)
- pods = [pod for pod in pods if pod['az_name']]
- for index, pod in enumerate(pods):
- if not current_pod_id:
- return pod
- if pod['pod_id'] == current_pod_id and index < len(pods) - 1:
- return pods[index + 1]
- return None
-
-
-def get_top_pod(context):
-
- filters = [{'key': 'az_name', 'comparator': 'eq', 'value': ''}]
- pods = list_pods(context, filters=filters)
-
- # at most one pod should match
- for pod in pods:
- if (pod['region_name'] != '') and \
- (pod['az_name'] == ''):
- return pod
-
- return None
-
-
-def get_pod_by_name(context, region_name):
-
- filters = [{'key': 'region_name',
- 'comparator': 'eq', 'value': region_name}]
- pods = list_pods(context, filters=filters)
-
- # at most one pod should match
- for pod in pods:
- if pod['region_name'] == region_name:
- return pod
-
- return None
-
-
-def find_pods_by_az_or_region(context, az_or_region):
- # if az_or_region is None or empty, return None directly
- if not az_or_region:
- return None
- query = context.session.query(models.Pod)
- query = query.filter(or_(models.Pod.region_name == az_or_region,
- models.Pod.az_name == az_or_region))
-
- return [obj.to_dict() for obj in query]
-
-
-def find_pod_by_az_or_region(context, az_or_region):
- pods = find_pods_by_az_or_region(context, az_or_region)
-
- # if pods is None, return None directly
- if pods is None:
- return None
- # if no pod is matched, then we will raise an exception
- if len(pods) < 1:
- raise exceptions.PodNotFound(az_or_region)
- # if the pods list contains exactly one pod, return it
- if len(pods) == 1:
- return pods[0]
- # if the pods list contains more than one pod, then we will raise an
- # exception
- if len(pods) > 1:
- raise exceptions.InvalidInput(
- reason='Multiple pods with the same az_name are found')
-
-
-def new_job(context, project_id, _type, resource_id):
- with context.session.begin():
- job_dict = {'id': uuidutils.generate_uuid(),
- 'type': _type,
- 'status': constants.JS_New,
- 'project_id': project_id,
- 'resource_id': resource_id,
- 'extra_id': uuidutils.generate_uuid()}
- job = core.create_resource(context,
- models.AsyncJob, job_dict)
- return job
-
-
-def register_job(context, project_id, _type, resource_id):
- try:
- context.session.begin()
- job_dict = {'id': uuidutils.generate_uuid(),
- 'type': _type,
- 'status': constants.JS_Running,
- 'project_id': project_id,
- 'resource_id': resource_id,
- 'extra_id': constants.SP_EXTRA_ID}
- job = core.create_resource(context,
- models.AsyncJob, job_dict)
- context.session.commit()
- return job
- except db_exc.DBDuplicateEntry:
- context.session.rollback()
- return None
- except db_exc.DBDeadlock:
- context.session.rollback()
- return None
- finally:
- context.session.close()
-
-
-def get_latest_failed_or_new_jobs(context):
- current_timestamp = timeutils.utcnow()
- time_span = datetime.timedelta(seconds=CONF.redo_time_span)
- latest_timestamp = current_timestamp - time_span
- failed_jobs = []
- new_jobs = []
-
- # first we group the jobs by type and resource id, and in each group we
- # pick the latest timestamp
- stmt = context.session.query(
- models.AsyncJob.type, models.AsyncJob.resource_id,
- sql.func.max(models.AsyncJob.timestamp).label('timestamp'))
- stmt = stmt.filter(models.AsyncJob.timestamp >= latest_timestamp)
- stmt = stmt.group_by(models.AsyncJob.type,
- models.AsyncJob.resource_id).subquery()
-
- # then we join the result with the original table and group again; in each
- # group we pick the "minimum" status, since statuses sort ascending as
- # "0_Fail", "1_Success", "2_Running", "3_New"
- query = context.session.query(models.AsyncJob.type,
- models.AsyncJob.resource_id,
- models.AsyncJob.project_id,
- sql.func.min(models.AsyncJob.status)).join(
- stmt, sql.and_(models.AsyncJob.type == stmt.c.type,
- models.AsyncJob.resource_id == stmt.c.resource_id,
- models.AsyncJob.timestamp == stmt.c.timestamp))
- query = query.group_by(models.AsyncJob.project_id,
- models.AsyncJob.type,
- models.AsyncJob.resource_id)
-
- for job_type, resource_id, project_id, status in query:
- if status == constants.JS_Fail:
- failed_jobs.append({'type': job_type, 'resource_id': resource_id,
- 'project_id': project_id})
- elif status == constants.JS_New:
- new_jobs.append({'type': job_type, 'resource_id': resource_id,
- 'project_id': project_id})
- return failed_jobs, new_jobs
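The query above relies on the lexicographic ordering of the status constants quoted in the comment, so a single failed attempt marks the whole group as failed. A small illustration using the literal status strings from that comment:

    group_statuses = ['3_New', '2_Running', '1_Success', '0_Fail']
    assert min(group_statuses) == '0_Fail'              # any failure wins
    assert min(['3_New', '1_Success']) == '1_Success'   # already succeeded, nothing to redo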
-
-
-def get_job(context, job_id):
- with context.session.begin():
- return core.get_resource(context, models.AsyncJob, job_id)
-
-
-def get_job_from_log(context, job_id):
- with context.session.begin():
- return core.get_resource(context, models.AsyncJobLog, job_id)
-
-
-def delete_job(context, job_id):
- with context.session.begin():
- return core.delete_resource(context, models.AsyncJob, job_id)
-
-
-def list_jobs(context, filters=None, sorts=None, limit=None, marker=None):
- with context.session.begin():
- marker_obj = None
- if marker is not None:
- marker_obj = context.session.query(models.AsyncJob).filter(
- models.AsyncJob.id == marker).first()
- return core.paginate_query(
- context, models.AsyncJob, limit, marker_obj,
- filters or [], sorts or [])
-
-
-def list_jobs_from_log(context, filters=None, sorts=None,
- limit=None, marker=None):
- with context.session.begin():
- marker_obj = None
- if marker is not None:
- marker_obj = context.session.query(models.AsyncJobLog).filter(
- models.AsyncJobLog.id == marker).first()
-
- filter_is_success = True
- if filters is not None and len(filters) > 0:
- for filter in filters:
- if filter.get('key') == 'status':
- job_status = filter['value']
- # job entry in job log table has no
- # status attribute.
- if job_status == constants.JS_Success:
- filters.remove(filter)
- else:
- filter_is_success = False
- break
- if filter_is_success:
- return core.paginate_query(context, models.AsyncJobLog, limit,
- marker_obj,
- filters or [], sorts or [])
- return []
-
-
-def get_latest_job(context, status, _type, resource_id):
- jobs = core.query_resource(
- context, models.AsyncJob,
- [{'key': 'status', 'comparator': 'eq', 'value': status},
- {'key': 'type', 'comparator': 'eq', 'value': _type},
- {'key': 'resource_id', 'comparator': 'eq', 'value': resource_id}],
- [('timestamp', False)])
- if jobs:
- return jobs[0]
- else:
- return None
-
-
-def get_running_job(context, _type, resource_id):
- jobs = core.query_resource(
- context, models.AsyncJob,
- [{'key': 'resource_id', 'comparator': 'eq', 'value': resource_id},
- {'key': 'status', 'comparator': 'eq', 'value': constants.JS_Running},
- {'key': 'type', 'comparator': 'eq', 'value': _type}], [])
- if jobs:
- return jobs[0]
- else:
- return None
-
-
-def finish_job(context, job_id, successful, timestamp):
- status = constants.JS_Success if successful else constants.JS_Fail
- retries = 5
- for i in range(retries + 1):
- try:
- with context.session.begin():
- db_test_stub(i)
- job_dict = {'status': status,
- 'timestamp': timestamp,
- 'extra_id': uuidutils.generate_uuid()}
- job = core.update_resource(context, models.AsyncJob, job_id,
- job_dict)
- if status == constants.JS_Success:
- log_dict = {'id': uuidutils.generate_uuid(),
- 'type': job['type'],
- 'project_id': job['project_id'],
- 'timestamp': timestamp,
- 'resource_id': job['resource_id']}
- context.session.query(models.AsyncJob).filter(
- sql.and_(
- models.AsyncJob.type == job['type'],
- models.AsyncJob.resource_id == job['resource_id'],
- models.AsyncJob.timestamp <= timestamp)).delete(
- synchronize_session=False)
- core.create_resource(context, models.AsyncJobLog, log_dict)
- else:
- # sqlite has problems handling the "<" operator on timestamps,
- # so we shift the timestamp back slightly and use "<=" instead
- timestamp = timestamp - datetime.timedelta(microseconds=1)
- context.session.query(models.AsyncJob).filter(
- sql.and_(
- models.AsyncJob.type == job['type'],
- models.AsyncJob.resource_id == job['resource_id'],
- models.AsyncJob.timestamp <= timestamp)).delete(
- synchronize_session=False)
- except db_exc.DBDeadlock:
- if i == retries:
- raise
- time.sleep(1)
- continue
- return
-
-
-def ensure_agent_exists(context, pod_id, host, _type, tunnel_ip):
- try:
- context.session.begin()
- agents = core.query_resource(
- context, models.ShadowAgent,
- [{'key': 'host', 'comparator': 'eq', 'value': host},
- {'key': 'type', 'comparator': 'eq', 'value': _type}], [])
- if agents:
- return
- core.create_resource(context, models.ShadowAgent,
- {'id': uuidutils.generate_uuid(),
- 'pod_id': pod_id,
- 'host': host,
- 'type': _type,
- 'tunnel_ip': tunnel_ip})
- context.session.commit()
- except db_exc.DBDuplicateEntry:
- # agent has already been created
- context.session.rollback()
- finally:
- context.session.close()
-
-
-def get_agent_by_host_type(context, host, _type):
- agents = core.query_resource(
- context, models.ShadowAgent,
- [{'key': 'host', 'comparator': 'eq', 'value': host},
- {'key': 'type', 'comparator': 'eq', 'value': _type}], [])
- return agents[0] if agents else None
-
-
-def _is_user_context(context):
- """Indicates if the request context is a normal user."""
- if not context:
- return False
- if context.is_admin:
- return False
- if not context.user_id or not context.project_id:
- return False
- return True
-
-
-def authorize_project_context(context, project_id):
- """Ensures a request has permission to access the given project."""
- if _is_user_context(context):
- if not context.project_id:
- raise exceptions.NotAuthorized()
- elif context.project_id != project_id:
- raise exceptions.NotAuthorized()
-
-
-def authorize_user_context(context, user_id):
- """Ensures a request has permission to access the given user."""
- if _is_user_context(context):
- if not context.user_id:
- raise exceptions.NotAuthorized()
- elif context.user_id != user_id:
- raise exceptions.NotAuthorized()
-
-
-def require_admin_context(f):
- """Decorator to require admin request context.
-
- The first argument to the wrapped function must be the context.
-
- """
-
- def wrapper(*args, **kwargs):
- if not _is_admin_context(args[0]):
- raise exceptions.AdminRequired()
- return f(*args, **kwargs)
- return wrapper
-
-
-def require_context(f):
- """Decorator to require *any* user or admin context.
-
- This does no authorization for user or project access matching, see
- :py:func:`authorize_project_context` and
- :py:func:`authorize_user_context`.
-
- The first argument to the wrapped function must be the context.
-
- """
-
- def wrapper(*args, **kwargs):
- if not _is_admin_context(args[0]) and not _is_user_context(args[0]):
- raise exceptions.NotAuthorized()
- return f(*args, **kwargs)
- return wrapper
-
-
-def _retry_on_deadlock(f):
- """Decorator to retry a DB API call if Deadlock was received."""
- @functools.wraps(f)
- def wrapped(*args, **kwargs):
- while True:
- try:
- return f(*args, **kwargs)
- except db_exc.DBDeadlock:
- LOG.warning("Deadlock detected when running "
- "'%(func_name)s': Retrying...",
- dict(func_name=f.__name__))
- # Retry!
- time.sleep(0.5)
- continue
- functools.update_wrapper(wrapped, f)
- return wrapped
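A hypothetical usage sketch of the decorator above; the wrapped helper is illustrative and not part of this module:

    @_retry_on_deadlock
    def _set_job_status(context, job_id, status):
        # transparently retried if the database reports a deadlock
        with context.session.begin():
            return core.update_resource(context, models.AsyncJob, job_id,
                                        {'status': status})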
-
-
-def handle_db_data_error(f):
- def wrapper(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except db_exc.DBDataError:
- msg = _('Error writing field to database')
- LOG.exception(msg)
- raise exceptions.Invalid(msg)
- except Exception as e:
- LOG.exception(str(e))
- raise
-
- return wrapper
-
-
-def model_query(context, *args, **kwargs):
- """Query helper that accounts for context's `read_deleted` field.
-
- :param context: context to query under
- :param session: if present, the session to use
- :param read_deleted: if present, overrides context's read_deleted field.
- :param project_only: if present and context is user-type, then restrict
- query to match the context's project_id.
- """
- session = kwargs.get('session') or context.session
- read_deleted = kwargs.get('read_deleted') or context.read_deleted
- project_only = kwargs.get('project_only')
-
- query = session.query(*args)
-
- if read_deleted == 'no':
- query = query.filter_by(deleted=False)
- elif read_deleted == 'yes':
- pass # omit the filter to include deleted and active
- elif read_deleted == 'only':
- query = query.filter_by(deleted=True)
- elif read_deleted == 'int_no':
- query = query.filter_by(deleted=0)
- else:
- raise Exception(
- _("Unrecognized read_deleted value '%s'") % read_deleted)
-
- if project_only and _is_user_context(context):
- query = query.filter_by(project_id=context.project_id)
-
- return query
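A minimal sketch of how model_query could be called, assuming a hypothetical model with a boolean `deleted` column (the models in tricircle/db/models.py below do not define one, so this is purely illustrative):

    # rows belonging to the caller's project, excluding soft-deleted ones
    query = model_query(ctx, SoftDeletedModel,
                        read_deleted='no', project_only=True)
    rows = query.all()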
-
-
-def is_valid_model_filters(model, filters):
- """Return True if filter values exist on the model
-
- :param model: a Cinder model
- :param filters: dictionary of filters
- """
- for key in filters.keys():
- if not hasattr(model, key):
- return False
- return True
-
-
-def create_recycle_resource(context, resource_id, resource_type, project_id):
- try:
- context.session.begin()
- route = core.create_resource(context, models.RecycleResources,
- {'resource_id': resource_id,
- 'resource_type': resource_type,
- 'project_id': project_id})
- context.session.commit()
- return route
- except db_exc.DBDuplicateEntry:
- # entry has already been created
- context.session.rollback()
- return None
- finally:
- context.session.close()
-
-
-def list_recycle_resources(context, filters=None, sorts=None):
- with context.session.begin():
- resources = core.query_resource(
- context, models.RecycleResources, filters or [], sorts or [])
- return resources
-
-
-def delete_recycle_resource(context, resource_id):
- with context.session.begin():
- return core.delete_resource(
- context, models.RecycleResources, resource_id)
diff --git a/tricircle/db/core.py b/tricircle/db/core.py
deleted file mode 100644
index dbf9b283..00000000
--- a/tricircle/db/core.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-from sqlalchemy.ext import declarative
-from sqlalchemy.inspection import inspect
-import threading
-
-from oslo_config import cfg
-import oslo_db.options as db_options
-import oslo_db.sqlalchemy.session as db_session
-from oslo_db.sqlalchemy import utils as sa_utils
-from oslo_utils import strutils
-
-from tricircle.common import exceptions
-
-db_opts = [
- cfg.StrOpt('tricircle_db_connection',
- help='db connection string for tricircle'),
-]
-cfg.CONF.register_opts(db_opts)
-
-_LOCK = threading.Lock()
-_engine_facade = None
-ModelBase = declarative.declarative_base()
-
-
-def _filter_query(model, query, filters):
- """Apply filter to query
-
- :param model:
- :param query:
- :param filters: list of filter dict with key 'key', 'comparator', 'value'
- like {'key': 'pod_id', 'comparator': 'eq', 'value': 'test_pod_uuid'}
- :return:
- """
- filter_dict = {}
- for query_filter in filters:
- # only the 'eq' comparator is supported for now
- if query_filter['comparator'] != 'eq':
- continue
-
- key = query_filter['key']
- if key not in model.attributes:
- continue
- if isinstance(inspect(model).columns[key].type, sql.Boolean):
- filter_dict[key] = strutils.bool_from_string(query_filter['value'])
- else:
- filter_dict[key] = query_filter['value']
- if filter_dict:
- return query.filter_by(**filter_dict)
- else:
- return query
-
-
-def _get_engine_facade():
- global _LOCK
- with _LOCK:
- global _engine_facade
-
- if not _engine_facade:
- t_connection = cfg.CONF.tricircle_db_connection
- _engine_facade = db_session.EngineFacade(t_connection,
- _conf=cfg.CONF)
- return _engine_facade
-
-
-def _get_resource(context, model, pk_value):
- res_obj = context.session.query(model).get(pk_value)
- if not res_obj:
- raise exceptions.ResourceNotFound(model, pk_value)
- return res_obj
-
-
-def paginate_query(context, model, limit, marker_obj, filters, sorts):
- """Returns a query with sorting / pagination / filtering criteria added.
-
- :param context:
- :param model:
- :param limit: the maximum number of items returned in a single page
- :param marker_obj: data model instance that has the same fields as
- keys in sorts. All its value(s) are from the last item
- of the previous page; we returns the next
- results after this item.
- :param filters: list of filter dict with key 'key', 'comparator', 'value'
- :param sorts: a list of (sort_key, sort_dir) pair,
- for example, [('id', 'desc')]
- :return: the query with sorting/pagination/filtering added
- """
- query = context.session.query(model)
- query = _filter_query(model, query, filters)
-
- sort_keys = []
- sort_dirs = []
- for sort_key, sort_dir in sorts:
- sort_keys.append(sort_key)
- sort_dirs.append(sort_dir)
-
- query = sa_utils.paginate_query(query, model, limit, sort_keys,
- marker=marker_obj, sort_dirs=sort_dirs)
-
- return [obj.to_dict() for obj in query]
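A sketch of marker-based paging with the helper above, assuming `ctx` and the models module from tricircle/db/models.py; the page size and the `handle` consumer are illustrative:

    page = paginate_query(ctx, models.ResourceRouting, 50, None,
                          [], [('id', 'asc')])
    while page:
        handle(page)   # hypothetical consumer of one page of dicts
        marker = models.ResourceRouting(id=page[-1]['id'])
        page = paginate_query(ctx, models.ResourceRouting, 50, marker,
                              [], [('id', 'asc')])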
-
-
-def create_resource(context, model, res_dict):
- res_obj = model.from_dict(res_dict)
- context.session.add(res_obj)
- context.session.flush()
- # retrieve auto-generated fields
- context.session.refresh(res_obj)
- return res_obj.to_dict()
-
-
-def delete_resource(context, model, pk_value):
- res_obj = _get_resource(context, model, pk_value)
- context.session.delete(res_obj)
-
-
-def delete_resources(context, model, filters, delete_all=False):
- # passing an empty filter requires explicit delete_all=True confirmation
- assert filters or delete_all
- query = context.session.query(model)
- query = _filter_query(model, query, filters)
- query.delete(synchronize_session=False)
-
-
-def get_engine():
- return _get_engine_facade().get_engine()
-
-
-def get_resource(context, model, pk_value):
- return _get_resource(context, model, pk_value).to_dict()
-
-
-def get_session(expire_on_commit=False):
- return _get_engine_facade().get_session(expire_on_commit=expire_on_commit)
-
-
-def initialize():
- db_options.set_defaults(
- cfg.CONF,
- connection='sqlite://')
-
-
-def query_resource(context, model, filters, sorts):
- query = context.session.query(model)
- query = _filter_query(model, query, filters)
- for sort_key, sort_dir in sorts:
- sort_dir_func = sql.asc if sort_dir else sql.desc
- query = query.order_by(sort_dir_func(sort_key))
- return [obj.to_dict() for obj in query]
-
-
-def update_resource(context, model, pk_value, update_dict):
- res_obj = _get_resource(context, model, pk_value)
- for key in update_dict:
- if key not in model.attributes:
- continue
- skip = False
- for pkey in inspect(model).primary_key:
- if pkey.name == key:
- skip = True
- break
- if skip:
- continue
- setattr(res_obj, key, update_dict[key])
- return res_obj.to_dict()
-
-
-def update_resources(context, model, filters, update_dict):
- query = context.session.query(model)
- query = _filter_query(model, query, filters)
- query.update(update_dict, synchronize_session=False)
-
-
-class DictBase(object):
- attributes = []
-
- @classmethod
- def from_dict(cls, d):
- return cls(**d)
-
- def to_dict(self):
- d = {}
- for attr in self.__class__.attributes:
- d[attr] = getattr(self, attr)
- return d
-
- def __getitem__(self, key):
- return getattr(self, key)
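A short sketch of the DictBase contract, using the Pod model defined in tricircle/db/models.py later in this change; the field values are placeholders:

    pod = Pod.from_dict({'pod_id': 'uuid-1', 'region_name': 'RegionOne',
                         'pod_az_name': None, 'dc_name': None,
                         'az_name': 'az1'})
    assert pod['region_name'] == 'RegionOne'   # __getitem__ delegates to getattr
    assert pod.to_dict()['az_name'] == 'az1'   # attributes drive the dict view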
diff --git a/tricircle/db/migrate_repo/__init__.py b/tricircle/db/migrate_repo/__init__.py
deleted file mode 100644
index f171f3ca..00000000
--- a/tricircle/db/migrate_repo/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-DB_INIT_VERSION = 0
diff --git a/tricircle/db/migrate_repo/migrate.cfg b/tricircle/db/migrate_repo/migrate.cfg
deleted file mode 100644
index 9acd75f2..00000000
--- a/tricircle/db/migrate_repo/migrate.cfg
+++ /dev/null
@@ -1,26 +0,0 @@
-[db_settings]
-# Used to identify which repository this database is versioned under.
-# You can use the name of your project.
-repository_id=tricircle
-
-# The name of the database table used to track the schema version.
-# This name shouldn't already be used by your project.
-# If this is changed once a database is under version control, you'll need to
-# change the table name in each database too.
-version_table=migrate_version
-
-# When committing a change script, Migrate will attempt to generate the
-# sql for all supported databases; normally, if one of them fails - probably
-# because you don't have that database installed - it is ignored and the
-# commit continues, perhaps ending successfully.
-# Databases in this list MUST compile successfully during a commit, or the
-# entire commit will fail. List the databases your application will actually
-# be using to ensure your updates to that database work properly.
-# This must be a list; example: ['postgres','sqlite']
-required_dbs=[]
-
-# When creating new change scripts, Migrate will stamp the new script with
-# a version number. By default this is latest_version + 1. You can set this
-# to 'true' to tell Migrate to use the UTC timestamp instead.
-use_timestamp_numbering=False
-
diff --git a/tricircle/db/migrate_repo/versions/001_init.py b/tricircle/db/migrate_repo/versions/001_init.py
deleted file mode 100644
index 093a1f90..00000000
--- a/tricircle/db/migrate_repo/versions/001_init.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- pods = sql.Table(
- 'pods', meta,
- sql.Column('pod_id', sql.String(length=36), primary_key=True),
- sql.Column('region_name', sql.String(length=255), unique=True,
- nullable=False),
- sql.Column('pod_az_name', sql.String(length=255), nullable=True),
- sql.Column('dc_name', sql.String(length=255), nullable=True),
- sql.Column('az_name', sql.String(length=255), nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- cached_endpoints = sql.Table(
- 'cached_endpoints', meta,
- sql.Column('service_id', sql.String(length=64), primary_key=True),
- sql.Column('pod_id', sql.String(length=64), nullable=False),
- sql.Column('service_type', sql.String(length=64), nullable=False),
- sql.Column('service_url', sql.String(length=512), nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- tables = [pods, cached_endpoints]
- for table in tables:
- table.create()
-
- fkey = {'columns': [cached_endpoints.c.pod_id],
- 'references': [pods.c.pod_id]}
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name')).create()
-
-
-def downgrade(migrate_engine):
- raise NotImplementedError('can not downgrade from init repo.')
diff --git a/tricircle/db/migrate_repo/versions/002_resource.py b/tricircle/db/migrate_repo/versions/002_resource.py
deleted file mode 100644
index f27eea5a..00000000
--- a/tricircle/db/migrate_repo/versions/002_resource.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import migrate
-import sqlalchemy as sql
-from sqlalchemy.dialects import mysql
-
-
-def MediumText():
- return sql.Text().with_variant(mysql.MEDIUMTEXT(), 'mysql')
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- resource_routings = sql.Table(
- 'resource_routings', meta,
- sql.Column('id', sql.BigInteger, primary_key=True),
- sql.Column('top_id', sql.String(length=127), nullable=False),
- sql.Column('bottom_id', sql.String(length=36)),
- sql.Column('pod_id', sql.String(length=64), nullable=False),
- sql.Column('project_id', sql.String(length=36)),
- sql.Column('resource_type', sql.String(length=64), nullable=False),
- sql.Column('created_at', sql.DateTime),
- sql.Column('updated_at', sql.DateTime),
- migrate.UniqueConstraint(
- 'top_id', 'pod_id', 'resource_type',
- name='resource_routings0top_id0pod_id0resource_type'
- ),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- async_jobs = sql.Table(
- 'async_jobs', meta,
- sql.Column('id', sql.String(length=36), primary_key=True),
- sql.Column('type', sql.String(length=36)),
- sql.Column('timestamp', sql.TIMESTAMP,
- server_default=sql.text('CURRENT_TIMESTAMP'), index=True),
- sql.Column('status', sql.String(length=36)),
- sql.Column('resource_id', sql.String(length=127)),
- sql.Column('extra_id', sql.String(length=36)),
- migrate.UniqueConstraint(
- 'type', 'status', 'resource_id', 'extra_id',
- name='async_jobs0type0status0resource_id0extra_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- async_job_logs = sql.Table(
- 'async_job_logs', meta,
- sql.Column('id', sql.String(length=36), primary_key=True),
- sql.Column('resource_id', sql.String(length=127)),
- sql.Column('type', sql.String(length=36)),
- sql.Column('timestamp', sql.TIMESTAMP,
- server_default=sql.text('CURRENT_TIMESTAMP'), index=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- tables = [async_jobs, resource_routings, async_job_logs]
- for table in tables:
- table.create()
-
- pods = sql.Table('pods', meta, autoload=True)
-
- fkeys = [{'columns': [resource_routings.c.pod_id],
- 'references': [pods.c.pod_id]}]
- for fkey in fkeys:
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name')).create()
-
-
-def downgrade(migrate_engine):
- raise NotImplementedError('downgrade not supported')
diff --git a/tricircle/db/migrate_repo/versions/003_shadow_agent.py b/tricircle/db/migrate_repo/versions/003_shadow_agent.py
deleted file mode 100644
index 2f2bda88..00000000
--- a/tricircle/db/migrate_repo/versions/003_shadow_agent.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- shadow_agents = sql.Table(
- 'shadow_agents', meta,
- sql.Column('id', sql.String(length=36), primary_key=True),
- sql.Column('pod_id', sql.String(length=64), nullable=False),
- sql.Column('host', sql.String(length=255), nullable=False),
- sql.Column('type', sql.String(length=36), nullable=False),
- sql.Column('tunnel_ip', sql.String(length=48), nullable=False),
- migrate.UniqueConstraint(
- 'host', 'type',
- name='host0type'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- shadow_agents.create()
-
- pods = sql.Table('pods', meta, autoload=True)
- fkey = {'columns': [shadow_agents.c.pod_id],
- 'references': [pods.c.pod_id]}
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name')).create()
-
-
-def downgrade(migrate_engine):
- raise NotImplementedError('downgrade not supported')
diff --git a/tricircle/db/migrate_repo/versions/004_fix_resource_routings_pod_id_length.py b/tricircle/db/migrate_repo/versions/004_fix_resource_routings_pod_id_length.py
deleted file mode 100644
index 11edcf16..00000000
--- a/tricircle/db/migrate_repo/versions/004_fix_resource_routings_pod_id_length.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-from sqlalchemy import MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- resource_routings = Table('resource_routings', meta, autoload=True)
- pods = Table('pods', meta, autoload=True)
-
- col_pod_id_fkey = resource_routings.c.pod_id
- col_pod_id_pkey = pods.c.pod_id
-
- # In the migration script 002_resource.py, the pod_id string length in
- # resource_routings table is 64, but pod_id string length in pods table
- # is 36. The string length in foreign key and primary key isn't the same
- if col_pod_id_fkey.type.length != col_pod_id_pkey.type.length:
- # Drop the old foreign key constraint first; while it is in place
- # we cannot alter the pod_id length.
- migrate.ForeignKeyConstraint(columns=[resource_routings.c.pod_id],
- refcolumns=[pods.c.pod_id]).drop()
-
- col_pod_id_fkey.alter(type=String(col_pod_id_pkey.type.length))
-
- # Create the foreign key constraint
- migrate.ForeignKeyConstraint(columns=[resource_routings.c.pod_id],
- refcolumns=[pods.c.pod_id]).create()
diff --git a/tricircle/db/migrate_repo/versions/005_fix_cached_endpoints_pod_id_length.py b/tricircle/db/migrate_repo/versions/005_fix_cached_endpoints_pod_id_length.py
deleted file mode 100644
index cc34daad..00000000
--- a/tricircle/db/migrate_repo/versions/005_fix_cached_endpoints_pod_id_length.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-from sqlalchemy import MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- cached_endpoints = Table('cached_endpoints', meta, autoload=True)
- pods = Table('pods', meta, autoload=True)
-
- col_pod_id_fkey = cached_endpoints.c.pod_id
- col_pod_id_pkey = pods.c.pod_id
-
- # In the migration script 001_init.py, the pod_id string length in
- # cached_endpoints table is 64, but pod_id string length in pods table
- # is 36. The string length in foreign key and primary key isn't the same
- if col_pod_id_fkey.type.length != col_pod_id_pkey.type.length:
- # Drop the old foreign key constraint first; while it is in place
- # we cannot alter the pod_id length.
- migrate.ForeignKeyConstraint(columns=[cached_endpoints.c.pod_id],
- refcolumns=[pods.c.pod_id]).drop()
-
- col_pod_id_fkey.alter(type=String(col_pod_id_pkey.type.length))
-
- # Create the foreign key constraint
- migrate.ForeignKeyConstraint(columns=[cached_endpoints.c.pod_id],
- refcolumns=[pods.c.pod_id]).create()
diff --git a/tricircle/db/migrate_repo/versions/006_add_project_id_to_async_jobs.py b/tricircle/db/migrate_repo/versions/006_add_project_id_to_async_jobs.py
deleted file mode 100644
index e1bae0fb..00000000
--- a/tricircle/db/migrate_repo/versions/006_add_project_id_to_async_jobs.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column
-from sqlalchemy import MetaData
-from sqlalchemy import String
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- """Function adds project_id field."""
- meta = MetaData(bind=migrate_engine)
-
- # Add a new column project_id for async_jobs
- async_jobs = Table('async_jobs', meta, autoload=True)
- project_id = Column('project_id', String(36), nullable=True)
-
- if not hasattr(async_jobs.c, 'project_id'):
- async_jobs.create_column(project_id)
diff --git a/tricircle/db/migrate_repo/versions/007_add_project_id_to_async_job_logs.py b/tricircle/db/migrate_repo/versions/007_add_project_id_to_async_job_logs.py
deleted file mode 100644
index f9be88a1..00000000
--- a/tricircle/db/migrate_repo/versions/007_add_project_id_to_async_job_logs.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column
-from sqlalchemy import MetaData
-from sqlalchemy import String
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- """Function adds project_id field."""
- meta = MetaData(bind=migrate_engine)
-
- # Add a new column project_id for async_job_logs
- async_job_logs = Table('async_job_logs', meta, autoload=True)
- project_id = Column('project_id', String(36), nullable=True)
-
- if not hasattr(async_job_logs.c, 'project_id'):
- async_job_logs.create_column(project_id)
diff --git a/tricircle/db/migrate_repo/versions/008_fix_shadow_agents_pod_id_length.py b/tricircle/db/migrate_repo/versions/008_fix_shadow_agents_pod_id_length.py
deleted file mode 100644
index 1b41e85b..00000000
--- a/tricircle/db/migrate_repo/versions/008_fix_shadow_agents_pod_id_length.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-from sqlalchemy import MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- shadow_agents = Table('shadow_agents', meta, autoload=True)
- pods = Table('pods', meta, autoload=True)
-
- col_pod_id_fkey = shadow_agents.c.pod_id
- col_pod_id_pkey = pods.c.pod_id
-
- # In the migration script 003_shadow_agent.py, the pod_id string length in
- # shadow_agents table is 64, but pod_id string length in pods table
- # is 36. The string length in foreign key and primary key isn't the same
- if col_pod_id_fkey.type.length != col_pod_id_pkey.type.length:
- # Drop the old foreign key constraint first; while it is in place
- # we cannot alter the pod_id length.
- migrate.ForeignKeyConstraint(columns=[shadow_agents.c.pod_id],
- refcolumns=[pods.c.pod_id]).drop()
-
- col_pod_id_fkey.alter(type=String(col_pod_id_pkey.type.length))
-
- # Create the foreign key constraint
- migrate.ForeignKeyConstraint(columns=[shadow_agents.c.pod_id],
- refcolumns=[pods.c.pod_id]).create()
diff --git a/tricircle/db/migrate_repo/versions/009_recycle_resources.py b/tricircle/db/migrate_repo/versions/009_recycle_resources.py
deleted file mode 100644
index 579f676c..00000000
--- a/tricircle/db/migrate_repo/versions/009_recycle_resources.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- recycle_resources = sql.Table(
- 'recycle_resources', meta,
- sql.Column('resource_id', sql.String(length=36), primary_key=True),
- sql.Column('resource_type', sql.String(length=64), nullable=False),
- sql.Column('project_id', sql.String(length=36),
- nullable=False, index=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- recycle_resources.create()
-
-
-def downgrade(migrate_engine):
- raise NotImplementedError('downgrade not supported')
diff --git a/tricircle/db/migrate_repo/versions/010_add_resource_routings_bottom_id_index.py b/tricircle/db/migrate_repo/versions/010_add_resource_routings_bottom_id_index.py
deleted file mode 100644
index 6f27c1eb..00000000
--- a/tricircle/db/migrate_repo/versions/010_add_resource_routings_bottom_id_index.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from sqlalchemy import Index
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- resource_routings = Table('resource_routings', meta, autoload=True)
- index = Index('resource_routings0bottom_id',
- resource_routings.c.bottom_id)
- index.create()
diff --git a/tricircle/db/migrate_repo/versions/011_add_deleting_resources.py b/tricircle/db/migrate_repo/versions/011_add_deleting_resources.py
deleted file mode 100755
index 7a765ae5..00000000
--- a/tricircle/db/migrate_repo/versions/011_add_deleting_resources.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2017 SZZT Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- deleting_resources = sql.Table(
- 'deleting_resources', meta,
- sql.Column('resource_id', sql.String(length=127), nullable=False),
- sql.Column('resource_type', sql.String(length=64), nullable=False),
- sql.Column('deleted_at', sql.DateTime),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- deleting_resources.create()
-
-
-def downgrade(migrate_engine):
- raise NotImplementedError('downgrade not supported')
diff --git a/tricircle/db/migrate_repo/versions/__init__.py b/tricircle/db/migrate_repo/versions/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/db/migration_helpers.py b/tricircle/db/migration_helpers.py
deleted file mode 100644
index 7fdbd30f..00000000
--- a/tricircle/db/migration_helpers.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import os
-
-from oslo_db.sqlalchemy import migration
-
-from tricircle import db
-from tricircle.db import core
-from tricircle.db import migrate_repo
-
-
-def db_version():
- engine = core.get_engine()
- repo_abs_path = find_migrate_repo()
- init_version = migrate_repo.DB_INIT_VERSION
- return migration.db_version(engine, repo_abs_path, init_version)
-
-
-def find_migrate_repo(package=None, repo_name='migrate_repo'):
- package = package or db
- path = os.path.abspath(os.path.join(
- os.path.dirname(package.__file__), repo_name))
- # TODO(zhiyuan) handle path not valid exception
- return path
-
-
-def sync_repo(version):
- repo_abs_path = find_migrate_repo()
- init_version = migrate_repo.DB_INIT_VERSION
- engine = core.get_engine()
- migration.db_sync(engine, repo_abs_path, version, init_version)
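A hedged sketch of how these helpers fit together when preparing a database; the connection string is a placeholder and the option name comes from tricircle/db/core.py above:

    from oslo_config import cfg
    from tricircle.db import migration_helpers

    # placeholder connection; the option is registered in tricircle/db/core.py
    cfg.CONF.set_override('tricircle_db_connection',
                          'mysql+pymysql://user:pass@host/tricircle')
    migration_helpers.sync_repo(version=None)    # None upgrades to the latest revision
    print(migration_helpers.db_version())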
diff --git a/tricircle/db/models.py b/tricircle/db/models.py
deleted file mode 100644
index 6f550065..00000000
--- a/tricircle/db/models.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_db.sqlalchemy import models
-
-import sqlalchemy as sql
-from sqlalchemy.dialects import mysql
-from sqlalchemy import schema
-
-from tricircle.db import core
-
-
-def MediumText():
- return sql.Text().with_variant(mysql.MEDIUMTEXT(), 'mysql')
-
-
-# Pod Model
-class Pod(core.ModelBase, core.DictBase):
- __tablename__ = 'pods'
- attributes = ['pod_id', 'region_name', 'pod_az_name', 'dc_name', 'az_name']
-
- pod_id = sql.Column('pod_id', sql.String(length=36), primary_key=True)
- region_name = sql.Column('region_name', sql.String(length=255),
- unique=True, nullable=False)
- pod_az_name = sql.Column('pod_az_name', sql.String(length=255),
- nullable=True)
- dc_name = sql.Column('dc_name', sql.String(length=255), nullable=True)
- az_name = sql.Column('az_name', sql.String(length=255), nullable=False)
-
-
-class CachedEndpoint(core.ModelBase, core.DictBase):
- __tablename__ = 'cached_endpoints'
- attributes = ['service_id', 'pod_id', 'service_type', 'service_url']
-
- service_id = sql.Column('service_id', sql.String(length=64),
- primary_key=True)
- pod_id = sql.Column('pod_id', sql.String(length=36),
- sql.ForeignKey('pods.pod_id'),
- nullable=False)
- service_type = sql.Column('service_type', sql.String(length=64),
- nullable=False)
- service_url = sql.Column('service_url', sql.String(length=512),
- nullable=False)
-
-
-# Routing Model
-class ResourceRouting(core.ModelBase, core.DictBase, models.TimestampMixin):
- __tablename__ = 'resource_routings'
- __table_args__ = (
- schema.UniqueConstraint(
- 'top_id', 'pod_id', 'resource_type',
- name='resource_routings0top_id0pod_id0resource_type'),
- )
- attributes = ['id', 'top_id', 'bottom_id', 'pod_id', 'project_id',
- 'resource_type', 'created_at', 'updated_at']
-
- # sqlite doesn't support auto increment on big integers so we use big int
- # for everything but sqlite
- id = sql.Column(sql.BigInteger().with_variant(sql.Integer(), 'sqlite'),
- primary_key=True, autoincrement=True)
- top_id = sql.Column('top_id', sql.String(length=127), nullable=False)
- bottom_id = sql.Column('bottom_id', sql.String(length=36), index=True)
- pod_id = sql.Column('pod_id', sql.String(length=36),
- sql.ForeignKey('pods.pod_id'),
- nullable=False)
- project_id = sql.Column('project_id', sql.String(length=36))
- resource_type = sql.Column('resource_type', sql.String(length=64),
- nullable=False)
-
-
-class AsyncJob(core.ModelBase, core.DictBase):
- __tablename__ = 'async_jobs'
- __table_args__ = (
- schema.UniqueConstraint(
- 'type', 'status', 'resource_id', 'extra_id',
- name='async_jobs0type0status0resource_id0extra_id'),
- )
-
- attributes = ['id', 'project_id', 'type', 'timestamp', 'status',
- 'resource_id', 'extra_id']
-
- id = sql.Column('id', sql.String(length=36), primary_key=True)
- project_id = sql.Column('project_id', sql.String(length=36))
- type = sql.Column('type', sql.String(length=36))
- timestamp = sql.Column('timestamp', sql.TIMESTAMP,
- server_default=sql.text('CURRENT_TIMESTAMP'),
- index=True)
- status = sql.Column('status', sql.String(length=36))
- resource_id = sql.Column('resource_id', sql.String(length=127))
- extra_id = sql.Column('extra_id', sql.String(length=36))
-
-
-class AsyncJobLog(core.ModelBase, core.DictBase):
- __tablename__ = 'async_job_logs'
-
- attributes = ['id', 'project_id', 'resource_id', 'type', 'timestamp']
-
- id = sql.Column('id', sql.String(length=36), primary_key=True)
- project_id = sql.Column('project_id', sql.String(length=36))
- resource_id = sql.Column('resource_id', sql.String(length=127))
- type = sql.Column('type', sql.String(length=36))
- timestamp = sql.Column('timestamp', sql.TIMESTAMP,
- server_default=sql.text('CURRENT_TIMESTAMP'),
- index=True)
-
-
-class ShadowAgent(core.ModelBase, core.DictBase):
- __tablename__ = 'shadow_agents'
- __table_args__ = (
- schema.UniqueConstraint(
- 'host', 'type',
- name='host0type'),
- )
-
- attributes = ['id', 'pod_id', 'host', 'type', 'tunnel_ip']
-
- id = sql.Column('id', sql.String(length=36), primary_key=True)
- pod_id = sql.Column('pod_id', sql.String(length=36),
- sql.ForeignKey('pods.pod_id'),
- nullable=False)
- host = sql.Column('host', sql.String(length=255), nullable=False)
- type = sql.Column('type', sql.String(length=36), nullable=False)
- # length 48 to accommodate IPv6 addresses
- tunnel_ip = sql.Column('tunnel_ip', sql.String(length=48), nullable=False)
-
-
-class RecycleResources(core.ModelBase, core.DictBase):
- __tablename__ = 'recycle_resources'
-
- attributes = ['resource_id', 'resource_type', 'project_id']
-
- resource_id = sql.Column('resource_id',
- sql.String(length=36), primary_key=True)
- resource_type = sql.Column('resource_type',
- sql.String(length=64), nullable=False)
- project_id = sql.Column('project_id',
- sql.String(length=36), nullable=False, index=True)
-
-
-class DeletingResources(core.ModelBase, core.DictBase):
- __tablename__ = 'deleting_resources'
-
- __table_args__ = (
- schema.UniqueConstraint(
- 'resource_id', 'resource_type',
- name='deleting_resources0resource_id0resource_type'),
- )
-
- attributes = ['resource_id', 'resource_type', 'deleted_at']
-
- resource_id = sql.Column('resource_id', sql.String(length=127),
- nullable=False, primary_key=True)
-
- resource_type = sql.Column('resource_type', sql.String(length=64),
- nullable=False)
-
- deleted_at = sql.Column('deleted_at', sql.DateTime)
diff --git a/tricircle/db/opts.py b/tricircle/db/opts.py
deleted file mode 100644
index 3d156d1f..00000000
--- a/tricircle/db/opts.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import tricircle.db.core
-
-
-def list_opts():
- return [
- ('DEFAULT', tricircle.db.core.db_opts),
- ]
diff --git a/tricircle/network/__init__.py b/tricircle/network/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/network/central_fc_driver.py b/tricircle/network/central_fc_driver.py
deleted file mode 100644
index b3e15098..00000000
--- a/tricircle/network/central_fc_driver.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import helpers as log_helpers
-from oslo_log import log
-
-from networking_sfc.db import sfc_db
-from networking_sfc.services.flowclassifier.drivers import base as fc_driver
-
-from neutron_lib.db import model_query
-from neutron_lib.plugins import directory
-from neutronclient.common import exceptions as client_exceptions
-
-import tricircle.common.client as t_client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-from tricircle.common import xrpcapi
-import tricircle.db.api as db_api
-from tricircle.network import central_plugin
-import tricircle.network.exceptions as n_exceptions
-
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleFcDriver(fc_driver.FlowClassifierDriverBase):
-
- def __init__(self):
- self.xjob_handler = xrpcapi.XJobAPI()
- self.clients = {}
-
- def initialize(self):
- pass
-
- def _get_client(self, region_name):
- if region_name not in self.clients:
- self.clients[region_name] = t_client.Client(region_name)
- return self.clients[region_name]
-
- @log_helpers.log_method_call
- def create_flow_classifier(self, context):
- pass
-
- @log_helpers.log_method_call
- def update_flow_classifier(self, context):
- pass
-
- @log_helpers.log_method_call
- def delete_flow_classifier(self, context):
- t_ctx = t_context.get_context_from_neutron_context(
- context._plugin_context)
- flowclassifier_id = context.current['id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, flowclassifier_id, t_constants.RT_FLOW_CLASSIFIER)
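- # remove the flow classifier from every bottom pod it was mapped to;
- # a missing bottom resource is only logged, and the routing entry is
- # removed either way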
- for b_pod, b_classifier_id in mappings:
- b_region_name = b_pod['region_name']
- b_client = self._get_client(b_region_name)
- try:
- b_client.delete_flow_classifiers(t_ctx, b_classifier_id)
- except client_exceptions.NotFound:
- LOG.debug(('flow classifier: %(classifier_id)s not found, '
- 'region name: %(name)s'),
- {'classifier_id': flowclassifier_id,
- 'name': b_region_name})
- db_api.delete_mappings_by_bottom_id(t_ctx, b_classifier_id)
-
- def delete_flow_classifier_precommit(self, context):
- t_ctx = t_context.get_context_from_neutron_context(
- context._plugin_context)
- flowclassifier_id = context.current['id']
- db_api.create_recycle_resource(
- t_ctx, flowclassifier_id, t_constants.RT_FLOW_CLASSIFIER,
- t_ctx.project_id)
-
- def _get_chain_id_by_flowclassifier_id(
- self, context, fc_plugin, flowclassifier_id):
- chain_classifier_assoc = model_query.query_with_hooks(
- context, sfc_db.ChainClassifierAssoc).filter_by(
- flowclassifier_id=flowclassifier_id).first()
- if chain_classifier_assoc:
- return chain_classifier_assoc['portchain_id']
- return None
-
- def _get_net_id_by_portchain_id(self, context, portchain_id):
- sfc_plugin = directory.get_plugin('sfc')
- port_chain = sfc_plugin.get_port_chain(context, portchain_id)
- if not port_chain:
- raise n_exceptions.PortChainNotFound(portchain_id=portchain_id)
- port_pairs = sfc_plugin.get_port_pairs(
- context, {'portpairgroup_id': port_chain['port_pair_groups']})
- if not port_pairs:
- raise n_exceptions.PortPairsNotFoundForPortPairGroup(
- portpairgroup_id=port_chain['port_pair_groups'])
- core_plugin = directory.get_plugin()
- port = super(central_plugin.TricirclePlugin, core_plugin
- ).get_port(context, port_pairs[0]['ingress'])
- if not port:
- raise n_exceptions.PortNotFound(port_id=port_pairs[0]['ingress'])
- return port['network_id']
-
- def update_flow_classifier_precommit(self, context):
- plugin_context = context._plugin_context
- t_ctx = t_context.get_context_from_neutron_context(plugin_context)
- flowclassifier = context.current
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, flowclassifier['id'], t_constants.RT_FLOW_CLASSIFIER)
- if mappings:
- portchain_id = self._get_chain_id_by_flowclassifier_id(
- plugin_context, context._plugin, flowclassifier['id'])
- if not portchain_id:
- raise n_exceptions.PortChainNotFoundForFlowClassifier(
- flowclassifier_id=flowclassifier['id'])
- net_id = self._get_net_id_by_portchain_id(plugin_context,
- portchain_id)
- if not net_id:
- raise n_exceptions.NetNotFoundForPortChain(
- portchain_id=portchain_id)
- self.xjob_handler.sync_service_function_chain(
- t_ctx, flowclassifier['project_id'], portchain_id,
- net_id, t_constants.POD_NOT_SPECIFIED)
-
- @log_helpers.log_method_call
- def create_flow_classifier_precommit(self, context):
- pass
diff --git a/tricircle/network/central_plugin.py b/tricircle/network/central_plugin.py
deleted file mode 100644
index 1fe60dab..00000000
--- a/tricircle/network/central_plugin.py
+++ /dev/null
@@ -1,2134 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import copy
-import datetime
-import re
-import six
-
-from oslo_config import cfg
-from oslo_db.sqlalchemy import utils as sa_utils
-import oslo_log.helpers as log_helpers
-from oslo_log import log
-
-from neutron.conf.plugins.ml2 import config # noqa
-from neutron.db import agents_db
-from neutron.db.availability_zone import router as router_az
-from neutron.db import db_base_plugin_v2
-from neutron.db import external_net_db
-from neutron.db import extradhcpopt_db
-# NOTE(zhiyuan) though not used, this import cannot be removed because Router
-# relies on one table defined in l3_agentschedulers_db
-from neutron.db import l3_agentschedulers_db # noqa
-from neutron.db import l3_db
-from neutron.db import l3_dvr_db
-# import l3_hamode_db to load l3_ha option
-from neutron.db import l3_hamode_db # noqa
-from neutron.db import models_v2
-from neutron.db import portbindings_db
-from neutron.objects import ports as q_ports
-from neutron.objects.qos import policy as policy_object
-import neutron.objects.router as router_object
-from neutron.plugins.ml2 import managers as n_managers
-from neutron_lib.api.definitions import availability_zone as az_def
-from neutron_lib.api.definitions import external_net
-from neutron_lib.api.definitions import l3 as l3_apidef
-from neutron_lib.api.definitions import portbindings
-from neutron_lib.api.definitions import portbindings_extended as pb_ext
-from neutron_lib.api.definitions import provider_net
-from neutron_lib.api import validators
-from neutron_lib.api.validators import availability_zone as az_validator
-from neutron_lib.callbacks import events
-from neutron_lib.callbacks import exceptions as callbacks_exc
-from neutron_lib.callbacks import registry
-from neutron_lib.callbacks import resources
-import neutron_lib.callbacks.resources as attributes
-from neutron_lib import constants
-from neutron_lib.db import api as lib_db_api
-from neutron_lib.db import resource_extend
-from neutron_lib.db import utils as db_utils
-from neutron_lib import exceptions
-from neutron_lib.exceptions import availability_zone as az_exc
-from neutron_lib.plugins import directory
-import neutronclient.client as neutronclient
-import neutronclient.common.exceptions as q_cli_exceptions
-
-from sqlalchemy import sql
-
-import tricircle.common.client as t_client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exceptions
-from tricircle.common.i18n import _
-from tricircle.common import xrpcapi
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-import tricircle.network.exceptions as t_network_exc
-from tricircle.network import helper
-from tricircle.network import managers
-from tricircle.network import qos_driver
-from tricircle.network import security_groups
-from tricircle.network import utils as nt_utils
-
-tricircle_opts = [
- cfg.ListOpt('type_drivers',
- default=['vxlan,local'],
- help=_('List of network type driver entry points to be loaded '
- 'from the tricircle.network.type_drivers namespace.')),
- cfg.ListOpt('extension_drivers',
- default=[],
- help=_('List of network extension driver entry points to be '
- 'loaded from the neutron.ml2.extension_drivers '
- 'namespace.')),
- cfg.ListOpt('tenant_network_types',
- default=['vxlan,local'],
- help=_('Ordered list of network_types to allocate as tenant '
- 'networks. The default value "local" is useful for '
- 'single pod connectivity.')),
- cfg.ListOpt('network_vlan_ranges',
- default=[],
- help=_('List of <physical_network>:<vlan_min>:<vlan_max> or '
- '<physical_network> specifying physical_network names '
- 'usable for VLAN provider and tenant networks, as '
- 'well as ranges of VLAN tags on each available for '
- 'allocation to tenant networks.')),
- cfg.ListOpt('vni_ranges',
- default=[],
- help=_('Comma-separated list of <vni_min>:<vni_max> tuples '
- 'enumerating ranges of VXLAN VNI IDs that are '
- 'available for tenant network allocation.')),
- cfg.ListOpt('flat_networks',
- default='*',
- help=_("List of physical_network names with which flat "
- "networks can be created. Use default '*' to allow "
- "flat networks with arbitrary physical_network names. "
- "Use an empty list to disable flat networks.")),
- cfg.StrOpt('bridge_network_type',
- default='vxlan',
- help=_('Type of the L3 bridge network; this type should be enabled '
- 'in tenant_network_types and must not be the local type.')),
- cfg.StrOpt('default_region_for_external_network',
- default='RegionOne',
- help=_('Default Region where the external network belongs'
- ' to.')),
- cfg.BoolOpt('enable_api_gateway',
- default=True,
- help=_('Whether the Nova API gateway is enabled')),
- cfg.BoolOpt('enable_l3_route_network',
- default=False,
- help=_('Whether to use the new L3 networking model. When it is '
- 'set to True, Tricircle will automatically create a '
- 'bottom external network if the name of the segment '
- 'matches newL3-<region_name>-<physical_network>.'))
-]
-
-tricircle_opt_group = cfg.OptGroup('tricircle')
-cfg.CONF.register_group(tricircle_opt_group)
-cfg.CONF.register_opts(tricircle_opts, group=tricircle_opt_group)
-
-LOG = log.getLogger(__name__)
-
-NON_VM_PORT_TYPES = [constants.DEVICE_OWNER_ROUTER_INTF,
- constants.DEVICE_OWNER_ROUTER_GW,
- constants.DEVICE_OWNER_DHCP]
-
-
-class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
- agents_db.AgentDbMixin,
- security_groups.TricircleSecurityGroupMixin,
- external_net_db.External_net_db_mixin,
- portbindings_db.PortBindingMixin,
- extradhcpopt_db.ExtraDhcpOptMixin,
- l3_db.L3_NAT_dbonly_mixin,
- router_az.RouterAvailabilityZoneMixin):
-
- __native_bulk_support = True
- __native_pagination_support = True
- __native_sorting_support = True
-
- # NOTE(zhiyuan) we don't support the "agent" and "availability_zone"
- # extensions and have no need to, but "network_availability_zone"
- # depends on these two extensions so we need to register them
- supported_extension_aliases = ["agent",
- "quotas",
- "extra_dhcp_opt",
- "binding",
- "security-group",
- "external-net",
- "availability_zone",
- "provider",
- "network_availability_zone",
- "dvr",
- "router",
- "router_availability_zone",
- "allowed-address-pairs"]
-
- def __new__(cls, *args, **kwargs):
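- # subscribe once at plugin instantiation so the "distributed" flag is
- # filled in during the PRECOMMIT_CREATE phase of router creation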
- n = super(TricirclePlugin, cls).__new__(cls, *args, **kwargs)
- registry.subscribe(n._set_distributed_flag,
- resources.ROUTER, events.PRECOMMIT_CREATE)
- return n
-
- def __init__(self):
- super(TricirclePlugin, self).__init__()
- LOG.info("Starting Tricircle Neutron Plugin")
- self.clients = {'top': t_client.Client()}
- self.xjob_handler = xrpcapi.XJobAPI()
- self._setup_rpc()
- self.type_manager = managers.TricircleTypeManager()
- self.extension_manager = n_managers.ExtensionManager()
- self.extension_manager.initialize()
- self.type_manager.initialize()
- self.helper = helper.NetworkHelper(self)
- neutronclient.USER_AGENT = t_constants.CENTRAL
- qos_driver.register()
-
- def _setup_rpc(self):
- self.endpoints = []
-
- def _get_client(self, region_name):
- if region_name not in self.clients:
- self.clients[region_name] = t_client.Client(region_name)
- return self.clients[region_name]
-
- @log_helpers.log_method_call
- def start_rpc_listeners(self):
- return []
- # NOTE(zhiyuan) use later
- # self.topic = topics.PLUGIN
- # self.conn = n_rpc.create_connection(new=True)
- # self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
- # return self.conn.consume_in_threads()
-
- def _set_distributed_flag(self, resource, event, trigger, context,
- router, router_db, **kwargs):
- """Event handler to set distributed flag on creation."""
- dist = l3_dvr_db.is_distributed_router(router)
- router['distributed'] = dist
- self.set_extra_attr_value(context, router_db, 'distributed', dist)
-
- def validate_availability_zones(self, context, resource_type,
- availability_zones):
- self._validate_availability_zones(context, availability_zones)
-
- def get_router_availability_zones(self, router_db):
- return router_db.get(az_def.AZ_HINTS)
-
- @staticmethod
- def _validate_availability_zones(context, az_list):
- if not az_list:
- return
- t_ctx = t_context.get_context_from_neutron_context(context)
- with context.session.begin(subtransactions=True):
- pods = core.query_resource(t_ctx, models.Pod, [], [])
- az_set = set(az_list)
-
- known_az_set = set([pod['az_name'] for pod in pods])
- known_az_set = known_az_set | set(
- [pod['region_name'] for pod in pods])
-
- diff = az_set - known_az_set
- if diff:
- raise az_exc.AvailabilityZoneNotFound(
- availability_zone=diff.pop())
-
- @staticmethod
- @resource_extend.extends([attributes.NETWORKS])
- def _extend_availability_zone(net_res, net_db):
- net_res[az_def.AZ_HINTS] = az_validator.convert_az_string_to_list(
- net_db[az_def.AZ_HINTS])
-
- @staticmethod
- def _ensure_az_set_for_external_network(context, req_data):
- external = req_data.get(external_net.EXTERNAL)
- external_set = validators.is_attr_set(external)
- if not external_set or not external:
- return False
- if az_def.AZ_HINTS in req_data and req_data[az_def.AZ_HINTS]:
- return True
- # if no az_hints are specified, we will use default region_name
- req_data[az_def.AZ_HINTS] = \
- [cfg.CONF.tricircle.default_region_for_external_network]
- return True
-
- @staticmethod
- def _fill_provider_info(from_net, to_net):
- provider_attrs = provider_net.ATTRIBUTES
- for provider_attr in provider_attrs:
- if validators.is_attr_set(from_net.get(provider_attr)):
- to_net[provider_attr] = from_net[provider_attr]
-
- def _create_bottom_external_subnet(self, context, subnet, net, top_id):
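- # the external network lives in the bottom pod named by its first
- # az_hint, so the subnet is created directly in that pod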
- t_ctx = t_context.get_context_from_neutron_context(context)
- region_name = net[az_def.AZ_HINTS][0]
- pod = db_api.get_pod_by_name(t_ctx, region_name)
- b_net_id = db_api.get_bottom_id_by_top_id_region_name(
- t_ctx, net['id'], region_name, t_constants.RT_NETWORK)
- body = {
- 'subnet': {
- 'name': top_id,
- 'network_id': b_net_id,
- 'tenant_id': subnet['tenant_id']
- }
- }
- attrs = ('ip_version', 'cidr', 'gateway_ip', 'allocation_pools',
- 'enable_dhcp')
- for attr in attrs:
- if validators.is_attr_set(subnet.get(attr)):
- body['subnet'][attr] = subnet[attr]
- self.helper.prepare_bottom_element(
- t_ctx, subnet['tenant_id'], pod, {'id': top_id},
- t_constants.RT_SUBNET, body)
-
- @property
- def _core_plugin(self):
- return self
-
- def create_network(self, context, network):
- net_data = network[attributes.NETWORK]
- tenant_id = net_data['tenant_id']
- is_external = self._ensure_az_set_for_external_network(context,
- net_data)
- if az_def.AZ_HINTS in net_data:
- self._validate_availability_zones(context,
- net_data[az_def.AZ_HINTS])
- with lib_db_api.CONTEXT_WRITER.using(context):
- net_db = self.create_network_db(context, network)
- res = self._make_network_dict(net_db, process_extensions=False,
- context=context)
- self.extension_manager.process_create_network(context, net_data,
- res)
- self._process_l3_create(context, res, net_data)
- net_data['id'] = res['id']
- self.type_manager.create_network_segments(context, net_data,
- tenant_id)
- self.type_manager.extend_network_dict_provider(context, res)
- if az_def.AZ_HINTS in net_data:
- az_hints = az_validator.convert_az_list_to_string(
- net_data[az_def.AZ_HINTS])
- net_db[az_def.AZ_HINTS] = az_hints
- res[az_def.AZ_HINTS] = net_data[az_def.AZ_HINTS]
- # put inside a session so that when a bottom operation fails the db
- # can roll back
- if is_external:
- self._fill_provider_info(res, net_data)
- try:
- self.helper.prepare_bottom_external_network(
- context, net_data, res['id'])
- except q_cli_exceptions.Conflict as e:
- pattern = re.compile('Physical network (.*) is in use')
- match = pattern.search(e.message)
- if not match:
- raise
- else:
- raise exceptions.FlatNetworkInUse(
- physical_network=match.groups()[0])
- # process_extensions is set to False in _make_network_dict, so the
- # "tags" field will not be set; we manually set it here so the
- # openstack client can correctly read this field
- res['tags'] = []
- return res
-
- def delete_network(self, context, network_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- dict_para = {'resource_id': network_id, 'resource_type': 'network'}
- nt_utils.check_resource_not_in_deleting(context, dict_para)
- nt_utils.check_network_not_in_use(self, context, t_ctx, network_id)
- dict_para['deleted_at'] = datetime.datetime.utcnow()
-
- try:
- with t_ctx.session.begin():
- core.create_resource(
- t_ctx, models.DeletingResources, dict_para)
- for pod, bottom_network_id in (
- self.helper.get_real_shadow_resource_iterator(
- t_ctx, t_constants.RT_NETWORK, network_id)):
- self._get_client(pod['region_name']).delete_networks(
- t_ctx, bottom_network_id)
-
- # we do not specify resource_type when deleting routing entries
- # so if both "network" and "shadow_network" type entries exist
- # in one pod(this is possible for cross-pod network), we delete
- # them at the same time
- with t_ctx.session.begin():
- core.delete_resources(
- t_ctx, models.ResourceRouting,
- filters=[{'key': 'top_id', 'comparator': 'eq',
- 'value': network_id},
- {'key': 'pod_id', 'comparator': 'eq',
- 'value': pod['pod_id']}])
-
- with t_ctx.session.begin():
- core.delete_resources(t_ctx, models.ResourceRouting,
- filters=[{'key': 'top_id',
- 'comparator': 'eq',
- 'value': network_id}])
-
- subnets = self._get_subnets_by_network(context, network_id)
- for subnet in subnets:
- self.delete_subnet(context, subnet['id'])
- with context.session.begin(subtransactions=True):
- self.type_manager.release_network_segments(context, network_id)
- super(TricirclePlugin, self).delete_network(
- context, network_id)
- except Exception:
- raise
- finally:
- with t_ctx.session.begin():
- core.delete_resources(t_ctx, models.DeletingResources,
- filters=[{'key': 'resource_id',
- 'comparator': 'eq',
- 'value': network_id}])
-
- def _raise_if_updates_external_attribute(self, attrs):
- """Raise exception if external attributes are present.
-
- This method is used for plugins that do not support
- updating external attributes.
- """
- if validators.is_attr_set(attrs.get(external_net.EXTERNAL)):
- msg = _("Plugin does not support updating network's "
- "router:external attribute")
- raise exceptions.InvalidInput(error_message=msg)
-
- def _raise_if_updates_provider_attributes(self, attrs):
-
- if any(validators.is_attr_set(attrs.get(a))
- for a in provider_net.ATTRIBUTES):
- msg = _("Plugin does not support updating provider attributes")
- raise exceptions.InvalidInput(error_message=msg)
-
- def update_network(self, context, network_id, network):
- """update top network
-
- update top network and trigger asynchronous job via RPC to update
- bottom network
-
- :param context: neutron context
- :param network_id: top network id
- :param network: updated body
- :return: updated network
- """
- net_data = network[attributes.NETWORK]
- self._raise_if_updates_provider_attributes(net_data)
- self._raise_if_updates_external_attribute(net_data)
-
- with context.session.begin():
- original_network = super(TricirclePlugin, self).get_network(
- context, network_id)
- policy = policy_object.QosPolicy.get_network_policy(
- context, network_id)
- if policy:
- original_network['qos_policy_id'] = policy['id']
- else:
- original_network['qos_policy_id'] = None
-
- updated_network = super(
- TricirclePlugin, self).update_network(
- context, network_id, network)
-
- self.extension_manager.process_update_network(
- context, net_data, updated_network)
-
- self.type_manager.extend_network_dict_provider(context,
- updated_network)
-
- updated_network = self.get_network(context, network_id)
-
- if net_data.get('qos_policy_id', None):
- updated_network['qos_policy_id'] = net_data['qos_policy_id']
-
- if not updated_network.get('qos_policy_id', None):
- updated_network['qos_policy_id'] = None
-
- need_network_update_notify = (
- 'qos_policy_id' in net_data and
- original_network['qos_policy_id'] !=
- updated_network['qos_policy_id'])
-
- t_ctx = t_context.get_context_from_neutron_context(context)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, network_id, t_constants.RT_NETWORK)
- if mappings:
- self.xjob_handler.update_network(
- t_ctx, updated_network['tenant_id'], network_id,
- t_constants.POD_NOT_SPECIFIED)
-
- if need_network_update_notify and \
- updated_network['qos_policy_id'] and mappings:
- t_policy_id = updated_network['qos_policy_id']
- self.xjob_handler.create_qos_policy(
- t_ctx, updated_network['tenant_id'], t_policy_id,
- t_constants.POD_NOT_SPECIFIED, t_constants.RT_NETWORK,
- updated_network['id'])
-
- return updated_network
-
- def _convert_az2region_for_nets(self, context, nets):
- for net in nets:
- self._convert_az2region_for_net(context, net)
-
- def _convert_az2region_for_net(self, context, net):
- az_hints = net.get(az_def.AZ_HINTS)
- if context.is_admin and az_hints:
- t_ctx = t_context.get_context_from_neutron_context(context)
- net[az_def.AZ_HINTS] = self._convert_az2region(t_ctx, az_hints)
-
- def _convert_az2region(self, t_ctx, az_hints):
- return self.helper.convert_az2region(t_ctx, az_hints)
-
- def _get_network_qos_info(self, context, net_id):
- policy = policy_object.QosPolicy.get_network_policy(
- context.elevated(), net_id)
- return policy['id'] if policy else None
-
- def get_network(self, context, network_id, fields=None):
-
- dict_para = {'resource_id': network_id, 'resource_type': 'network'}
- try:
- nt_utils.check_resource_not_in_deleting(context, dict_para)
- except t_exceptions.ResourceIsInDeleting:
- return network_id
- except t_exceptions.ResourceNotFound:
- raise exceptions.NotFound()
-
- net = super(TricirclePlugin, self).get_network(context, network_id,
- fields)
-
- if not fields or 'id' in fields:
- self.type_manager.extend_network_dict_provider(context, net)
-
- self._convert_az2region_for_net(context, net)
-
- net['qos_policy_id'] = \
- self._get_network_qos_info(context.elevated(), net['id'])
-
- return net
-
- def get_networks(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None, page_reverse=False):
- nets = super(TricirclePlugin,
- self).get_networks(context, filters, fields, sorts,
- limit, marker, page_reverse)
- if not fields or 'id' in fields:
- self.type_manager.extend_networks_dict_provider(context, nets)
-
- self._convert_az2region_for_nets(context, nets)
-
- for net in nets:
- net['qos_policy_id'] = \
- self._get_network_qos_info(context.elevated(), net['id'])
-
- return nets
-
- def create_subnet(self, context, subnet):
- subnet_data = subnet['subnet']
- network = self.get_network(context, subnet_data['network_id'])
-
- is_external = network.get(external_net.EXTERNAL)
- with context.session.begin(subtransactions=True):
- res = super(TricirclePlugin, self).create_subnet(context, subnet)
- # put inside a session so that when a bottom operation fails the db
- # can roll back
- if is_external:
- self._create_bottom_external_subnet(
- context, res, network, res['id'])
- snat_port_id = None
- try:
- t_ctx = t_context.get_context_from_neutron_context(context)
- if not subnet_data['name'].startswith(
- t_constants.bridge_subnet_name[:-3]) and not is_external:
- # do not reserve snat port for bridge and external subnet
- snat_port_id = self.helper.prepare_top_snat_port(
- t_ctx, context, res['tenant_id'], network['id'], res['id'])
- self._create_port_binding(context, snat_port_id)
- if res['enable_dhcp']:
- dhcp_port_id = self.helper.prepare_top_dhcp_port(
- t_ctx, context, res['tenant_id'], network['id'], res['id'])
- self._create_port_binding(context, dhcp_port_id)
- except Exception:
- if snat_port_id:
- super(TricirclePlugin, self).delete_port(context, snat_port_id)
- q_ports.PortBinding.delete_objects(context,
- port_id=snat_port_id)
- self.delete_subnet(context, res['id'])
- raise
- return res
-
- def _delete_pre_created_port(self, t_ctx, q_ctx, port_name):
- ports = super(TricirclePlugin, self).get_ports(
- q_ctx, {'name': [port_name]})
- if ports:
- super(TricirclePlugin, self).delete_port(q_ctx, ports[0]['id'])
- q_ports.PortBinding.delete_objects(q_ctx, port_id=ports[0]['id'])
- db_api.delete_pre_created_resource_mapping(t_ctx, port_name)
-
- def delete_subnet(self, context, subnet_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- try:
- for pod, bottom_subnet_id in (
- self.helper.get_real_shadow_resource_iterator(
- t_ctx, t_constants.RT_SUBNET, subnet_id)):
- region_name = pod['region_name']
- b_client = self._get_client(region_name)
- b_client.delete_subnets(
- t_ctx, bottom_subnet_id)
- interface_name = t_constants.interface_port_name % (
- region_name, subnet_id)
- self._delete_pre_created_port(t_ctx, context, interface_name)
- # we do not specify resource_type when deleting routing entries
- # so if both "subnet" and "shadow_subnet" type entries exist in
- # one pod(this is possible for cross-pod network), we delete
- # them at the same time
- with t_ctx.session.begin():
- core.delete_resources(
- t_ctx, models.ResourceRouting,
- filters=[{'key': 'top_id', 'comparator': 'eq',
- 'value': subnet_id},
- {'key': 'pod_id', 'comparator': 'eq',
- 'value': pod['pod_id']}])
- except Exception:
- raise
-
- dhcp_port_name = t_constants.dhcp_port_name % subnet_id
- self._delete_pre_created_port(t_ctx, context, dhcp_port_name)
- snat_port_name = t_constants.snat_port_name % subnet_id
- self._delete_pre_created_port(t_ctx, context, snat_port_name)
- super(TricirclePlugin, self).delete_subnet(context, subnet_id)
-
- def update_subnet(self, context, subnet_id, subnet):
- """update top subnet
-
- update top subnet and trigger asynchronous job via RPC to update
- bottom subnet.
- :param context: neutron context
- :param subnet_id: top subnet id
- :param subnet: updated subnet body
- :return: updated subnet
- """
- with context.session.begin():
- subnet_data = subnet[attributes.SUBNET]
- t_ctx = t_context.get_context_from_neutron_context(context)
- # update top subnet
- result = super(TricirclePlugin, self).update_subnet(
- context, subnet_id, subnet)
- # prepare top dhcp port if user enables dhcp,
- # the top pre-created dhcp port will not be deleted even
- # "enable_dhcp" is updated from True to False
- enable_dhcp = subnet_data.get('enable_dhcp', False)
- if enable_dhcp:
- subnet = super(TricirclePlugin, self).get_subnet(
- context, subnet_id)
- self.helper.prepare_top_dhcp_port(t_ctx, context,
- t_ctx.project_id,
- subnet['network_id'],
- subnet_id)
- # update bottom pod subnet if exist
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, subnet_id, t_constants.RT_SUBNET)
- if mappings:
- self.xjob_handler.update_subnet(t_ctx, result['tenant_id'],
- subnet_id,
- t_constants.POD_NOT_SPECIFIED)
- return result
-
- def _create_port_binding(self, context, port_id):
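- # pre-create an unbound PortBinding record so the port exposes binding
- # attributes before any host actually binds it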
- port_binding = q_ports.PortBinding(context)
- port_binding.unique_keys.append(['port_id'])
- port_binding.port_id = port_id
- port_binding.host = ''
- port_binding.profile = {}
- port_binding.vif_type = portbindings.VIF_TYPE_UNBOUND
- port_binding.vif_details = {}
- port_binding.vnic_type = portbindings.VNIC_NORMAL
- port_binding.status = 'ACTIVE'
- port_binding.create()
-
- def create_port(self, context, port):
- port_body = port['port']
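- # a port whose device_id equals the special interface port device id
- # is a pre-created gateway interface port; its name encodes the region
- # and subnet it belongs to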
- if port_body['device_id'] == t_constants.interface_port_device_id:
- _, region_name, subnet_id = port_body['name'].split('_')
- gateway_port_body = self.helper.get_create_interface_body(
- port_body['tenant_id'], port_body['network_id'], region_name,
- subnet_id)
- t_ctx = t_context.get_context_from_neutron_context(context)
- pod = db_api.get_pod_by_name(t_ctx, region_name)
- _, t_gateway_id = self.helper.prepare_top_element(
- t_ctx, context, port_body['tenant_id'], pod,
- {'id': port_body['name']}, t_constants.RT_PORT,
- gateway_port_body)
- self._create_port_binding(context, t_gateway_id)
- return super(TricirclePlugin, self).get_port(context, t_gateway_id)
- db_port = super(TricirclePlugin, self).create_port_db(context, port)
- self._ensure_default_security_group_on_port(context, port)
- sgids = self._get_security_groups_on_port(context, port)
- result = self._make_port_dict(db_port)
- self.extension_manager.process_create_port(context, port_body, result)
- self._process_port_create_security_group(context, result, sgids)
- self._create_port_binding(context, db_port.id)
- return result
-
- def _check_mac_update_allowed(self, orig_port, port):
- unplugged_types = (portbindings.VIF_TYPE_BINDING_FAILED,
- portbindings.VIF_TYPE_UNBOUND)
- new_mac = port.get('mac_address')
- mac_change = (new_mac is not None and
- orig_port['mac_address'] != new_mac)
-
- if mac_change and (
- orig_port[portbindings.VIF_TYPE] not in unplugged_types):
- raise exceptions.PortBound(
- port_id=orig_port['id'],
- vif_type=orig_port[portbindings.VIF_TYPE],
- old_mac=orig_port['mac_address'],
- new_mac=port['mac_address'])
-
- def _filter_unsupported_attrs(self, port_data):
- unsupported_attrs = ['fixed_ips', 'qos_policy']
- remove_keys = [key for key in port_data.keys() if (
- key in unsupported_attrs)]
- for key in remove_keys:
- port_data.pop(key)
-
- def _log_update_port_sensitive_attrs(self, port_id, port):
- sensitive_attrs = ['device_id', 'device_owner', portbindings.VNIC_TYPE,
- portbindings.PROFILE, portbindings.HOST_ID]
- request_body = port['port']
- updated_sens_attrs = []
-
- for key in request_body.keys():
- if key in sensitive_attrs:
- updated_sens_attrs.append('%s = %s' % (key, request_body[key]))
-
- warning_attrs = ', '.join(updated_sens_attrs)
- LOG.warning('update port: %(port_id)s , %(warning_attrs)s',
- {'port_id': port_id, 'warning_attrs': warning_attrs})
-
- def _handle_bottom_security_group(self, t_ctx, top_sg, bottom_pod):
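- # for every security group referenced by the port, look it up in the
- # bottom pod and create the top-to-bottom routing entry if it is
- # missing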
- if top_sg:
- b_region_name = bottom_pod['region_name']
- for sg_id in top_sg:
- b_client = self._get_client(region_name=b_region_name)
- b_client.get_security_groups(t_ctx, sg_id)
- if db_api.get_bottom_id_by_top_id_region_name(
- t_ctx, sg_id, b_region_name, t_constants.RT_SG):
- continue
- db_api.create_resource_mapping(
- t_ctx, sg_id, sg_id, bottom_pod['pod_id'], t_ctx.tenant,
- t_constants.RT_SG)
-
- def _create_mapping_for_vm_port(self, t_ctx, port_body, pod):
- entries = self.helper.extract_resource_routing_entries(port_body)
- self.helper.ensure_resource_mapping(t_ctx, port_body['project_id'],
- pod, entries)
-
- def _process_trunk_port(self, ctx, t_ctx, port_body, pod, profile_dict):
- trunk_plugin = directory.get_plugin('trunk')
- trunk_details = port_body.get('trunk_details')
- if trunk_plugin and trunk_details:
- t_trunk_id = trunk_details['trunk_id']
- b_trunk_id = profile_dict.get(
- t_constants.PROFILE_LOCAL_TRUNK_ID)
- entries = [(t_trunk_id, b_trunk_id, t_constants.RT_TRUNK)]
- trunk_plugin.update_subports_device_id(
- ctx, trunk_details, t_trunk_id,
- t_constants.DEVICE_OWNER_SUBPORT)
- t_trunk = trunk_plugin.get_trunk(ctx, t_trunk_id)
- self.helper.ensure_resource_mapping(t_ctx, t_trunk['project_id'],
- pod, entries)
- if trunk_details['sub_ports']:
- self.xjob_handler.sync_trunk(t_ctx, t_trunk['project_id'],
- t_trunk_id, pod['pod_id'])
-
- def _trigger_router_xjob_for_vm_port(self, context, port_body, pod):
- interfaces = super(TricirclePlugin, self).get_ports(
- context,
- {'network_id': [port_body['network_id']],
- 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]},
- fields=['device_id'])
- router_ids = [
- inf['device_id'] for inf in interfaces if inf['device_id']]
- if router_ids:
- # the request may come from a service, so we use an admin context
- # to run the xjob
- LOG.debug('Update port: network %s has been attached to the '
- 'following routers: %s, xjob triggered',
- port_body['network_id'], router_ids)
- admin_context = t_context.get_admin_context()
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- for router_id in router_ids:
- router = self.get_router(context, router_id)
- if not self.helper.is_local_router(t_ctx, router):
- # for local router, job will be triggered after router
- # interface attachment.
- self.xjob_handler.setup_bottom_router(
- admin_context, router['tenant_id'],
- port_body['network_id'], router_id, pod['pod_id'])
- # network will be attached to only one non-local router,
- # so we break here
- break
- else:
- LOG.debug('Update port: no interfaces found, xjob not '
- 'triggered')
-
- def _delete_bottom_unbound_port(self, t_ctx, port_id, profile_region):
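- # once the bottom port is no longer attached to any device, drop its
- # routing entry and reset the port copies in the other regions the
- # network spans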
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, port_id, t_constants.RT_PORT)
- if mappings:
- region_name = mappings[0][0]['region_name']
- bottom_port_id = mappings[0][1]
- bottom_port = self._get_client(region_name).get_ports(
- t_ctx, bottom_port_id)
- if bottom_port['device_id'] in ('', None) and \
- (not bottom_port['device_owner'].startswith(
- 'compute:shadow')):
- db_api.delete_mappings_by_bottom_id(t_ctx, bottom_port['id'])
-
- nw_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, bottom_port['network_id'],
- t_constants.RT_NETWORK)
- for nw_map in nw_mappings:
- region_name = nw_map[0]['region_name']
- if region_name != profile_region:
- self._get_client(region_name).update_ports(
- t_ctx, port_id, {'port': {
- 'device_id': '',
- 'device_owner': '',
- portbindings.HOST_ID: None,
- 'name': bottom_port['name']
- }})
-
- def _process_port_binding(self, context, port_id, port):
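- # persist the binding details reported via binding:profile into the
- # top port's PortBinding object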
- profile = port['port'].get('binding:profile')
- if profile is not None and \
- set([portbindings.VIF_TYPE, portbindings.HOST_ID,
- portbindings.VIF_DETAILS, portbindings.VNIC_TYPE]
- ).issubset(profile.keys()):
- q_ports.PortBinding.update_object(
- context,
- {
- pb_ext.VIF_TYPE: profile[portbindings.VIF_TYPE],
- pb_ext.HOST: profile[portbindings.HOST_ID],
- pb_ext.VIF_DETAILS: profile[portbindings.VIF_DETAILS],
- pb_ext.VNIC_TYPE: profile[portbindings.VNIC_TYPE]
- },
- port_id=port_id
- )
-
- def update_port(self, context, port_id, port):
- t_ctx = t_context.get_context_from_neutron_context(context)
- top_port = super(TricirclePlugin, self).get_port(context, port_id)
- updated_port = None
- self._process_port_binding(context, port_id, port)
- # be careful: l3_db calls update_port to update the device_id of a
- # router interface. we cannot directly update the bottom port in that
- # case, otherwise attaching the bottom port to the bottom router will
- # fail because its device_id is not empty
- if t_constants.PROFILE_REGION in port['port'].get(
- 'binding:profile', {}):
- profile_dict = port['port']['binding:profile']
- region_name = profile_dict[t_constants.PROFILE_REGION]
- device_name = profile_dict[t_constants.PROFILE_DEVICE]
- port_status = profile_dict.get(t_constants.PROFILE_STATUS, '')
- if port_status == 'DOWN' and device_name == '':
- self._delete_bottom_unbound_port(t_ctx, port_id, region_name)
- # this update request comes from local Neutron
- updated_port = super(TricirclePlugin, self).update_port(context,
- port_id,
- port)
-
- t_ctx = t_context.get_context_from_neutron_context(context)
- pod = db_api.get_pod_by_name(t_ctx, region_name)
-
- net = self.get_network(context, updated_port['network_id'])
- is_vxlan_network = (
- net[provider_net.NETWORK_TYPE] == t_constants.NT_VxLAN)
- if is_vxlan_network:
- # if a local type network happens to be a vxlan network, local
- # plugin will still send agent info, so we double check here
- self.helper.create_shadow_agent_if_needed(t_ctx,
- profile_dict, pod)
- if device_name.startswith('compute:'):
- # local plugin will also update region information for bridge
- # gateway port, but we only need to create resource routing
- # entries, trigger xjob and configure security group rules for
- # instance port
- self._create_mapping_for_vm_port(t_ctx, updated_port, pod)
- self._process_trunk_port(context, t_ctx,
- updated_port, pod, profile_dict)
- # only trigger setup_bottom_router job
- self._trigger_router_xjob_for_vm_port(context, updated_port,
- pod)
- self.xjob_handler.configure_security_group_rules(
- t_ctx, updated_port['tenant_id'])
-
- if is_vxlan_network and (
- cfg.CONF.client.cross_pod_vxlan_mode in (
- t_constants.NM_P2P, t_constants.NM_L2GW)):
- self.xjob_handler.setup_shadow_ports(
- t_ctx, updated_port['tenant_id'], pod['pod_id'],
- updated_port['network_id'])
-
- network_binding_policy = \
- policy_object.QosPolicy.get_network_policy(
- context, updated_port['network_id'])
-
- port_binding_policy = policy_object.QosPolicy.get_port_policy(
- context, port_id)
-
- if network_binding_policy:
- t_policy_id = network_binding_policy['id']
- self.xjob_handler.create_qos_policy(
- t_ctx, t_ctx.project_id, t_policy_id, pod['pod_id'],
- t_constants.RT_NETWORK, updated_port['network_id'])
-
- if port_binding_policy:
- t_policy_id = port_binding_policy['id']
- self.xjob_handler.create_qos_policy(
- t_ctx, t_ctx.project_id, t_policy_id, pod['pod_id'],
- t_constants.RT_PORT, port_id)
-
- # for vm port or port with empty device_owner, update top port and
- # bottom port
- elif top_port.get('device_owner') not in NON_VM_PORT_TYPES:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, port_id, t_constants.RT_PORT)
- request_body = port[attributes.PORT]
- if mappings:
- with context.session.begin():
- original_qos_policy_id = \
- self._get_port_qos_info(context, port_id)
-
- b_pod, b_port_id = mappings[0]
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
- b_port = b_client.get_ports(context, b_port_id)
- self._check_mac_update_allowed(b_port, request_body)
- self._filter_unsupported_attrs(request_body)
- request_body = port[attributes.PORT]
- if request_body.get('security_groups', None):
- self._handle_bottom_security_group(
- t_ctx, request_body['security_groups'], b_pod)
-
- updated_port = super(TricirclePlugin, self).update_port(
- context, port_id, port)
- self.extension_manager.process_update_port(
- context, request_body, updated_port)
- updated_port = \
- super(TricirclePlugin, self).get_port(context, port_id)
- # name is not allowed to be updated, because it is used by
- # lock_handle to retrieve bottom/local resources that have
- # been created but not registered in the resource routing
- # table
- request_body.pop('name', None)
-
- request_body_policy_id = \
- request_body.get('qos_policy_id', None)
- if request_body_policy_id:
- request_body.pop('qos_policy_id')
-
- if request_body:
- try:
- b_client.update_ports(t_ctx, b_port_id, port)
- except q_cli_exceptions.NotFound:
- LOG.error(
- ('port: %(port_id)s not found, '
- 'region name: %(name)s'),
- {'port_id': b_port_id, 'name': b_region_name})
-
- if request_body.get('security_groups', None):
- self.xjob_handler.configure_security_group_rules(
- t_ctx, updated_port['tenant_id'])
-
- updated_port['qos_policy_id'] = request_body_policy_id
- if request_body_policy_id and \
- original_qos_policy_id != \
- request_body_policy_id:
- t_policy_id = updated_port['qos_policy_id']
- self.xjob_handler.create_qos_policy(
- t_ctx, t_ctx.project_id,
- t_policy_id, b_pod['pod_id'],
- t_constants.RT_PORT, b_port_id)
- else:
- self._filter_unsupported_attrs(request_body)
- updated_port = super(TricirclePlugin, self).update_port(
- context, port_id, port)
- self.extension_manager.process_update_port(
- context, request_body, updated_port)
- else:
- # for router interface, router gw, dhcp port, not directly
- # update bottom port
- updated_port = super(TricirclePlugin, self).update_port(
- context, port_id, port)
- self._log_update_port_sensitive_attrs(port_id, port)
- return updated_port
-
- def _pre_delete_port(self, context, port_id, port_check):
- """Do some preliminary operations before deleting the port."""
- LOG.debug("Deleting port %s", port_id)
- try:
- # notify interested parties of imminent port deletion;
- # a failure here prevents the operation from happening
- registry.publish(
- resources.PORT, events.BEFORE_DELETE, self,
- payload=events.DBEventPayload(
- context, metadata={'port_check': port_check},
- resource_id=port_id))
- except callbacks_exc.CallbackFailure as e:
- # NOTE(xiulin): preserve old check's behavior
- if len(e.errors) == 1:
- raise e.errors[0].error
- raise exceptions.ServicePortInUse(port_id=port_id, reason=e)
-
- def delete_port(self, context, port_id, l3_port_check=True):
- t_ctx = t_context.get_context_from_neutron_context(context)
- port = super(TricirclePlugin, self).get_port(context, port_id)
- # NOTE(zhiyuan) the deletion of non vm ports like router interfaces
- # and dhcp ports is handled by "setup_bottom_router" job, this job
- # will issue request to delete central ports, local ports and routing
- # entries, so here we just remove database records for central ports.
- # the deletion of vm ports is different since both users and nova are
- # involved. nova may delete vm ports via local neutron so local neutron
- # needs to send request to central neutron to delete the corresponding
- # central ports; users may delete a pre-created vm ports via central
- # neutron so central neutron needs to send request to local neutron to
- # delete the corresponding local ports. to avoid infinite api calls,
- # we use a "delete_server_port" job to delete the local ports.
- if port.get('device_owner') not in NON_VM_PORT_TYPES:
- self._pre_delete_port(context, port_id, False)
- try:
- # since we don't create resource routing entries for shadow
- # ports, we traverse pods where the network is located to
- # delete ports
- for pod, _id in self.helper.get_real_shadow_resource_iterator(
- t_ctx, t_constants.RT_NETWORK, port['network_id']):
- self.xjob_handler.delete_server_port(
- t_ctx, port['tenant_id'], port_id, pod['pod_id'])
- except Exception:
- raise
- with t_ctx.session.begin():
- core.delete_resources(t_ctx, models.ResourceRouting,
- filters=[{'key': 'top_id',
- 'comparator': 'eq',
- 'value': port_id}])
- super(TricirclePlugin, self).delete_port(context, port_id)
- q_ports.PortBinding.delete_objects(context, port_id=port_id)
-
- def _get_port_qos_info(self, context, port_id):
- policy = policy_object.QosPolicy.get_port_policy(context, port_id)
- return policy['id'] if policy else None
-
- def get_port(self, context, port_id, fields=None):
- t_ctx = t_context.get_context_from_neutron_context(context)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, port_id, t_constants.RT_PORT)
- if mappings:
- region_name = mappings[0][0]['region_name']
- bottom_port_id = mappings[0][1]
- port = self._get_client(region_name).get_ports(
- t_ctx, bottom_port_id)
- # TODO(zhiyuan) handle the case that bottom port does not exist
- port['id'] = port_id
- if fields:
- port = dict(
- [(k, v) for k, v in six.iteritems(port) if k in fields])
- if 'network_id' in port or 'fixed_ips' in port:
- bottom_top_map = {}
- with t_ctx.session.begin():
- for resource in (t_constants.RT_SUBNET,
- t_constants.RT_NETWORK,
- t_constants.RT_ROUTER):
- route_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': resource}]
- routes = core.query_resource(
- t_ctx, models.ResourceRouting, route_filters, [])
- for route in routes:
- if route['bottom_id']:
- bottom_top_map[
- route['bottom_id']] = route['top_id']
- self._map_port_from_bottom_to_top(port, bottom_top_map)
- else:
- port = super(TricirclePlugin, self).get_port(context,
- port_id, fields)
-
- port['qos_policy_id'] = \
- self._get_port_qos_info(context, port_id)
- return port
-
- @staticmethod
- def _apply_ports_filters(query, model, filters):
- if not filters:
- return query
-
- fixed_ips = filters.pop('fixed_ips', {})
- ip_addresses = fixed_ips.get('ip_address')
- subnet_ids = fixed_ips.get('subnet_id')
- if ip_addresses or subnet_ids:
- query = query.join(models_v2.Port.fixed_ips)
- if ip_addresses:
- query = query.filter(
- models_v2.IPAllocation.ip_address.in_(ip_addresses))
- if subnet_ids:
- query = query.filter(
- models_v2.IPAllocation.subnet_id.in_(subnet_ids))
-
- for key, value in six.iteritems(filters):
- column = getattr(model, key, None)
- if column is not None:
- if not value:
- query = query.filter(sql.false())
- return query
- query = query.filter(column.in_(value))
- return query
-
- def _get_ports_from_db_with_number(self, context,
- number, last_port_id, top_bottom_map,
- filters=None):
- query = context.session.query(models_v2.Port)
- # set the step to twice the requested number to have a better chance
- # of obtaining all the ports we need
- search_step = number * 2
- if search_step < 100:
- search_step = 100
- query = self._apply_ports_filters(query, models_v2.Port, filters)
- query = sa_utils.paginate_query(
- query, models_v2.Port, search_step,
- # create a dummy port object
- marker=models_v2.Port(
- id=last_port_id) if last_port_id else None,
- sort_keys=['id'],
- sort_dirs=['desc-nullsfirst'])
- total = 0
- ret = []
- for port in query:
- total += 1
- if port['id'] not in top_bottom_map:
- ret.append(self._make_port_dict(port))
- if len(ret) == number:
- return ret
- # NOTE(zhiyuan) we have traversed all the ports
- if total < search_step:
- return ret
- else:
- ret.extend(self._get_ports_from_db_with_number(
- context, number - len(ret), ret[-1]['id'], top_bottom_map))
- return ret
-
- def _get_ports_from_top_with_number(self, context,
- number, last_port_id, top_bottom_map,
- filters=None):
- with context.session.begin():
- ret = self._get_ports_from_db_with_number(
- context, number, last_port_id, top_bottom_map, filters)
- return {'ports': ret}
-
- def _get_ports_from_top(self, context, top_bottom_map, filters=None):
- with context.session.begin():
- ret = []
- query = context.session.query(models_v2.Port)
- query = self._apply_ports_filters(query, models_v2.Port, filters)
- for port in query:
- if port['id'] not in top_bottom_map:
- ret.append(self._make_port_dict(port))
- return ret
-
- @staticmethod
- def _map_port_from_bottom_to_top(port, bottom_top_map):
- if 'network_id' in port and port['network_id'] in bottom_top_map:
- port['network_id'] = bottom_top_map[port['network_id']]
- if 'fixed_ips' in port:
- for ip in port['fixed_ips']:
- if ip['subnet_id'] in bottom_top_map:
- ip['subnet_id'] = bottom_top_map[ip['subnet_id']]
- if 'device_id' in port and port['device_id'] in bottom_top_map:
- port['device_id'] = bottom_top_map[port['device_id']]
-
- @staticmethod
- def _map_ports_from_bottom_to_top(ports, bottom_top_map):
- # TODO(zhiyuan) judge if it's fine to remove unmapped port
- port_list = []
- for port in ports:
- if port['id'] not in bottom_top_map:
- continue
- if port.get('device_owner') in NON_VM_PORT_TYPES:
- continue
- port['id'] = bottom_top_map[port['id']]
- TricirclePlugin._map_port_from_bottom_to_top(port, bottom_top_map)
- port_list.append(port)
- return port_list
-
- @staticmethod
- def _get_map_filter_ids(key, value, pod_id, top_bottom_map):
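- # map top ids in an "id", "network_id" or "device_id" filter to the
- # corresponding bottom ids for the given pod, falling back to the
- # original id when no mapping exists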
- if key in ('id', 'network_id', 'device_id'):
- id_list = []
- for _id in value:
- key = '%s_%s' % (pod_id, _id)
- if _id in top_bottom_map:
- id_list.append(top_bottom_map[_id])
- elif key in top_bottom_map:
- id_list.append(top_bottom_map[key])
- else:
- id_list.append(_id)
- return id_list
-
- @staticmethod
- def _filter_shadow_port(ports, pod_id, port_pod_map):
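- # exclude ports that are recorded as shadow ports in the given pod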
- port_list = []
- for port in ports:
- if pod_id not in port_pod_map[port['id']]:
- port_list.append(port)
- return port_list
-
- def _get_ports_from_pod_with_number(self, context,
- current_pod, number, last_port_id,
- bottom_top_map, top_bottom_map,
- port_pod_map, filters=None):
- # NOTE(zhiyuan) last_port_id is a top id, and the ids in the returned
- # port dicts are also top ids. when interacting with a bottom pod we
- # need to map top to bottom in the request and bottom to top in the
- # response
-
- t_ctx = t_context.get_context_from_neutron_context(context)
- q_client = self._get_client(
- current_pod['region_name']).get_native_client('port', t_ctx)
- params = {'limit': number}
- if filters:
- _filters = dict(filters)
- for key, value in _filters:
- if key == 'fixed_ips':
- if 'ip_address' in value:
- _filters[key] = 'ip_address=%s' % value[
- 'ip_address'][0]
- continue
- id_list = self._get_map_filter_ids(
- key, value, current_pod['pod_id'], top_bottom_map)
- if id_list:
- _filters[key] = id_list
- params.update(_filters)
- if last_port_id:
- # map top id to bottom id in request
- params['marker'] = top_bottom_map[last_port_id]
- res = q_client.get(q_client.ports_path, params=params)
- ports = self._filter_shadow_port(res['ports'], current_pod['pod_id'],
- port_pod_map)
- # map bottom id to top id in client response
- mapped_port_list = self._map_ports_from_bottom_to_top(ports,
- bottom_top_map)
- del res['ports']
- res['ports'] = mapped_port_list
-
- if len(res['ports']) == number:
- return res
- else:
- next_pod = db_api.get_next_bottom_pod(
- t_ctx, current_pod_id=current_pod['pod_id'])
- if not next_pod:
- # _get_ports_from_top_with_number uses top id, no need to map
- next_res = self._get_ports_from_top_with_number(
- context, number - len(res['ports']), '', top_bottom_map,
- filters)
- next_res['ports'].extend(res['ports'])
- return next_res
- else:
- # _get_ports_from_pod_with_number itself returns top id, no
- # need to map
- next_res = self._get_ports_from_pod_with_number(
- context, next_pod, number - len(res['ports']), '',
- bottom_top_map, top_bottom_map, port_pod_map, filters)
- next_res['ports'].extend(res['ports'])
- return next_res
-
- def get_ports(self, context, filters=None, fields=None, sorts=None,
- limit=None, marker=None, page_reverse=False):
- # Directly sending a list request with an "id" filter to the local
- # Neutron server will cause problems, because when the local Neutron
- # server receives a list request with an "id" filter, it will query
- # the central Neutron server and try to create the port. Here we
- # introduce special handling for the "id" filter
-
- trunk_plugin = directory.get_plugin('trunk')
- if trunk_plugin:
- res = trunk_plugin.get_trunk_subports(context, filters)
- if res is not None:
- return res
-
- if not filters or 'id' not in filters:
- # if the filter is empty or "id" is not in it, no special
- # handling is required
- ports = self._get_ports(context, filters, fields, sorts, limit,
- marker, page_reverse)
- for port in ports:
- port['qos_policy_id'] = \
- self._get_port_qos_info(context, port['id'])
-
- return ports
- if len(filters) == 1:
- # only "id" is in the filter, we use get_port to get all the ports
- ports = []
- for port_id in filters['id']:
- try:
- ports.append(self.get_port(context, port_id, fields))
- except exceptions.PortNotFound:
- continue
- return ports
- else:
- # other filters are also specified, we first get the ports with
- # other filters, then filter the ports again with "id"
- id_filters = filters.pop('id')
- ports = self._get_ports(context, filters, None, sorts, limit,
- marker, page_reverse)
- ports = [db_utils.resource_fields(
- p, fields) for p in ports if p['id'] in id_filters]
- for port in ports:
- port['qos_policy_id'] = \
- self._get_port_qos_info(context, port['id'])
-
- return ports
-
- def _get_ports(self, context, filters=None, fields=None, sorts=None,
- limit=None, marker=None, page_reverse=False):
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- non_vm_ports = super(TricirclePlugin, self).get_ports(
- context, {'device_owner': NON_VM_PORT_TYPES}, ['id'])
- non_vm_port_ids = set([port['id'] for port in non_vm_ports])
-
- with t_ctx.session.begin():
- bottom_top_map = {}
- top_bottom_map = {}
- for resource in (t_constants.RT_PORT, t_constants.RT_SUBNET,
- t_constants.RT_NETWORK, t_constants.RT_ROUTER):
- route_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': resource}]
- routes = core.query_resource(t_ctx, models.ResourceRouting,
- route_filters, [])
-
- for route in routes:
- if route['top_id'] in non_vm_port_ids:
- continue
- if route['bottom_id']:
- bottom_top_map[route['bottom_id']] = route['top_id']
- if route['resource_type'] == t_constants.RT_PORT:
- key = route['top_id']
- else:
- # for non port resource, one top resource is
- # possible to be mapped to more than one bottom
- # resource
- key = '%s_%s' % (route['pod_id'], route['top_id'])
- top_bottom_map[key] = route['bottom_id']
-
- port_pod_map = collections.defaultdict(set)
- route_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': t_constants.RT_SD_PORT}]
- routes = core.query_resource(t_ctx, models.ResourceRouting,
- route_filters, [])
- for route in routes:
- if route['bottom_id']:
- port_pod_map[route['bottom_id']].add(route['pod_id'])
-
- if limit:
- if marker:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, marker, t_constants.RT_PORT)
- # NOTE(zhiyuan) if mapping exists, we retrieve port information
- # from bottom, otherwise from top
- if mappings:
- pod_id = mappings[0][0]['pod_id']
- current_pod = db_api.get_pod(t_ctx, pod_id)
- res = self._get_ports_from_pod_with_number(
- context, current_pod, limit, marker,
- bottom_top_map, top_bottom_map, port_pod_map, filters)
- else:
- res = self._get_ports_from_top_with_number(
- context, limit, marker, top_bottom_map, filters)
-
- else:
- current_pod = db_api.get_next_bottom_pod(t_ctx)
- # only top pod registered
- if current_pod:
- res = self._get_ports_from_pod_with_number(
- context, current_pod, limit, '',
- bottom_top_map, top_bottom_map, port_pod_map, filters)
- else:
- res = self._get_ports_from_top_with_number(
- context, limit, marker, top_bottom_map, filters)
-
- # NOTE(zhiyuan) we can safely return ports, neutron controller will
- # generate links for us so we do not need to worry about it.
- #
- # _get_ports_from_pod_with_number already traverses all the pods
- # to try to get ports equal to limit, so pod is transparent for
- # controller.
- return [db_utils.resource_fields(p, fields) for p in res['ports']]
- else:
- ret = []
- pods = db_api.list_pods(t_ctx)
- for pod in pods:
- if not pod['az_name']:
- continue
- _filters = []
- if filters:
- for key, value in six.iteritems(filters):
- if key == 'fixed_ips':
- if 'ip_address' in value:
- _filters.append(
- {'key': key, 'comparator': 'eq',
- 'value': 'ip_address=%s' % value[
- 'ip_address'][0]})
- continue
- id_list = self._get_map_filter_ids(
- key, value, pod['pod_id'], top_bottom_map)
- if id_list:
- _filters.append({'key': key,
- 'comparator': 'eq',
- 'value': id_list})
- else:
- _filters.append({'key': key,
- 'comparator': 'eq',
- 'value': value})
- client = self._get_client(pod['region_name'])
- ports = client.list_ports(t_ctx, filters=_filters)
- ret.extend(self._filter_shadow_port(ports, pod['pod_id'],
- port_pod_map))
- ret = self._map_ports_from_bottom_to_top(ret, bottom_top_map)
- ret.extend(self._get_ports_from_top(context, top_bottom_map,
- filters))
- return [db_utils.resource_fields(p, fields) for p in ret]
-
- def create_router(self, context, router):
- with context.session.begin(subtransactions=True):
- router_db = super(TricirclePlugin, self).create_router(
- context, router)
-
- return router_db
-
- def _delete_top_bridge_resource(self, t_ctx, q_ctx, resource_type,
- resource_id, resource_name):
- # first we update the routing entry to clear bottom_id and expire the
- # entry. if the bridge resource is then deleted successfully, we go on
- # to delete this expired entry; otherwise, if deleting the bridge
- # resource fails, the lock_handle module will find the resource the
- # next time it is accessed and update the entry
- with t_ctx.session.begin():
- core.update_resources(
- t_ctx, models.ResourceRouting,
- [{'key': 'bottom_id', 'comparator': 'eq',
- 'value': resource_id},
- {'key': 'top_id', 'comparator': 'eq',
- 'value': resource_name}],
- {'bottom_id': None,
- 'created_at': t_constants.expire_time,
- 'updated_at': t_constants.expire_time})
- if resource_type == t_constants.RT_PORT:
- getattr(super(TricirclePlugin, self), 'delete_%s' % resource_type)(
- q_ctx, resource_id)
- else:
- getattr(self, 'delete_%s' % resource_type)(q_ctx, resource_id)
- with t_ctx.session.begin():
- core.delete_resources(t_ctx, models.ResourceRouting,
- [{'key': 'top_id',
- 'comparator': 'eq',
- 'value': resource_name}])
-
- def _delete_top_bridge_network_subnet(self, t_ctx, q_ctx):
- project_id = t_ctx.project_id
- bridge_subnet_name = t_constants.bridge_subnet_name % project_id
- bridge_subnets = super(TricirclePlugin, self).get_subnets(
- q_ctx, {'name': [bridge_subnet_name]})
- if bridge_subnets:
- self._delete_top_bridge_resource(
- t_ctx, q_ctx, t_constants.RT_SUBNET,
- bridge_subnets[0]['id'], bridge_subnet_name)
- bridge_net_name = t_constants.bridge_net_name % project_id
- bridge_nets = super(TricirclePlugin, self).get_networks(
- q_ctx, {'name': [bridge_net_name]})
- if bridge_nets:
- self._delete_top_bridge_resource(
- t_ctx, q_ctx, t_constants.RT_NETWORK, bridge_nets[0]['id'],
- bridge_net_name)
-
- def _delete_top_bridge_port(self, t_ctx, q_ctx, bridge_port_id,
- bridge_port_name):
- self._delete_top_bridge_resource(t_ctx, q_ctx, t_constants.RT_PORT,
- bridge_port_id, bridge_port_name)
-
- def _delete_shadow_bridge_port(self, t_ctx, bridge_port):
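- # the bridge port has shadow copies in every pod the bridge network is
- # mapped to, so delete it from each of those pods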
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, bridge_port['network_id'], t_constants.RT_NETWORK)
- for pod, _id in mappings:
- region_name = pod['region_name']
- self._get_client(region_name).delete_ports(t_ctx,
- bridge_port['id'])
-
- def delete_router(self, context, _id):
- router = super(TricirclePlugin,
- self)._ensure_router_not_in_use(context, _id)
- project_id = router['tenant_id']
- t_ctx = t_context.get_context_from_neutron_context(context)
- is_local_router = self.helper.is_local_router(t_ctx, router)
-
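- # collect bottom routers mapped from this top router: regular bottom
- # routers carry is_ns False, north-south routers carry is_ns True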
- mappings = [
- (m[0], m[1], False) for m in db_api.get_bottom_mappings_by_top_id(
- t_ctx, _id, t_constants.RT_ROUTER)]
- mappings.extend(
- [(m[0], m[1], True) for m in db_api.get_bottom_mappings_by_top_id(
- t_ctx, _id, t_constants.RT_NS_ROUTER)])
-
- for pod, b_router_id, is_ns in mappings:
- b_client = self._get_client(pod['region_name'])
- bridge_port_name = t_constants.bridge_port_name % (project_id,
- b_router_id)
- bridge_ports = super(TricirclePlugin, self).get_ports(
- context, {'name': [bridge_port_name]}, limit=1)
- if bridge_ports:
- # we do not create bridge ports for local routers, so there is no
- # need to check "is_local_router" again here
- t_bridge_port = bridge_ports[0]
- t_bridge_port_id = t_bridge_port['id']
-
- if not is_ns:
- b_client.action_routers(t_ctx, 'remove_gateway',
- b_router_id)
- else:
- b_ns_port_id = t_bridge_port_id
- request_body = {'port_id': b_ns_port_id}
- try:
- b_client.action_routers(t_ctx, 'remove_interface',
- b_router_id, request_body)
- except Exception as e:
- if e.status_code == 404:
- # 404 error means that the router interface has
- # already been detached, so skip this exception
- pass
- else:
- raise
-
- self._delete_shadow_bridge_port(t_ctx, t_bridge_port)
- self._delete_top_bridge_port(t_ctx, context, t_bridge_port_id,
- bridge_port_name)
- b_client.delete_routers(t_ctx, b_router_id)
- db_api.delete_mappings_by_bottom_id(t_ctx, b_router_id)
-
- if not is_local_router:
- routers = super(TricirclePlugin, self).get_routers(
- context, {'tenant_id': [project_id]})
- if len(routers) <= 1:
- self._delete_top_bridge_network_subnet(t_ctx, context)
-
- super(TricirclePlugin, self).delete_router(context, _id)
-
- def _prepare_top_element(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- return self.helper.prepare_top_element(
- t_ctx, q_ctx, project_id, pod, ele, _type, body)
-
- def _get_bridge_subnet_pool_id(self, t_ctx, q_ctx, project_id, pod):
- pool_name = t_constants.bridge_subnet_pool_name
- pool_cidr = cfg.CONF.client.bridge_cidr
- pool_ele = {'id': pool_name}
- body = {'subnetpool': {'tenant_id': project_id,
- 'name': pool_name,
- 'shared': True,
- 'is_default': False,
- 'prefixes': [pool_cidr]}}
-
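- # create the top subnet pool with admin rights, then restore the
- # original admin flag of the request context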
- is_admin = q_ctx.is_admin
- q_ctx.is_admin = True
- _, pool_id = self._prepare_top_element(t_ctx, q_ctx, project_id, pod,
- pool_ele, 'subnetpool', body)
- q_ctx.is_admin = is_admin
-
- return pool_id
-
- def _get_bridge_network_subnet(self, t_ctx, q_ctx, project_id, pod,
- pool_id):
- net_name = t_constants.bridge_net_name % project_id
- net_ele = {'id': net_name}
- subnet_name = t_constants.bridge_subnet_name % project_id
- subnet_ele = {'id': subnet_name}
-
- is_admin = q_ctx.is_admin
- q_ctx.is_admin = True
-
- net_body = {'network': {
- 'tenant_id': project_id,
- 'name': net_name,
- 'shared': False,
- 'admin_state_up': True,
- provider_net.NETWORK_TYPE: cfg.CONF.tricircle.bridge_network_type}}
- _, net_id = self._prepare_top_element(
- t_ctx, q_ctx, project_id, pod, net_ele, 'network', net_body)
-
- subnet_body = {
- 'subnet': {
- 'network_id': net_id,
- 'name': subnet_name,
- 'prefixlen': 24,
- 'ip_version': 4,
- 'allocation_pools': constants.ATTR_NOT_SPECIFIED,
- 'dns_nameservers': constants.ATTR_NOT_SPECIFIED,
- 'host_routes': constants.ATTR_NOT_SPECIFIED,
- 'cidr': constants.ATTR_NOT_SPECIFIED,
- 'subnetpool_id': pool_id,
- 'enable_dhcp': False,
- 'tenant_id': project_id,
- 'ipv6_ra_mode': None,
- 'ipv6_address_mode': None
- }
- }
- _, subnet_id = self._prepare_top_element(
- t_ctx, q_ctx,
- project_id, pod, subnet_ele, 'subnet', subnet_body)
-
- q_ctx.is_admin = is_admin
-
- net = self.get_network(q_ctx, net_id)
- subnet = self.get_subnet(q_ctx, subnet_id)
-
- return net, subnet
-
- def _get_bridge_interface(self, t_ctx, q_ctx, project_id, pod,
- t_net_id, b_router_id, t_subnet=None):
- port_id = self.helper.get_bridge_interface(
- t_ctx, q_ctx, project_id, pod, t_net_id, b_router_id, t_subnet)
- return super(TricirclePlugin, self).get_port(q_ctx, port_id)
-
- def _get_bottom_bridge_elements(self, q_ctx, project_id,
- pod, t_net, is_external, t_subnet, t_port):
- t_ctx = t_context.get_context_from_neutron_context(q_ctx)
- return self.helper.get_bottom_bridge_elements(
- t_ctx, project_id, pod, t_net, is_external, t_subnet, t_port)
-
- def _get_net_id_by_interface_info(self, q_ctx, add_by_port,
- interface_info):
- if add_by_port:
- port = self.get_port(q_ctx, interface_info['port_id'])
- net_id = port['network_id']
- else:
- subnet = self.get_subnet(q_ctx, interface_info['subnet_id'])
- net_id = subnet['network_id']
- return net_id
-
- def _get_subnet_id_by_interface_info(self, q_ctx, add_by_port,
- interface_info):
- if add_by_port:
- port = self.get_port(q_ctx, interface_info['port_id'])
- # here we assume the port has an IP
- return port['fixed_ips'][0]['subnet_id']
- else:
- return interface_info['subnet_id']
-
- def _get_net_pods_by_interface_info(self, t_ctx, q_ctx, add_by_port,
- interface_info):
- net_id = self._get_net_id_by_interface_info(q_ctx, add_by_port,
- interface_info)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, net_id, t_constants.RT_NETWORK)
- return net_id, [mapping[0] for mapping in mappings]
-
- # NOTE(zhiyuan) the original implementation in l3_db uses the port returned
- # from get_port in the core plugin for the check; change it to the base
- # plugin, since only top port information should be checked.
- def _check_router_port(self, context, port_id, device_id):
- port = super(TricirclePlugin, self).get_port(context, port_id)
- if port['device_id'] != device_id:
- raise exceptions.PortInUse(net_id=port['network_id'],
- port_id=port['id'],
- device_id=port['device_id'])
- if not port['fixed_ips']:
- msg = _('Router port must have at least one fixed IP')
- raise exceptions.BadRequest(resource='router', msg=msg)
- return port
-
- def _add_router_gateway(self, context, router_id, router_data):
- # get top external network information
- ext_net_id = router_data[l3_apidef.EXTERNAL_GW_INFO].get('network_id')
- t_ctx = t_context.get_context_from_neutron_context(context)
- network = self.get_network(context, ext_net_id)
-
- # when creating external network in top pod, pod name is passed via
- # az hint parameter, so tricircle plugin knows where to create the
- # corresponding bottom external network. here we get bottom external
- # network ID from resource routing table.
- if not network.get(az_def.AZ_HINTS):
- raise t_exceptions.ExternalNetPodNotSpecify()
-
- t_router = self._get_router(context, router_id)
- self.validate_router_net_location_match(t_ctx, t_router, network)
- is_local_router = self.helper.is_local_router(t_ctx, t_router)
-
- region_name = network[az_def.AZ_HINTS][0]
- pod = db_api.get_pod_by_name(t_ctx, region_name)
- b_net_id = db_api.get_bottom_id_by_top_id_region_name(
- t_ctx, ext_net_id, region_name, t_constants.RT_NETWORK)
-
- # create corresponding bottom router in the pod where external network
- # is located.
- if is_local_router:
- # if router is a local router, we will use RT_ROUTER to attach
- # external network, else use RT_NS_ROUTER
- router_type = t_constants.RT_ROUTER
- is_distributed = t_router.get('distributed', False)
- body = {'router': {'name': t_router['id'],
- 'distributed': is_distributed}}
-
- else:
- router_type = t_constants.RT_NS_ROUTER
- body = {'router': {'name': t_constants.ns_router_name % router_id,
- 'distributed': False}}
-
- _, b_router_id = self.helper.prepare_bottom_element(
- t_ctx, t_router['tenant_id'], pod, t_router, router_type, body)
-
- # both router and external network in bottom pod are ready, attach
- # external network to router in bottom pod.
- b_client = self._get_client(region_name)
- t_info = router_data[l3_apidef.EXTERNAL_GW_INFO]
- b_info = {'network_id': b_net_id}
- if 'enable_snat' in t_info:
- b_info['enable_snat'] = t_info['enable_snat']
- if 'external_fixed_ips' in t_info:
- fixed_ips = []
- for ip in t_info['external_fixed_ips']:
- t_subnet_id = ip['subnet_id']
- b_subnet_id = db_api.get_bottom_id_by_top_id_region_name(
- t_ctx, t_subnet_id, region_name,
- t_constants.RT_SUBNET)
- fixed_ips.append({'subnet_id': b_subnet_id,
- 'ip_address': ip['ip_address']})
- b_info['external_fixed_ips'] = fixed_ips
- b_client.action_routers(t_ctx, 'add_gateway', b_router_id, b_info)
-
- if is_local_router:
- return
-
- # when internal network (providing fixed ip) and external network
- # (providing floating ip) are in different bottom pods, we utilize a
- # bridge network to connect these two networks. here we create the
- # bridge network.
- t_pod = db_api.get_top_pod(t_ctx)
- project_id = t_router['tenant_id']
- pool_id = self._get_bridge_subnet_pool_id(t_ctx, context, None, t_pod)
- t_bridge_net, t_bridge_subnet = self._get_bridge_network_subnet(
- t_ctx, context, project_id, t_pod, pool_id)
- (_, _, b_bridge_subnet_id,
- b_bridge_net_id) = self._get_bottom_bridge_elements(
- context, project_id, pod, t_bridge_net, True, t_bridge_subnet,
- None)
-
- # here we attach the bridge network to the router in bottom pod. to
- # make this method reentrant, we check if the interface is already
- # attached before attaching the interface.
- def _is_bridge_network_attached():
- interfaces = b_client.list_ports(t_ctx,
- filters=[{'key': 'device_id',
- 'comparator': 'eq',
- 'value': b_router_id}])
- for interface in interfaces:
- for fixed_ip in interface['fixed_ips']:
- if fixed_ip['subnet_id'] == b_bridge_subnet_id:
- return True
- return False
-
- is_attach = _is_bridge_network_attached()
- if not is_attach:
- # though there is no need to explicitly create the top bridge port,
- # since the ip reserved for the router interface will be used, we
- # still create it for shadow port creation purposes
- self._get_bridge_interface(
- t_ctx, context, project_id, t_pod, t_bridge_net['id'],
- b_router_id, t_bridge_subnet)
- b_client.action_routers(t_ctx, 'add_interface', b_router_id,
- {'subnet_id': b_bridge_subnet_id})
-
- def _remove_router_gateway(self, context, router_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- t_router = self._get_router(context, router_id)
- gw_port = t_router.gw_port
- if not gw_port:
- return
- ext_net_id = gw_port['network_id']
- t_network = self.get_network(context, ext_net_id)
- if az_def.AZ_HINTS not in t_network:
- raise t_exceptions.ExternalNetPodNotSpecify()
- if not t_network[az_def.AZ_HINTS]:
- raise t_exceptions.ExternalNetPodNotSpecify()
-
- region_name = t_network[az_def.AZ_HINTS][0]
- is_local_router = self.helper.is_local_router(t_ctx, t_router)
-
- b_router_id = db_api.get_bottom_id_by_top_id_region_name(
- t_ctx, router_id, region_name,
- t_constants.RT_ROUTER if is_local_router
- else t_constants.RT_NS_ROUTER)
-
- if not b_router_id:
- # bottom router doesn't exist, skip bottom gateway deletion
- return
-
- b_client = self._get_client(region_name)
- b_client.action_routers(t_ctx, 'remove_gateway', b_router_id)
-
- def update_router(self, context, router_id, router):
- # TODO(zhiyuan) handle the case that SNAT is disabled
- # and check if bridge network solution works with IPv6
- router_data = copy.deepcopy(router['router'])
- need_update_bottom = False
- is_add = False
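- # only external gateway changes need special handling: gateway info
- # containing a network_id means adding a gateway, gateway info without
- # one means removing it; other updates go straight to the parent plugin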
- if validators.is_attr_set(router_data.get(l3_apidef.EXTERNAL_GW_INFO)):
- need_update_bottom = True
- ext_net_id = router_data[l3_apidef.EXTERNAL_GW_INFO].get(
- 'network_id')
- if ext_net_id:
- is_add = True
- if not need_update_bottom:
- return super(TricirclePlugin, self).update_router(
- context, router_id, router)
- if is_add:
- ret = super(TricirclePlugin, self).update_router(
- context, router_id, router)
- router_data[l3_apidef.EXTERNAL_GW_INFO].update(
- ret[l3_apidef.EXTERNAL_GW_INFO])
- self._add_router_gateway(context, router_id, router_data)
- else:
- self._remove_router_gateway(context, router_id)
- ret = super(TricirclePlugin, self).update_router(
- context, router_id, router)
- t_ctx = t_context.get_context_from_neutron_context(context)
- is_local_router = self.helper.is_local_router(t_ctx, router)
- if not is_local_router:
- self.xjob_handler.configure_route(
- t_ctx, ret['tenant_id'], router_id)
- return ret
-
- def validate_router_net_location_match(self, t_ctx, router, net):
- is_local_router = self.helper.is_local_router(t_ctx, router)
- router_az_hints = self.helper.get_router_az_hints(router)
- if not router_az_hints:
- return
- router_region_names = self._convert_az2region(t_ctx, router_az_hints)
- router_region_set = set(router_region_names)
-
- net_az_hints = net.get(az_def.AZ_HINTS)
- if not net_az_hints:
- if is_local_router:
- # the network az hints parameter is not specified, meaning that
- # this network can be located in any pod; such a network is
- # allowed to be attached to a local router, to support
- # multi-gateway l3 mode
- return
- raise t_exceptions.RouterNetworkLocationMismatch(
- router_az_hints=router_region_names,
- net_az_hints=['All Region'])
-
- # net_az_hints are already converted to region names when the user is admin
- if t_ctx.is_admin:
- net_region_names = net_az_hints
- else:
- net_region_names = self._convert_az2region(t_ctx, net_az_hints)
- net_region_set = set(net_region_names)
-
- if is_local_router:
- if router_region_set <= net_region_set:
- # the pods in which this network can be located include the pod
- # of the local router, so this attachment is allowed, to support
- # multi-gateway l3 mode
- return
- raise t_exceptions.RouterNetworkLocationMismatch(
- router_az_hints=router_region_names,
- net_az_hints=net_region_names)
-
- diff = net_region_set - router_region_set
- if diff:
- raise t_exceptions.RouterNetworkLocationMismatch(
- router_az_hints=router_region_names,
- net_az_hints=net_region_names)
-
- def add_router_interface(self, context, router_id, interface_info):
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- router = self._get_router(context, router_id)
- project_id = router['tenant_id']
- add_by_port, _ = self._validate_interface_info(interface_info)
- net_id = self._get_net_id_by_interface_info(
- context, add_by_port, interface_info)
- subnet_id = self._get_subnet_id_by_interface_info(
- context, add_by_port, interface_info)
- net = self.get_network(context, net_id)
- subnet = self.get_subnet(context, subnet_id)
- self.validate_router_net_location_match(t_ctx, router, net)
- is_local_router = self.helper.is_local_router(t_ctx, router)
-
- if is_local_router:
- other_infs = super(TricirclePlugin, self).get_ports(
- context, filters={
- 'network_id': [net_id],
- 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]})
- for other_inf in other_infs:
- if not other_inf['device_id']:
- continue
- other_ip = other_inf['fixed_ips'][0]['ip_address']
- other_router = super(
- TricirclePlugin, self).get_router(context,
- other_inf['device_id'])
- if not self.helper.is_local_router(t_ctx, other_router) and (
- other_ip == subnet['gateway_ip']):
- # this network has already been attached to a non-local
- # router and the gateway port is on that router; in this
- # case, we don't allow this network to be attached to other
- # local routers
- raise t_network_exc.NetAttachedToNonLocalRouter(
- network_id=net_id, router_id=other_inf['device_id'])
-
- t_pod = db_api.get_top_pod(t_ctx)
- assert t_pod
-
- if not is_local_router:
- # for single external network, need bridge network for
- # E-W and N-S networking
- pool_id = self._get_bridge_subnet_pool_id(
- t_ctx, context, None, t_pod)
- self._get_bridge_network_subnet(
- t_ctx, context, project_id, t_pod, pool_id)
-
- if is_local_router:
- if self.helper.is_local_network(t_ctx, net):
- router_region = self.helper.get_router_az_hints(router)[0]
- b_client = self._get_client(router_region)
- b_pod = db_api.get_pod_by_name(t_ctx, router_region)
- # getting the bottom network will create the bottom network and subnet
- b_client.get_networks(t_ctx, net_id)
- # create resource mapping so job will be triggered
- db_api.create_resource_mapping(
- t_ctx, net_id, net_id, b_pod['pod_id'], net['project_id'],
- t_constants.RT_NETWORK)
- db_api.create_resource_mapping(
- t_ctx, subnet_id, subnet_id, b_pod['pod_id'],
- subnet['project_id'], t_constants.RT_SUBNET)
-
- return_info = super(TricirclePlugin, self).add_router_interface(
- context, router_id, interface_info)
-
- _, b_pods = self._get_net_pods_by_interface_info(
- t_ctx, context, add_by_port, interface_info)
- if not b_pods:
- LOG.debug('Add router interface: no interfaces found, xjob not '
- 'triggered')
- return return_info
- try:
- if len(b_pods) == 1:
- self.xjob_handler.setup_bottom_router(
- t_ctx, project_id, net_id, router_id, b_pods[0]['pod_id'])
- else:
- self.xjob_handler.setup_bottom_router(
- t_ctx, project_id, net_id, router_id,
- t_constants.POD_NOT_SPECIFIED)
- except Exception:
- # NOTE(zhiyuan) we failed to submit the job, so bottom router
- # operations have not started; it's safe for us to remove the top
- # router interface
- super(TricirclePlugin, self).remove_router_interface(
- context, router_id, interface_info)
- raise
- return return_info
-
- def create_floatingip(self, context, floatingip):
- fip = None
- t_ctx = t_context.get_context_from_neutron_context(context)
- try:
- fip = super(TricirclePlugin, self).create_floatingip(
- context, floatingip,
- initial_status=constants.FLOATINGIP_STATUS_DOWN)
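- # if the new floating ip is already associated with a port, ask the
- # bottom router setup job to configure the corresponding bottom
- # floating ip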
- if fip['router_id'] and fip['port_id']:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, fip['port_id'], t_constants.RT_PORT)
- if not mappings:
- # mapping does not exist, meaning that the bottom port has
- # not been created; we just return and defer the work of
- # setting up the bottom floating ip until vm creation
- return fip
-
- int_net_pod, b_int_port_id = mappings[0]
- int_port = self.get_port(context, fip['port_id'])
- net_id = int_port['network_id']
- router = self._get_router(context, fip['router_id'])
- self.xjob_handler.setup_bottom_router(
- t_ctx, router['tenant_id'], net_id,
- fip['router_id'], int_net_pod['pod_id'])
- return fip
- except Exception:
- if fip:
- # if we fail to register the job, delete the fip
- super(TricirclePlugin, self).delete_floatingip(context,
- fip['id'])
- raise
-
- def remove_router_interface(self, context, router_id, interface_info):
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- add_by_port, _ = self._validate_interface_info(interface_info,
- for_removal=True)
- net_id, b_pods = self._get_net_pods_by_interface_info(
- t_ctx, context, add_by_port, interface_info)
-
- return_info = super(TricirclePlugin, self).remove_router_interface(
- context, router_id, interface_info)
-
- router = self._get_router(context, router_id)
-
- if not b_pods:
- return return_info
- try:
- if len(b_pods) == 1:
- self.xjob_handler.setup_bottom_router(
- t_ctx, router['tenant_id'], net_id,
- router_id, b_pods[0]['pod_id'])
- else:
- self.xjob_handler.setup_bottom_router(
- t_ctx, router['tenant_id'], net_id,
- router_id, t_constants.POD_NOT_SPECIFIED)
- except Exception:
- # NOTE(zhiyuan) we failed to submit the job, so if the bottom router
- # interface exists, it will not be deleted; after we add the top
- # interface back, the bottom router setup job will reuse the
- # existing bottom interface.
- #
- # we don't create a routing entry between top interface and bottom
- # interface, instead, when we create bottom subnet, we specify the
- # ip of the top interface as the gateway ip of the bottom subnet.
- # later when we attach the bottom subnet to bottom router, neutron
- # server in bottom pod will create the bottom interface using the
- # gateway ip automatically.
- interface_info = {'subnet_id': return_info['subnet_id']}
- super(TricirclePlugin, self).add_router_interface(
- context, router_id, interface_info)
- raise
- return return_info
-
- @staticmethod
- def _rollback_floatingip_data(context, _id, org_data):
- """Rollback the data of floating ip object to the original one
-
- :param context: request context
- :param _id: ID of the floating ip
- :param org_data: data of floating ip we rollback to
- :return: None
- """
- try:
- router_object.FloatingIP.update_objects(context, org_data, id=_id)
- except Exception as e:
- # log the exception and re-raise it
- LOG.exception('Failed to roll back floating ip data, reason: '
- '%(reason)s' % {'reason': e})
- raise
-
- def update_floatingip(self, context, _id, floatingip):
- """Update floating ip object in top and bottom pods
-
- :param context: request context
- :param _id: ID of the floating ip
- :param floatingip: data of floating ip we update to
- :return: updated floating ip object
- """
- org_floatingip_dict = self._make_floatingip_dict(
- self._get_floatingip(context, _id))
-
- res = super(TricirclePlugin, self).update_floatingip(
- context, _id, floatingip)
- try:
- if floatingip['floatingip']['port_id']:
- self._associate_floatingip(context, _id, floatingip)
- else:
- self._disassociate_floatingip(context, org_floatingip_dict)
- return res
- except Exception as e:
- # NOTE(zhiyuan) when an exception occurs, we update the floating ip
- # object to roll back fixed_port_id, fixed_ip_address and router_id
- LOG.exception(
- 'Failed to update floating ip, reason: '
- '%(reason)s, rolling back floating ip data' %
- {'reason': e})
- org_data = {
- 'fixed_port_id': org_floatingip_dict['port_id'],
- 'fixed_ip_address': org_floatingip_dict['fixed_ip_address'],
- 'router_id': org_floatingip_dict['router_id']}
- self._rollback_floatingip_data(context, _id, org_data)
- raise
-
- def _associate_floatingip(self, context, _id, floatingip):
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- fip = floatingip['floatingip']
- floatingip_db = self._get_floatingip(context, _id)
- int_port_id = fip['port_id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, int_port_id, t_constants.RT_PORT)
- if not mappings:
- # mapping does not exist, meaning that the bottom port has not
- # been created; we just return and defer the work of setting up
- # the bottom floating ip until vm creation
- return
-
- int_net_pod, b_int_port_id = mappings[0]
- int_port = self.get_port(context, int_port_id)
- net_id = int_port['network_id']
- router_id = floatingip_db['router_id']
- router = self._get_router(context, router_id)
- self.xjob_handler.setup_bottom_router(
- t_ctx, router['tenant_id'], net_id,
- floatingip_db['router_id'], int_net_pod['pod_id'])
-
- def _disassociate_floatingip(self, context, ori_floatingip_db):
- if not ori_floatingip_db['port_id']:
- # floating ip has not been associated with fixed ip, no
- # operation in bottom pod needed
- return
-
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- t_int_port_id = ori_floatingip_db['port_id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_int_port_id, t_constants.RT_PORT)
- if not mappings:
- # floating ip in top pod is associated but no mapping between
- # top and bottom internal port, this is an inconsistent state,
- # but since bottom internal port does not exist, no operation
- # in bottom pod is required
- LOG.warning('Internal port associated with floating ip '
- 'does not exist in bottom pod.')
- return
-
- b_int_net_pod, b_int_port_id = mappings[0]
- int_port = self.get_port(context, t_int_port_id)
- net_id = int_port['network_id']
- router_id = ori_floatingip_db['router_id']
- router = self._get_router(context, router_id)
- self.xjob_handler.setup_bottom_router(
- t_ctx, router['tenant_id'], net_id,
- ori_floatingip_db['router_id'], b_int_net_pod['pod_id'])
-
- def delete_floatingip(self, context, _id):
- """Disassociate floating ip if needed then delete it
-
- :param context: request context
- :param _id: ID of the floating ip
- :return: None
- """
- self.update_floatingip(context, _id, {'floatingip': {'port_id': None}})
- super(TricirclePlugin, self).delete_floatingip(context, _id)
diff --git a/tricircle/network/central_qos_plugin.py b/tricircle/network/central_qos_plugin.py
deleted file mode 100644
index 2ad9cc9c..00000000
--- a/tricircle/network/central_qos_plugin.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright 2017 Hunan University.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from neutron.services.qos import qos_plugin
-from neutron_lib.api.definitions import portbindings
-from neutron_lib.objects import registry as obj_reg
-from oslo_log import log
-
-import tricircle.common.client as t_client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-import tricircle.db.api as db_api
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleQosPlugin(qos_plugin.QoSPlugin):
-
- def __init__(self):
- super(TricircleQosPlugin, self).__init__()
- self.clients = {'top': t_client.Client()}
-
- def _get_client(self, region_name):
- if region_name not in self.clients:
- self.clients[region_name] = t_client.Client(region_name)
- return self.clients[region_name]
-
- def _get_ports_with_policy(self, context, policy):
- networks_ids = policy.get_bound_networks()
-
- ports_with_net_policy = obj_reg.load_class('Port').get_objects(
- context, network_id=networks_ids)
-
- # Filter only those ports which don't have an overridden policy
- ports_with_net_policy = [
- port for port in ports_with_net_policy if
- port.qos_policy_id is None
- ]
-
- ports_ids = policy.get_bound_ports()
- ports_with_policy = obj_reg.load_class('Port').get_objects(
- context, id=ports_ids)
- t_ports = list(set(ports_with_policy + ports_with_net_policy))
-
- t_ctx = t_context.get_context_from_neutron_context(context)
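- # fill each top port's binding information from its mapped bottom
- # port, falling back to unbound defaults when no mapping exists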
- for t_port in t_ports:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_port.id, t_constants.RT_PORT)
- if mappings:
- b_pod, b_port_id = mappings[0]
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
- b_port = b_client.get_ports(t_ctx, b_port_id)
- new_binding = obj_reg.new_instance(
- 'PortBinding',
- port_id=t_port.id,
- vif_type=b_port.get('binding:vif_type',
- portbindings.VIF_TYPE_UNBOUND),
- vnic_type=b_port.get('binding:vnic_type',
- portbindings.VNIC_NORMAL)
- )
- t_port.binding = new_binding
- else:
- new_binding = obj_reg.new_instance(
- 'PortBinding',
- port_id=t_port.id,
- vif_type=portbindings.VIF_TYPE_UNBOUND,
- vnic_type=portbindings.VNIC_NORMAL
- )
- t_port.binding = new_binding
-
- return t_ports
diff --git a/tricircle/network/central_sfc_driver.py b/tricircle/network/central_sfc_driver.py
deleted file mode 100644
index 133eaec4..00000000
--- a/tricircle/network/central_sfc_driver.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import helpers as log_helpers
-
-from networking_sfc.db import sfc_db
-from networking_sfc.services.sfc.drivers import base as sfc_driver
-
-from oslo_log import log
-
-from neutron_lib.db import model_query
-from neutron_lib.plugins import directory
-from neutronclient.common import exceptions as client_exceptions
-
-import tricircle.common.client as t_client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-from tricircle.common import xrpcapi
-import tricircle.db.api as db_api
-from tricircle.network import central_plugin
-import tricircle.network.exceptions as n_exceptions
-
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleSfcDriver(sfc_driver.SfcDriverBase):
-
- def __init__(self):
- self.xjob_handler = xrpcapi.XJobAPI()
- self.clients = {}
-
- def initialize(self):
- pass
-
- def _get_client(self, region_name):
- if region_name not in self.clients:
- self.clients[region_name] = t_client.Client(region_name)
- return self.clients[region_name]
-
- def _get_net_id_by_port_id(self, context, port_id):
- core_plugin = directory.get_plugin()
- port = super(central_plugin.TricirclePlugin, core_plugin
- ).get_port(context, port_id)
- if not port:
- raise n_exceptions.PortNotFound(port_id=port_id)
- return port['network_id']
-
- def _get_net_id_by_portpairgroups(self, context,
- sfc_plugin, port_pair_groups):
- if not port_pair_groups:
- return None
- port_pairs = sfc_plugin.get_port_pairs(
- context, {'portpairgroup_id': port_pair_groups})
- if not port_pairs:
- return None
- return self._get_net_id_by_port_id(context, port_pairs[0]['ingress'])
-
- @log_helpers.log_method_call
- def create_port_chain(self, context):
- pass
-
- @log_helpers.log_method_call
- def create_port_chain_precommit(self, context):
- plugin_context = context._plugin_context
- t_ctx = t_context.get_context_from_neutron_context(plugin_context)
- port_chain = context.current
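- # derive the backing network from the chain's port pair groups and,
- # if one is found, trigger an asynchronous job to sync the port chain
- # to bottom pods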
- net_id = self._get_net_id_by_portpairgroups(
- plugin_context, context._plugin, port_chain['port_pair_groups'])
- if net_id:
- self.xjob_handler.sync_service_function_chain(
- t_ctx, port_chain['project_id'], port_chain['id'], net_id,
- t_constants.POD_NOT_SPECIFIED)
-
- @log_helpers.log_method_call
- def delete_port_chain(self, context):
- t_ctx = t_context.get_context_from_neutron_context(
- context._plugin_context)
- portchain_id = context.current['id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, portchain_id, t_constants.RT_PORT_CHAIN)
- for b_pod, b_portchain_id in mappings:
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
- try:
- b_client.delete_port_chains(t_ctx, b_portchain_id)
- except client_exceptions.NotFound:
- LOG.debug(('port chain: %(portchain_id)s not found, '
- 'region name: %(name)s'),
- {'portchain_id': portchain_id,
- 'name': b_region_name})
- db_api.delete_mappings_by_bottom_id(t_ctx, b_portchain_id)
-
- @log_helpers.log_method_call
- def delete_port_chain_precommit(self, context):
- t_ctx = t_context.get_context_from_neutron_context(
- context._plugin_context)
- portchain_id = context.current['id']
- db_api.create_recycle_resource(
- t_ctx, portchain_id, t_constants.RT_PORT_CHAIN,
- t_ctx.project_id)
-
- @log_helpers.log_method_call
- def delete_port_pair_group(self, context):
- t_ctx = t_context.get_context_from_neutron_context(
- context._plugin_context)
- portpairgroup_id = context.current['id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, portpairgroup_id, t_constants.RT_PORT_PAIR_GROUP)
- for b_pod, b_portpairgroup_id in mappings:
- b_region_name = b_pod['region_name']
- b_client = self._get_client(b_region_name)
-
- try:
- b_client.delete_port_pair_groups(t_ctx, b_portpairgroup_id)
- except client_exceptions.NotFound:
- LOG.debug(('port pair group: %(portpairgroup_id)s not found, '
- 'region name: %(name)s'),
- {'portpairgroup_id': portpairgroup_id,
- 'name': b_region_name})
- db_api.delete_mappings_by_bottom_id(t_ctx, b_portpairgroup_id)
-
- def delete_port_pair_group_precommit(self, context):
- t_ctx = t_context.get_context_from_neutron_context(
- context._plugin_context)
- portpairgroup_id = context.current['id']
- db_api.create_recycle_resource(
- t_ctx, portpairgroup_id, t_constants.RT_PORT_PAIR_GROUP,
- t_ctx.project_id)
-
- @log_helpers.log_method_call
- def delete_port_pair(self, context):
- t_ctx = t_context.get_context_from_neutron_context(
- context._plugin_context)
- portpair_id = context.current['id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, portpair_id, t_constants.RT_PORT_PAIR)
- for b_pod, b_portpair_id in mappings:
- b_region_name = b_pod['region_name']
- b_client = self._get_client(b_region_name)
- try:
- b_client.delete_port_pairs(t_ctx, b_portpair_id)
- except client_exceptions.NotFound:
- LOG.debug(('port pair: %(portpair_id)s not found, '
- 'region name: %(name)s'),
- {'portpair_id': portpair_id, 'name': b_region_name})
- db_api.delete_mappings_by_bottom_id(t_ctx, b_portpair_id)
-
- def delete_port_pair_precommit(self, context):
- t_ctx = t_context.get_context_from_neutron_context(
- context._plugin_context)
- portpair_id = context.current['id']
- db_api.create_recycle_resource(
- t_ctx, portpair_id, t_constants.RT_PORT_PAIR,
- t_ctx.project_id)
-
- def update_port_chain_precommit(self, context):
- plugin_context = context._plugin_context
- t_ctx = t_context.get_context_from_neutron_context(plugin_context)
- port_chain = context.current
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, port_chain['id'], t_constants.RT_PORT_CHAIN)
- if mappings:
- net_id = self._get_net_id_by_portpairgroups(
- plugin_context, context._plugin,
- port_chain['port_pair_groups'])
- if not net_id:
- return
- self.xjob_handler.sync_service_function_chain(
- t_ctx, port_chain['project_id'], port_chain['id'],
- net_id, t_constants.POD_NOT_SPECIFIED)
-
- def _get_chain_id_by_group_id(self, context, sfc_plugin, portpairgroup_id):
- chain_group_assoc = model_query.query_with_hooks(
- context, sfc_db.ChainGroupAssoc).filter_by(
- portpairgroup_id=portpairgroup_id).first()
- if chain_group_assoc:
- return chain_group_assoc['portchain_id']
- return None
-
- def update_port_pair_group_precommit(self, context):
- plugin_context = context._plugin_context
- t_ctx = t_context.get_context_from_neutron_context(
- context._plugin_context)
- port_pair_group = context.current
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, port_pair_group['id'], t_constants.RT_PORT_PAIR_GROUP)
- if mappings:
- portchain_id = self._get_chain_id_by_group_id(
- plugin_context, context._plugin, port_pair_group['id'])
- if port_pair_group['port_pairs']:
- net_id = self._get_net_id_by_portpairgroups(
- plugin_context, context._plugin, [port_pair_group['id']])
- elif context.original['port_pairs']:
- portpair_id = context.original['port_pairs'][0]
- port_pair = context._plugin._get_port_pair(
- plugin_context, portpair_id)
- net_id = self._get_net_id_by_port_id(
- plugin_context, port_pair['ingress'])
- else:
- net_id = ''
- if not portchain_id and not net_id:
- return
- self.xjob_handler.sync_service_function_chain(
- t_ctx, port_pair_group['project_id'], portchain_id, net_id,
- t_constants.POD_NOT_SPECIFIED)
-
- def _get_chain_id_by_pair_id(self, context, sfc_plugin, portpair_id):
- port_pair = sfc_plugin._get_port_pair(context, portpair_id)
- if not port_pair:
- raise n_exceptions.PortPairNotFound(portpair_id=portpair_id)
- return self._get_chain_id_by_group_id(
- context, sfc_plugin, port_pair['portpairgroup_id'])
-
- def update_port_pair_precommit(self, context):
- plugin_context = context._plugin_context
- t_ctx = t_context.get_context_from_neutron_context(plugin_context)
- port_pair = context.current
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, port_pair['id'], t_constants.RT_PORT_PAIR)
- if mappings:
- portchain_id = self._get_chain_id_by_pair_id(
- plugin_context, context._plugin, port_pair['id'])
- net_id = self._get_net_id_by_port_id(
- plugin_context, port_pair['ingress'])
- if not portchain_id or not net_id:
- return
- self.xjob_handler.sync_service_function_chain(
- t_ctx, port_pair['project_id'], portchain_id,
- net_id, t_constants.POD_NOT_SPECIFIED)
-
- @log_helpers.log_method_call
- def update_port_chain(self, context):
- pass
-
- @log_helpers.log_method_call
- def create_port_pair_group(self, context):
- pass
-
- @log_helpers.log_method_call
- def update_port_pair_group(self, context):
- pass
-
- @log_helpers.log_method_call
- def create_port_pair(self, context):
- pass
-
- @log_helpers.log_method_call
- def update_port_pair(self, context):
- pass
diff --git a/tricircle/network/central_sfc_plugin.py b/tricircle/network/central_sfc_plugin.py
deleted file mode 100644
index 7aceecdd..00000000
--- a/tricircle/network/central_sfc_plugin.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_log import log
-
-from networking_sfc.extensions import sfc as sfc_ext
-from networking_sfc.services.sfc import plugin as sfc_plugin
-from neutron_lib import exceptions as n_exc
-from neutron_lib.plugins import directory
-
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleSfcPlugin(sfc_plugin.SfcPlugin):
-
- def __init__(self):
- super(TricircleSfcPlugin, self).__init__()
-
- # TODO(xiulin): Tricircle's top region can not get port's
- # binding information well now, so override this function,
- # we will improve this later.
- def _get_port(self, context, id):
- core_plugin = directory.get_plugin()
- try:
- return core_plugin.get_port(context, id)
- except n_exc.PortNotFound:
- raise sfc_ext.PortPairPortNotFound(id=id)
diff --git a/tricircle/network/central_trunk_driver.py b/tricircle/network/central_trunk_driver.py
deleted file mode 100644
index 7a369727..00000000
--- a/tricircle/network/central_trunk_driver.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import six
-
-from oslo_log import log
-
-from neutron.services.trunk import exceptions as trunk_exc
-from neutron.services.trunk import plugin as trunk_plugin
-from neutron_lib.db import utils as db_utils
-from neutron_lib.plugins import directory
-
-import tricircle.common.client as t_client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-from tricircle.common import xrpcapi
-import tricircle.db.api as db_api
-from tricircle.network import central_plugin
-from tricircle.network import helper
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleTrunkDriver(trunk_plugin.TrunkPlugin):
-
- def __init__(self):
- super(TricircleTrunkDriver, self).__init__()
- self.xjob_handler = xrpcapi.XJobAPI()
- self.helper = helper.NetworkHelper(self)
- self.clients = {}
-
- def is_rpc_enabled(self):
- return False
-
- def _get_client(self, region_name):
- if region_name not in self.clients:
- self.clients[region_name] = t_client.Client(region_name)
- return self.clients[region_name]
-
- @property
- def registered_drivers(self):
- return super(TricircleTrunkDriver, self).registered_drivers()
-
- @property
- def supported_interfaces(self):
- """A set of supported interfaces."""
- return super(TricircleTrunkDriver, self).supported_interfaces()
-
- @property
- def supported_agent_types(self):
- """A set of supported agent types."""
- return super(TricircleTrunkDriver, self).supported_agent_types()
-
- def update_trunk(self, context, trunk_id, trunk):
- # update trunk
- t_ctx = t_context.get_context_from_neutron_context(context)
- with context.session.begin():
- res = super(TricircleTrunkDriver, self).update_trunk(
- context, trunk_id, trunk)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod = mappings[0][0]
- self.xjob_handler.sync_trunk(t_ctx, res['project_id'],
- trunk_id, b_pod['pod_id'])
- return res
-
- def delete_trunk(self, context, trunk_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- res = super(TricircleTrunkDriver, self).get_trunk(
- context, trunk_id)
- with context.session.begin():
- super(TricircleTrunkDriver, self).delete_trunk(
- context, trunk_id)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod = mappings[0][0]
- self.xjob_handler.sync_trunk(t_ctx, res['project_id'],
- trunk_id, b_pod['pod_id'])
-
- def get_trunk(self, context, trunk_id, fields=None):
- t_ctx = t_context.get_context_from_neutron_context(context)
- t_trunk = super(TricircleTrunkDriver, self).get_trunk(
- context, trunk_id, fields)
- if not fields or 'status' in fields:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod, b_trunk_id = mappings[0]
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
- b_trunk = b_client.get_trunks(t_ctx, b_trunk_id)
- if not b_trunk:
- LOG.error('trunk: %(trunk_id)s not found, '
- 'pod name: %(name)s',
- {'trunk_id': b_trunk_id, 'name': b_region_name})
- else:
- t_trunk['status'] = b_trunk['status']
-
- return t_trunk
-
- def _get_trunks_from_top(self, context, top_bottom_map, filters):
- top_trunks = super(TricircleTrunkDriver, self).get_trunks(
- context, filters)
- return [trunk for trunk in top_trunks if
- trunk['id'] not in top_bottom_map]
-
- def _get_min_search_step(self):
- # this method is for unit test mock purpose
- return 100
-
- def _get_trunks_from_top_with_limit(self, context, top_bottom_map,
- filters, limit, marker):
-
- ret = []
- total = 0
- # set the step to twice the requested number to have a better chance
- # of obtaining all the trunks we need
- search_step = limit * 2
- min_search_step = self._get_min_search_step()
- if search_step < min_search_step:
- search_step = min_search_step
- # None means sort direction is desc
- sorts = [('id', None)]
- top_trunks = super(TricircleTrunkDriver, self).get_trunks(
- context, filters, sorts=sorts, limit=search_step, marker=marker)
-
- for trunk in top_trunks:
- total += 1
- if trunk['id'] not in top_bottom_map:
- ret.append(trunk)
- if len(ret) == limit:
- return ret
- # NOTE(xiulin) we have traversed all the trunks
- if total < search_step:
- return ret
- else:
- ret.extend(self._get_trunks_from_top_with_limit(
- context, top_bottom_map, filters, limit - len(ret),
- ret[-1]['id']))
- return ret
-
- def _get_trunks_from_pod_with_limit(self, context, current_pod,
- bottom_top_map, top_bottom_map,
- filters, limit, marker):
-
- ret = []
- t_ctx = t_context.get_context_from_neutron_context(context)
- q_client = self._get_client(
- current_pod['region_name']).get_native_client('trunk', t_ctx)
-
- params = {'limit': 0 if not limit else limit}
- if marker:
- b_marker = top_bottom_map[marker]
- params.update({'marker': b_marker})
- if filters:
- if 'id' in filters:
- map_ids = self._get_map_trunk_ids(filters['id'],
- top_bottom_map)
- filters['id'] = map_ids
- params.update(filters)
- bottom_trunks = q_client.get(q_client.trunks_path,
- params=params)['trunks']
- for bottom_trunk in bottom_trunks:
- top_id = bottom_top_map.get(bottom_trunk['id'])
- # TODO(xiulin): handle unmapped trunk
- if top_id:
- bottom_trunk['id'] = top_id
- ret.append(bottom_trunk)
- if len(ret) == limit:
- return ret
-
- remainder = limit - len(ret)
- next_pod = db_api.get_next_bottom_pod(
- t_ctx, current_pod_id=current_pod['pod_id'])
- if next_pod:
- # get from next bottom pod
- next_ret = self._get_trunks_from_pod_with_limit(
- context, next_pod, bottom_top_map, top_bottom_map,
- filters, remainder, None)
- ret.extend(next_ret)
- return ret
- else:
- # get from top pod
- top_ret = self._get_trunks_from_top_with_limit(
- context, top_bottom_map, filters, remainder, None)
- ret.extend(top_ret)
- return ret
-
- def _map_trunks_from_bottom_to_top(self, trunks, bottom_top_map):
- trunk_list = []
- for trunk in trunks:
- # TODO(xiulin): handle unmapped trunk
- if trunk['id'] not in bottom_top_map:
- continue
- trunk['id'] = bottom_top_map[trunk['id']]
- trunk_list.append(trunk)
- return trunk_list
-
- def _get_map_trunk_ids(self, top_ids, top_bottom_map):
- b_trunk_ids = []
- for _id in top_ids:
- if _id in top_bottom_map:
- b_trunk_ids.append(top_bottom_map[_id])
- else:
- b_trunk_ids.append(_id)
- return b_trunk_ids
-
- def _transform_trunk_filters(self, filters, top_bottom_map):
- _filters = []
- if filters:
- for key, value in six.iteritems(filters):
- if key == 'id':
- value = self._get_map_trunk_ids(value, top_bottom_map)
- _filters.append({'key': key,
- 'comparator': 'eq',
- 'value': value})
- return _filters
-
- def get_trunks(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None, page_reverse=False):
- ret = []
- bottom_top_map = {}
- top_bottom_map = {}
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- route_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': t_constants.RT_TRUNK}]
- routes = db_api.list_resource_routings(t_ctx, route_filters)
- for route in routes:
- bottom_top_map[route['bottom_id']] = route['top_id']
- top_bottom_map[route['top_id']] = route['bottom_id']
-
- if limit:
- if marker:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, marker, t_constants.RT_TRUNK)
- # if mapping exists, we retrieve trunk information
- # from bottom, otherwise from top
- if mappings:
- pod_id = mappings[0][0]['pod_id']
- current_pod = db_api.get_pod(t_ctx, pod_id)
- ret = self._get_trunks_from_pod_with_limit(
- context, current_pod, bottom_top_map, top_bottom_map,
- filters, limit, marker)
- else:
- ret = self._get_trunks_from_top_with_limit(
- context, top_bottom_map, filters, limit, marker)
- else:
- current_pod = db_api.get_next_bottom_pod(t_ctx)
- # if current_pod exists, we retrieve trunk information
- # from bottom, otherwise from top
- if current_pod:
- ret = self._get_trunks_from_pod_with_limit(
- context, current_pod, bottom_top_map, top_bottom_map,
- filters, limit, None)
- else:
- ret = self._get_trunks_from_top_with_limit(
- context, top_bottom_map, filters, limit, None)
- else:
- pods = db_api.list_pods(t_ctx)
- _filters = self._transform_trunk_filters(filters, top_bottom_map)
- for pod in pods:
- if not pod['az_name']:
- continue
- client = self._get_client(pod['region_name'])
- pod_trunks = client.list_trunks(t_ctx, filters=_filters)
- ret.extend(pod_trunks)
- ret = self._map_trunks_from_bottom_to_top(ret, bottom_top_map)
- top_trunks = self._get_trunks_from_top(context,
- top_bottom_map, filters)
- ret.extend(top_trunks)
-
- return [db_utils.resource_fields(trunk, fields) for trunk in ret]
-
- def get_trunk_subports(self, context, filters=None):
- ret = None
- if not filters or len(filters) != 2:
- return ret
- device_ids = filters.get('device_id', [])
- device_owners = filters.get('device_owner', [])
- if (len(device_owners) != 1
- or len(device_ids) != 1
- or device_owners[0] != t_constants.DEVICE_OWNER_SUBPORT):
- return ret
- try:
- super(TricircleTrunkDriver, self).get_trunk(context, device_ids[0])
- except trunk_exc.TrunkNotFound:
- return ret
-
- core_plugin = directory.get_plugin()
- ret = super(central_plugin.TricirclePlugin, core_plugin).get_ports(
- context, filters)
- return ret
-
- def update_subports_device_id(self, context,
- subports, device_id, device_owner):
- if not subports['sub_ports']:
- return
- core_plugin = directory.get_plugin()
- body = {'port': {
- 'device_id': device_id,
- 'device_owner': device_owner}}
- for subport in subports['sub_ports']:
- super(central_plugin.TricirclePlugin, core_plugin).update_port(
- context, subport['port_id'], body)
-
- def add_subports(self, context, trunk_id, subports):
- t_ctx = t_context.get_context_from_neutron_context(context)
- with context.session.begin():
- self.update_subports_device_id(context, subports, trunk_id,
- t_constants.DEVICE_OWNER_SUBPORT)
- res = super(TricircleTrunkDriver, self).add_subports(
- context, trunk_id, subports)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod = mappings[0][0]
- self.xjob_handler.sync_trunk(
- t_ctx, res['project_id'], trunk_id, b_pod['pod_id'])
-
- return res
-
- def remove_subports(self, context, trunk_id, subports):
- t_ctx = t_context.get_context_from_neutron_context(context)
- with context.session.begin():
- self.update_subports_device_id(context, subports, '', '')
- res = super(TricircleTrunkDriver, self).remove_subports(
- context, trunk_id, subports)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod = mappings[0][0]
- self.xjob_handler.sync_trunk(
- t_ctx, res['project_id'], trunk_id, b_pod['pod_id'])
-
- return res
diff --git a/tricircle/network/central_trunk_plugin.py b/tricircle/network/central_trunk_plugin.py
deleted file mode 100644
index 8e2a08cd..00000000
--- a/tricircle/network/central_trunk_plugin.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import six
-
-from oslo_log import log
-
-from neutron.services.trunk import exceptions as trunk_exc
-from neutron.services.trunk import plugin
-from neutron_lib.db import utils as db_utils
-from neutron_lib.plugins import directory
-
-import tricircle.common.client as t_client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-from tricircle.common import xrpcapi
-import tricircle.db.api as db_api
-from tricircle.network import central_plugin
-from tricircle.network import helper
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleTrunkPlugin(plugin.TrunkPlugin):
-
- def __init__(self):
- super(TricircleTrunkPlugin, self).__init__()
- self.xjob_handler = xrpcapi.XJobAPI()
- self.helper = helper.NetworkHelper(self)
- self.clients = {}
-
- def is_rpc_enabled(self):
- return False
-
- def _get_client(self, region_name):
- if region_name not in self.clients:
- self.clients[region_name] = t_client.Client(region_name)
- return self.clients[region_name]
-
- def update_trunk(self, context, trunk_id, trunk):
- t_ctx = t_context.get_context_from_neutron_context(context)
- with context.session.begin():
- res = super(TricircleTrunkPlugin, self).update_trunk(
- context, trunk_id, trunk)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod = mappings[0][0]
- self.xjob_handler.sync_trunk(t_ctx, res['project_id'],
- trunk_id, b_pod['pod_id'])
- return res
-
- def delete_trunk(self, context, trunk_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- res = super(TricircleTrunkPlugin, self).get_trunk(context, trunk_id)
- with context.session.begin():
- super(TricircleTrunkPlugin, self).delete_trunk(context, trunk_id)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod = mappings[0][0]
- self.xjob_handler.sync_trunk(t_ctx, res['project_id'],
- trunk_id, b_pod['pod_id'])
-
- def get_trunk(self, context, trunk_id, fields=None):
- t_ctx = t_context.get_context_from_neutron_context(context)
- t_trunk = super(TricircleTrunkPlugin, self).get_trunk(context,
- trunk_id, fields)
- if not fields or 'status' in fields:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod, b_trunk_id = mappings[0]
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
- b_trunk = b_client.get_trunks(t_ctx, b_trunk_id)
- if not b_trunk:
- LOG.error('trunk: %(trunk_id)s not found, '
- 'pod name: %(name)s',
- {'trunk_id': b_trunk_id, 'name': b_region_name})
- else:
- t_trunk['status'] = b_trunk['status']
-
- return t_trunk
-
- def _get_trunks_from_top(self, context, top_bottom_map, filters):
- top_trunks = super(TricircleTrunkPlugin, self).get_trunks(
- context, filters)
-
- return [trunk for trunk in top_trunks if
- trunk['id'] not in top_bottom_map]
-
- def _get_min_search_step(self):
- # this method is for unit test mock purpose
- return 100
-
- def _get_trunks_from_top_with_limit(self, context, top_bottom_map,
- filters, limit, marker):
-
- ret = []
- total = 0
- # set the step to twice the requested number to have a better chance
- # of obtaining all the trunks we need
- search_step = limit * 2
- min_search_step = self._get_min_search_step()
- if search_step < min_search_step:
- search_step = min_search_step
- # None means sort direction is desc
- sorts = [('id', None)]
- top_trunks = super(TricircleTrunkPlugin, self).get_trunks(
- context, filters, sorts=sorts, limit=search_step, marker=marker)
-
- for trunk in top_trunks:
- total += 1
- if trunk['id'] not in top_bottom_map:
- ret.append(trunk)
- if len(ret) == limit:
- return ret
- # NOTE(xiulin) we have traversed all the trunks
- if total < search_step:
- return ret
- else:
- ret.extend(self._get_trunks_from_top_with_limit(
- context, top_bottom_map, filters, limit - len(ret),
- ret[-1]['id']))
- return ret
-
- def _get_trunks_from_pod_with_limit(self, context, current_pod,
- bottom_top_map, top_bottom_map,
- filters, limit, marker):
-
- ret = []
- t_ctx = t_context.get_context_from_neutron_context(context)
- q_client = self._get_client(
- current_pod['region_name']).get_native_client('trunk', t_ctx)
-
- params = {'limit': 0 if not limit else limit}
- if marker:
- b_marker = top_bottom_map[marker]
- params.update({'marker': b_marker})
- if filters:
- if 'id' in filters:
- map_ids = self._get_map_trunk_ids(filters['id'],
- top_bottom_map)
- filters['id'] = map_ids
- params.update(filters)
- bottom_trunks = q_client.get(q_client.trunks_path,
- params=params)['trunks']
- for bottom_trunk in bottom_trunks:
- top_id = bottom_top_map.get(bottom_trunk['id'])
- # TODO(xiulin): handle unmapped trunk
- if top_id:
- bottom_trunk['id'] = top_id
- ret.append(bottom_trunk)
- if len(ret) == limit:
- return ret
-
- remainder = limit - len(ret)
- next_pod = db_api.get_next_bottom_pod(
- t_ctx, current_pod_id=current_pod['pod_id'])
- if next_pod:
- # get from next bottom pod
- next_ret = self._get_trunks_from_pod_with_limit(
- context, next_pod, bottom_top_map, top_bottom_map,
- filters, remainder, None)
- ret.extend(next_ret)
- return ret
- else:
- # get from top pod
- top_ret = self._get_trunks_from_top_with_limit(
- context, top_bottom_map, filters, remainder, None)
- ret.extend(top_ret)
- return ret
-
- def _map_trunks_from_bottom_to_top(self, trunks, bottom_top_map):
- trunk_list = []
- for trunk in trunks:
- # TODO(xiulin): handle unmapped trunk
- if trunk['id'] not in bottom_top_map:
- continue
- trunk['id'] = bottom_top_map[trunk['id']]
- trunk_list.append(trunk)
- return trunk_list
-
- def _get_map_trunk_ids(self, top_ids, top_bottom_map):
- b_trunk_ids = []
- for _id in top_ids:
- if _id in top_bottom_map:
- b_trunk_ids.append(top_bottom_map[_id])
- else:
- b_trunk_ids.append(_id)
- return b_trunk_ids
-
- def _transform_trunk_filters(self, filters, top_bottom_map):
- _filters = []
- if filters:
- for key, value in six.iteritems(filters):
- if key == 'id':
- value = self._get_map_trunk_ids(value, top_bottom_map)
- _filters.append({'key': key,
- 'comparator': 'eq',
- 'value': value})
- return _filters
-
- def get_trunks(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None, page_reverse=False):
- ret = []
- bottom_top_map = {}
- top_bottom_map = {}
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- route_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': t_constants.RT_TRUNK}]
- routes = db_api.list_resource_routings(t_ctx, route_filters)
- for route in routes:
- bottom_top_map[route['bottom_id']] = route['top_id']
- top_bottom_map[route['top_id']] = route['bottom_id']
-
- if limit:
- if marker:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, marker, t_constants.RT_TRUNK)
- # if mapping exists, we retrieve trunk information
- # from bottom, otherwise from top
- if mappings:
- pod_id = mappings[0][0]['pod_id']
- current_pod = db_api.get_pod(t_ctx, pod_id)
- ret = self._get_trunks_from_pod_with_limit(
- context, current_pod, bottom_top_map, top_bottom_map,
- filters, limit, marker)
- else:
- ret = self._get_trunks_from_top_with_limit(
- context, top_bottom_map, filters, limit, marker)
- else:
- current_pod = db_api.get_next_bottom_pod(t_ctx)
- # if current_pod exists, we retrieve trunk information
- # from bottom, otherwise from top
- if current_pod:
- ret = self._get_trunks_from_pod_with_limit(
- context, current_pod, bottom_top_map, top_bottom_map,
- filters, limit, None)
- else:
- ret = self._get_trunks_from_top_with_limit(
- context, top_bottom_map, filters, limit, None)
- else:
- pods = db_api.list_pods(t_ctx)
- _filters = self._transform_trunk_filters(filters, top_bottom_map)
- for pod in pods:
- if not pod['az_name']:
- continue
- client = self._get_client(pod['region_name'])
- pod_trunks = client.list_trunks(t_ctx, filters=_filters)
- ret.extend(pod_trunks)
- ret = self._map_trunks_from_bottom_to_top(ret, bottom_top_map)
- top_trunks = self._get_trunks_from_top(context,
- top_bottom_map, filters)
- ret.extend(top_trunks)
-
- return [db_utils.resource_fields(trunk, fields) for trunk in ret]
-
- def get_trunk_subports(self, context, filters=None):
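-        # only a query of the exact form {'device_id': [<trunk id>],
-        # 'device_owner': [t_constants.DEVICE_OWNER_SUBPORT]} is served
-        # here; any other filter combination returns None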
- ret = None
- if not filters or len(filters) != 2:
- return ret
- device_ids = filters.get('device_id', [])
- device_owners = filters.get('device_owner', [])
- if (len(device_owners) != 1
- or len(device_ids) != 1
- or device_owners[0] != t_constants.DEVICE_OWNER_SUBPORT):
- return ret
- try:
- super(TricircleTrunkPlugin, self).get_trunk(context, device_ids[0])
- except trunk_exc.TrunkNotFound:
- return ret
-
- core_plugin = directory.get_plugin()
- ret = super(central_plugin.TricirclePlugin, core_plugin).get_ports(
- context, filters)
- return ret
-
- def update_subports_device_id(self, context,
- subports, device_id, device_owner):
- if not subports['sub_ports']:
- return
- core_plugin = directory.get_plugin()
- body = {'port': {
- 'device_id': device_id,
- 'device_owner': device_owner}}
- for subport in subports['sub_ports']:
- super(central_plugin.TricirclePlugin, core_plugin).update_port(
- context, subport['port_id'], body)
-
- def add_subports(self, context, trunk_id, subports):
- t_ctx = t_context.get_context_from_neutron_context(context)
- with context.session.begin():
- self.update_subports_device_id(context, subports, trunk_id,
- t_constants.DEVICE_OWNER_SUBPORT)
- res = super(TricircleTrunkPlugin, self).add_subports(
- context, trunk_id, subports)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod = mappings[0][0]
- self.xjob_handler.sync_trunk(
- t_ctx, res['project_id'], trunk_id, b_pod['pod_id'])
-
- return res
-
- def remove_subports(self, context, trunk_id, subports):
- t_ctx = t_context.get_context_from_neutron_context(context)
- with context.session.begin():
- self.update_subports_device_id(context, subports, '', '')
- res = super(TricircleTrunkPlugin, self).remove_subports(
- context, trunk_id, subports)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, trunk_id, t_constants.RT_TRUNK)
- if mappings:
- b_pod = mappings[0][0]
- self.xjob_handler.sync_trunk(
- t_ctx, res['project_id'], trunk_id, b_pod['pod_id'])
-
- return res
diff --git a/tricircle/network/drivers/__init__.py b/tricircle/network/drivers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/network/drivers/openvswitch/__init__.py b/tricircle/network/drivers/openvswitch/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/network/drivers/type_flat.py b/tricircle/network/drivers/type_flat.py
deleted file mode 100644
index 5907f418..00000000
--- a/tricircle/network/drivers/type_flat.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron_lib import exceptions
-from neutron_lib.plugins.ml2 import api
-from oslo_config import cfg
-from oslo_log import log
-
-from neutron.plugins.ml2.drivers import type_flat
-
-from tricircle.common import constants
-
-LOG = log.getLogger(__name__)
-
-
-class FlatTypeDriver(type_flat.FlatTypeDriver):
- def __init__(self):
- super(type_flat.FlatTypeDriver, self).__init__()
- self._parse_networks(cfg.CONF.tricircle.flat_networks)
-
- def get_type(self):
- return constants.NT_FLAT
-
- def initialize(self):
- LOG.info("FlatTypeDriver initialization complete")
-
- def reserve_provider_segment(self, context, segment, filters=None):
- try:
- res = super(FlatTypeDriver,
- self).reserve_provider_segment(context,
- segment,
- filters=None)
- except exceptions.FlatNetworkInUse:
- # to support multiple regions sharing the same physical network
- # for external network, we ignore this exception and let local
- # Neutron judge whether the physical network is valid
- res = segment
- res[api.MTU] = None
- res[api.NETWORK_TYPE] = self.get_type()
- return res
-
- def get_mtu(self, physical_network=None):
- pass
diff --git a/tricircle/network/drivers/type_local.py b/tricircle/network/drivers/type_local.py
deleted file mode 100644
index fb546e91..00000000
--- a/tricircle/network/drivers/type_local.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron_lib.plugins.ml2 import api
-
-from tricircle.common import constants
-
-
-class LocalTypeDriver(api.TypeDriver):
- def get_type(self):
- return constants.NT_LOCAL
-
- def initialize(self):
- pass
-
- def is_partial_segment(self, segment):
- return False
-
- def validate_provider_segment(self, segment):
- pass
-
- def reserve_provider_segment(self, context, segment, filters=None):
- return segment
-
- def allocate_tenant_segment(self, context, filters=None):
- return {api.NETWORK_TYPE: constants.NT_LOCAL}
-
- def release_segment(self, context, segment):
- pass
-
- def get_mtu(self, physical):
- pass
diff --git a/tricircle/network/drivers/type_vlan.py b/tricircle/network/drivers/type_vlan.py
deleted file mode 100644
index 5aa10429..00000000
--- a/tricircle/network/drivers/type_vlan.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-from neutron_lib.plugins.ml2 import api
-from neutron_lib.plugins import utils as plugin_utils
-from oslo_config import cfg
-from oslo_log import log
-
-from neutron.plugins.ml2.drivers import type_vlan
-
-from tricircle.common import constants
-
-LOG = log.getLogger(__name__)
-
-
-class VLANTypeDriver(type_vlan.VlanTypeDriver):
- def __init__(self):
- super(VLANTypeDriver, self).__init__()
-
- def _parse_network_vlan_ranges(self):
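-        # cfg.CONF.tricircle.network_vlan_ranges follows the standard
-        # Neutron format, e.g. "physnet1:1000:2999,physnet2" (a physical
-        # network name optionally followed by a VLAN id range)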
- try:
- self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
- cfg.CONF.tricircle.network_vlan_ranges)
- except Exception:
- LOG.exception('Failed to parse network_vlan_ranges. '
- 'Service terminated!')
- sys.exit(1)
- LOG.info('Network VLAN ranges: %s', self.network_vlan_ranges)
-
- def get_type(self):
- return constants.NT_VLAN
-
- def reserve_provider_segment(self, context, segment, filters=None):
- res = super(VLANTypeDriver,
- self).reserve_provider_segment(context, segment, filters)
- res[api.NETWORK_TYPE] = constants.NT_VLAN
- return res
-
- def allocate_tenant_segment(self, context, filters=None):
- res = super(VLANTypeDriver,
- self).allocate_tenant_segment(context, filters)
- res[api.NETWORK_TYPE] = constants.NT_VLAN
- return res
-
- def get_mtu(self, physical):
- pass
diff --git a/tricircle/network/drivers/type_vxlan.py b/tricircle/network/drivers/type_vxlan.py
deleted file mode 100644
index a31eed5c..00000000
--- a/tricircle/network/drivers/type_vxlan.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron_lib.plugins.ml2 import api
-from oslo_config import cfg
-from oslo_log import log
-
-from neutron.plugins.ml2.drivers import type_vxlan
-from neutron_lib import constants as q_lib_constants
-from neutron_lib import exceptions as n_exc
-
-from tricircle.common import constants
-import tricircle.common.context as t_context
-import tricircle.db.api as db_api
-
-LOG = log.getLogger(__name__)
-
-
-class VxLANTypeDriver(type_vxlan.VxlanTypeDriver):
- def __init__(self):
- super(VxLANTypeDriver, self).__init__()
-
- def get_type(self):
- return constants.NT_VxLAN
-
- def initialize(self):
- try:
- self._initialize(cfg.CONF.tricircle.vni_ranges)
- except n_exc.NetworkTunnelRangeError:
- LOG.exception("Failed to parse vni_ranges. "
- "Service terminated!")
- raise SystemExit()
-
- def reserve_provider_segment(self, context, segment, filters=None):
- res = super(VxLANTypeDriver,
- self).reserve_provider_segment(context, segment, filters)
- res[api.NETWORK_TYPE] = constants.NT_VxLAN
- return res
-
- def allocate_tenant_segment(self, context, filters=None):
- res = super(VxLANTypeDriver,
- self).allocate_tenant_segment(context, filters)
- res[api.NETWORK_TYPE] = constants.NT_VxLAN
- return res
-
- def get_mtu(self, physical_network=None):
- pass
-
- def get_endpoint_by_host(self, host):
- LOG.debug("get_endpoint_by_host() called for host %s", host)
- host_endpoint = {'ip_address': None}
- context = t_context.get_db_context()
- agents = db_api.get_agent_by_host_type(
- context, host, q_lib_constants.AGENT_TYPE_OVS)
- if agents:
- host_endpoint['ip_address'] = agents['tunnel_ip']
- return host_endpoint
diff --git a/tricircle/network/exceptions.py b/tricircle/network/exceptions.py
deleted file mode 100644
index 39ba60e9..00000000
--- a/tricircle/network/exceptions.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron_lib import exceptions
-
-from tricircle.common.i18n import _
-
-
-class RemoteGroupNotSupported(exceptions.InvalidInput):
- message = _('Remote group not supported by Tricircle plugin')
-
-
-class DefaultGroupUpdateNotSupported(exceptions.InvalidInput):
- message = _('Default group update not supported by Tricircle plugin')
-
-
-class BottomPodOperationFailure(exceptions.NeutronException):
- message = _(
- 'Operation for %(resource)s on bottom pod %(region_name)s fails')
-
-
-class DhcpPortNotFound(exceptions.NotFound):
- message = _('Dhcp port for subnet %(subnet_id)s not found')
-
-
-class GatewayPortNotFound(exceptions.NotFound):
- message = _('Gateway port for subnet %(subnet_id)s and region %(region)s '
- 'not found')
-
-
-class CentralizedSNATPortNotFound(exceptions.NotFound):
- message = _('Centralized snat port for subnet %(subnet_id)s not found')
-
-
-class SecurityGroupNotFound(exceptions.NotFound):
- message = _('Security group for %(sg_id)s not found')
-
-
-class SecurityGroupRuleNotFound(exceptions.NotFound):
- message = _('Security group rule for id %(rule_id)s not found')
-
-
-class NetAttachedToNonLocalRouter(exceptions.Conflict):
- message = _('Network %(network_id)s has already been attached to non '
- 'local router %(router_id)s')
-
-
-class PortNotFound(exceptions.NotFound):
- message = _('Port for id %(port_id)s not found')
-
-
-class PortPairsNotFoundForPortPairGroup(exceptions.NotFound):
- message = _(
- 'Port pairs for port pair group %(portpairgroup_id)s not found')
-
-
-class PortPairNotFound(exceptions.NotFound):
- message = _('Port pair for id %(portpair_id)s not found')
-
-
-class PortChainNotFound(exceptions.NotFound):
- message = _('Port chain for id %(portchain_id)s not found')
-
-
-class PortChainNotFoundForFlowClassifier(exceptions.NotFound):
- message = _(
- 'Port chain for flow classifier %(flowclassifier_id)s not found')
-
-
-class NetNotFoundForPortChain(exceptions.NotFound):
- message = _('Net for port chain %(portchain_id)s not found')
diff --git a/tricircle/network/helper.py b/tricircle/network/helper.py
deleted file mode 100644
index e5012104..00000000
--- a/tricircle/network/helper.py
+++ /dev/null
@@ -1,1144 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import netaddr
-import re
-import six
-from six.moves import xrange
-
-from neutron_lib.api.definitions import availability_zone as az_def
-from neutron_lib.api.definitions import external_net
-from neutron_lib.api.definitions import portbindings
-from neutron_lib.api.definitions import provider_net
-from neutron_lib.api import validators
-from neutron_lib import constants
-import neutronclient.common.exceptions as q_cli_exceptions
-from oslo_serialization import jsonutils
-
-from tricircle.common import client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exceptions
-import tricircle.common.lock_handle as t_lock
-from tricircle.common import utils
-import tricircle.db.api as db_api
-
-
-# manually define these constants to avoid depending on neutron repos
-# neutron.extensions.availability_zone.AZ_HINTS
-AZ_HINTS = 'availability_zone_hints'
-EXTERNAL = 'router:external' # neutron.extensions.external_net.EXTERNAL
-TYPE_VLAN = 'vlan' # neutron.plugins.common.constants.TYPE_VLAN
-TYPE_VXLAN = 'vxlan' # neutron.plugins.common.constants.TYPE_VXLAN
-
-OVS_AGENT_DATA_TEMPLATE = {
- 'agent_type': None,
- 'binary': 'neutron-openvswitch-agent',
- 'host': None,
- 'topic': constants.L2_AGENT_TOPIC,
- 'configurations': {
- 'ovs_hybrid_plug': False,
- 'in_distributed_mode': False,
- 'datapath_type': 'system',
- 'arp_responder_enabled': False,
- 'tunneling_ip': None,
- 'vhostuser_socket_dir': '/var/run/openvswitch',
- 'devices': 0,
- 'ovs_capabilities': {
- 'datapath_types': ['netdev', 'system'],
- 'iface_types': ['geneve', 'gre', 'internal', 'ipsec_gre', 'lisp',
- 'patch', 'stt', 'system', 'tap', 'vxlan']},
- 'log_agent_heartbeats': False,
- 'l2_population': True,
- 'tunnel_types': ['vxlan'],
- 'extensions': [],
- 'enable_distributed_routing': False,
- 'bridge_mappings': {}}}
-
-VIF_AGENT_TYPE_MAP = {
- portbindings.VIF_TYPE_OVS: constants.AGENT_TYPE_OVS}
-
-AGENT_DATA_TEMPLATE_MAP = {
- constants.AGENT_TYPE_OVS: OVS_AGENT_DATA_TEMPLATE}
-
-TUNNEL_IP_HANDLE_MAP = {
- constants.AGENT_TYPE_OVS: lambda agent: agent[
- 'configurations']['tunneling_ip']}
-
-MAC_PATTERN = re.compile('([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}')
-
-
-class NetworkHelper(object):
- def __init__(self, call_obj=None):
- self.clients = {}
- self.call_obj = call_obj
-
- @staticmethod
- def _transfer_network_type(network_type):
- network_type_map = {t_constants.NT_VLAN: TYPE_VLAN,
- t_constants.NT_VxLAN: TYPE_VXLAN}
- return network_type_map.get(network_type, network_type)
-
- @staticmethod
- def _get_provider_info(t_net):
- ret = {
- provider_net.NETWORK_TYPE: NetworkHelper._transfer_network_type(
- t_net[provider_net.NETWORK_TYPE]),
- provider_net.SEGMENTATION_ID: t_net[provider_net.SEGMENTATION_ID]
- }
- if t_net[provider_net.NETWORK_TYPE] == t_constants.NT_VLAN:
- ret[provider_net.PHYSICAL_NETWORK] = t_net[
- provider_net.PHYSICAL_NETWORK]
- return ret
-
- def _get_client(self, region_name=None):
- if not region_name:
- if t_constants.TOP not in self.clients:
- self.clients[t_constants.TOP] = client.Client()
- return self.clients[t_constants.TOP]
- if region_name not in self.clients:
- self.clients[region_name] = client.Client(region_name)
- return self.clients[region_name]
-
- @staticmethod
- def _merge_ip_range(ip_range, ip):
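-        # e.g. merging 10.0.1.4 into the range 10.0.1.2-10.0.1.3 gives
-        # (10.0.1.2-10.0.1.4, True); a non-adjacent address leaves the
-        # range unchanged and returns (ip_range, False)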
- ip_set = netaddr.IPSet(ip_range)
- ip_set.add(ip)
- if ip_set.iscontiguous():
- return ip_set.iprange(), True
- else:
- return ip_range, False
-
- # operate top resource
- def _prepare_top_element_by_call(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- def list_resources(t_ctx_, q_ctx_, pod_, ele_, _type_):
- return getattr(super(self.call_obj.__class__, self.call_obj),
- 'get_%ss' % _type_)(q_ctx_,
- filters={'name': [ele_['id']]})
-
- def create_resources(t_ctx_, q_ctx_, pod_, body_, _type_):
- if _type_ == t_constants.RT_NETWORK:
- # for network, we call TricirclePlugin's own create_network to
- # handle network segment
- return self.call_obj.create_network(q_ctx_, body_)
- else:
- return getattr(super(self.call_obj.__class__, self.call_obj),
- 'create_%s' % _type_)(q_ctx_, body_)
-
- return t_lock.get_or_create_element(
- t_ctx, q_ctx,
- project_id, pod, ele, _type, body,
- list_resources, create_resources)
-
- def _prepare_top_element_by_client(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- def list_resources(t_ctx_, q_ctx_, pod_, ele_, _type_):
- client = self._get_client()
- return client.list_resources(_type_, t_ctx_,
- [{'key': 'name', 'comparator': 'eq',
- 'value': ele_['id']}])
-
- def create_resources(t_ctx_, q_ctx_, pod_, body_, _type_):
- client = self._get_client()
- return client.create_resources(_type_, t_ctx_, body_)
-
- assert _type == 'port'
-        # currently only the top port can be created via the client; other
-        # top resources should be created directly by the plugin
- return t_lock.get_or_create_element(
- t_ctx, q_ctx,
- project_id, pod, ele, _type, body,
- list_resources, create_resources)
-
- def prepare_top_element(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- """Get or create shared top networking resource
-
- :param t_ctx: tricircle context
- :param q_ctx: neutron context
- :param project_id: project id
- :param pod: dict of top pod
- :param ele: dict with "id" as key and distinctive identifier as value
- :param _type: type of the resource
- :param body: request body to create resource
-        :return: a tuple: a boolean indicating whether the resource is newly
-            created or already exists, and the id of the resource
- """
- if self.call_obj:
- return self._prepare_top_element_by_call(
- t_ctx, q_ctx, project_id, pod, ele, _type, body)
- else:
- return self._prepare_top_element_by_client(
- t_ctx, q_ctx, project_id, pod, ele, _type, body)
-
- def get_bridge_interface(self, t_ctx, q_ctx, project_id, pod,
- t_net_id, b_router_id, t_subnet=None):
- """Get or create top bridge interface
-
- :param t_ctx: tricircle context
- :param q_ctx: neutron context
- :param project_id: project id
- :param pod: dict of top pod
- :param t_net_id: top bridge network id
- :param b_router_id: bottom router id
- :param t_subnet: optional top bridge subnet dict
- :return: bridge interface id
- """
- port_name = t_constants.bridge_port_name % (project_id,
- b_router_id)
- port_ele = {'id': port_name}
- port_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': port_name,
- 'network_id': t_net_id,
- 'device_id': '',
- 'device_owner': ''
- }
- }
- if self.call_obj:
- port_body['port'].update(
- {'mac_address': constants.ATTR_NOT_SPECIFIED,
- 'fixed_ips': constants.ATTR_NOT_SPECIFIED})
- if t_subnet:
- port_body['port'].update(
- {'fixed_ips': [{'subnet_id': t_subnet['id'],
- 'ip_address': t_subnet['gateway_ip']}]})
- _, port_id = self.prepare_top_element(
- t_ctx, q_ctx, project_id, pod, port_ele, 'port', port_body)
- return port_id
-
- # operate bottom resource
- def prepare_bottom_element(self, t_ctx,
- project_id, pod, ele, _type, body):
- """Get or create bottom networking resource based on top resource
-
- :param t_ctx: tricircle context
- :param project_id: project id
- :param pod: dict of bottom pod
- :param ele: dict of top resource
- :param _type: type of the resource
- :param body: request body to create resource
-        :return: a tuple: a boolean indicating whether the resource is newly
-            created or already exists, and the id of the resource
- """
- def list_resources(t_ctx_, q_ctx, pod_, ele_, _type_):
- client = self._get_client(pod_['region_name'])
- if _type_ == t_constants.RT_NS_ROUTER:
- _type_ = t_constants.RT_ROUTER
- value = t_constants.ns_router_name % ele_['id']
- elif _type_ == t_constants.RT_SD_PORT:
- _type_ = t_constants.RT_PORT
- value = t_constants.shadow_port_name % ele_['id']
- elif _type_ == t_constants.RT_NETWORK:
- value = utils.get_bottom_network_name(ele_)
- else:
- value = ele_['id']
- return client.list_resources(_type_, t_ctx_,
- [{'key': 'name', 'comparator': 'eq',
- 'value': value}])
-
- def create_resources(t_ctx_, q_ctx, pod_, body_, _type_):
- if _type_ == t_constants.RT_NS_ROUTER:
- _type_ = t_constants.RT_ROUTER
- elif _type_ == t_constants.RT_SD_PORT:
- _type_ = t_constants.RT_PORT
- client = self._get_client(pod_['region_name'])
- return client.create_resources(_type_, t_ctx_, body_)
-
- return t_lock.get_or_create_element(
- t_ctx, None, # we don't need neutron context, so pass None
- project_id, pod, ele, _type, body,
- list_resources, create_resources)
-
- @staticmethod
- def get_create_network_body(project_id, network):
- """Get request body to create bottom network
-
- :param project_id: project id
- :param network: top network dict
- :return: request body to create bottom network
- """
- body = {
- 'network': {
- 'tenant_id': project_id,
- 'name': utils.get_bottom_network_name(network),
- 'admin_state_up': True
- }
- }
- network_type = network.get(provider_net.NETWORK_TYPE)
- if network_type == t_constants.NT_VLAN:
- body['network'][provider_net.NETWORK_TYPE] = 'vlan'
- body['network'][provider_net.PHYSICAL_NETWORK] = network[
- provider_net.PHYSICAL_NETWORK]
- body['network'][provider_net.SEGMENTATION_ID] = network[
- provider_net.SEGMENTATION_ID]
- return body
-
- @staticmethod
- def _find_ip_range(pool, gateway_ip):
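-        # split one allocation pool around the gateway ip, e.g. the pool
-        # 10.0.1.2-10.0.1.10 with gateway 10.0.1.5 becomes the two pools
-        # 10.0.1.2-10.0.1.4 and 10.0.1.6-10.0.1.10; an empty list is
-        # returned when the gateway is not inside the pool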
- ret_pools = []
- ip_range = netaddr.IPRange(pool['start'], pool['end'])
- ip_num = len(ip_range)
- for i, ip in enumerate(ip_range):
- if gateway_ip == ip:
- if i > 0:
- ret_pools.append({'start': ip_range[0].format(),
- 'end': ip_range[i - 1].format()})
- if i < ip_num - 1:
- ret_pools.append(
- {'start': ip_range[i + 1].format(),
- 'end': ip_range[ip_num - 1].format()})
- return ret_pools
-
- @staticmethod
- def _split_pools_by_bottom_gateway_ip(pools, gateway_ip):
- new_pools = []
- g_ip = netaddr.IPAddress(gateway_ip)
- ip_found = False
- for pool in pools:
- if ip_found:
- new_pools.append({'start': pool['start'],
- 'end': pool['end']})
- continue
- ret_pools = NetworkHelper._find_ip_range(pool, g_ip)
- if ret_pools:
- ip_found = True
- new_pools.extend(ret_pools)
- if not ip_found:
- new_pools.extend(pools)
- return new_pools
-
- @staticmethod
- def _merge_pools_by_top_gateway_ip(pools, gateway_ip):
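-        # give the top gateway ip back to the allocation pools: every pool
-        # that becomes contiguous with the gateway is merged into a single
-        # pool containing it, the others are kept untouched; if no pool is
-        # adjacent to the gateway, a one-address pool is appended for it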
- new_ranges = []
- merged_set = netaddr.IPSet()
- for pool in pools:
- ip_range = netaddr.IPRange(pool['start'], pool['end'])
- ip_range, ip_merged = NetworkHelper._merge_ip_range(
- ip_range, gateway_ip)
- if not ip_merged:
- new_ranges.append(ip_range)
- else:
-                # if range1 + gateway_ip is contiguous and range2 +
-                # gateway_ip is contiguous, then range1 + range2 + gateway_ip
-                # is also contiguous, so we add them to the same ip set
- merged_set.add(ip_range)
- new_pools = []
- for new_range in new_ranges:
- new_pools.append({'start': new_range[0].format(),
- 'end': new_range[len(new_range) - 1].format()})
- if merged_set:
- merged_range = merged_set.iprange()
- new_pools.append(
- {'start': merged_range[0].format(),
- 'end': merged_range[len(merged_range) - 1].format()})
- else:
- new_pools.append({'start': gateway_ip, 'end': gateway_ip})
- return new_pools
-
- @staticmethod
- def get_bottom_subnet_pools(t_subnet, b_gateway_ip):
- """Get bottom subnet allocation pools
-
- :param t_subnet: top subnet
- :param b_gateway_ip: bottom subnet gateway ip
- :return: bottom subnet allocation pools
- """
- pools = t_subnet['allocation_pools']
- t_gateway_ip = t_subnet['gateway_ip']
- if not t_gateway_ip:
- # gateway is None, so we don't need to split allocation pools
- return pools
- new_pools = NetworkHelper._split_pools_by_bottom_gateway_ip(
- pools, b_gateway_ip)
- if t_gateway_ip == b_gateway_ip:
- return new_pools
- return NetworkHelper._merge_pools_by_top_gateway_ip(new_pools,
- t_gateway_ip)
-
- @staticmethod
- def get_create_subnet_body(project_id, t_subnet, b_net_id, gateway_ip):
- """Get request body to create bottom subnet
-
- :param project_id: project id
- :param t_subnet: top subnet dict
- :param b_net_id: bottom network id
- :param gateway_ip: bottom gateway ip
- :return: request body to create bottom subnet
- """
- new_pools = NetworkHelper.get_bottom_subnet_pools(t_subnet, gateway_ip)
- body = {
- 'subnet': {
- 'network_id': b_net_id,
- 'name': t_subnet['id'],
- 'ip_version': t_subnet['ip_version'],
- 'cidr': t_subnet['cidr'],
- 'gateway_ip': gateway_ip,
- 'allocation_pools': new_pools,
- 'enable_dhcp': False,
- 'tenant_id': project_id
- }
- }
- return body
-
- @staticmethod
- def get_create_port_body(project_id, t_port, subnet_map, b_net_id,
- b_security_group_ids=None):
- """Get request body to create bottom port
-
- :param project_id: project id
- :param t_port: top port dict
- :param subnet_map: dict with top subnet id as key and bottom subnet
- id as value
- :param b_net_id: bottom network id
- :param b_security_group_ids: list of bottom security group id
- :return: request body to create bottom port
- """
- b_fixed_ips = []
- for ip in t_port['fixed_ips']:
- b_ip = {'subnet_id': subnet_map[ip['subnet_id']],
- 'ip_address': ip['ip_address']}
- b_fixed_ips.append(b_ip)
- body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': t_port['id'],
- 'network_id': b_net_id,
- 'mac_address': t_port['mac_address'],
- 'fixed_ips': b_fixed_ips
- }
- }
- if b_security_group_ids:
- body['port']['security_groups'] = b_security_group_ids
- return body
-
- def get_create_interface_body(self, project_id, t_net_id, b_region_name,
- t_subnet_id):
- """Get request body to create top interface
-
- :param project_id: project id
- :param t_net_id: top network id
-        :param b_region_name: bottom region name
-        :param t_subnet_id: top subnet id
-        :return: request body to create the top interface port
- """
- t_interface_name = t_constants.interface_port_name % (b_region_name,
- t_subnet_id)
- t_interface_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': t_interface_name,
- 'network_id': t_net_id,
- 'device_id': '',
- 'device_owner': 'network:router_interface',
- }
- }
- if self.call_obj:
- t_interface_body['port'].update(
- {'mac_address': constants.ATTR_NOT_SPECIFIED,
- 'fixed_ips': constants.ATTR_NOT_SPECIFIED})
- return t_interface_body
-
- def prepare_bottom_network_subnets(self, t_ctx, q_ctx, project_id, pod,
- t_net, t_subnets):
- """Get or create bottom network, subnet and dhcp port
-
- :param t_ctx: tricircle context
- :param q_ctx: neutron context
- :param project_id: project id
- :param pod: dict of bottom pod
- :param t_net: dict of top network
- :param t_subnets: list of top subnet dict
- :return: bottom network id and a dict with top subnet id as key,
- bottom subnet id as value
- """
- # network
- net_body = self.get_create_network_body(project_id, t_net)
- if net_body['network'].get(provider_net.NETWORK_TYPE):
-            # if a network type is specified, we need to switch to the
-            # admin account
- admin_context = t_context.get_admin_context()
-
- _, b_net_id = self.prepare_bottom_element(
- admin_context, project_id, pod, t_net, t_constants.RT_NETWORK,
- net_body)
- else:
- _, b_net_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, t_net, t_constants.RT_NETWORK,
- net_body)
-
- # subnet
- subnet_map = {}
- subnet_dhcp_map = {}
-
- for subnet in t_subnets:
- # gateway
- t_interface_name = t_constants.interface_port_name % (
- pod['region_name'], subnet['id'])
-
- t_interface_body = self.get_create_interface_body(
- project_id, t_net['id'], pod['region_name'], subnet['id'])
-
- _, t_interface_id = self.prepare_top_element(
- t_ctx, q_ctx, project_id, pod, {'id': t_interface_name},
- t_constants.RT_PORT, t_interface_body)
- t_interface = self._get_top_element(
- t_ctx, q_ctx, t_constants.RT_PORT, t_interface_id)
- gateway_ip = t_interface['fixed_ips'][0]['ip_address']
-
- subnet_body = self.get_create_subnet_body(
- project_id, subnet, b_net_id, gateway_ip)
- _, b_subnet_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, subnet, t_constants.RT_SUBNET,
- subnet_body)
- subnet_map[subnet['id']] = b_subnet_id
- subnet_dhcp_map[subnet['id']] = subnet['enable_dhcp']
-
- # dhcp port
- for t_subnet_id, b_subnet_id in six.iteritems(subnet_map):
- if not subnet_dhcp_map[t_subnet_id]:
- continue
- self.prepare_dhcp_port(t_ctx, project_id, pod, t_net['id'],
- t_subnet_id, b_net_id, b_subnet_id)
- b_client = self._get_client(pod['region_name'])
- b_client.update_subnets(t_ctx, b_subnet_id,
- {'subnet': {'enable_dhcp': True}})
-
- return b_net_id, subnet_map
-
- def get_bottom_bridge_elements(self, t_ctx, project_id,
- pod, t_net, is_external, t_subnet, t_port):
- """Get or create bottom bridge port
-
- :param t_ctx: tricircle context
- :param project_id: project id
- :param pod: dict of bottom pod
- :param t_net: dict of top bridge network
- :param is_external: whether the bottom network should be created as
-            an external network; this is True for the south-north case
- :param t_subnet: dict of top bridge subnet
- :param t_port: dict of top bridge port
- :return: tuple (boolean value indicating whether the resource is newly
- created or already exists, bottom port id, bottom subnet id,
- bottom network id)
- """
- net_body = {'network': {
- 'tenant_id': project_id,
- 'name': t_net['id'],
- 'admin_state_up': True}}
- net_body['network'].update(self._get_provider_info(t_net))
- if is_external:
- net_body['network'][EXTERNAL] = True
- _, b_net_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, t_net, 'network', net_body)
-
- subnet_body = {'subnet': {'network_id': b_net_id,
- 'name': t_subnet['id'],
- 'ip_version': 4,
- 'cidr': t_subnet['cidr'],
- 'enable_dhcp': False,
- 'tenant_id': project_id}}
- _, b_subnet_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, t_subnet, 'subnet', subnet_body)
-
- if t_port:
- port_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': t_port['id'],
- 'network_id': b_net_id,
- 'fixed_ips': [
- {'subnet_id': b_subnet_id,
- 'ip_address': t_port['fixed_ips'][0]['ip_address']}]
- }
- }
- is_new, b_port_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, t_port, 'port', port_body)
-
- return is_new, b_port_id, b_subnet_id, b_net_id
- else:
- return None, None, b_subnet_id, b_net_id
-
- @staticmethod
- def _get_create_dhcp_port_body(project_id, port, b_subnet_id,
- b_net_id):
- body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': port['id'],
- 'network_id': b_net_id,
- 'fixed_ips': [
- {'subnet_id': b_subnet_id,
- 'ip_address': port['fixed_ips'][0]['ip_address']}
- ],
- 'mac_address': port['mac_address'],
- portbindings.PROFILE: {},
- 'device_id': 'reserved_dhcp_port',
- 'device_owner': 'network:dhcp',
- }
- }
- return body
-
- def prepare_top_snat_port(self, t_ctx, q_ctx, project_id, t_net_id,
- t_subnet_id):
- """Create top centralized snat port
-
- :param t_ctx: tricircle context
- :param q_ctx: neutron context
- :param project_id: project id
- :param t_net_id: top network id
- :param t_subnet_id: top subnet id
- :return: top centralized snat port
- """
- t_snat_name = t_constants.snat_port_name % t_subnet_id
- t_snat_port_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'network_id': t_net_id,
- 'name': t_snat_name,
- 'binding:profile': {},
- 'device_id': '',
- 'device_owner': constants.DEVICE_OWNER_ROUTER_SNAT,
- }
- }
- if self.call_obj:
- t_snat_port_body['port'].update(
- {'mac_address': constants.ATTR_NOT_SPECIFIED,
- 'fixed_ips': constants.ATTR_NOT_SPECIFIED})
-
-        # NOTE(zhiyuan) for one subnet in different pods, we just create one
-        # centralized snat port. though the snat ports in different pods will
-        # have the same IP, VM packets will only go to the local router
-        # namespace
- _, t_snat_port_id = self.prepare_top_element(
- t_ctx, q_ctx, project_id, db_api.get_top_pod(t_ctx),
- {'id': t_snat_name}, t_constants.RT_PORT, t_snat_port_body)
- return t_snat_port_id
-
- def prepare_top_dhcp_port(self, t_ctx, q_ctx, project_id, t_net_id,
- t_subnet_id):
- """Create top dhcp port
-
- :param t_ctx: tricircle context
- :param q_ctx: neutron context
- :param project_id: project id
- :param t_net_id: top network id
- :param t_subnet_id: top subnet id
- :return: top dhcp port id
- """
- t_dhcp_name = t_constants.dhcp_port_name % t_subnet_id
- t_dhcp_port_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'network_id': t_net_id,
- 'name': t_dhcp_name,
- portbindings.PROFILE: {},
- 'device_id': 'reserved_dhcp_port',
- 'device_owner': 'network:dhcp',
- }
- }
- if self.call_obj:
- t_dhcp_port_body['port'].update(
- {'mac_address': constants.ATTR_NOT_SPECIFIED,
- 'fixed_ips': constants.ATTR_NOT_SPECIFIED})
-
-        # NOTE(zhiyuan) for one subnet in different pods, we just create
-        # one dhcp port. though the dhcp ports in different pods will have
-        # the same IP, each dnsmasq daemon only takes care of VM IPs in
-        # its own pod, so VMs will not receive incorrect dhcp responses
- _, t_dhcp_port_id = self.prepare_top_element(
- t_ctx, q_ctx, project_id, db_api.get_top_pod(t_ctx),
- {'id': t_dhcp_name}, t_constants.RT_PORT, t_dhcp_port_body)
- return t_dhcp_port_id
-
- def prepare_dhcp_port(self, ctx, project_id, b_pod, t_net_id, t_subnet_id,
- b_net_id, b_subnet_id):
- """Create top dhcp port and map it to bottom dhcp port
-
- :param ctx: tricircle context
- :param project_id: project id
- :param b_pod: dict of bottom pod
- :param t_net_id: top network id
- :param t_subnet_id: top subnet id
- :param b_net_id: bottom network id
- :param b_subnet_id: bottom subnet id
- :return: None
- """
- t_dhcp_port_id = self.prepare_top_dhcp_port(ctx, None, project_id,
- t_net_id, t_subnet_id)
- t_client = self._get_client()
- t_dhcp_port = t_client.get_ports(ctx, t_dhcp_port_id)
- dhcp_port_body = self._get_create_dhcp_port_body(
- project_id, t_dhcp_port, b_subnet_id, b_net_id)
- self.prepare_bottom_element(ctx, project_id, b_pod, t_dhcp_port,
- t_constants.RT_PORT, dhcp_port_body)
-
- def _get_top_element(self, t_ctx, q_ctx, _type, _id):
- if self.call_obj:
- return getattr(self.call_obj, 'get_%s' % _type)(q_ctx, _id)
- else:
- return getattr(self._get_client(), 'get_%ss' % _type)(t_ctx, _id)
-
- @staticmethod
- def get_create_sg_rule_body(rule, sg_id, ip=None):
- ip = ip or rule['remote_ip_prefix']
- # if ip is passed, this is an extended rule for remote group
- return {'security_group_rule': {
- 'tenant_id': rule['tenant_id'],
- 'remote_group_id': None,
- 'direction': rule['direction'],
- 'remote_ip_prefix': ip,
- 'protocol': rule.get('protocol'),
- 'ethertype': rule['ethertype'],
- 'port_range_max': rule.get('port_range_max'),
- 'port_range_min': rule.get('port_range_min'),
- 'security_group_id': sg_id}}
-
- @staticmethod
- def convert_az2region(t_ctx, az_hints):
- region_names = set()
- for az_hint in az_hints:
- pods = db_api.find_pods_by_az_or_region(t_ctx, az_hint)
- if not pods:
- continue
- for pod in pods:
- region_names.add(pod['region_name'])
- return list(region_names)
-
- @staticmethod
- def get_router_az_hints(router):
-        # when called by the api, availability_zone_hints is included in
-        # extra_attributes, but when called by xjob, it is included in the
-        # router body directly.
- extra_attributes = router.get('extra_attributes')
- az_hints = router.get(AZ_HINTS)
- if extra_attributes:
- az_hints = extra_attributes.get(AZ_HINTS)
- if not az_hints:
- return None
- if not isinstance(az_hints, list):
- az_hints = jsonutils.loads(az_hints)
- return az_hints
-
- @staticmethod
- def is_local_router(t_ctx, router):
- router_az_hints = NetworkHelper.get_router_az_hints(router)
- if not router_az_hints:
- return False
- if len(router_az_hints) > 1:
- return False
- router_az_hint = router_az_hints[0]
- return bool(db_api.get_pod_by_name(t_ctx, router_az_hint))
-
- @staticmethod
- def is_local_network(t_ctx, net):
- if net[provider_net.NETWORK_TYPE] == t_constants.NT_LOCAL:
- return True
- net_az_hints = net.get(AZ_HINTS)
- if not net_az_hints:
- return False
- if len(net_az_hints) > 1:
- return False
- net_az_hint = net_az_hints[0]
- return bool(db_api.get_pod_by_name(t_ctx, net_az_hint))
-
- @staticmethod
- def get_agent_type_by_vif(vif_type):
- return VIF_AGENT_TYPE_MAP.get(vif_type)
-
- @staticmethod
- def is_need_top_sync_port(port, bridge_cidr):
- """Judge if the port needs to be synced with top port
-
- While synced with top port, shadow agent/port process is triggered
-
- :param port: port dict
- :param bridge_cidr: bridge subnet CIDR
- :return: True/False
- """
- device_owner = port.get('device_owner', '')
- if device_owner.startswith('compute:'):
- # sync with top port for instance port
- return True
- if device_owner not in (constants.DEVICE_OWNER_ROUTER_GW,
- constants.DEVICE_OWNER_ROUTER_INTF):
-            # no need to sync with the top port if the port is NOT an
-            # instance port, a router interface or a router gateway. in the
-            # DVR case there are two more router port types,
-            # router_interface_distributed and router_centralized_snat;
-            # these two don't need to be synced with the top port either
- return False
- ip = port['fixed_ips'][0]['ip_address']
- # only sync with top port for bridge router port
- return netaddr.IPAddress(ip) in netaddr.IPNetwork(bridge_cidr)
-
- @staticmethod
- def construct_agent_data(agent_type, host, tunnel_ip):
- if agent_type not in AGENT_DATA_TEMPLATE_MAP:
- return {}
- data = copy.copy(AGENT_DATA_TEMPLATE_MAP[agent_type])
- data['agent_type'] = agent_type
- data['host'] = host
- data['configurations']['tunneling_ip'] = tunnel_ip
- return data
-
- @staticmethod
- def fill_agent_data(agent_type, host, agent, profile, tunnel_ip=None):
- _tunnel_ip = None
- if tunnel_ip:
- # explicitly specified tunnel IP has the highest priority
- _tunnel_ip = tunnel_ip
- elif agent_type in TUNNEL_IP_HANDLE_MAP:
- tunnel_handle = TUNNEL_IP_HANDLE_MAP[agent_type]
- _tunnel_ip = tunnel_handle(agent)
- if not _tunnel_ip:
- return
- profile[t_constants.PROFILE_HOST] = host
- profile[t_constants.PROFILE_AGENT_TYPE] = agent_type
- profile[t_constants.PROFILE_TUNNEL_IP] = _tunnel_ip
-
- @staticmethod
- def create_shadow_agent_if_needed(t_ctx, profile, pod):
- if t_constants.PROFILE_HOST not in profile:
- return
- agent_host = profile[t_constants.PROFILE_HOST]
- agent_type = profile[t_constants.PROFILE_AGENT_TYPE]
- agent_tunnel = profile[t_constants.PROFILE_TUNNEL_IP]
- db_api.ensure_agent_exists(t_ctx, pod['pod_id'], agent_host,
- agent_type, agent_tunnel)
-
- @staticmethod
- def fill_binding_info(port_body):
- agent_type = port_body[portbindings.PROFILE]
- # TODO(zhiyuan) support other agent types
- if agent_type == constants.AGENT_TYPE_OVS:
- port_body[portbindings.VIF_DETAILS] = {'port_filter': True,
- 'ovs_hybrid_plug': True}
- port_body[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS
- port_body[portbindings.VNIC_TYPE] = portbindings.VNIC_NORMAL
-
- @staticmethod
- def prepare_ports_with_retry(ctx, client, req_create_bodys):
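-        # bulk-create ports with a retry loop for MAC conflicts: when the
-        # bottom pod reports MacAddressInUseClient, the conflicting body is
-        # dropped from the request (the port id it refers to is collected
-        # instead) and the remaining bodies are re-submitted, for at most
-        # max_tries attempts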
- create_body_map = dict(
- [(create_body['mac_address'],
- create_body) for create_body in req_create_bodys])
- max_tries = 10
- conflict_port_ids = []
- for i in xrange(max_tries):
- create_bodys = list(create_body_map.values())
- if not create_bodys:
- ret_ports = []
- break
- try:
- ret_ports = client.create_ports(ctx, {'ports': create_bodys})
- break
- except q_cli_exceptions.MacAddressInUseClient as e:
- if i == max_tries - 1:
-                    # we failed on the last try, just raise the exception
- raise
- match = MAC_PATTERN.search(e.message)
- if match:
- conflict_mac = match.group()
- if conflict_mac not in create_body_map:
- # rare case, we conflicted with an unrecognized mac
- raise
- conflict_port = create_body_map.pop(conflict_mac)
- if (conflict_port['device_owner'] ==
- t_constants.DEVICE_OWNER_SHADOW):
- conflict_port_ids.append(
- conflict_port['name'].split('_')[-1])
- elif (conflict_port['device_owner'] ==
- t_constants.DEVICE_OWNER_SUBPORT):
- conflict_port_ids.append(conflict_port['device_id'])
- else:
- # the exception no longer contains mac information
- raise
- ret_port_ids = [ret_port['id'] for ret_port in ret_ports]
- ret_port_ids.extend(conflict_port_ids)
- return ret_port_ids
-
- def prepare_shadow_ports(self, ctx, project_id, target_pod, net_id,
- port_bodys, agents, max_bulk_size):
- if not port_bodys:
- return []
- full_create_bodys = []
- for port_body, agent in zip(port_bodys, agents):
- host = port_body[portbindings.HOST_ID]
- create_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': t_constants.shadow_port_name % port_body['id'],
- 'network_id': net_id,
- 'fixed_ips': [{
- 'ip_address': port_body[
- 'fixed_ips'][0]['ip_address']}],
- 'mac_address': port_body['mac_address'],
- 'device_owner': t_constants.DEVICE_OWNER_SHADOW,
- 'device_id': port_body['device_id'],
- portbindings.HOST_ID: host
- }
- }
- if agent:
- create_body['port'].update(
- {portbindings.PROFILE: {
- t_constants.PROFILE_AGENT_TYPE: agent['type'],
- t_constants.PROFILE_TUNNEL_IP: agent['tunnel_ip']}})
- full_create_bodys.append(create_body['port'])
-
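-        # create the shadow ports in chunks so each bulk request to the
-        # bottom pod contains at most max_bulk_size ports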
- cursor = 0
- ret_port_ids = []
- client = self._get_client(target_pod['region_name'])
- while cursor < len(full_create_bodys):
- ret_port_ids.extend(self.prepare_ports_with_retry(
- ctx, client,
- full_create_bodys[cursor: cursor + max_bulk_size]))
- cursor += max_bulk_size
- return ret_port_ids
-
- def prepare_shadow_port(self, ctx, project_id, target_pod, net_id,
- port_body, agent=None):
- host = port_body['binding:host_id']
- create_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': t_constants.shadow_port_name % port_body['id'],
- 'network_id': net_id,
- 'fixed_ips': [{
- 'ip_address': port_body['fixed_ips'][0]['ip_address']}],
- 'device_owner': t_constants.DEVICE_OWNER_SHADOW,
- 'binding:host_id': host
- }
- }
- if agent:
- create_body['port'].update(
- {'binding:profile': {
- t_constants.PROFILE_AGENT_TYPE: agent['type'],
- t_constants.PROFILE_TUNNEL_IP: agent['tunnel_ip']}})
- _, sw_port_id = self.prepare_bottom_element(
- ctx, project_id, target_pod, {'id': port_body['id']},
- t_constants.RT_SD_PORT, create_body)
- return sw_port_id
-
- def prepare_bottom_router(self, n_context, net, b_router_name):
- t_ctx = t_context.get_context_from_neutron_context(n_context)
- # use the first pod
- az_name = net[az_def.AZ_HINTS][0]
- pod = db_api.find_pod_by_az_or_region(t_ctx, az_name)
- body = {
- 'router': {
- 'name': b_router_name,
- 'tenant_id': net['tenant_id'],
- 'admin_state_up': True,
- 'distributed': False
- }
- }
- return self.prepare_bottom_element(
- t_ctx, net['tenant_id'], pod, {'id': b_router_name},
- t_constants.RT_ROUTER, body)
-
- def remove_bottom_router_by_name(self, n_context, region_name,
- router_name):
- t_ctx = t_context.get_context_from_neutron_context(n_context)
- b_client = self._get_client(region_name)
- bottom_router = b_client.list_routers(
- t_ctx, [{'key': 'name',
- 'comparator': 'eq',
- 'value': router_name}])
- if bottom_router:
- b_client.delete_routers(t_ctx, bottom_router[0]['id'])
-
- def _fill_provider_info(self, from_net, to_net):
- provider_attrs = provider_net.ATTRIBUTES
- for provider_attr in provider_attrs:
- if validators.is_attr_set(from_net.get(provider_attr)):
- to_net[provider_attr] = from_net[provider_attr]
- if validators.is_attr_set(from_net.get(az_def.AZ_HINTS)):
- to_net[az_def.AZ_HINTS] = from_net[az_def.AZ_HINTS]
-
- def prepare_bottom_external_network(self, n_context, net, top_id):
- t_ctx = t_context.get_context_from_neutron_context(n_context)
- # use the first pod
- az_name = net[az_def.AZ_HINTS][0]
- pod = db_api.find_pod_by_az_or_region(t_ctx, az_name)
- body = {
- 'network': {
- 'name': net['name'],
- 'tenant_id': net['tenant_id'],
- 'admin_state_up': True,
- external_net.EXTERNAL: True,
- }
- }
- self._fill_provider_info(net, body['network'])
- return self.prepare_bottom_element(
- t_ctx, net['tenant_id'], pod, {'id': top_id},
- t_constants.RT_NETWORK, body)
-
- def remove_bottom_external_network_by_name(
- self, n_context, region_name, name):
- t_ctx = t_context.get_context_from_neutron_context(n_context)
- b_client = self._get_client(region_name)
- b_net = b_client.list_networks(
- t_ctx, [{'key': 'name',
- 'comparator': 'eq',
- 'value': name}])
- if b_net:
- b_client.delete_networks(t_ctx, b_net[0]['id'])
-
- def prepare_bottom_external_subnet_by_bottom_name(
- self, context, subnet, region_name, b_net_name, top_subnet_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- pod = db_api.get_pod_by_name(t_ctx, region_name)
- b_client = self._get_client(region_name)
- bottom_network = b_client.list_networks(
- t_ctx, [{'key': 'name',
- 'comparator': 'eq',
- 'value': b_net_name}]
- )
- if not bottom_network:
- raise t_exceptions.InvalidInput(
- reason='bottom network not found for %(b_net_name)s'
- % {'b_net_name': b_net_name})
- body = {
- 'subnet': {
- 'name': top_subnet_id,
- 'network_id': bottom_network[0]['id'],
- 'tenant_id': subnet['tenant_id']
- }
- }
- attrs = ('ip_version', 'cidr', 'gateway_ip', 'allocation_pools',
- 'enable_dhcp')
- for attr in attrs:
- if validators.is_attr_set(subnet.get(attr)):
- body['subnet'][attr] = subnet[attr]
- self.prepare_bottom_element(
- t_ctx, subnet['tenant_id'], pod, {'id': top_subnet_id},
- t_constants.RT_SUBNET, body)
-
- def remove_bottom_external_subnet_by_name(
- self, context, region_name, b_subnet_name):
- t_ctx = t_context.get_context_from_neutron_context(context)
- b_client = self._get_client(region_name)
- bottom_subnet = b_client.list_subnets(
- t_ctx, [{'key': 'name',
- 'comparator': 'eq',
- 'value': b_subnet_name}]
- )
- if bottom_subnet:
- b_client.delete_subnets(t_ctx, bottom_subnet[0]['id'])
-
- def prepare_bottom_router_gateway(
- self, n_context, region_name, segment_name):
- t_ctx = t_context.get_context_from_neutron_context(n_context)
- pod = db_api.get_pod_by_name(t_ctx, region_name)
- b_client = self._get_client(pod['region_name'])
- # when using new l3 network model, a local router will
- # be created automatically for an external net, and the
-        # router's name is the same as the net's id
- b_router = b_client.list_routers(
- t_ctx, filters=[{'key': 'name', 'comparator': 'eq',
- 'value': segment_name}])
- if not b_router:
- raise t_exceptions.NotFound()
- b_nets = b_client.list_networks(
- t_ctx, filters=[{'key': 'name',
- 'comparator': 'eq',
- 'value': segment_name}]
- )
- if not b_nets:
- raise t_exceptions.NotFound()
- b_info = {'network_id': b_nets[0]['id']}
- return b_client.action_routers(
- t_ctx, 'add_gateway', b_router[0]['id'], b_info)
-
- def remove_bottom_router_gateway(
- self, n_context, region_name, b_net_name):
- t_ctx = t_context.get_context_from_neutron_context(n_context)
- pod = db_api.get_pod_by_name(t_ctx, region_name)
- b_client = self._get_client(pod['region_name'])
- # when using new l3 network model, a local router will
- # be created automatically for an external net, and the
-        # router's name is the same as the net's id
- b_router = b_client.list_routers(
- t_ctx, filters=[{'key': 'name', 'comparator': 'eq',
- 'value': b_net_name}])
- if not b_router:
- raise t_exceptions.NotFound()
-
- return b_client.action_routers(
- t_ctx, 'remove_gateway', b_router[0]['id'], b_router[0]['id'])
-
- @staticmethod
- def get_real_shadow_resource_iterator(t_ctx, res_type, res_id):
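-        # yield (pod, bottom resource id) pairs for the given top resource,
-        # covering both the real mappings and, if defined, the corresponding
-        # shadow resource type, while visiting each region only once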
- shadow_res_type = None
- if res_type in t_constants.REAL_SHADOW_TYPE_MAP:
- shadow_res_type = t_constants.REAL_SHADOW_TYPE_MAP[res_type]
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, res_id, res_type)
- if shadow_res_type:
- mappings.extend(db_api.get_bottom_mappings_by_top_id(
- t_ctx, res_id, shadow_res_type))
-
- processed_pod_set = set()
- for pod, bottom_res_id in mappings:
- region_name = pod['region_name']
- if region_name in processed_pod_set:
- continue
- processed_pod_set.add(region_name)
- yield pod, bottom_res_id
-
- @staticmethod
- def extract_resource_routing_entries(port):
- entries = []
- if not port:
- return entries
- for ip in port['fixed_ips']:
- entries.append((ip['subnet_id'], ip['subnet_id'],
- t_constants.RT_SUBNET))
- entries.append((port['network_id'], port['network_id'],
- t_constants.RT_NETWORK))
- entries.append((port['id'], port['id'],
- t_constants.RT_PORT))
- if port['security_groups']:
- for sg_id in port['security_groups']:
- entries.append((sg_id, sg_id, t_constants.RT_SG))
- return entries
-
- @staticmethod
- def ensure_resource_mapping(t_ctx, project_id, pod, entries):
- """Ensure resource mapping
-
- :param t_ctx: tricircle context
- :param project_id: project id
- :param pod: bottom pod
- :param entries: a list of (top_id, bottom_id, resource_type) tuples.
- :return: None
- """
- for top_id, btm_id, resource_type in entries:
- if db_api.get_bottom_id_by_top_id_region_name(
- t_ctx, top_id, pod['region_name'], resource_type):
- continue
- db_api.create_resource_mapping(t_ctx, top_id, btm_id,
- pod['pod_id'], project_id,
- resource_type)
diff --git a/tricircle/network/local_l3_plugin.py b/tricircle/network/local_l3_plugin.py
deleted file mode 100644
index 83f80fbd..00000000
--- a/tricircle/network/local_l3_plugin.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import orm
-
-from neutron_lib import constants
-from neutron_lib.exceptions import l3 as l3_exc
-
-from neutron.db.models import l3 as l3_models
-from neutron.db import models_v2
-from neutron.services.l3_router import l3_router_plugin
-
-
-class TricircleL3Plugin(l3_router_plugin.L3RouterPlugin):
- # Override the original implementation to allow associating a floating ip
- # to a port whose network is not attached to the router. Tricircle will
-    # configure extra routes to guarantee packets can reach the port.
- def get_router_for_floatingip(self, context, internal_port,
- internal_subnet, external_network_id):
- """Find a router to handle the floating-ip association.
-
- :param internal_port: The port for the fixed-ip.
- :param internal_subnet: The subnet for the fixed-ip.
- :param external_network_id: The external network for floating-ip.
-
- :raises: ExternalGatewayForFloatingIPNotFound if no suitable router
- is found.
- """
- router_port = l3_models.RouterPort
- gw_port = orm.aliased(models_v2.Port, name="gw_port")
- router_port_qry = context.session.query(
- router_port.router_id
- ).join(gw_port, gw_port.device_id == router_port.router_id).filter(
- gw_port.network_id == external_network_id,
- gw_port.device_owner == constants.DEVICE_OWNER_ROUTER_GW
- ).distinct()
-
- first_router_id = None
- for router in router_port_qry:
- if not first_router_id:
- first_router_id = router.router_id
- if first_router_id:
- return first_router_id
-
- raise l3_exc.ExternalGatewayForFloatingIPNotFound(
- subnet_id=internal_subnet['id'],
- external_network_id=external_network_id,
- port_id=internal_port['id'])
diff --git a/tricircle/network/local_plugin.py b/tricircle/network/local_plugin.py
deleted file mode 100644
index 3d55b0cb..00000000
--- a/tricircle/network/local_plugin.py
+++ /dev/null
@@ -1,983 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-
-from oslo_config import cfg
-from oslo_log import log
-
-from neutron_lib.api.definitions import availability_zone as az_def
-from neutron_lib.api.definitions import portbindings
-from neutron_lib.api.definitions import provider_net
-from neutron_lib.api import extensions
-from neutron_lib.api import validators
-from neutron_lib.callbacks import events
-import neutron_lib.constants as q_constants
-from neutron_lib.db import utils as db_utils
-import neutron_lib.exceptions as q_exceptions
-from neutron_lib.plugins import directory
-from neutron_lib.plugins.ml2 import api
-from neutron_lib.utils import net
-from neutron_lib.utils import runtime
-import neutronclient.client as neutronclient
-
-import neutron.extensions.securitygroup as ext_sg
-from neutron.plugins.ml2 import plugin
-
-from tricircle.common import client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-from tricircle.common.i18n import _
-
-from tricircle.common import resource_handle
-import tricircle.common.utils as t_utils
-import tricircle.network.exceptions as t_exceptions
-from tricircle.network import helper
-
-
-tricircle_opts = [
- cfg.StrOpt('real_core_plugin', help=_('The core plugin the Tricircle '
- 'local plugin will invoke.')),
-    cfg.StrOpt('local_region_name',
-               help=_('Region the local Neutron server belongs to; takes '
-                      'priority over nova.region_name')),
- cfg.StrOpt('central_neutron_url', help=_('Central Neutron server url')),
-    cfg.IPOpt('l2gw_tunnel_ip',
-              help=_('Tunnel IP of the L2 gateway; required when '
-                     'client.cross_pod_vxlan_mode is set to l2gw'))]
-
-tricircle_opt_group = cfg.OptGroup('tricircle')
-cfg.CONF.register_group(tricircle_opt_group)
-cfg.CONF.register_opts(tricircle_opts, group=tricircle_opt_group)
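The options above live in the [tricircle] group of the local Neutron server's configuration. A minimal sketch of how they are set and read with oslo.config; the option names come from tricircle_opts, while the standalone registration and all values here are illustrative only:

```python
from oslo_config import cfg

# Standalone illustration, not the plugin's own bootstrap: register a subset
# of the options, feed in example values, then read them back.
cfg.CONF.register_opts(
    [cfg.StrOpt('real_core_plugin'),
     cfg.StrOpt('local_region_name'),
     cfg.StrOpt('central_neutron_url')],
    group='tricircle')
cfg.CONF([], project='tricircle-example')
cfg.CONF.set_override('real_core_plugin', 'ml2', group='tricircle')
cfg.CONF.set_override('central_neutron_url',
                      'http://127.0.0.1:20001', group='tricircle')

print(cfg.CONF.tricircle.real_core_plugin)    # -> ml2
print(cfg.CONF.tricircle.central_neutron_url)
```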
-
-
-LOG = log.getLogger(__name__)
-
-
-class TricirclePlugin(plugin.Ml2Plugin):
-
- __native_bulk_support = True
-
- def __init__(self):
- super(TricirclePlugin, self).__init__()
- core_plugins_namespace = 'neutron.core_plugins'
- plugin_provider = cfg.CONF.tricircle.real_core_plugin
- plugin_class = runtime.load_class_by_alias_or_classname(
- core_plugins_namespace, plugin_provider)
- self.core_plugin = plugin_class()
- self.neutron_handle = resource_handle.NeutronResourceHandle(
- cfg.CONF.client.auth_url)
- self.neutron_handle.endpoint_url = \
- cfg.CONF.tricircle.central_neutron_url
- self.on_trunk_create = {}
- self.on_subnet_delete = {}
- neutronclient.USER_AGENT = t_constants.LOCAL
-
- def start_rpc_listeners(self):
- return self.core_plugin.start_rpc_listeners()
-
- def start_rpc_state_reports_listener(self):
- return self.core_plugin.start_rpc_state_reports_listener()
-
- def rpc_workers_supported(self):
- return self.core_plugin.rpc_workers_supported()
-
- def rpc_state_report_workers_supported(self):
- return self.core_plugin.rpc_state_report_workers_supported()
-
- def _start_subnet_delete(self, context):
- if context.request_id:
- LOG.debug('subnet delete start for ' + context.request_id)
- self.on_subnet_delete[context.request_id] = True
-
- def _end_subnet_delete(self, context):
- if context.request_id:
- LOG.debug('subnet delete end for ' + context.request_id)
- self.on_subnet_delete.pop(context.request_id, None)
-
- def _in_subnet_delete(self, context):
- if context.request_id:
- LOG.debug('check subnet delete state for ' + context.request_id)
- return context.request_id in self.on_subnet_delete
- return False
-
- @staticmethod
- def _adapt_network_body(network):
- network_type = network.get(provider_net.NETWORK_TYPE)
- if network_type == t_constants.NT_LOCAL:
- for key in (provider_net.NETWORK_TYPE,
- provider_net.PHYSICAL_NETWORK,
- provider_net.SEGMENTATION_ID):
- network.pop(key, None)
-
- # remove az_hint from network
- network.pop('availability_zone_hints', None)
-
- @staticmethod
- def _adapt_port_body_for_client(port):
- port.pop('port_security_enabled', None)
- port.pop('allowed_address_pairs', None)
- remove_keys = []
- for key, value in six.iteritems(port):
- if value is q_constants.ATTR_NOT_SPECIFIED:
- remove_keys.append(key)
- for key in remove_keys:
- port.pop(key)
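A quick illustration of the adaptation above, using the q_constants alias imported in this module (the port dict is invented):

```python
# Unset attributes and the two unconditionally popped keys are removed before
# the body is sent to central Neutron.
port = {'name': 'p1',
        'mac_address': q_constants.ATTR_NOT_SPECIFIED,
        'allowed_address_pairs': []}
TricirclePlugin._adapt_port_body_for_client(port)
assert port == {'name': 'p1'}
```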
-
- @staticmethod
- def _adapt_port_body_for_call(port):
- if 'mac_address' not in port:
- port['mac_address'] = q_constants.ATTR_NOT_SPECIFIED
- if 'fixed_ips' not in port:
- port['fixed_ips'] = q_constants.ATTR_NOT_SPECIFIED
-
- @staticmethod
- def _construct_params(filters, sorts, limit, marker, page_reverse):
- params = {}
- for key, value in six.iteritems(filters):
- params[key] = value
- if sorts:
- params['sort_key'] = [s[0] for s in sorts]
- if page_reverse:
- params['sort_dir'] = ['desc' if s[1] else 'asc' for s in sorts]
- else:
- params['sort_dir'] = ['asc' if s[1] else 'desc' for s in sorts]
- if limit:
- params['limit'] = limit
- if marker:
- params['marker'] = marker
- return params
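For illustration, the translation performed by _construct_params on a typical listing request (filter and sort values invented):

```python
params = TricirclePlugin._construct_params(
    filters={'id': ['net-1', 'net-2']},
    sorts=[('name', True)],            # (sort key, ascending)
    limit=10, marker='net-0', page_reverse=False)
# params == {'id': ['net-1', 'net-2'], 'sort_key': ['name'],
#            'sort_dir': ['asc'], 'limit': 10, 'marker': 'net-0'}
```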
-
- @staticmethod
- def _skip_non_api_query(context):
- return not context.auth_token
-
- @staticmethod
- def _get_neutron_region():
- region_name = cfg.CONF.tricircle.local_region_name
- if not region_name:
- region_name = cfg.CONF.nova.region_name
- return region_name
-
- def _ensure_network_subnet(self, context, port):
- network_id = port['network_id']
-        # get_network will create the bottom network if it doesn't exist, and
-        # also create bottom subnets if they don't exist
- self.get_network(context, network_id)
-
- def _ensure_subnet(self, context, network, is_top=True):
- subnet_ids = network.get('subnets', [])
- if not is_top:
- if subnet_ids:
- return subnet_ids
- else:
- t_ctx = t_context.get_context_from_neutron_context(context)
- if self._skip_non_api_query(t_ctx):
- return []
- t_network = self.neutron_handle.handle_get(
- t_ctx, 'network', network['id'])
- if not t_network:
- return []
- return self._ensure_subnet(context, t_network)
- if not subnet_ids:
- return []
- if len(subnet_ids) == 1:
- self.get_subnet(context, subnet_ids[0])
- else:
- self.get_subnets(context, filters={'id': subnet_ids})
- return subnet_ids
-
- def _ensure_subnet_dhcp_port(self, t_ctx, q_ctx, b_subnet):
- b_dhcp_ports = self.core_plugin.get_ports(
- q_ctx, filters={'network_id': [b_subnet['network_id']],
- 'device_owner': ['network:dhcp']})
- if b_dhcp_ports:
- return
- if self._skip_non_api_query(t_ctx):
- return
- raw_client = self.neutron_handle._get_client(t_ctx)
- params = {'name': t_constants.dhcp_port_name % b_subnet['id']}
- t_ports = raw_client.list_ports(**params)['ports']
- if not t_ports:
- raise t_exceptions.DhcpPortNotFound(subnet_id=b_subnet['id'])
-
- dhcp_port_body = \
- helper.NetworkHelper._get_create_dhcp_port_body(
- b_subnet['tenant_id'], t_ports[0], b_subnet['id'],
- b_subnet['network_id'])
- dhcp_port_body['port']['id'] = t_ports[0]['id']
- self.core_plugin.create_port(q_ctx, dhcp_port_body)
-
- def _ensure_gateway_port(self, t_ctx, t_subnet):
- region_name = self._get_neutron_region()
- gateway_port_name = t_constants.interface_port_name % (region_name,
- t_subnet['id'])
- gateway_port_body = {
- 'port': {'tenant_id': t_subnet['tenant_id'],
- 'admin_state_up': True,
- 'name': gateway_port_name,
- 'network_id': t_subnet['network_id'],
- 'device_id': t_constants.interface_port_device_id}}
- try:
- return self.neutron_handle.handle_create(
- t_ctx, t_constants.RT_PORT, gateway_port_body)
- except Exception:
- raw_client = self.neutron_handle._get_client(t_ctx)
- params = {'name': gateway_port_name}
- t_ports = raw_client.list_ports(**params)['ports']
- if not t_ports:
- raise t_exceptions.GatewayPortNotFound(
- subnet_id=t_subnet['id'], region=region_name)
- return t_ports[0]
-
- def create_network(self, context, network):
-        # this method is overridden for bottom bridge network and external
-        # network creation; for internal networks, get_network and
-        # get_networks will do the trick
- net_body = network['network']
- self._adapt_network_body(net_body)
- if net_body['name']:
- net_id = t_utils.get_id_from_name(t_constants.RT_NETWORK,
- net_body['name'])
- if net_id:
- net_body['id'] = net_id
-
- net_body.pop('qos_policy_id', None)
-
- b_network = self.core_plugin.create_network(context,
- {'network': net_body})
- return b_network
-
- def _is_valid_network(self, context, network_id):
- try:
- self.core_plugin.get_network(context, network_id)
- except q_exceptions.NotFound:
- if self._in_subnet_delete(context):
- raise
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- t_network = self.neutron_handle.handle_get(
- t_ctx, 'network', network_id)
- region_name = self._get_neutron_region()
- located = self._is_network_located_in_region(t_network,
- region_name)
- if not located:
- LOG.error('network: %(network_id)s not located in current '
- 'region: %(region_name)s, '
- 'az_hints: %(az_hints)s',
- {'network_id': t_network['id'],
- 'region_name': region_name,
- 'az_hints': t_network[az_def.AZ_HINTS]})
- return located
- self._create_bottom_network(context, network_id)
- return True
-
- def _is_network_located_in_region(self, t_network, region_name):
- az_hints = t_network.get(az_def.AZ_HINTS)
- if not az_hints:
- return True
- return region_name in az_hints
-
- def get_network(self, context, _id, fields=None):
- try:
- b_network = self.core_plugin.get_network(context, _id)
- if not self._in_subnet_delete(context):
- subnet_ids = self._ensure_subnet(context, b_network, False)
- else:
- subnet_ids = []
- except q_exceptions.NotFound:
- if self._in_subnet_delete(context):
- raise
- t_ctx = t_context.get_context_from_neutron_context(context)
- if self._skip_non_api_query(t_ctx):
- raise q_exceptions.NetworkNotFound(net_id=_id)
- t_network, b_network = self._create_bottom_network(context, _id)
- subnet_ids = self._ensure_subnet(context, t_network)
- if subnet_ids:
- b_network['subnets'] = subnet_ids
- return db_utils.resource_fields(b_network, fields)
-
- def get_networks(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None, page_reverse=False):
-        # if id is not specified in the filter, we just return network data
-        # from the local Neutron server; otherwise we need to retrieve network
-        # data from the central Neutron server and create the networks that
-        # don't exist in the local Neutron server.
- if not filters or 'id' not in filters:
- return self.core_plugin.get_networks(
- context, filters, fields, sorts, limit, marker, page_reverse)
-
- b_full_networks = self.core_plugin.get_networks(
- context, filters, None, sorts, limit, marker, page_reverse)
- b_networks = []
- for b_network in b_full_networks:
- subnet_ids = self._ensure_subnet(context, b_network, False)
- if subnet_ids:
- b_network['subnets'] = subnet_ids
- b_networks.append(db_utils.resource_fields(b_network, fields))
-
- if len(b_networks) == len(filters['id']):
- return b_networks
-
- t_ctx = t_context.get_context_from_neutron_context(context)
- if self._skip_non_api_query(t_ctx):
- return b_networks
- t_ctx.auth_token = client.Client.get_admin_token(context.project_id)
- raw_client = self.neutron_handle._get_client(t_ctx)
- params = self._construct_params(filters, sorts, limit, marker,
- page_reverse)
- t_networks = raw_client.list_networks(**params)['networks']
-
- t_id_set = set([network['id'] for network in t_networks])
- b_id_set = set([network['id'] for network in b_networks])
- missing_id_set = t_id_set - b_id_set
- if missing_id_set:
- missing_networks = [network for network in t_networks if (
- network['id'] in missing_id_set)]
- for network in missing_networks:
- region_name = self._get_neutron_region()
- located = self._is_network_located_in_region(network,
- region_name)
- if not located:
- LOG.error('network: %(net_id)s not located in current '
- 'region: %(region_name)s, '
- 'az_hints: %(az_hints)s',
- {'net_id': network['id'],
- 'region_name': region_name,
- 'az_hints': network[az_def.AZ_HINTS]})
- continue
-
- self._adapt_network_body(network)
-
- network.pop('qos_policy_id', None)
- b_network = self.core_plugin.create_network(
- context, {'network': network})
- subnet_ids = self._ensure_subnet(context, network)
- if subnet_ids:
- b_network['subnets'] = subnet_ids
- b_networks.append(db_utils.resource_fields(b_network, fields))
- return b_networks
-
- def create_subnet(self, context, subnet):
-        # this method is overridden for bottom bridge subnet and external
-        # subnet creation; for internal subnets, get_subnet and get_subnets
-        # will do the trick
- subnet_body = subnet['subnet']
- if subnet_body['name']:
- subnet_id = t_utils.get_id_from_name(t_constants.RT_SUBNET,
- subnet_body['name'])
- if subnet_id:
- subnet_body['id'] = subnet_id
- b_subnet = self.core_plugin.create_subnet(context,
- {'subnet': subnet_body})
- return b_subnet
-
- def _create_bottom_subnet(self, t_ctx, q_ctx, t_subnet):
- if t_subnet['gateway_ip']:
- gateway_port = self._ensure_gateway_port(t_ctx, t_subnet)
- b_gateway_ip = gateway_port['fixed_ips'][0]['ip_address']
- else:
- b_gateway_ip = None
- subnet_body = helper.NetworkHelper.get_create_subnet_body(
- t_subnet['tenant_id'], t_subnet, t_subnet['network_id'],
- b_gateway_ip)['subnet']
- t_subnet['gateway_ip'] = subnet_body['gateway_ip']
- t_subnet['allocation_pools'] = subnet_body['allocation_pools']
- b_subnet = self.core_plugin.create_subnet(q_ctx, {'subnet': t_subnet})
- return b_subnet
-
- def _create_bottom_network(self, context, _id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- t_network = self.neutron_handle.handle_get(t_ctx, 'network', _id)
- if not t_network:
- raise q_exceptions.NetworkNotFound(net_id=_id)
- self._adapt_network_body(t_network)
- t_network.pop('qos_policy_id', None)
- b_network = self.core_plugin.create_network(context,
- {'network': t_network})
- return t_network, b_network
-
- def get_subnet(self, context, _id, fields=None):
- t_ctx = t_context.get_context_from_neutron_context(context)
- try:
- b_subnet = self.core_plugin.get_subnet(context, _id)
- except q_exceptions.NotFound:
- if self._skip_non_api_query(t_ctx):
- raise q_exceptions.SubnetNotFound(subnet_id=_id)
- t_subnet = self.neutron_handle.handle_get(t_ctx, 'subnet', _id)
- if not t_subnet:
- raise q_exceptions.SubnetNotFound(subnet_id=_id)
- valid = self._is_valid_network(context, t_subnet['network_id'])
- if not valid:
- raise q_exceptions.SubnetNotFound(subnet_id=_id)
- b_subnet = self._create_bottom_subnet(t_ctx, context, t_subnet)
- if b_subnet['enable_dhcp']:
- self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
- return db_utils.resource_fields(b_subnet, fields)
-
- def get_subnets(self, context, filters=None, fields=None, sorts=None,
- limit=None, marker=None, page_reverse=False):
-        # if id is not specified in the filter, we just return subnet data
-        # from the local Neutron server; otherwise we need to retrieve subnet
-        # data from the central Neutron server and create the subnets that
-        # don't exist in the local Neutron server.
- if not filters or 'id' not in filters:
- return self.core_plugin.get_subnets(
- context, filters, fields, sorts, limit, marker, page_reverse)
-
- t_ctx = t_context.get_context_from_neutron_context(context)
- b_full_subnets = self.core_plugin.get_subnets(
- context, filters, None, sorts, limit, marker, page_reverse)
- b_subnets = []
- for b_subnet in b_full_subnets:
- if b_subnet['enable_dhcp']:
- self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
- b_subnets.append(db_utils.resource_fields(b_subnet, fields))
- if len(b_subnets) == len(filters['id']):
- return b_subnets
-
- if self._skip_non_api_query(t_ctx):
- return b_subnets
- raw_client = self.neutron_handle._get_client(t_ctx)
- params = self._construct_params(filters, sorts, limit, marker,
- page_reverse)
- t_subnets = raw_client.list_subnets(**params)['subnets']
-
- t_id_set = set([subnet['id'] for subnet in t_subnets])
- b_id_set = set([subnet['id'] for subnet in b_subnets])
- missing_id_set = t_id_set - b_id_set
- if missing_id_set:
- missing_subnets = [subnet for subnet in t_subnets if (
- subnet['id'] in missing_id_set)]
- for subnet in missing_subnets:
- valid = self._is_valid_network(context, subnet['network_id'])
- if not valid:
- continue
- b_subnet = self._create_bottom_subnet(t_ctx, context, subnet)
- if b_subnet['enable_dhcp']:
- self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
- b_subnets.append(db_utils.resource_fields(b_subnet, fields))
- return b_subnets
-
- def delete_subnet(self, context, _id):
- self._start_subnet_delete(context)
-        try:
-            self.core_plugin.delete_subnet(context, _id)
-        finally:
-            self._end_subnet_delete(context)
-
- def update_subnet(self, context, _id, subnet):
- """update bottom subnet
-
-        We cannot directly use the ML2 plugin's update_subnet function,
-        because it calls the local plugin's get_subnet inside a transaction.
-        The local plugin's get_subnet creates a dhcp port when the subnet's
-        enable_dhcp attribute changes from False to True, but Neutron doesn't
-        allow calling create_port inside a transaction and will raise an
-        exception.
-
- :param context: neutron context
- :param _id: subnet_id
- :param subnet: update body
- :return: updated subnet
- """
- t_ctx = t_context.get_context_from_neutron_context(context)
- b_subnet = self.core_plugin.get_subnet(context, _id)
- origin_enable_dhcp = b_subnet['enable_dhcp']
- req_enable_dhcp = subnet['subnet'].get('enable_dhcp')
-        # when the request enables dhcp and dhcp was originally disabled,
-        # ensure the subnet's dhcp port is created
- if req_enable_dhcp and not origin_enable_dhcp:
- self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
- res = self.core_plugin.update_subnet(context, _id, subnet)
- return res
-
- @staticmethod
- def _is_special_port(port):
- return port.get('device_owner') in (
- q_constants.DEVICE_OWNER_ROUTER_INTF,
- q_constants.DEVICE_OWNER_FLOATINGIP,
- q_constants.DEVICE_OWNER_ROUTER_GW,
- q_constants.DEVICE_OWNER_ROUTER_SNAT,
- q_constants.DEVICE_OWNER_DVR_INTERFACE)
-
- def _handle_dvr_snat_port(self, t_ctx, port):
- if port.get('device_owner') != q_constants.DEVICE_OWNER_ROUTER_SNAT:
- return
- subnet_id = port['fixed_ips'][0]['subnet_id']
- t_subnet = self.neutron_handle.handle_get(t_ctx, 'subnet', subnet_id)
- snat_port_name = t_constants.snat_port_name % t_subnet['id']
- raw_client = self.neutron_handle._get_client(t_ctx)
- params = {'name': snat_port_name}
- t_ports = raw_client.list_ports(**params)['ports']
- if not t_ports:
- raise t_exceptions.CentralizedSNATPortNotFound(
- subnet_id=t_subnet['id'])
- port['fixed_ips'][0][
- 'ip_address'] = t_ports[0]['fixed_ips'][0]['ip_address']
-
- def create_port_bulk(self, context, ports):
-        # NOTE(zhiyuan) currently this bulk operation is only used to optimize
-        # shadow port and trunk subport creation
- b_ports = self.core_plugin.get_ports(context, fields=['id'])
- b_port_list = [b_port['id'] for b_port in b_ports]
- for port in ports['ports'][:]:
- port_body = port['port']
- self.get_network(context, port_body['network_id'])
- if port_body['device_owner'] == t_constants.DEVICE_OWNER_SHADOW:
- port_body['id'] = port_body['name'].split('_')[-1]
- self._create_shadow_agent(context, port_body)
- helper.NetworkHelper.fill_binding_info(port_body)
- # clear binding profile set by xmanager
- port_body[portbindings.PROFILE] = {}
- if port_body['id'] in b_port_list:
- port_body.pop('security_groups', None)
- self.update_port(context, port_body['id'], port)
- ports['ports'].remove(port)
- elif (port_body['device_owner'] ==
- t_constants.DEVICE_OWNER_SUBPORT):
- port_body['id'] = port_body['device_id']
-                # need to set the port's device_id to empty, otherwise an
-                # exception will be raised because the device_id is bound to
-                # a device when the trunk adds this port as a subport
- port_body['device_owner'] = ''
- port_body['device_id'] = ''
-
- return self.core_plugin.create_port_bulk(context, ports)
-
- def create_port(self, context, port):
- port_body = port['port']
- network_id = port_body['network_id']
- # get_network will create bottom network if it doesn't exist
- self.get_network(context, network_id)
-
- t_ctx = t_context.get_context_from_neutron_context(context)
- raw_client = self.neutron_handle._get_client(t_ctx)
-
- def get_top_port_by_ip(ip):
- params = {'fixed_ips': 'ip_address=%s' % ip,
- 'network_id': network_id}
- t_ports = raw_client.list_ports(**params)['ports']
- if not t_ports:
-                raise q_exceptions.InvalidIpForNetwork(ip_address=ip)
- return t_ports[0]
-
- if port_body['fixed_ips'] is not q_constants.ATTR_NOT_SPECIFIED and (
- port_body.get('device_owner') != (
- q_constants.DEVICE_OWNER_LOADBALANCERV2)):
- if not self._is_special_port(port_body):
- fixed_ip = port_body['fixed_ips'][0]
- ip_address = fixed_ip.get('ip_address')
- if not ip_address:
-                    # the dhcp agent may request to create a dhcp port without
-                    # specifying an ip address; we just raise an exception to
-                    # reject such a request
- raise q_exceptions.InvalidIpForNetwork(ip_address='None')
- t_port = get_top_port_by_ip(ip_address)
- elif helper.NetworkHelper.is_need_top_sync_port(
- port_body, cfg.CONF.client.bridge_cidr):
-                # for ports that need to be synced with the top port, we keep
-                # the ids the same
- ip_address = port_body['fixed_ips'][0]['ip_address']
- port_body['id'] = get_top_port_by_ip(ip_address)['id']
- t_port = port_body
- else:
- self._handle_dvr_snat_port(t_ctx, port_body)
- t_port = port_body
- else:
- self._adapt_port_body_for_client(port['port'])
- t_port = raw_client.create_port(port)['port']
-
- if not self._is_special_port(port_body):
- subnet_id = t_port['fixed_ips'][0]['subnet_id']
- # get_subnet will create bottom subnet if it doesn't exist
- self.get_subnet(context, subnet_id)
-
- for field in ('name', 'device_id', 'device_owner', 'binding:host_id'):
- if port_body.get(field):
- t_port[field] = port_body[field]
-
- self._handle_security_group(t_ctx, context, t_port)
- self._create_shadow_agent(context, port_body)
-
- t_port.pop('qos_policy_id', None)
- b_port = self.core_plugin.create_port(context, {'port': t_port})
- return b_port
-
- def _create_shadow_agent(self, context, port_body):
- """Create shadow agent before creating shadow port
-
-        Called inside the create_port function. Shadow ports are created by
-        the xjob daemon, which inserts agent information (agent type, tunnel
-        ip and host) into the binding profile of the request body. This
-        function checks whether the necessary information is in the request
-        body; if so, it invokes the real core plugin to create or update the
-        shadow agent. For other kinds of port creation requests, this function
-        is called but has no effect.
-
- :param context: neutron context
-        :param port_body: port body in the creation request
- :return: None
- """
- if not extensions.is_extension_supported(self.core_plugin, 'agent'):
- return
- profile_dict = port_body.get(portbindings.PROFILE, {})
- if not validators.is_attr_set(profile_dict):
- return
- if t_constants.PROFILE_TUNNEL_IP not in profile_dict:
- return
- agent_type = profile_dict[t_constants.PROFILE_AGENT_TYPE]
- tunnel_ip = profile_dict[t_constants.PROFILE_TUNNEL_IP]
- agent_host = port_body[portbindings.HOST_ID]
- agent_state = helper.NetworkHelper.construct_agent_data(
- agent_type, agent_host, tunnel_ip)
- self.core_plugin.create_or_update_agent(context, agent_state)
- driver = self.core_plugin.type_manager.drivers.get('vxlan')
- if driver:
- driver.obj.add_endpoint(tunnel_ip, agent_host)
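To make the docstring above concrete, here is a hedged sketch of the binding-profile payload that xjob is described as injecting; the key names are the constants referenced in this module, and the values are invented:

```python
from neutron_lib.api.definitions import portbindings
import tricircle.common.constants as t_constants

# Example values only; what matters is the shape of the profile.
shadow_port_body = {
    portbindings.HOST_ID: 'compute-1',
    portbindings.PROFILE: {
        t_constants.PROFILE_AGENT_TYPE: 'Open vSwitch agent',
        t_constants.PROFILE_TUNNEL_IP: '192.168.100.5',
    },
}
```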
-
- def _fill_agent_info_in_profile(self, context, port_id, host,
- profile_dict):
- """Fill agent information in the binding profile
-
-        Called inside the update_port function. When the local plugin handles
-        a port update request, it checks whether a host is in the body; if so,
-        the local plugin sends a port update request to central Neutron to
-        tell the central plugin that the port has been bound to a host. The
-        information about the agent on that host is inserted into the update
-        body by calling this function, so after central Neutron receives the
-        request, it can save the agent information in the Tricircle shadow
-        agent table.
-
-        :param context: neutron context
- :param port_id: port uuid
- :param host: host the port is bound to
- :param profile_dict: binding profile dict in the port update body
- :return: None
- """
- if not extensions.is_extension_supported(self.core_plugin, 'agent'):
- return
- if cfg.CONF.client.cross_pod_vxlan_mode == t_constants.NM_NOOP:
- return
-
- port = self.core_plugin.get_port(context, port_id)
- net = self.core_plugin.get_network(context, port['network_id'])
- if net[provider_net.NETWORK_TYPE] != t_constants.NT_VxLAN:
- return
-
- vif_type = port[portbindings.VIF_TYPE]
- agent_type = helper.NetworkHelper.get_agent_type_by_vif(vif_type)
- if not agent_type:
- return
- agents = self.core_plugin.get_agents(
- context, filters={'agent_type': [agent_type], 'host': [host]})
- if not agents:
- return
-
- if cfg.CONF.client.cross_pod_vxlan_mode == t_constants.NM_P2P:
- helper.NetworkHelper.fill_agent_data(agent_type, host, agents[0],
- profile_dict)
- elif cfg.CONF.client.cross_pod_vxlan_mode == t_constants.NM_L2GW:
- if not cfg.CONF.tricircle.l2gw_tunnel_ip:
- LOG.error('Cross-pod VxLAN networking mode is set to l2gw '
- 'but L2 gateway tunnel ip is not configured')
- return
- l2gw_tunnel_ip = cfg.CONF.tricircle.l2gw_tunnel_ip
- helper.NetworkHelper.fill_agent_data(agent_type, host, agents[0],
- profile_dict,
- tunnel_ip=l2gw_tunnel_ip)
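The branches above are driven by client.cross_pod_vxlan_mode. A hedged sketch of test-style overrides for the l2gw case, assuming the [client] and [tricircle] option groups are already registered (for example by importing this module and tricircle.common.client), with invented values:

```python
from oslo_config import cfg

# In a real deployment these are set in the local Neutron configuration's
# [client] and [tricircle] sections rather than overridden in code.
cfg.CONF.set_override('cross_pod_vxlan_mode', 'l2gw', group='client')
cfg.CONF.set_override('l2gw_tunnel_ip', '192.168.50.10', group='tricircle')
```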
-
- @staticmethod
- def _need_top_update(port_old, port, update_body):
- if (port_old.get('device_owner', '') ==
- t_constants.DEVICE_OWNER_SHADOW and
- port['device_owner'] == '' and
- port['device_id'] == ''):
- return False
- if (port_old.get('device_owner', '') ==
- t_constants.DEVICE_OWNER_NOVA and
- port['device_owner'] == '' and
- port['device_id'] == ''):
- return True
- if not update_body.get(portbindings.HOST_ID):
- # no need to update top port if host is not updated
- return False
-        # we only need to update the top port for those ports that are synced
-        # with it
- return helper.NetworkHelper.is_need_top_sync_port(
- port, cfg.CONF.client.bridge_cidr)
-
- def update_port(self, context, _id, port):
-        # the ovs agent will not call update_port; it updates port status via
-        # rpc and direct db operations
- b_port_old = self.core_plugin.get_port(context, _id)
- if not b_port_old:
- return b_port_old
- profile_dict = port['port'].get(portbindings.PROFILE, {})
- if profile_dict.pop(t_constants.PROFILE_FORCE_UP, None) or \
- (b_port_old.get('device_owner', '') == '' and
- b_port_old.get('device_id', '') == '' and
- port['port'].get('device_owner') ==
- t_constants.DEVICE_OWNER_NOVA):
- port['port']['status'] = q_constants.PORT_STATUS_ACTIVE
- port['port'][
- portbindings.VNIC_TYPE] = q_constants.ATTR_NOT_SPECIFIED
-
- b_port = self.core_plugin.update_port(context, _id, port)
- if self._need_top_update(b_port_old, b_port, port['port']):
- region_name = self._get_neutron_region()
- update_dict = {portbindings.PROFILE: {
- t_constants.PROFILE_REGION: region_name,
- t_constants.PROFILE_DEVICE: b_port['device_owner'],
- portbindings.VIF_DETAILS: b_port[portbindings.VIF_DETAILS],
- portbindings.VNIC_TYPE: b_port[portbindings.VNIC_TYPE],
- portbindings.VIF_TYPE: b_port[portbindings.VIF_TYPE],
- portbindings.HOST_ID: b_port[portbindings.HOST_ID]}}
- if b_port.get(t_constants.PROFILE_STATUS):
- update_dict[portbindings.PROFILE].update({
- t_constants.PROFILE_STATUS: b_port['status']
- })
- self._fill_agent_info_in_profile(
- context, _id, port['port'][portbindings.HOST_ID],
- update_dict[portbindings.PROFILE])
-
- if directory.get_plugin('trunk'):
- trunk_details = b_port.get('trunk_details')
- if trunk_details:
- update_dict['binding:profile'].update({
- t_constants.PROFILE_LOCAL_TRUNK_ID:
- trunk_details['trunk_id']})
-
- t_ctx = t_context.get_context_from_neutron_context(context)
- self.neutron_handle.handle_update(t_ctx, 'port', _id,
- {'port': update_dict})
- return b_port
-
- def _start_trunk_create(self, context):
- if context.request_id:
- LOG.debug('trunk create start for ' + context.request_id)
- self.on_trunk_create[context.request_id] = True
-
- def _end_trunk_create(self, context):
- if context.request_id:
- LOG.debug('trunk create end for ' + context.request_id)
- self.on_trunk_create.pop(context.request_id, None)
-
- def _in_trunk_create(self, context):
- if context.request_id:
- return context.request_id in self.on_trunk_create
- return False
-
- def _create_trunk(self, context, t_ctx, port_id):
- trunk_plugin = directory.get_plugin('trunk')
- if not trunk_plugin:
- return
- b_trunks = trunk_plugin.get_trunks(
- context, filters={'port_id': [port_id]})
- if b_trunks:
- return
- t_trunks = self.neutron_handle.handle_list(
- t_ctx, 'trunk', [{'key': 'port_id',
- 'comparator': 'eq',
- 'value': port_id}])
- if not t_trunks:
- return
- t_trunk = t_trunks[0]
- # sub_ports will be created in xjob, so set it to empty here
- t_trunk['sub_ports'] = []
- trunk_plugin.create_trunk(context, {'trunk': t_trunk})
-
- def _ensure_trunk(self, context, t_ctx, port_id):
- # avoid recursive calls: _ensure_trunk will call create_trunk,
- # create_trunk will call get_port, and get_port will call
- # _ensure_trunk again
- if not self._in_trunk_create(context):
- self._start_trunk_create(context)
-            try:
-                self._create_trunk(context, t_ctx, port_id)
-            finally:
-                self._end_trunk_create(context)
-
- def get_port(self, context, _id, fields=None):
- t_ctx = t_context.get_context_from_neutron_context(context)
- try:
- b_port = self.core_plugin.get_port(context, _id, fields)
- except q_exceptions.NotFound:
- if self._skip_non_api_query(t_ctx):
- raise q_exceptions.PortNotFound(port_id=_id)
- t_port = self.neutron_handle.handle_get(t_ctx, 'port', _id)
- if not t_port:
- raise q_exceptions.PortNotFound(port_id=_id)
- self._ensure_network_subnet(context, t_port)
- self._adapt_port_body_for_call(t_port)
- self._handle_security_group(t_ctx, context, t_port)
- t_port.pop('qos_policy_id', None)
- b_port = self.core_plugin.create_port(context, {'port': t_port})
-
- self._ensure_trunk(context, t_ctx, _id)
- return db_utils.resource_fields(b_port, fields)
-
- def get_ports(self, context, filters=None, fields=None, sorts=None,
- limit=None, marker=None, page_reverse=False):
-        # if id is not specified in the filter, we just return port data from
-        # the local Neutron server; otherwise we need to retrieve port data
-        # from the central Neutron server and create the ports that don't
-        # exist in the local Neutron server.
- if not filters or 'id' not in filters:
- return self.core_plugin.get_ports(context, filters, fields, sorts,
- limit, marker, page_reverse)
-
- b_ports = self.core_plugin.get_ports(context, filters, fields, sorts,
- limit, marker, page_reverse)
- if len(b_ports) == len(filters['id']):
- return b_ports
-
- id_set = set(filters['id'])
- b_id_set = set([port['id'] for port in b_ports])
- missing_id_set = id_set - b_id_set
- t_ctx = t_context.get_context_from_neutron_context(context)
- if self._skip_non_api_query(t_ctx):
- return b_ports
- raw_client = self.neutron_handle._get_client(t_ctx)
- t_ports = []
- for port_id in missing_id_set:
-            # using list_ports would cause infinite API calls since the
-            # central Neutron server also uses list_ports to retrieve port
-            # information from the local Neutron server, so we call show_port
-            # one by one
- try:
- t_port = raw_client.show_port(port_id)['port']
- t_ports.append(t_port)
- except Exception:
- # user passes a nonexistent port id
- pass
-
- for port in t_ports:
- self._ensure_network_subnet(context, port)
- self._adapt_port_body_for_call(port)
- self._handle_security_group(t_ctx, context, port)
- port.pop('qos_policy_id', None)
- b_port = self.core_plugin.create_port(context,
- {'port': port})
- b_ports.append(db_utils.resource_fields(b_port, fields))
- return b_ports
-
- def delete_port(self, context, _id, l3_port_check=True):
- t_ctx = t_context.get_context_from_neutron_context(context)
- try:
- b_port = self.core_plugin.get_port(context, _id)
-            # to support floating ip, we create a copy port if the target port
-            # is not in the pod where the real external network is located; to
-            # distinguish it from a normal port, we name it with a prefix
- do_top_delete = b_port['device_owner'].startswith(
- q_constants.DEVICE_OWNER_COMPUTE_PREFIX)
- skip_top_delete = t_constants.RT_SD_PORT in b_port['name']
- except q_exceptions.NotFound:
- return
- if do_top_delete and not skip_top_delete:
- self.neutron_handle.handle_delete(t_ctx, t_constants.RT_PORT, _id)
- self.core_plugin.delete_port(context, _id, l3_port_check)
-
- def _handle_security_group(self, t_ctx, q_ctx, port):
- if 'security_groups' not in port:
- return
- if port.get('device_owner') and net.is_port_trusted(port):
- return
- if not port['security_groups']:
- raw_client = self.neutron_handle._get_client(t_ctx)
- params = {'name': 'default'}
- t_sgs = raw_client.list_security_groups(
- **params)['security_groups']
- if t_sgs:
- port['security_groups'] = [t_sgs[0]['id']]
- if port['security_groups'] is q_constants.ATTR_NOT_SPECIFIED:
- return
- for sg_id in port['security_groups']:
- self.get_security_group(q_ctx, sg_id)
-
- def get_security_group(self, context, _id, fields=None, tenant_id=None):
- try:
- return self.core_plugin.get_security_group(
- context, _id, fields, tenant_id)
- except q_exceptions.NotFound:
- t_ctx = t_context.get_context_from_neutron_context(context)
- t_sg = self.neutron_handle.handle_get(t_ctx,
- 'security_group', _id)
- if not t_sg:
- raise ext_sg.SecurityGroupNotFound(id=_id)
- self.core_plugin.create_security_group(context,
- {'security_group': t_sg})
- return self.core_plugin.get_security_group(
- context, _id, fields, tenant_id)
-
- def get_security_groups(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False, default_sg=False):
-        # if id is not specified in the filter, we just return security group
-        # data from the local Neutron server; otherwise we need to retrieve
-        # security group data from the central Neutron server and create the
-        # security groups that don't exist in the local Neutron server.
- if not filters or 'id' not in filters:
- return self.core_plugin.get_security_groups(
- context, filters, fields, sorts, limit, marker, page_reverse,
- default_sg)
-
- b_sgs = self.core_plugin.get_security_groups(
- context, filters, fields, sorts, limit, marker, page_reverse,
- default_sg)
- if len(b_sgs) == len(filters['id']):
- return b_sgs
-
- t_ctx = t_context.get_context_from_neutron_context(context)
- raw_client = self.neutron_handle._get_client(t_ctx)
- params = self._construct_params(filters, sorts, limit, marker,
- page_reverse)
- t_sgs = raw_client.list_security_groups(**params)['security_groups']
-
- t_id_set = set([sg['id'] for sg in t_sgs])
- b_id_set = set([sg['id'] for sg in b_sgs])
- missing_id_set = t_id_set - b_id_set
- if missing_id_set:
- missing_sgs = [sg for sg in t_sgs if (
- sg['id'] in missing_id_set)]
- for sg in missing_sgs:
- b_sg = self.core_plugin.create_security_group(
- context, {'security_group': sg})
- b_sgs.append(self.core_plugin.get_security_group(
- context, b_sg['id'], fields))
- return b_sgs
-
- def _handle_segment_change(self, rtype, event, trigger, context, segment):
-
- network_id = segment.get('network_id')
-
- if event == events.PRECOMMIT_CREATE:
- updated_segment = self.type_manager.reserve_network_segment(
- context, segment)
-            # The segmentation id might come from the ML2 type driver; update
-            # it in the original segment.
- segment[api.SEGMENTATION_ID] = updated_segment[api.SEGMENTATION_ID]
- elif event == events.PRECOMMIT_DELETE:
- self.type_manager.release_network_segment(context, segment)
-
-        # a change in segments could affect the resulting network mtu, so
-        # recalculate it
- network_db = self._get_network(context, network_id)
- network_db.mtu = self._get_network_mtu(
- network_db, validate=(event != events.PRECOMMIT_DELETE))
- network_db.save(session=context.session)
diff --git a/tricircle/network/managers.py b/tricircle/network/managers.py
deleted file mode 100644
index cb1cfa1a..00000000
--- a/tricircle/network/managers.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-
-from neutron.plugins.ml2 import managers
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleTypeManager(managers.TypeManager):
-
- def __init__(self):
- self.drivers = {}
-
-        # NOTE(zhiyuan) here we call the __init__ of the superclass's
-        # superclass, which is NamedExtensionManager's __init__, to bypass the
-        # initialization process of the ml2 type manager
- super(managers.TypeManager, self).__init__(
- 'tricircle.network.type_drivers',
- cfg.CONF.tricircle.type_drivers,
- invoke_on_load=True)
- LOG.info('Loaded type driver names: %s', self.names())
-
- self._register_types()
- self._check_tenant_network_types(
- cfg.CONF.tricircle.tenant_network_types)
- self._check_bridge_network_type(
- cfg.CONF.tricircle.bridge_network_type)
-
- def _check_bridge_network_type(self, bridge_network_type):
- if not bridge_network_type:
- return
- if bridge_network_type == 'local':
-            LOG.error("%s is not a valid bridge network type. "
-                      "Service terminated!", bridge_network_type)
- raise SystemExit(1)
-
- type_set = set(self.tenant_network_types)
- if bridge_network_type not in type_set:
- LOG.error("Bridge network type %s is not registered. "
- "Service terminated!", bridge_network_type)
- raise SystemExit(1)
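The constraint enforced by _check_bridge_network_type above in miniature; the type names below are invented examples:

```python
# The bridge network type must not be 'local' and must be one of the
# configured tenant network types.
tenant_network_types = ['vxlan', 'vlan']
bridge_network_type = 'vxlan'
assert bridge_network_type != 'local'
assert bridge_network_type in set(tenant_network_types)
```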
-
- def _register_types(self):
- for ext in self:
- network_type = ext.obj.get_type()
- if network_type not in self.drivers:
- self.drivers[network_type] = ext
-
- def create_network_segments(self, context, network, tenant_id):
- segments = self._process_provider_create(network)
- session = context.session
- with session.begin(subtransactions=True):
- network_id = network['id']
- if segments:
- for segment_index, segment in enumerate(segments):
- segment = self.reserve_provider_segment(
- context, segment)
- self._add_network_segment(context, network_id, segment,
- segment_index)
- else:
- segment = self._allocate_tenant_net_segment(context)
- self._add_network_segment(context, network_id, segment)
diff --git a/tricircle/network/qos_driver.py b/tricircle/network/qos_driver.py
deleted file mode 100644
index 06cbeda5..00000000
--- a/tricircle/network/qos_driver.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright 2017 Hunan University Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from neutron_lib.api.definitions import portbindings
-from neutron_lib import constants
-from neutron_lib.db import constants as db_constants
-from neutron_lib.services.qos import base
-from neutron_lib.services.qos import constants as qos_consts
-
-from oslo_log import log as logging
-
-from tricircle.common import constants as t_constants
-from tricircle.common import context
-from tricircle.common import xrpcapi
-from tricircle.db import api as db_api
-
-LOG = logging.getLogger(__name__)
-
-DRIVER = None
-
-SUPPORTED_RULES = {
- qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
- qos_consts.MAX_KBPS: {
- 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]},
- qos_consts.MAX_BURST: {
- 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]},
- qos_consts.DIRECTION: {
- 'type:values': constants.VALID_DIRECTIONS}
- },
- qos_consts.RULE_TYPE_DSCP_MARKING: {
- qos_consts.DSCP_MARK: {'type:values': constants.VALID_DSCP_MARKS}
- },
- qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: {
- qos_consts.MIN_KBPS: {
- 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]},
- qos_consts.DIRECTION: {'type:values': [constants.EGRESS_DIRECTION]}
- }
-}
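For reference, a rule payload that would pass the bandwidth-limit validators above; the constant names come from the imports in this module and the values are invented:

```python
from neutron_lib.services.qos import constants as qos_consts

bandwidth_limit_rule = {
    qos_consts.MAX_KBPS: 10000,     # kbps
    qos_consts.MAX_BURST: 1000,     # kbps
    qos_consts.DIRECTION: 'egress',
}
```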
-
-VIF_TYPES = [portbindings.VIF_TYPE_OVS,
- portbindings.VIF_TYPE_VHOST_USER,
- portbindings.VIF_TYPE_UNBOUND]
-
-
-class TricircleQoSDriver(base.DriverBase):
- def __init__(self, name, vif_types, vnic_types,
- supported_rules,
- requires_rpc_notifications):
- super(TricircleQoSDriver, self).__init__(name, vif_types, vnic_types,
- supported_rules,
- requires_rpc_notifications)
- self.xjob_handler = xrpcapi.XJobAPI()
-
- @staticmethod
- def create():
- return TricircleQoSDriver(
- name='tricircle',
- vif_types=VIF_TYPES,
- vnic_types=portbindings.VNIC_TYPES,
- supported_rules=SUPPORTED_RULES,
- requires_rpc_notifications=False)
-
- def create_policy(self, q_context, policy):
- """Create policy invocation.
-
- :param q_context: current running context information
- :param policy: a QoSPolicy object being created, which will have no
- rules.
- """
- pass
-
- def create_policy_precommit(self, q_context, policy):
- """Create policy precommit.
-
- :param q_context: current running context information
- :param policy: a QoSPolicy object being created, which will have no
- rules.
- """
- pass
-
- def update_policy(self, q_context, policy):
- """Update policy invocation.
-
- :param q_context: current running context information
- :param policy: a QoSPolicy object being updated.
- """
- pass
-
- def update_policy_precommit(self, q_context, policy):
- """Update policy precommit.
-
- :param q_context: current running context information
- :param policy: a QoSPolicy object being updated.
- """
- t_context = context.get_context_from_neutron_context(q_context)
- policy_id = policy['id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_context, policy_id, t_constants.RT_QOS)
-
- if mappings:
- self.xjob_handler.update_qos_policy(
- t_context, t_context.project_id, policy_id,
- t_constants.POD_NOT_SPECIFIED)
- self.xjob_handler.sync_qos_policy_rules(
- t_context, t_context.project_id, policy_id)
-
- def delete_policy(self, q_context, policy):
- """Delete policy invocation.
-
- :param q_context: current running context information
- :param policy: a QoSPolicy object being deleted
- """
-
- def delete_policy_precommit(self, q_context, policy):
- """Delete policy precommit.
-
- :param q_context: current running context information
- :param policy: a QoSPolicy object being deleted
- """
- t_context = context.get_context_from_neutron_context(q_context)
- policy_id = policy['id']
- self.xjob_handler.delete_qos_policy(
- t_context, t_context.project_id, policy_id,
- t_constants.POD_NOT_SPECIFIED)
-
-
-def register():
- """Register the driver."""
- global DRIVER
- if not DRIVER:
- DRIVER = TricircleQoSDriver.create()
- LOG.debug('Tricircle QoS driver registered')
diff --git a/tricircle/network/security_groups.py b/tricircle/network/security_groups.py
deleted file mode 100644
index 4fb62df4..00000000
--- a/tricircle/network/security_groups.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log
-
-from neutron.db import securitygroups_db
-
-import tricircle.common.client as t_client
-import tricircle.common.constants as t_constants
-from tricircle.common import context
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exceptions
-from tricircle.common import xrpcapi
-from tricircle.db import core
-from tricircle.db import models
-import tricircle.network.exceptions as n_exceptions
-from tricircle.network import utils as nt_utils
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleSecurityGroupMixin(securitygroups_db.SecurityGroupDbMixin):
-
- def __init__(self):
- super(TricircleSecurityGroupMixin, self).__init__()
- self.xjob_handler = xrpcapi.XJobAPI()
- self.clients = {}
-
- @staticmethod
- def _compare_rule(rule1, rule2):
- for key in ('direction', 'remote_ip_prefix', 'protocol', 'ethertype',
- 'port_range_max', 'port_range_min'):
- if rule1[key] != rule2[key] and str(rule1[key]) != str(rule2[key]):
- return False
- return True
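A quick illustration of the tolerant comparison above; note that integer and string port values compare as equal (rule dicts invented):

```python
rule_a = {'direction': 'ingress', 'remote_ip_prefix': '10.0.0.0/24',
          'protocol': 'tcp', 'ethertype': 'IPv4',
          'port_range_min': 22, 'port_range_max': 22}
rule_b = dict(rule_a, port_range_min='22', port_range_max='22')
assert TricircleSecurityGroupMixin._compare_rule(rule_a, rule_b)
```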
-
- def _get_client(self, region_name):
- if region_name not in self.clients:
- self.clients[region_name] = t_client.Client(region_name)
- return self.clients[region_name]
-
- def create_security_group_rule(self, q_context, security_group_rule):
- rule = security_group_rule['security_group_rule']
- if rule['remote_group_id']:
- raise n_exceptions.RemoteGroupNotSupported()
- sg_id = rule['security_group_id']
- sg = self.get_security_group(q_context, sg_id)
- if not sg:
- raise n_exceptions.SecurityGroupNotFound(sg_id=sg_id)
-
- new_rule = super(TricircleSecurityGroupMixin,
- self).create_security_group_rule(q_context,
- security_group_rule)
-
- t_context = context.get_context_from_neutron_context(q_context)
-
- try:
- self.xjob_handler.configure_security_group_rules(
- t_context, rule['project_id'])
- except Exception:
- raise n_exceptions.BottomPodOperationFailure(
- resource='security group rule', region_name='')
- return new_rule
-
- def delete_security_group_rule(self, q_context, _id):
- rule = self.get_security_group_rule(q_context, _id)
- if not rule:
- raise n_exceptions.SecurityGroupRuleNotFound(rule_id=_id)
-
- if rule['remote_group_id']:
- raise n_exceptions.RemoteGroupNotSupported()
- sg_id = rule['security_group_id']
- sg = self.get_security_group(q_context, sg_id)
-
- if not sg:
- raise n_exceptions.SecurityGroupNotFound(sg_id=sg_id)
-
- super(TricircleSecurityGroupMixin,
- self).delete_security_group_rule(q_context, _id)
- t_context = context.get_context_from_neutron_context(q_context)
-
- try:
- self.xjob_handler.configure_security_group_rules(
- t_context, rule['project_id'])
- except Exception:
- raise n_exceptions.BottomPodOperationFailure(
- resource='security group rule', region_name='')
-
- def get_security_group(self, context, sg_id, fields=None, tenant_id=None):
- dict_param = {'resource_id': sg_id, 'resource_type': t_constants.RT_SG}
- security_group_list = None
- try:
- security_group_list = nt_utils.check_resource_not_in_deleting(
- context, dict_param)
- except t_exceptions.ResourceNotFound:
- raise
-
- if security_group_list:
- return security_group_list
- else:
- return super(TricircleSecurityGroupMixin, self).\
- get_security_group(context, sg_id)
-
- def delete_security_group(self, context, sg_id):
-        LOG.debug("Start deleting security group %s", sg_id)
-        t_ctx = t_context.get_context_from_neutron_context(context)
-        # check whether the security group exists
-        super(TricircleSecurityGroupMixin, self).\
-            get_security_group(context, sg_id)
-        # check whether the security group is being deleted
- dict_para = {'resource_id': sg_id, 'resource_type': t_constants.RT_SG}
-
- nt_utils.check_resource_not_in_deleting(context, dict_para)
- try:
- with t_ctx.session.begin():
- core.create_resource(
- t_ctx, models.DeletingResources, dict_para)
- for pod, bottom_security_group_id in (
- self.helper.get_real_shadow_resource_iterator(
- t_ctx, t_constants.RT_SG, sg_id)):
- self._get_client(pod['region_name']). \
- delete_security_groups(t_ctx, bottom_security_group_id)
- with t_ctx.session.begin():
- core.delete_resources(
- t_ctx, models.ResourceRouting,
- filters=[{'key': 'top_id', 'comparator': 'eq',
- 'value': sg_id},
- {'key': 'pod_id', 'comparator': 'eq',
- 'value': pod['pod_id']}])
-
- with t_ctx.session.begin():
- super(TricircleSecurityGroupMixin, self). \
- delete_security_group(context, sg_id)
- except Exception:
- raise
- finally:
- with t_ctx.session.begin():
- core.delete_resources(
- t_ctx, models.DeletingResources,
- filters=[{
- 'key': 'resource_id', 'comparator': 'eq',
- 'value': sg_id}])
diff --git a/tricircle/network/segment_plugin.py b/tricircle/network/segment_plugin.py
deleted file mode 100644
index 832a01c9..00000000
--- a/tricircle/network/segment_plugin.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright 2018 Huazhong University of Science and Technology.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-import re
-
-from neutron.services.segments.plugin import Plugin
-from neutron_lib.api.definitions import availability_zone as az_def
-from neutron_lib.api.definitions import provider_net
-from neutron_lib.db import api as db_api
-from neutron_lib.exceptions import availability_zone as az_exc
-
-import tricircle.common.client as t_client
-from tricircle.common import constants
-import tricircle.common.context as t_context
-from tricircle.common import xrpcapi
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.network.central_plugin import TricirclePlugin
-from tricircle.network import helper
-
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleSegmentPlugin(Plugin):
- def __init__(self):
- super(TricircleSegmentPlugin, self).__init__()
- self.xjob_handler = xrpcapi.XJobAPI()
- self.clients = {}
- self.central_plugin = TricirclePlugin()
- self.helper = helper.NetworkHelper(self)
-
- def _get_client(self, region_name):
- if region_name not in self.clients:
- self.clients[region_name] = t_client.Client(region_name)
- return self.clients[region_name]
-
- def get_segment(self, context, sgmt_id, fields=None, tenant_id=None):
- return super(TricircleSegmentPlugin, self).get_segment(
- context, sgmt_id)
-
- def get_segments(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- return super(TricircleSegmentPlugin, self).get_segments(
- context, filters, fields, sorts, limit, marker, page_reverse)
-
- @staticmethod
- def _validate_availability_zones(context, az_list):
- if not az_list:
- return
- t_ctx = t_context.get_context_from_neutron_context(context)
- with db_api.CONTEXT_WRITER.using(context):
- pods = core.query_resource(t_ctx, models.Pod, [], [])
- az_set = set(az_list)
-
- known_az_set = set([pod['az_name'] for pod in pods])
- known_az_set = known_az_set | set(
- [pod['region_name'] for pod in pods])
-
- diff = az_set - known_az_set
- if diff:
- raise az_exc.AvailabilityZoneNotFound(
- availability_zone=diff.pop())
-
- def create_segment(self, context, segment):
- """Create a segment."""
- segment_data = segment['segment']
- segment_name = segment_data.get('name')
-
-        # if enable_l3_route_network is configured, a real external
-        # network will be created for each segment
- if cfg.CONF.tricircle.enable_l3_route_network:
- match_obj = re.match(constants.SEGMENT_NAME_PATTERN,
- segment_name)
- if match_obj:
- match_list = match_obj.groups()
- region_name = match_list[0]
- self._validate_availability_zones(context,
- [region_name])
- # create segment for maintaining the relationship
- # between routed net and real external net
- segment_db = super(TricircleSegmentPlugin, self).\
- create_segment(context, segment)
-
- # prepare real external network in central and bottom
- net_data = {
- 'tenant_id': segment_data.get('tenant_id'),
- 'name': segment_name,
- 'shared': False,
- 'admin_state_up': True,
- az_def.AZ_HINTS: [region_name],
- provider_net.PHYSICAL_NETWORK:
- segment_data.get('physical_network'),
- provider_net.NETWORK_TYPE:
- segment_data.get('network_type'),
- 'router:external': True
- }
- self.central_plugin.create_network(
- context, {'network': net_data})
-
- return segment_db
- else:
- return super(TricircleSegmentPlugin, self).create_segment(
- context, segment)
- else:
- return super(TricircleSegmentPlugin, self).create_segment(
- context, segment)
-
- def delete_segment(self, context, uuid, for_net_delete=False):
- segment_dict = self.get_segment(context, uuid)
- segment_name = segment_dict['name']
-
-        # if l3 routed network is enabled and the segment name starts
-        # with 'newl3-', we need to delete the bottom router
-        # and the bottom external network
- if cfg.CONF.tricircle.enable_l3_route_network and \
- segment_name and \
- segment_name.startswith(constants.PREFIX_OF_SEGMENT_NAME):
-
- # delete real external network
- net_filter = {'name': [segment_name]}
- nets = self.central_plugin.get_networks(context, net_filter)
- if len(nets):
- self.central_plugin.delete_network(context, nets[0]['id'])
-
- return super(TricircleSegmentPlugin, self).delete_segment(
- context, uuid)
- else:
- return super(TricircleSegmentPlugin, self).delete_segment(
- context, uuid)
diff --git a/tricircle/network/utils.py b/tricircle/network/utils.py
deleted file mode 100644
index 50270931..00000000
--- a/tricircle/network/utils.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.db import models_v2
-from neutron_lib import exceptions
-from sqlalchemy import sql
-
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exceptions
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-
-
-def check_resource_not_in_deleting(context, dict_para):
- t_ctx = t_context.get_context_from_neutron_context(context)
- with t_ctx.session.begin():
- resource_filters = []
- for key in dict_para.keys():
- resource_filters.append({'key': key,
- 'comparator': 'eq',
- 'value': dict_para[key]})
-
- deleting_resource = core.query_resource(t_ctx,
- models.DeletingResources,
- resource_filters, [])
-
- if len(deleting_resource):
- if hasattr(context, "USER_AGENT") and \
- context.USER_AGENT == t_constants.LOCAL:
- raise t_exceptions.ResourceNotFound(
- models.DeletingResources, dict_para['resource_id'])
- else:
- raise t_exceptions.ResourceIsInDeleting()
-
-
-def check_network_not_in_use(self, context, t_ctx, network_id):
-    # use a different name to avoid overriding _ensure_network_not_in_use
- subnets = self._get_subnets_by_network(context, network_id)
- auto_delete_port_names = []
-
- for subnet in subnets:
- subnet_id = subnet['id']
- region_names = [e[0] for e in t_ctx.session.query(
- sql.distinct(models.Pod.region_name)).join(
- models.ResourceRouting,
- models.Pod.pod_id == models.ResourceRouting.pod_id).filter(
- models.ResourceRouting.top_id == subnet_id)]
- auto_delete_port_names.extend([t_constants.interface_port_name % (
- region_name, subnet_id) for region_name in region_names])
- dhcp_port_name = t_constants.dhcp_port_name % subnet_id
- snat_port_name = t_constants.snat_port_name % subnet_id
- auto_delete_port_names.append(dhcp_port_name)
- auto_delete_port_names.append(snat_port_name)
-
- if not auto_delete_port_names:
-        # no pre-created ports were found; any ports left need to be deleted
-        # before deleting the network
- non_auto_delete_ports = context.session.query(
- models_v2.Port.id).filter_by(network_id=network_id)
- if non_auto_delete_ports.count():
- raise exceptions.NetworkInUse(net_id=network_id)
- return
-
- t_pod = db_api.get_top_pod(t_ctx)
- auto_delete_port_ids = [e[0] for e in t_ctx.session.query(
- models.ResourceRouting.bottom_id).filter_by(
- pod_id=t_pod['pod_id'], resource_type=t_constants.RT_PORT).filter(
- models.ResourceRouting.top_id.in_(auto_delete_port_names))]
-
- non_auto_delete_ports = context.session.query(
- models_v2.Port.id).filter_by(network_id=network_id).filter(
- ~models_v2.Port.id.in_(auto_delete_port_ids))
- if non_auto_delete_ports.count():
- raise exceptions.NetworkInUse(net_id=network_id)
diff --git a/tricircle/tempestplugin/gate_hook.sh b/tricircle/tempestplugin/gate_hook.sh
deleted file mode 100755
index c4688968..00000000
--- a/tricircle/tempestplugin/gate_hook.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This script is executed inside gate_hook function in devstack gate.
-
-set -ex
-
-GATE_DEST=$BASE/new
-
-# _setup_tricircle_multinode() - Set up a two-region test environment in
-# the devstack multinode job. The Tricircle API, central Neutron and
-# RegionOne services are enabled on the primary node, while RegionTwo
-# services are enabled on the subnode. Currently only two nodes are
-# supported in the test environment.
-
-function _setup_tricircle_multinode {
-
- export PROJECTS="openstack/networking-sfc $PROJECTS"
- PRIMARY_NODE_IP=$(cat /etc/nodepool/primary_node_private)
- SUBNODE_IP=$(head -n1 /etc/nodepool/sub_nodes_private)
-
- export OVERRIDE_ENABLED_SERVICES="c-api,c-bak,c-sch,c-vol,cinder,"
- export OVERRIDE_ENABLED_SERVICES+="g-api,g-reg,key,"
- export OVERRIDE_ENABLED_SERVICES+="n-api,n-cauth,n-cond,n-cpu,n-crt,"
- export OVERRIDE_ENABLED_SERVICES+="n-novnc,n-obj,n-sch,"
- export OVERRIDE_ENABLED_SERVICES+="placement-api,placement-client,"
- export OVERRIDE_ENABLED_SERVICES+="q-agt,q-dhcp,q-l3,q-meta,"
- export OVERRIDE_ENABLED_SERVICES+="q-metering,q-svc,"
- export OVERRIDE_ENABLED_SERVICES+="dstat,peakmem_tracker,rabbit,mysql"
-
- ENABLE_TRICIRCLE="enable_plugin tricircle https://opendev.org/openstack/tricircle/"
- ENABLE_SFC="enable_plugin networking-sfc https://opendev.org/openstack/networking-sfc/"
-
- # Configure primary node
- export DEVSTACK_LOCAL_CONFIG="$ENABLE_TRICIRCLE"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"$ENABLE_SFC"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"TRICIRCLE_START_SERVICES=True"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"TRICIRCLE_ENABLE_TRUNK=True"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"TRICIRCLE_ENABLE_SFC=True"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"TRICIRCLE_ENABLE_QOS=True"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"REGION_NAME=RegionOne"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"HOST_IP=$PRIMARY_NODE_IP"
-
- ML2_CONFIG=$'\n'"ML2_L3_PLUGIN=tricircle.network.local_l3_plugin.TricircleL3Plugin"
- ML2_CONFIG+=$'\n'"ML2_L3_PLUGIN+=,neutron.services.qos.qos_plugin.QoSPlugin"
- ML2_CONFIG+=$'\n'"[[post-config|/"'$Q_PLUGIN_CONF_FILE]]'
- ML2_CONFIG+=$'\n'"[ml2]"
- ML2_CONFIG+=$'\n'"extension_drivers = port_security,qos"
- ML2_CONFIG+=$'\n'"mechanism_drivers = openvswitch,linuxbridge,l2population"
- ML2_CONFIG+=$'\n'"[agent]"
- ML2_CONFIG+=$'\n'"extensions=sfc"
- ML2_CONFIG+=$'\n'"arp_responder=True"
- ML2_CONFIG+=$'\n'"tunnel_types=vxlan"
- ML2_CONFIG+=$'\n'"l2_population=True"
-
- export DEVSTACK_LOCAL_CONFIG+=$ML2_CONFIG
-
- # Configure sub-node
- export DEVSTACK_SUBNODE_CONFIG="$ENABLE_TRICIRCLE"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"$ENABLE_SFC"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"TRICIRCLE_START_SERVICES=False"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"TRICIRCLE_ENABLE_TRUNK=True"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"TRICIRCLE_ENABLE_SFC=True"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"REGION_NAME=RegionTwo"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"HOST_IP=$SUBNODE_IP"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"KEYSTONE_REGION_NAME=RegionOne"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"KEYSTONE_SERVICE_HOST=$PRIMARY_NODE_IP"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"KEYSTONE_AUTH_HOST=$PRIMARY_NODE_IP"
-
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"SERVICE_HOST=$SUBNODE_IP"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"RABBIT_HOST=$SUBNODE_IP"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"QPID_HOST=$SUBNODE_IP"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"DATABASE_HOST=$SUBNODE_IP"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"GLANCE_HOSTPORT=$SUBNODE_IP:9292"
- export DEVSTACK_SUBNODE_CONFIG+=$'\n'"Q_HOST=$SUBNODE_IP"
-
- export DEVSTACK_SUBNODE_CONFIG+=$ML2_CONFIG
-}
-
-if [ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]; then
- _setup_tricircle_multinode
- $GATE_DEST/devstack-gate/devstack-vm-gate.sh
-fi
diff --git a/tricircle/tempestplugin/multi_gw_topology_test.yaml b/tricircle/tempestplugin/multi_gw_topology_test.yaml
deleted file mode 100644
index 6b8d320a..00000000
--- a/tricircle/tempestplugin/multi_gw_topology_test.yaml
+++ /dev/null
@@ -1,573 +0,0 @@
-- task_set_id: preparation
- tasks:
- - task_id: image1
- region: region1
- type: image
- query:
- get_one: true
- - task_id: image2
- region: region2
- type: image
- query:
- get_one: true
- - task_id: ext-net1
- region: central
- type: network
- params:
- name: ext-net1
- is_router_external: true
- provider_network_type: vlan
- provider_physical_network: bridge
- availability_zone_hints: [RegionOne]
- - task_id: ext-subnet1
- region: central
- type: subnet
- depend: [ext-net1]
- params:
- name: ext-subnet1
- ip_version: 4
- cidr: 163.3.124.0/24
- is_dhcp_enabled: false
- network_id: ext-net1@id
- - task_id: ext-net2
- region: central
- type: network
- params:
- name: ext-net2
- is_router_external: true
- provider_network_type: flat
- provider_physical_network: extern
- availability_zone_hints: [RegionTwo]
- - task_id: ext-subnet2
- region: central
- type: subnet
- depend: [ext-net2]
- params:
- name: ext-subnet2
- ip_version: 4
- cidr: 163.3.125.0/24
- is_dhcp_enabled: false
- network_id: ext-net2@id
- - task_id: router1
- region: central
- type: router
- params:
- name: router1
- availability_zone_hints: [RegionOne]
- - task_id: router2
- region: central
- type: router
- params:
- name: router2
- availability_zone_hints: [RegionTwo]
- - task_id: router3
- region: central
- type: router
- params:
- name: router3
- - task_id: net1
- region: central
- type: network
- params:
- name: net1
- availability_zone_hints: [RegionOne]
- - task_id: subnet1
- region: central
- type: subnet
- depend: [net1]
- params:
- name: subnet1
- ip_version: 4
- cidr: 10.0.1.0/24
- network_id: net1@id
- - task_id: net2
- region: central
- type: network
- params:
- name: net2
- availability_zone_hints: [RegionTwo]
- - task_id: subnet2
- region: central
- type: subnet
- depend: [net2]
- params:
- name: subnet2
- ip_version: 4
- cidr: 10.0.2.0/24
- network_id: net2@id
- - task_id: add-gateway1
- region: central
- type: router
- action:
- target: router1@id
- method: update
- depend:
- - ext-net1
- - ext-subnet1
- - router1
- params:
- external_gateway_info:
- network_id: ext-net1@id
- enable_snat: true
- - task_id: add-gateway2
- region: central
- type: router
- action:
- target: router2@id
- method: update
- depend:
- - ext-net2
- - ext-subnet2
- - router2
- params:
- external_gateway_info:
- network_id: ext-net2@id
- enable_snat: true
- - task_id: add-subnet1
- region: central
- type: router
- depend:
- - subnet1
- - router1
- action:
- target: router1@id
- method: add_interface_to_router
- params:
- subnet_id: subnet1@id
- - task_id: add-subnet2
- region: central
- type: router
- depend:
- - subnet2
- - router2
- action:
- target: router2@id
- method: add_interface_to_router
- params:
- subnet_id: subnet2@id
- - task_id: inf-net1
- region: central
- type: port
- depend:
- - net1
- - subnet1
- params:
- network_id: net1@id
- - task_id: inf-net2
- region: central
- type: port
- depend:
- - net2
- - subnet2
- params:
- network_id: net2@id
- - task_id: add-inf1
- region: central
- type: router
- depend:
- - inf-net1
- - router3
- action:
- target: router3@id
- method: add_interface_to_router
- params:
- port_id: inf-net1@id
- - task_id: add-inf2
- region: central
- type: router
- depend:
- - inf-net2
- - router3
- action:
- target: router3@id
- method: add_interface_to_router
- params:
- port_id: inf-net2@id
- - task_id: vm1
- region: region1
- type: server
- depend:
- - net1
- - image1
- - add-gateway1
- - add-subnet1
- - add-inf1
- params:
- flavor_id: 1
- image_id: image1@id
- name: vm1
- networks:
- - uuid: net1@id
- - task_id: vm2
- region: region2
- type: server
- depend:
- - net2
- - image2
- - add-gateway2
- - add-subnet2
- - add-inf2
- params:
- flavor_id: 1
- image_id: image2@id
- name: vm2
- networks:
- - uuid: net2@id
-- task_set_id: wait-for-job
- tasks:
- - task_id: check-job
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: check
- depend: [preparation]
- tasks:
- - task_id: router1
- region: region1
- type: router
- query:
- get_one: true
- params:
- name: preparation@router1@id
- - task_id: router2
- region: region2
- type: router
- query:
- get_one: true
- params:
- name: preparation@router2@id
- - task_id: router3-1
- region: region1
- type: router
- query:
- get_one: true
- params:
- name: preparation@router3@id
- - task_id: router3-2
- region: region2
- type: router
- query:
- get_one: true
- params:
- name: preparation@router3@id
- - task_id: check-subnets1
- region: region1
- type: subnet
- validate:
- predicate: any
- condition:
- - cidr: 10.0.1.0/24
- host_routes:
- - destination: 10.0.2.0/24
- nexthop: 10.0.1.*
- - cidr: 100.0.0.0/24
- - cidr: 163.3.124.0/24
- - task_id: check-subnets2
- region: region2
- type: subnet
- validate:
- predicate: any
- condition:
- - cidr: 10.0.2.0/24
- host_routes:
- - destination: 10.0.1.0/24
- nexthop: 10.0.2.*
- - cidr: 100.0.0.0/24
- - cidr: 163.3.125.0/24
- - task_id: check-ports1
- region: region1
- type: port
- depend: [router1]
- validate:
- predicate: any
- condition:
- - fixed_ips:
- - ip_address: 10.0.1*
- - fixed_ips:
- - ip_address: 163.3.124*
- params:
- device_id: router1@id
- - task_id: check-ports2
- region: region2
- type: port
- depend: [router2]
- validate:
- predicate: any
- condition:
- - fixed_ips:
- - ip_address: 10.0.2*
- - fixed_ips:
- - ip_address: 163.3.125*
- params:
- device_id: router2@id
- - task_id: check-ports3-1
- region: region1
- type: port
- depend: [router3-1]
- validate:
- predicate: any
- condition:
- - fixed_ips:
- - ip_address: 10.0.1*
- - fixed_ips:
- - ip_address: 100.0.0*
- params:
- device_id: router3-1@id
- - task_id: check-ports3-2
- region: region2
- type: port
- depend: [router3-2]
- validate:
- predicate: any
- condition:
- - fixed_ips:
- - ip_address: 10.0.2*
- - fixed_ips:
- - ip_address: 100.0.0*
- params:
- device_id: router3-2@id
- - task_id: check-routers3-1
- region: region1
- type: router
- validate:
- predicate: any
- condition:
- - routes:
- - destination: 10.0.2*
- nexthop: 100.0.0*
- - task_id: check-routers3-2
- region: region2
- type: router
- validate:
- predicate: any
- condition:
- - routes:
- - destination: 10.0.1*
- nexthop: 100.0.0*
- - task_id: check-servers1
- region: region1
- type: server
- validate:
- predicate: any
- condition:
- - status: ACTIVE
- name: vm1
- - task_id: check-servers2
- region: region2
- type: server
- validate:
- predicate: any
- condition:
- - status: ACTIVE
- name: vm2
-- task_set_id: clean
- depend: [preparation]
- tasks:
- - task_id: delete-vm1
- region: region1
- type: server
- action:
- target: preparation@vm1@id
- method: delete
- - task_id: delete-vm2
- region: region2
- type: server
- action:
- target: preparation@vm2@id
- method: delete
- - task_id: remove-gateway1
- region: central
- type: router
- action:
- target: preparation@router1@id
- method: update
- params:
- external_gateway_info: null
- - task_id: remove-gateway2
- region: central
- type: router
- action:
- target: preparation@router2@id
- method: update
- params:
- external_gateway_info: null
- - task_id: remove-subnet1
- region: central
- type: router
- action:
- target: preparation@router1@id
- method: remove_interface_from_router
- params:
- subnet_id: preparation@subnet1@id
- - task_id: remove-subnet2
- region: central
- type: router
- action:
- target: preparation@router2@id
- method: remove_interface_from_router
- params:
- subnet_id: preparation@subnet2@id
- - task_id: remove-inf1
- region: central
- type: router
- action:
- target: preparation@router3@id
- method: remove_interface_from_router
- params:
- port_id: preparation@inf-net1@id
- - task_id: remove-inf2
- region: central
- type: router
- action:
- target: preparation@router3@id
- method: remove_interface_from_router
- params:
- port_id: preparation@inf-net2@id
- - task_id: delete-router1
- region: central
- type: router
- action:
- target: preparation@router1@id
- method: delete
- retries: 3
- depend:
- - remove-gateway1
- - remove-subnet1
- - task_id: delete-router2
- region: central
- type: router
- action:
- target: preparation@router2@id
- method: delete
- retries: 3
- depend:
- - remove-gateway2
- - remove-subnet2
- - task_id: delete-router3
- region: central
- type: router
- action:
- target: preparation@router3@id
- method: delete
- retries: 3
- depend:
- - remove-inf1
- - remove-inf2
-      # router3 must be the last router to delete; otherwise bridge
-      # resources will not be deleted (this is a known issue)
- - delete-router1
- - delete-router2
- - task_id: delete-inf1
- region: central
- type: port
- action:
- target: preparation@inf-net1@id
- method: delete
- depend: [delete-router3]
- - task_id: delete-inf2
- region: central
- type: port
- action:
- target: preparation@inf-net2@id
- method: delete
- depend: [delete-router3]
- - task_id: delete-subnet1
- region: central
- type: subnet
- action:
- target: preparation@subnet1@id
- method: delete
- retries: 3
- depend: [delete-inf1]
- - task_id: delete-subnet2
- region: central
- type: subnet
- action:
- target: preparation@subnet2@id
- method: delete
- retries: 3
- depend: [delete-inf2]
- - task_id: delete-net1
- region: central
- type: network
- action:
- target: preparation@net1@id
- method: delete
- depend: [delete-subnet1]
- - task_id: delete-net2
- region: central
- type: network
- action:
- target: preparation@net2@id
- method: delete
- depend: [delete-subnet2]
- - task_id: delete-ext-subnet1
- region: central
- type: subnet
- action:
- target: preparation@ext-subnet1@id
- method: delete
- depend: [delete-router1]
- - task_id: delete-ext-net1
- region: central
- type: network
- action:
- target: preparation@ext-net1@id
- method: delete
- depend: [delete-ext-subnet1]
- - task_id: delete-ext-subnet2
- region: central
- type: subnet
- action:
- target: preparation@ext-subnet2@id
- method: delete
- depend: [delete-router2]
- - task_id: delete-ext-net2
- region: central
- type: network
- action:
- target: preparation@ext-net2@id
- method: delete
- depend: [delete-ext-subnet2]
-- task_set_id: clean-check
- tasks:
- - task_id: check-no-routers1
- region: region1
- type: router
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-routers2
- region: region2
- type: router
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-networks1
- region: region1
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-networks2
- region: region2
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-jobs
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
diff --git a/tricircle/tempestplugin/port_delete_with_vm_create.yaml b/tricircle/tempestplugin/port_delete_with_vm_create.yaml
deleted file mode 100644
index b7344b4d..00000000
--- a/tricircle/tempestplugin/port_delete_with_vm_create.yaml
+++ /dev/null
@@ -1,246 +0,0 @@
-- task_set_id: preparation
- tasks:
- - task_id: net1
- region: central
- type: network
- params:
- name: net1
- - task_id: subnet1
- region: central
- type: subnet
- depend: [net1]
- params:
- name: subnet1
- ip_version: 4
- cidr: 10.0.1.0/24
- network_id: net1@id
- - task_id: port1
- region: central
- type: port
- depend:
- - net1
- - subnet1
- params:
- name: port1
- network_id: net1@id
- - task_id: image1
- region: region1
- type: image
- query:
- get_one: true
- - task_id: image2
- region: region2
- type: image
- query:
- get_one: true
-- task_set_id: create_vm_in_region1
- depend: [preparation]
- tasks:
- - task_id: vm1
- region: region1
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image1@id
- name: vm1
- networks:
- - uuid: preparation@net1@id
- port: preparation@port1@id
-- task_set_id: check_vm_in_region1
- depend: [preparation]
- tasks:
- - task_id: check_vm1
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm1
-- task_set_id: wait_for_vm1
- tasks:
- - task_id: check_job_vm
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: delete_vm_in_region1
- depend: [create_vm_in_region1]
- tasks:
- - task_id: delete_vm1
- region: region1
- type: server
- action:
- target: create_vm_in_region1@vm1@id
- method: delete
-- task_set_id: wait_vm_delete_in_region1
- tasks:
- - task_id: wait_delete_vm1
- region: region1
- type: server
- validate:
- retries: 10
- predicate: all
- condition:
- - name: invalid-name
-- task_set_id: create_vm_in_region2
- depend: [preparation]
- tasks:
- - task_id: vm2
- region: region2
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image2@id
- name: vm2
- networks:
- - uuid: preparation@net1@id
- port: preparation@port1@id
-- task_set_id: check_vm_in_region2
- depend: [preparation]
- tasks:
- - task_id: check_vm2
- region: region2
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm2
-- task_set_id: wait_for_vm
- tasks:
- - task_id: check_job_vm2
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: delete_vm_in_region2
- depend: [create_vm_in_region2]
- tasks:
- - task_id: delete_vm2
- region: region2
- type: server
- action:
- target: create_vm_in_region2@vm2@id
- method: delete
-- task_set_id: wait_vm_delete_in_region2
- tasks:
- - task_id: wait_delete_vm2
- region: region2
- type: server
- validate:
- retries: 10
- predicate: all
- condition:
- - name: invalid-name
-- task_set_id: create_vm_in_region1_again
- depend: [preparation]
- tasks:
- - task_id: vm1
- region: region1
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image1@id
- name: vm1
- networks:
- - uuid: preparation@net1@id
- port: preparation@port1@id
-- task_set_id: check_vm_in_region1_again
- depend: [preparation]
- tasks:
- - task_id: check_vm1
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm1
-- task_set_id: wait_for_vm1_again
- tasks:
- - task_id: check_job_vm
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: delete_vm_in_region1_again
- depend: [create_vm_in_region1_again]
- tasks:
- - task_id: delete_vm1
- region: region1
- type: server
- action:
- target: create_vm_in_region1_again@vm1@id
- method: delete
-- task_set_id: wait_vm_delete_in_region1
- tasks:
- - task_id: wait_delete_vm1
- region: region1
- type: server
- validate:
- retries: 10
- predicate: all
- condition:
- - name: invalid-name
-- task_set_id: delete_net
- depend: [preparation]
- tasks:
- - task_id: delete_port1
- region: central
- type: port
- action:
- target: preparation@port1@id
- method: delete
- - task_id: delete_subnet1
- region: central
- type: subnet
- depend: [delete_port1]
- action:
- target: preparation@subnet1@id
- method: delete
- retries: 3
- - task_id: delete_net1
- region: central
- type: network
- depend: [delete_subnet1]
- action:
- target: preparation@net1@id
- method: delete
-- task_set_id: check_net_delete
- tasks:
- - task_id: check_net_delete_job1
- region: region1
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check_net_delete_job2
- region: region2
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-jobs
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-
diff --git a/tricircle/tempestplugin/port_delete_with_vm_create_shadow.yaml b/tricircle/tempestplugin/port_delete_with_vm_create_shadow.yaml
deleted file mode 100644
index 60ebddae..00000000
--- a/tricircle/tempestplugin/port_delete_with_vm_create_shadow.yaml
+++ /dev/null
@@ -1,421 +0,0 @@
-- task_set_id: preparation
- tasks:
- - task_id: net1
- region: central
- type: network
- params:
- name: net1
- - task_id: subnet1
- region: central
- type: subnet
- depend: [net1]
- params:
- name: subnet1
- ip_version: 4
- cidr: 10.0.1.0/24
- network_id: net1@id
- - task_id: port1
- region: central
- type: port
- depend:
- - net1
- - subnet1
- params:
- name: port1
- network_id: net1@id
- - task_id: port2
- region: central
- type: port
- depend:
- - net1
- - subnet1
- params:
- name: port2
- network_id: net1@id
- - task_id: image1
- region: region1
- type: image
- query:
- get_one: true
- - task_id: image2
- region: region2
- type: image
- query:
- get_one: true
-- task_set_id: create_vm1_in_region1
- depend: [preparation]
- tasks:
- - task_id: vm1
- region: region1
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image1@id
- name: vm1
- networks:
- - uuid: preparation@net1@id
- port: preparation@port1@id
-- task_set_id: check_vm1_in_region1
- depend: [preparation]
- tasks:
- - task_id: check_vm1
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm1
-- task_set_id: wait_for_vm1
- tasks:
- - task_id: check_job_vm
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: create_vm2_in_region2
- depend: [preparation]
- tasks:
- - task_id: vm2
- region: region2
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image2@id
- name: vm2
- networks:
- - uuid: preparation@net1@id
- port: preparation@port2@id
-- task_set_id: check_vm2_in_region2
- depend: [preparation]
- tasks:
- - task_id: check_vm2
- region: region2
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm2
-- task_set_id: wait_for_vm2
- tasks:
- - task_id: check_job_vm2
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: check_shadow_port
- depend: [preparation]
- tasks:
- - task_id: check_shadow_port2
- region: region1
- type: port
- params:
- network_id: preparation@net1@id
- validate:
- predicate: any
- condition:
- - device_owner: compute:shadow
- - task_id: check_shadow_port1
- region: region2
- type: port
- params:
- network_id: preparation@net1@id
- validate:
- predicate: any
- condition:
- - device_owner: compute:shadow
-- task_set_id: delete_vm2_in_region2
- depend: [create_vm2_in_region2]
- tasks:
- - task_id: delete_vm2
- region: region2
- type: server
- action:
- target: create_vm2_in_region2@vm2@id
- method: delete
-- task_set_id: wait_vm2_delete_in_region2
- tasks:
- - task_id: wait_delete_vm2
- region: region2
- type: server
- validate:
- retries: 10
- predicate: all
- condition:
- - name: invalid-name
-- task_set_id: create_vm3_in_region1
- depend: [preparation]
- tasks:
- - task_id: vm3
- region: region1
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image1@id
- name: vm3
- networks:
- - uuid: preparation@net1@id
- port: preparation@port2@id
-- task_set_id: check_vm3_in_region1
- depend: [preparation]
- tasks:
- - task_id: check_vm3
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm3
-- task_set_id: wait_for_vm3
- tasks:
- - task_id: check_job_vm3
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: delete_vm3_in_region1
- depend: [create_vm3_in_region1]
- tasks:
- - task_id: delete_vm3
- region: region1
- type: server
- action:
- target: create_vm3_in_region1@vm3@id
- method: delete
-- task_set_id: delete_vm1_in_region1
- depend: [create_vm1_in_region1]
- tasks:
- - task_id: delete_vm1
- region: region1
- type: server
- action:
- target: create_vm1_in_region1@vm1@id
- method: delete
-- task_set_id: wait_vm1_delete_in_region1
- tasks:
- - task_id: wait_delete_vm1
- region: region1
- type: server
- validate:
- retries: 10
- predicate: all
- condition:
- - name: invalid-name
-- task_set_id: wait_for_delete_vm3
- tasks:
- - task_id: check_job_delete_vm3
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: create_vm1_in_region1_again
- depend: [preparation]
- tasks:
- - task_id: vm1
- region: region1
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image1@id
- name: vm1
- networks:
- - uuid: preparation@net1@id
- port: preparation@port1@id
-- task_set_id: check_vm1_in_region1_again
- depend: [preparation]
- tasks:
- - task_id: check_vm1_again
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm1
-- task_set_id: wait_for_vm1_again
- tasks:
- - task_id: check_job_vm_again
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: create_vm4_in_region2
- depend: [preparation]
- tasks:
- - task_id: vm4
- region: region2
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image2@id
- name: vm4
- networks:
- - uuid: preparation@net1@id
- port: preparation@port2@id
-- task_set_id: check_vm4_in_region2
- depend: [preparation]
- tasks:
- - task_id: check_vm4
- region: region2
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm4
-- task_set_id: wait_for_vm4
- tasks:
- - task_id: check_job_vm4
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: check_shadow_port_again
- depend: [preparation]
- tasks:
- - task_id: check_shadow_port2_again
- region: region1
- type: port
- params:
- network_id: preparation@net1@id
- validate:
- predicate: any
- condition:
- - device_owner: compute:shadow
- - task_id: check_shadow_port1_again
- region: region2
- type: port
- params:
- network_id: preparation@net1@id
- validate:
- predicate: any
- condition:
- - device_owner: compute:shadow
-- task_set_id: delete_vm1_in_region1_again
- depend: [create_vm1_in_region1_again]
- tasks:
- - task_id: delete_vm1_again
- region: region1
- type: server
- action:
- target: create_vm1_in_region1_again@vm1@id
- method: delete
-- task_set_id: wait_vm1_delete_in_region1_again
- tasks:
- - task_id: wait_delete_vm1_again
- region: region1
- type: server
- validate:
- retries: 10
- predicate: all
- condition:
- - name: invalid-name
-- task_set_id: delete_vm4_in_region2
- depend: [create_vm4_in_region2]
- tasks:
- - task_id: delete_vm4
- region: region2
- type: server
- action:
- target: create_vm4_in_region2@vm4@id
- method: delete
-- task_set_id: wait_vm4_delete_in_region2
- tasks:
- - task_id: wait_delete_vm4
- region: region2
- type: server
- validate:
- retries: 10
- predicate: all
- condition:
- - name: invalid-name
-- task_set_id: wait_for_all
- tasks:
- - task_id: check_job_all
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: delete_net
- depend: [preparation]
- tasks:
- - task_id: delete_port1
- region: central
- type: port
- action:
- target: preparation@port1@id
- method: delete
- - task_id: delete_port2
- region: central
- type: port
- action:
- target: preparation@port2@id
- method: delete
- - task_id: delete_subnet1
- region: central
- type: subnet
- depend: [delete_port1, delete_port2]
- action:
- target: preparation@subnet1@id
- method: delete
- retries: 3
- - task_id: delete_net1
- region: central
- type: network
- depend: [delete_subnet1]
- action:
- target: preparation@net1@id
- method: delete
-- task_set_id: check_net_delete
- tasks:
- - task_id: check_net_delete_job1
- region: region1
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check_net_delete_job2
- region: region2
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-jobs
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-
diff --git a/tricircle/tempestplugin/post_test_hook.sh b/tricircle/tempestplugin/post_test_hook.sh
deleted file mode 100755
index 7bbef148..00000000
--- a/tricircle/tempestplugin/post_test_hook.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash -xe
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This script is executed inside post_test_hook function in devstack gate.
-
-export DEST=$BASE/new
-export DEVSTACK_DIR=$DEST/devstack
-export TRICIRCLE_DIR=$DEST/tricircle
-export TRICIRCLE_DEVSTACK_PLUGIN_DIR=$TRICIRCLE_DIR/devstack
-export TRICIRCLE_TEMPEST_PLUGIN_DIR=$TRICIRCLE_DIR/tricircle/tempestplugin
-
-# execute tests only on the primary node (i.e. RegionOne)
-if [ "$OS_REGION_NAME" != "RegionOne" ]; then
- return 0
-fi
-
-PRIMARY_NODE_IP=$(cat /etc/nodepool/primary_node_private)
-
-# use the admin role to create the Tricircle central pod and region pods
-source $DEVSTACK_DIR/openrc admin admin
-unset OS_REGION_NAME
-mytoken=$(openstack --os-region-name=RegionOne token issue | awk 'NR==5 {print $4}')
-echo $mytoken
-
-openstack multiregion networking pod create --region-name CentralRegion
-
-openstack multiregion networking pod create --region-name RegionOne --availability-zone az1
-
-if [ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]; then
- openstack multiregion networking pod create --region-name RegionTwo --availability-zone az2
-fi
-
-# the usage of "nova flavor-create":
-# nova flavor-create [--ephemeral <ephemeral>] [--swap <swap>]
-#                    [--rxtx-factor <factor>] [--is-public <is-public>]
-#                    <name> <id> <ram> <disk> <vcpus>
-#
-# the following command creates a flavor with name='test',
-# id=1, ram=1024MB, disk=10GB, vcpus=1:
-# nova flavor-create test 1 1024 10 1
-image_id=$(openstack --os-region-name=RegionOne image list | awk 'NR==4 {print $2}')
-
-# change the tempest configuration to test Tricircle
-env | grep OS_
-
-# smoke test temporarily commented out due to CI environment problems
-#if [ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]; then
-# cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
-# sudo BASE=$BASE bash smoke_test.sh
-#fi
diff --git a/tricircle/tempestplugin/pre_test_hook.sh b/tricircle/tempestplugin/pre_test_hook.sh
deleted file mode 100755
index 288d69fd..00000000
--- a/tricircle/tempestplugin/pre_test_hook.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This script is executed inside pre_test_hook function in devstack gate.
-
-export localconf=$BASE/new/devstack/local.conf
-export TRICIRCLE_API_CONF=/etc/tricircle/api.conf
-export TRICIRCLE_XJOB_CONF=/etc/tricircle/xjob.conf
diff --git a/tricircle/tempestplugin/qos_policy_rule_test.yaml b/tricircle/tempestplugin/qos_policy_rule_test.yaml
deleted file mode 100644
index 8040dd0f..00000000
--- a/tricircle/tempestplugin/qos_policy_rule_test.yaml
+++ /dev/null
@@ -1,861 +0,0 @@
-- task_set_id: preparation
- tasks:
- - task_id: policy1
- type: qos_policy
- region: central
- params:
- name: policy1
- - task_id: bandwidth_limit_rule1
- region: central
- type: qos_bandwidth_limit_rule
- depend: [policy1]
- params:
- max_kbps: 3000
- max_burst_kbps: 300
- qos_policy: policy1@id
- - task_id: policy2
- type: qos_policy
- region: central
- params:
- name: policy2
- - task_id: bandwidth_limit_rule2
- region: central
- type: qos_bandwidth_limit_rule
- depend: [policy2]
- params:
- max_kbps: 3000
- max_burst_kbps: 300
- qos_policy: policy2@id
- - task_id: policy3
- type: qos_policy
- region: central
- params:
- name: policy3
- - task_id: policy4
- type: qos_policy
- region: central
- params:
- name: policy4
- - task_id: policy5
- type: qos_policy
- region: central
- params:
- name: policy5
- - task_id: bandwidth_limit_rule5
- region: central
- type: qos_bandwidth_limit_rule
- depend: [policy5]
- params:
- max_kbps: 3000
- max_burst_kbps: 300
- qos_policy: policy5@id
- - task_id: dscp_marking_rule1
- region: central
- type: qos_dscp_marking_rule
- depend: [policy1]
- params:
- dscp_mark: 30
- qos_policy: policy1@id
- - task_id: net1
- region: central
- type: network
- params:
- name: net1
- - task_id: subnet1
- region: central
- type: subnet
- depend: [net1]
- params:
- name: subnet1
- ip_version: 4
- cidr: 10.0.1.0/24
- network_id: net1@id
- - task_id: port1
- region: central
- type: port
- depend:
- - net1
- - subnet1
- params:
- name: port1
- network_id: net1@id
- - task_id: net2
- region: central
- type: network
- params:
- name: net2
- - task_id: subnet2
- region: central
- type: subnet
- depend: [net2]
- params:
- name: subnet2
- ip_version: 4
- cidr: 10.0.2.0/24
- network_id: net2@id
- - task_id: port2
- region: central
- type: port
- depend:
- - net2
- - subnet2
- params:
- name: port2
- network_id: net2@id
- - task_id: net3
- region: central
- type: network
- params:
- name: net3
- - task_id: subnet3
- region: central
- type: subnet
- depend: [net3]
- params:
- name: subnet3
- ip_version: 4
- cidr: 10.0.3.0/24
- network_id: net3@id
- - task_id: port3
- region: central
- type: port
- depend:
- - net3
- - subnet3
- params:
- name: port3
- network_id: net3@id
- - task_id: net4
- region: central
- type: network
- params:
- name: net4
- - task_id: subnet4
- region: central
- type: subnet
- depend: [net4]
- params:
- name: subnet4
- ip_version: 4
- cidr: 10.0.4.0/24
- network_id: net4@id
- - task_id: port4
- region: central
- type: port
- depend:
- - net4
- - subnet4
- params:
- name: port4
- network_id: net4@id
- - task_id: net5
- region: central
- type: network
- params:
- name: net5
- - task_id: image1
- region: region1
- type: image
- query:
- get_one: true
-- task_set_id: check_qos_create
- depend: [preparation]
- tasks:
- - task_id: check_policy1_central
- region: central
- type: qos_policy
- validate:
- predicate: any
- condition:
- - name: policy1
- - task_id: check_bandwidth_limit_rule1
- region: central
- type: qos_bandwidth_limit_rule
- params:
- qos_policy: preparation@policy1@id
- validate:
- predicate: any
- condition:
- - id: preparation@bandwidth_limit_rule1@id
- - task_id: check_dscp_marking_rule1
- region: central
- type: qos_dscp_marking_rule
- params:
- qos_policy: preparation@policy1@id
- validate:
- predicate: any
- condition:
- - id: preparation@dscp_marking_rule1@id
- - task_id: check_policy1_region
- region: region1
- type: qos_policy
- validate:
- predicate: all
- condition:
- - name: invalid-name
-- task_set_id: policy_update_only_central
- depend: [preparation]
- tasks:
- - task_id: policy1_update_only_central
- region: central
- type: qos_policy
- action:
- target: preparation@policy1@id
- method: update
- params:
- name: policy1_update_only_central
- - task_id: bandwidth_limit_rule1_update_only_central
- region: central
- type: qos_bandwidth_limit_rule
- action:
- target: preparation@bandwidth_limit_rule1@id
- method: update
- params:
- qos_policy: preparation@policy1@id
- max_kbps: 4000
- - task_id: dscp_marking_rule1_update_only_central
- region: central
- type: qos_dscp_marking_rule
- action:
- target: preparation@dscp_marking_rule1@id
- method: update
- params:
- qos_policy: preparation@policy1@id
- dscp_mark: 40
-- task_set_id: check_qos_update_only_central
- depend: [preparation]
- tasks:
- - task_id: check_policy1_update_only_central
- region: central
- type: qos_policy
- validate:
- predicate: any
- condition:
- - name: policy1_update_only_central
- - task_id: check_limit_rule1_update_only_central
- region: central
- type: qos_bandwidth_limit_rule
- params:
- qos_policy: preparation@policy1@id
- validate:
- predicate: any
- condition:
- - id: preparation@bandwidth_limit_rule1@id
- max_kbps: 4000
- - task_id: check_dscp_rule1_update_only_central
- region: central
- type: qos_dscp_marking_rule
- params:
- qos_policy: preparation@policy1@id
- validate:
- predicate: any
- condition:
- - id: preparation@dscp_marking_rule1@id
- dscp_mark: 40
-- task_set_id: central_bound_policy
- depend: [preparation]
- tasks:
- - task_id: net1_policy
- region: central
- type: network
- action:
- target: preparation@net1@id
- method: update
- params:
- qos_policy_id: preparation@policy1@id
- - task_id: net5_policy
- region: central
- type: network
- action:
- target: preparation@net5@id
- method: update
- params:
- qos_policy_id: preparation@policy5@id
- - task_id: port3_policy
- region: central
- type: port
- action:
- target: preparation@port3@id
- method: update
- params:
- qos_policy_id: preparation@policy3@id
-- task_set_id: create_vm
- depend: [preparation]
- tasks:
- - task_id: vm1
- region: region1
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image1@id
- name: vm1
- networks:
- - uuid: preparation@net1@id
- port: preparation@port1@id
- - task_id: vm2
- region: region1
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image1@id
- name: vm2
- networks:
- - uuid: preparation@net2@id
- - task_id: vm3
- region: region1
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image1@id
- name: vm3
- networks:
- - uuid: preparation@net3@id
- port: preparation@port3@id
- - task_id: vm4
- region: region1
- type: server
- params:
- flavor_id: 1
- image_id: preparation@image1@id
- name: vm4
- networks:
- - uuid: preparation@net4@id
- port: preparation@port4@id
-- task_set_id: check_vm
- depend: [preparation]
- tasks:
- - task_id: check_vm1
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm1
- - task_id: check_vm2
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm2
- - task_id: check_vm3
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm3
- - task_id: check_vm4
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm4
-- task_set_id: wait_for_vm
- tasks:
- - task_id: check_job_vm
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: local_bound_policy
- depend: [preparation]
- tasks:
- - task_id: net2_policy
- region: central
- type: network
- action:
- target: preparation@net2@id
- method: update
- params:
- qos_policy_id: preparation@policy2@id
- - task_id: port4_policy
- region: central
- type: port
- action:
- target: preparation@port4@id
- method: update
- params:
- qos_policy_id: preparation@policy4@id
-- task_set_id: wait_for_bound
- tasks:
- - task_id: check_job_bound
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: check_bound_policy
- depend: [preparation]
- tasks:
- - task_id: check_net1_policy_central
- region: central
- type: network
- validate:
- predicate: any
- condition:
- - qos_policy_id: preparation@policy1@id
- - task_id: check_net2_policy_central
- region: central
- type: network
- validate:
- predicate: any
- condition:
- - qos_policy_id: preparation@policy2@id
- - task_id: check_net5_policy_central
- region: central
- type: network
- validate:
- predicate: any
- condition:
- - qos_policy_id: preparation@policy5@id
- - task_id: check_port3_policy_central
- region: central
- type: port
- validate:
- predicate: any
- condition:
- - qos_policy_id: preparation@policy3@id
- - task_id: check_port4_policy_central
- region: central
- type: port
- validate:
- predicate: any
- condition:
- - qos_policy_id: preparation@policy4@id
- - task_id: check_policy1_region
- region: region1
- type: qos_policy
- validate:
- predicate: any
- condition:
- - name: policy1_update_only_central
- - task_id: check_policy2_region
- region: region1
- type: qos_policy
- validate:
- predicate: any
- condition:
- - name: policy2
- - task_id: check_policy3_region
- region: region1
- type: qos_policy
- validate:
- predicate: any
- condition:
- - name: policy3
- - task_id: check_policy4_region
- region: region1
- type: qos_policy
- validate:
- predicate: any
- condition:
- - name: policy4
-- task_set_id: policy_update_with_local
- depend: [preparation]
- tasks:
- - task_id: policy4_update_with_local
- region: central
- type: qos_policy
- action:
- target: preparation@policy4@id
- method: update
- params:
- name: policy4_update_with_local
- - task_id: bandwidth_limit_rule2_update_with_local
- region: central
- type: qos_bandwidth_limit_rule
- action:
- target: preparation@bandwidth_limit_rule2@id
- method: update
- params:
- qos_policy: preparation@policy2@id
- max_kbps: 5000
-- task_set_id: wait_for_job_update
- tasks:
- - task_id: check_job_update
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: check_qos_update_with_local
- depend: [preparation]
- tasks:
- - task_id: check_policy4_update_with_local
- region: central
- type: qos_policy
- validate:
- predicate: any
- condition:
- - name: policy4_update_with_local
- - task_id: check_policy4_update_region
- region: region1
- type: qos_policy
- validate:
- predicate: any
- condition:
- - name: policy4_update_with_local
- - task_id: check_limit_rule2_update_with_local
- region: central
- type: qos_bandwidth_limit_rule
- params:
- qos_policy: preparation@policy2@id
- validate:
- predicate: any
- condition:
- - id: preparation@bandwidth_limit_rule2@id
- max_kbps: 5000
-- task_set_id: unbound_policy
- depend: [preparation]
- tasks:
- - task_id: net1_no_policy
- region: central
- type: network
- action:
- target: preparation@net1@id
- method: update
- params:
- qos_policy_id:
- - task_id: net2_no_policy
- region: central
- type: network
- action:
- target: preparation@net2@id
- method: update
- params:
- qos_policy_id:
- - task_id: port3_no_policy
- region: central
- type: port
- action:
- target: preparation@port3@id
- method: update
- params:
- qos_policy_id:
- - task_id: port4_no_policy
- region: central
- type: port
- action:
- target: preparation@port4@id
- method: update
- params:
- qos_policy_id:
- - task_id: net5_no_policy
- region: central
- type: network
- action:
- target: preparation@net5@id
- method: update
- params:
- qos_policy_id:
-- task_set_id: wait_for_qos_unbound
- tasks:
- - task_id: check_job_qos_unbound
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: qos-rule-delete
- depend: [preparation]
- tasks:
- - task_id: bandwidth_limit_rule1_delete
- region: central
- type: qos_bandwidth_limit_rule
- action:
- target: preparation@bandwidth_limit_rule1@id
- method: delete
- params:
- qos_policy: preparation@policy1@id
- - task_id: bandwidth_limit_rule2_delete
- region: central
- type: qos_bandwidth_limit_rule
- action:
- target: preparation@bandwidth_limit_rule2@id
- method: delete
- params:
- qos_policy: preparation@policy2@id
- - task_id: dscp_marking_rule1_delete
- region: central
- type: qos_dscp_marking_rule
- action:
- target: preparation@dscp_marking_rule1@id
- method: delete
- params:
- qos_policy: preparation@policy1@id
- - task_id: bandwidth_limit_rule5_delete
- region: central
- type: qos_bandwidth_limit_rule
- action:
- target: preparation@bandwidth_limit_rule5@id
- method: delete
- params:
- qos_policy: preparation@policy5@id
-- task_set_id: wait_for_rule_delete
- depend: [preparation]
- tasks:
- - task_id: check_job_rule_delete
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
- - task_id: check_for_bandwidth_limit1_delete_central
- region: central
- type: qos_bandwidth_limit_rule
- params:
- qos_policy: preparation@policy1@id
- validate:
- predicate: all
- retries: 10
- condition:
- id: invalid-id
- - task_id: check_for_bandwidth_limit2_delete_central
- region: central
- type: qos_bandwidth_limit_rule
- params:
- qos_policy: preparation@policy2@id
- validate:
- predicate: all
- retries: 10
- condition:
- id: invalid-id
- - task_id: check_for_bandwidth_limit5_delete_central
- region: central
- type: qos_bandwidth_limit_rule
- params:
- qos_policy: preparation@policy5@id
- validate:
- predicate: all
- retries: 10
- condition:
- id: invalid-id
- - task_id: check_for_dscp_marking1_delete_central
- region: central
- type: qos_dscp_marking_rule
- params:
- qos_policy: preparation@policy1@id
- validate:
- predicate: all
- retries: 10
- condition:
- id: invalid-id
-- task_set_id: qos-policy-delete
- depend: [preparation]
- tasks:
- - task_id: policy1_delete
- region: central
- type: qos_policy
- action:
- target: preparation@policy1@id
- method: delete
- - task_id: policy2_delete
- region: central
- type: qos_policy
- action:
- target: preparation@policy2@id
- method: delete
- - task_id: policy3_delete
- region: central
- type: qos_policy
- action:
- target: preparation@policy3@id
- method: delete
- - task_id: policy4_delete
- region: central
- type: qos_policy
- action:
- target: preparation@policy4@id
- method: delete
- - task_id: policy5_delete
- region: central
- type: qos_policy
- action:
- target: preparation@policy5@id
- method: delete
-- task_set_id: wait_for_policy_delete
- tasks:
- - task_id: check_for_policy_delete_central
- region: central
- type: qos_policy
- validate:
- predicate: all
- retries: 10
- condition:
- name: invalid-name
- - task_id: check_for_policy_delete_region
- region: region1
- type: qos_policy
- validate:
- predicate: all
- retries: 10
- condition:
- name: invalid-name
-- task_set_id: delete_vm
- depend: [create_vm]
- tasks:
- - task_id: delete_vm1
- region: region1
- type: server
- action:
- target: create_vm@vm1@id
- method: delete
- - task_id: delete_vm2
- region: region1
- type: server
- action:
- target: create_vm@vm2@id
- method: delete
- - task_id: delete_vm3
- region: region1
- type: server
- action:
- target: create_vm@vm3@id
- method: delete
- - task_id: delete_vm4
- region: region1
- type: server
- action:
- target: create_vm@vm4@id
- method: delete
-- task_set_id: wait_for_vm_delete
- tasks:
- - task_id: check_for_vm_delete
- region: region1
- type: server
- validate:
- predicate: all
- retries: 10
- condition:
- name: invalid-name
-- task_set_id: delete_net
- depend: [preparation]
- tasks:
- - task_id: delete_port1
- region: central
- type: port
- action:
- target: preparation@port1@id
- method: delete
- - task_id: delete_port2
- region: central
- type: port
- action:
- target: preparation@port2@id
- method: delete
- - task_id: delete_port3
- region: central
- type: port
- action:
- target: preparation@port3@id
- method: delete
- - task_id: delete_port4
- region: central
- type: port
- action:
- target: preparation@port4@id
- method: delete
- - task_id: delete_subnet1
- region: central
- type: subnet
- depend: [delete_port1]
- action:
- target: preparation@subnet1@id
- method: delete
- retries: 3
- - task_id: delete_subnet2
- region: central
- type: subnet
- depend: [delete_port2]
- action:
- target: preparation@subnet2@id
- method: delete
- retries: 3
- - task_id: delete_subnet3
- region: central
- type: subnet
- depend: [delete_port3]
- action:
- target: preparation@subnet3@id
- method: delete
- retries: 3
- - task_id: delete_subnet4
- region: central
- type: subnet
- depend: [delete_port4]
- action:
- target: preparation@subnet4@id
- method: delete
- retries: 3
- - task_id: delete_net1
- region: central
- type: network
- depend: [delete_subnet1]
- action:
- target: preparation@net1@id
- method: delete
- - task_id: delete_net2
- region: central
- type: network
- depend: [delete_subnet2]
- action:
- target: preparation@net2@id
- method: delete
- - task_id: delete_net3
- region: central
- type: network
- depend: [delete_subnet3]
- action:
- target: preparation@net3@id
- method: delete
- - task_id: delete_net4
- region: central
- type: network
- depend: [delete_subnet4]
- action:
- target: preparation@net4@id
- method: delete
- - task_id: delete_net5
- region: central
- type: network
- action:
- target: preparation@net5@id
- method: delete
-- task_set_id: check_net_delete
- tasks:
- - task_id: check_net_delete_job
- region: region1
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-jobs
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
diff --git a/tricircle/tempestplugin/run_yaml_test.py b/tricircle/tempestplugin/run_yaml_test.py
deleted file mode 100644
index ef8044ed..00000000
--- a/tricircle/tempestplugin/run_yaml_test.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-import task_runner
-
-
-if __name__ == '__main__':
- (yaml_path, auth_url, project, user, password,
- project_domain_id, user_domain_id) = sys.argv[1:]
- runner = task_runner.SDKRunner(auth_url, project, user,
- password, project_domain_id, user_domain_id)
- engine = task_runner.RunnerEngine(yaml_path, runner)
-
- error_msg = engine.run_task_sets()
- if error_msg:
- sys.exit(error_msg)
diff --git a/tricircle/tempestplugin/sfc_test.yaml b/tricircle/tempestplugin/sfc_test.yaml
deleted file mode 100644
index 6b278508..00000000
--- a/tricircle/tempestplugin/sfc_test.yaml
+++ /dev/null
@@ -1,786 +0,0 @@
-- task_set_id: preparation
- tasks:
- - task_id: image1
- type: image
- region: region1
- query:
- get_one: true
- - task_id: image2
- type: image
- region: region2
- query:
- get_one: true
- - task_id: net1
- type: network
- region: central
- params:
- name: net1
- provider_network_type: vxlan
- - task_id: subnet1
- type: subnet
- region: central
- depend: [net1]
- params:
- name: subnet1
- ip_version: 4
- cidr: 10.0.1.0/24
- network_id: net1@id
- - task_id: p1
- type: port
- region: central
- depend:
- - net1
- - subnet1
- params:
- name: p1
- network_id: net1@id
- - task_id: p2
- type: port
- region: central
- depend:
- - net1
- - subnet1
- params:
- name: p2
- network_id: net1@id
- - task_id: p3
- type: port
- region: central
- depend:
- - net1
- - subnet1
- params:
- name: p3
- network_id: net1@id
- - task_id: p4
- type: port
- region: central
- depend:
- - net1
- - subnet1
- params:
- name: p4
- network_id: net1@id
- - task_id: p5
- type: port
- region: central
- depend:
- - net1
- - subnet1
- params:
- name: p5
- network_id: net1@id
- - task_id: p6
- type: port
- region: central
- depend:
- - net1
- - subnet1
- params:
- name: p6
- network_id: net1@id
- - task_id: vm_sfc1
- region: region1
- type: server
- depend:
- - p2
- - p3
- - image1
- params:
- flavor_id: 1
- image_id: image1@id
- name: vm_sfc1
- networks:
- - port: p2@id
- - port: p3@id
- - task_id: vm_sfc2
- region: region2
- type: server
- depend:
- - p4
- - p5
- - image2
- params:
- flavor_id: 1
- image_id: image2@id
- name: vm_sfc2
- networks:
- - port: p4@id
- - port: p5@id
- - task_id: vm_src
- region: region1
- type: server
- depend:
- - p1
- - image1
- params:
- flavor_id: 1
- image_id: image1@id
- name: vm_src
- networks:
- - port: p1@id
- - task_id: vm_dst
- region: region2
- type: server
- depend:
- - p6
- - image2
- params:
- flavor_id: 1
- image_id: image2@id
- name: vm_dst
- networks:
- - port: p6@id
- - task_id: wait-servers1
- region: region1
- type: server
- depend:
- - vm_src
- - vm_sfc1
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm_src
- - status: ACTIVE
- name: vm_sfc1
- - task_id: wait-servers2
- region: region2
- type: server
- depend:
- - wait-servers1
- - vm_dst
- - vm_sfc2
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm_dst
- - status: ACTIVE
- name: vm_sfc2
- - task_id: pp1
- region: central
- type: port_pair
- depend:
- - vm_sfc1
- - p2
- - p3
- - wait-servers2
- params:
- name: pp1
- ingress: p2@id
- egress: p3@id
- - task_id: pp2
- region: central
- type: port_pair
- depend:
- - vm_sfc2
- - p4
- - p5
- - wait-servers2
- params:
- name: pp2
- ingress: p4@id
- egress: p5@id
- - task_id: ppg1
- region: central
- type: port_pair_group
- depend: [pp1]
- params:
- name: ppg1
- port_pairs: [pp1@id]
- - task_id: ppg2
- region: central
- type: port_pair_group
- depend: [pp2]
- params:
- name: ppg2
- port_pairs: [pp2@id]
- - task_id: fc
- region: central
- type: flow_classifier
- depend: [p1]
- params:
- name: fc
- logical_source_port: p1@id
- source_ip_prefix: 10.0.1.0/24
- - task_id: pc
- region: central
- type: port_chain
- depend:
- - ppg1
- - ppg2
- - fc
- params:
- name: pc
- flow_classifiers: [fc@id]
- port_pair_groups:
- - ppg1@id
- - ppg2@id
-- task_set_id: wait-for-job
- tasks:
- - task_id: check-job
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: update-port-chain
- depend: [preparation]
- tasks:
- - task_id: update-pc
- region: central
- type: port_chain
- action:
- target: preparation@pc@id
- method: update
- params:
- name: pc-update
- port_pair_groups: [preparation@ppg1@id]
- flow_classifiers: []
- - task_id: check-job
- region: central
- type: job
- depend: [update-pc]
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
- - task_id: ppg1-1
- region: region1
- type: port_pair_group
- query:
- get_one: true
- params:
- name: ppg1
- - task_id: ppg1-2
- region: region2
- type: port_pair_group
- query:
- get_one: true
- params:
- name: ppg1
- - task_id: check-update-pc-1
- region: region1
- type: port_chain
- depend:
- - check-job
- - ppg1-1
- validate:
- predicate: any
- condition:
- - name: pc-update
- port_pair_groups: [ppg1-1@id]
- flow_classifiers: []
- - task_id: check-update-pc-2
- region: region2
- type: port_chain
- depend:
- - check-job
- - ppg1-2
- validate:
- predicate: any
- condition:
- - name: pc-update
- port_pair_groups: [ppg1-2@id]
- flow_classifiers: []
- - task_id: update-pc-to-original
- region: central
- type: port_chain
- depend:
- - check-update-pc-1
- - check-update-pc-2
- action:
- target: preparation@pc@id
- method: update
- params:
- name: pc
- flow_classifiers: [preparation@fc@id]
- port_pair_groups:
- - preparation@ppg1@id
- - preparation@ppg2@id
-- task_set_id: update-flow-classifier
- depend: [preparation]
- tasks:
- - task_id: update-fc
- region: central
- type: flow_classifier
- action:
- target: preparation@fc@id
- method: update
- params:
- name: fc-update
- - task_id: check-job
- region: central
- type: job
- depend: [update-fc]
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
- - task_id: check-update-fc-1
- region: region1
- type: flow_classifier
- depend: [check-job]
- query:
- get_one: true
- params:
- name: fc-update
- - task_id: check-update-fc-2
- region: region2
- type: flow_classifier
- depend: [check-job]
- query:
- get_one: true
- params:
- name: fc-update
- - task_id: update-fc-to-original
- region: central
- type: flow_classifier
- depend:
- - check-update-fc-1
- - check-update-fc-2
- action:
- target: preparation@fc@id
- method: update
- params:
- name: fc
-- task_set_id: update-port-pair-group
- depend: [preparation]
- tasks:
- - task_id: update-ppg1
- region: central
- type: port_pair_group
- action:
- target: preparation@ppg1@id
- method: update
- params:
- name: ppg1-update
- port_pairs: []
- - task_id: check-job
- region: central
- type: job
- depend: [update-ppg1]
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
- - task_id: check-update-ppg1-1
- region: region1
- type: port_pair_group
- depend: [check-job]
- validate:
- predicate: any
- condition:
- - name: ppg1-update
- port_pairs: []
- - task_id: check-update-ppg1-2
- region: region2
- type: port_pair_group
- depend: [check-job]
- validate:
- predicate: any
- condition:
- - name: ppg1-update
- port_pairs: []
- - task_id: update-ppg1-to-original
- region: central
- type: port_pair_group
- depend:
- - check-update-ppg1-1
- - check-update-ppg1-2
- action:
- target: preparation@ppg1@id
- method: update
- params:
- name: ppg1
- port_pairs: [preparation@pp1@id]
-- task_set_id: update-port-pair
- depend: [preparation]
- tasks:
- - task_id: update-pp1
- region: central
- type: port_pair
- action:
- target: preparation@pp1@id
- method: update
- params:
- name: pp1-update
- - task_id: check-job
- region: central
- type: job
- depend: [update-pp1]
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
- - task_id: check-update-pp1-1
- region: region1
- type: port_pair
- depend: [check-job]
- query:
- get_one: true
- params:
- name: pp1-update
- - task_id: check-update-pp1-2
- region: region2
- type: port_pair
- depend: [check-job]
- query:
- get_one: true
- params:
- name: pp1-update
- - task_id: update-pp1-to-original
- region: central
- type: port_pair
- depend:
- - check-update-pp1-1
- - check-update-pp1-2
- action:
- target: preparation@pp1@id
- method: update
- params:
- name: pp1
-- task_set_id: wait-for-job
- tasks:
- - task_id: check-job
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: check
- depend: [preparation]
- tasks:
- - task_id: pp1-1
- region: region1
- type: port_pair
- query:
- get_one: true
- params:
- name: pp1
- - task_id: pp1-2
- region: region2
- type: port_pair
- query:
- get_one: true
- params:
- name: pp1
- - task_id: pp2-1
- region: region1
- type: port_pair
- query:
- get_one: true
- params:
- name: pp2
- - task_id: pp2-2
- region: region2
- type: port_pair
- query:
- get_one: true
- params:
- name: pp2
- - task_id: ppg1-1
- region: region1
- type: port_pair_group
- query:
- get_one: true
- params:
- name: ppg1
- - task_id: ppg1-2
- region: region2
- type: port_pair_group
- query:
- get_one: true
- params:
- name: ppg1
- - task_id: ppg2-1
- region: region1
- type: port_pair_group
- query:
- get_one: true
- params:
- name: ppg2
- - task_id: ppg2-2
- region: region2
- type: port_pair_group
- query:
- get_one: true
- params:
- name: ppg2
- - task_id: fc-1
- region: region1
- type: flow_classifier
- query:
- get_one: true
- params:
- name: fc
- - task_id: fc-2
- region: region2
- type: flow_classifier
- query:
- get_one: true
- params:
- name: fc
- - task_id: check-pp-1
- region: region1
- type: port_pair
- validate:
- predicate: any
- condition:
- - ingress: preparation@p2@id
- egress: preparation@p3@id
- name: pp1
- - ingress: preparation@p4@id
- egress: preparation@p5@id
- name: pp2
- - task_id: check-pp-2
- region: region2
- type: port_pair
- validate:
- predicate: any
- condition:
- - ingress: preparation@p2@id
- egress: preparation@p3@id
- name: pp1
- - ingress: preparation@p4@id
- egress: preparation@p5@id
- name: pp2
- - task_id: check-ppg-1
- region: region1
- type: port_pair_group
- depend:
- - pp1-1
- - pp2-1
- validate:
- predicate: any
- condition:
- - name: ppg1
- port_pairs: [pp1-1@id]
- - name: ppg2
- port_pairs: [pp2-1@id]
- - task_id: check-ppg-2
- region: region2
- type: port_pair_group
- depend:
- - pp1-2
- - pp2-2
- validate:
- predicate: any
- condition:
- - name: ppg1
- port_pairs: [pp1-2@id]
- - name: ppg2
- port_pairs: [pp2-2@id]
- - task_id: check-pc-1
- region: region1
- type: port_chain
- depend:
- - ppg1-1
- - ppg2-1
- - fc-1
- validate:
- predicate: any
- condition:
- - name: pc
- port_pair_groups:
- - ppg1-1@id
- - ppg2-1@id
- flow_classifiers: [fc-1@id]
- - task_id: check-pc-2
- region: region2
- type: port_chain
- depend:
- - ppg1-2
- - ppg2-2
- - fc-2
- validate:
- predicate: any
- condition:
- - name: pc
- port_pair_groups:
- - ppg1-2@id
- - ppg2-2@id
- flow_classifiers: [fc-2@id]
-- task_set_id: clean
- depend: [preparation]
- tasks:
- - task_id: delete-pc
- region: central
- type: port_chain
- action:
- target: preparation@pc@id
- method: delete
- - task_id: delete-fc
- region: central
- type: flow_classifier
- depend: [delete-pc]
- action:
- target: preparation@fc@id
- method: delete
- - task_id: delete-ppg1
- region: central
- type: port_pair_group
- depend: [delete-pc]
- action:
- target: preparation@ppg1@id
- method: delete
- - task_id: delete-ppg2
- region: central
- type: port_pair_group
- depend: [delete-pc]
- action:
- target: preparation@ppg2@id
- method: delete
- - task_id: delete-pp1
- region: central
- type: port_pair
- depend: [delete-ppg1]
- action:
- target: preparation@pp1@id
- method: delete
- - task_id: delete-pp2
- region: central
- type: port_pair
- depend: [delete-ppg2]
- action:
- target: preparation@pp2@id
- method: delete
- - task_id: delete-vm-src
- region: region1
- type: server
- action:
- target: preparation@vm_src@id
- method: delete
- - task_id: delete-vm-sfc1
- region: region1
- type: server
- depend: [delete-pp1]
- action:
- target: preparation@vm_sfc1@id
- method: delete
- - task_id: delete-vm-dst
- region: region2
- type: server
- action:
- target: preparation@vm_dst@id
- method: delete
- - task_id: delete-vm-sfc2
- region: region2
- type: server
- depend: [delete-pp2]
- action:
- target: preparation@vm_sfc2@id
- method: delete
-- task_set_id: wait-server-delete
- tasks:
- - task_id: check-no-servers-1
- region: region1
- type: server
- validate:
- retries: 10
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-servers-2
- region: region2
- type: server
- validate:
- retries: 10
- predicate: all
- condition:
- - name: invalid-name
-- task_set_id: clean-cont
- depend: [preparation]
- tasks:
- - task_id: delete-p1
- region: central
- type: port
- action:
- target: preparation@p1@id
- method: delete
- - task_id: delete-p2
- region: central
- type: port
- action:
- target: preparation@p2@id
- method: delete
- - task_id: delete-p3
- region: central
- type: port
- action:
- target: preparation@p3@id
- method: delete
- - task_id: delete-p4
- region: central
- type: port
- action:
- target: preparation@p4@id
- method: delete
- - task_id: delete-p5
- region: central
- type: port
- action:
- target: preparation@p5@id
- method: delete
- - task_id: delete-p6
- region: central
- type: port
- action:
- target: preparation@p6@id
- method: delete
- - task_id: delete-subnet
- region: central
- depend:
- - delete-p1
- - delete-p2
- - delete-p3
- - delete-p4
- - delete-p5
- - delete-p6
- type: subnet
- action:
- target: preparation@subnet1@id
- method: delete
- retries: 3
- - task_id: delete-net
- region: central
- depend: [delete-subnet]
- type: network
- action:
- target: preparation@net1@id
- method: delete
-- task_set_id: clean-check
- tasks:
- - task_id: check-no-networks1
- region: region1
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-networks2
- region: region2
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-jobs
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
diff --git a/tricircle/tempestplugin/single_gw_topology_test.yaml b/tricircle/tempestplugin/single_gw_topology_test.yaml
deleted file mode 100644
index 2638d0e6..00000000
--- a/tricircle/tempestplugin/single_gw_topology_test.yaml
+++ /dev/null
@@ -1,537 +0,0 @@
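-# Tasks reference results of earlier tasks as task_id@field, or task_set_id@task_id@field across
-# task sets; every task referenced this way must also appear in the referencing task's "depend" list.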
-- task_set_id: preparation
- tasks:
- - task_id: image1
- region: region1
- type: image
- query:
- get_one: true
- - task_id: image2
- region: region2
- type: image
- query:
- get_one: true
- - task_id: ext-net
- region: central
- type: network
- params:
- name: ext-net
- is_router_external: true
- provider_network_type: vlan
- provider_physical_network: extern
- availability_zone_hints: [RegionTwo]
- - task_id: ext-subnet
- region: central
- type: subnet
- depend: [ext-net]
- params:
- name: ext-subnet
- ip_version: 4
- cidr: 163.3.124.0/24
- is_dhcp_enabled: false
- network_id: ext-net@id
- - task_id: router
- region: central
- type: router
- params:
- name: router
- - task_id: add-gateway
- region: central
- type: router
- action:
- target: router@id
- method: update
- depend:
- - ext-net
- - ext-subnet
- - router
- params:
- external_gateway_info:
- network_id: ext-net@id
- enable_snat: true
- - task_id: net1
- region: central
- type: network
- params:
- name: net1
- - task_id: subnet1
- region: central
- type: subnet
- depend: [net1]
- params:
- name: subnet1
- ip_version: 4
- cidr: 10.0.1.0/24
- allocation_pools:
- - start: 10.0.1.10
- end: 10.0.1.90
- network_id: net1@id
- - task_id: net3
- region: central
- type: network
- params:
- name: net3
- - task_id: subnet3
- region: central
- type: subnet
- depend: [net3]
- params:
- name: subnet3
- ip_version: 4
- cidr: 10.0.1.0/24
- allocation_pools:
- - start: 10.0.1.110
- end: 10.0.1.190
- network_id: net3@id
- - task_id: port1
- region: central
- type: port
- depend:
- - net1
- - subnet1
- params:
- network_id: net1@id
- - task_id: add-subnet1
- region: central
- type: router
- depend:
- - subnet1
- - router
- action:
- target: router@id
- method: add_interface_to_router
- params:
- subnet_id: subnet1@id
- - task_id: fip1
- region: central
- type: fip
- depend:
- - ext-net
- - port1
- - add-gateway
- - add-subnet1
- params:
- port_id: port1@id
- floating_ip_address: 163.3.124.15
- floating_network_id: ext-net@id
- - task_id: vm1
- region: region1
- type: server
- depend:
- - fip1
- - port1
- - image1
- params:
- flavor_id: 1
- image_id: image1@id
- name: vm1
- networks:
- - port: port1@id
- - task_id: net2
- region: central
- type: network
- params:
- name: net2
- - task_id: subnet2
- region: central
- type: subnet
- depend: [net2]
- params:
- name: subnet2
- ip_version: 4
- cidr: 10.0.2.0/24
- network_id: net2@id
- - task_id: port2
- region: central
- type: port
- depend:
- - net2
- - subnet2
- params:
- network_id: net2@id
- - task_id: fip2
- region: central
- type: fip
- depend:
- - vm2
- - ext-net
- - port2
- - add-gateway
- - add-subnet2
- params:
- port_id: port2@id
- floating_ip_address: 163.3.124.20
- floating_network_id: ext-net@id
- - task_id: vm2
- region: region2
- type: server
- depend:
- - port2
- - image2
- - vm1
- params:
- flavor_id: 1
- image_id: image2@id
- name: vm2
- networks:
- - port: port2@id
- - task_id: add-subnet2
- region: central
- type: router
- depend:
- - vm2
- - subnet2
- - router
- action:
- target: router@id
- method: add_interface_to_router
- params:
- subnet_id: subnet2@id
- - task_id: net4
- region: central
- type: network
- params:
- name: net4
- - task_id: subnet4
- region: central
- type: subnet
- depend: [net4]
- params:
- name: subnet4
- ip_version: 4
- cidr: 10.0.4.0/24
- network_id: net4@id
- gateway_ip: null
- - task_id: vm3
- region: region1
- type: server
- depend:
- - net4
- - subnet4
- - image1
- - vm2
- params:
- flavor_id: 1
- image_id: image1@id
- name: vm3
- networks:
- - uuid: net4@id
-- task_set_id: wait-for-job
- tasks:
- - task_id: check-job
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: check
- depend: [preparation]
- tasks:
- - task_id: router1
- region: region1
- type: router
- query:
- get_one: true
- params:
- name: preparation@router@id
- - task_id: router2
- region: region2
- type: router
- query:
- get_one: true
- params:
- name: preparation@router@id
- - task_id: check-servers1
- region: region1
- type: server
- validate:
- predicate: any
- condition:
- - status: ACTIVE
- name: vm1
- - status: ACTIVE
- name: vm3
- - task_id: check-ports1
- region: region1
- type: port
- depend: [router1]
- validate:
- predicate: any
- condition:
- - fixed_ips:
- - ip_address: 10.0.1*
- - fixed_ips:
- - ip_address: 100.0.0*
- params:
- device_id: router1@id
- - task_id: check-routers1
- region: region1
- type: router
- validate:
- predicate: any
- condition:
- - routes:
- - destination: 0.0.0.0/0
- nexthop: 100.0.0.1
- - destination: 10.0.2*
- nexthop: 100.0.0*
- - task_id: check-subnets1
- region: region1
- type: subnet
- validate:
- predicate: any
- condition:
- - cidr: 10.0.1.0/24
- - cidr: 10.0.4.0/24
- - cidr: 100.0.0.0/24
- - task_id: check-servers2
- region: region2
- type: server
- validate:
- predicate: any
- condition:
- - status: ACTIVE
- name: vm2
- - task_id: check-ports2
- region: region2
- type: port
- depend: [router2]
- validate:
- predicate: any
- condition:
- - fixed_ips:
- - ip_address: 10.0.2*
- - fixed_ips:
- - ip_address: 100.0.0*
- params:
- device_id: router2@id
- - task_id: check-routers2
- region: region2
- type: router
- validate:
- predicate: any
- condition:
- - routes:
- - destination: 0.0.0.0/0
- nexthop: 100.0.0.1
- - destination: 10.0.1*
- nexthop: 100.0.0*
- - task_id: check-subnets2
- region: region2
- type: subnet
- validate:
- predicate: any
- condition:
- - cidr: 10.0.1.0/24
- - cidr: 10.0.2.0/24
- - cidr: 100.0.0.0/24
- - cidr: 163.3.124.0/24
- - task_id: check-fips2
- region: region2
- type: fip
- validate:
- predicate: any
- condition:
- - floating_ip_address: 163.3.124.15
- - floating_ip_address: 163.3.124.20
-- task_set_id: clean
- depend: [preparation]
- tasks:
- - task_id: delete-fip1
- region: central
- type: fip
- action:
- target: preparation@fip1@id
- method: delete
- - task_id: delete-fip2
- region: central
- type: fip
- action:
- target: preparation@fip2@id
- method: delete
- - task_id: delete-vm1
- region: region1
- type: server
- depend: [delete-fip1]
- action:
- target: preparation@vm1@id
- method: delete
- - task_id: delete-vm3
- region: region1
- type: server
- action:
- target: preparation@vm3@id
- method: delete
- - task_id: delete-vm2
- region: region2
- type: server
- depend: [delete-fip2]
- action:
- target: preparation@vm2@id
- method: delete
- - task_id: remove-gateway
- region: central
- type: router
- action:
- target: preparation@router@id
- method: update
- depend:
- - delete-fip1
- - delete-fip2
- params:
- external_gateway_info: null
- - task_id: remove-subnet1
- region: central
- type: router
- action:
- target: preparation@router@id
- method: remove_interface_from_router
- depend: [remove-gateway]
- params:
- subnet_id: preparation@subnet1@id
- - task_id: remove-subnet2
- region: central
- type: router
- action:
- target: preparation@router@id
- method: remove_interface_from_router
- depend: [remove-gateway]
- params:
- subnet_id: preparation@subnet2@id
- - task_id: delete-router
- region: central
- type: router
- action:
- target: preparation@router@id
- method: delete
- retries: 3
- depend:
- - remove-gateway
- - remove-subnet1
- - remove-subnet2
- - task_id: delete-port1
- region: central
- type: port
- action:
- target: preparation@port1@id
- method: delete
- depend: [delete-router]
- - task_id: delete-port2
- region: central
- type: port
- action:
- target: preparation@port2@id
- method: delete
- depend: [delete-router]
- - task_id: delete-subnet1
- region: central
- type: subnet
- action:
- target: preparation@subnet1@id
- method: delete
- retries: 3
- depend: [delete-port1]
- - task_id: delete-subnet2
- region: central
- type: subnet
- action:
- target: preparation@subnet2@id
- method: delete
- retries: 3
- depend: [delete-port2]
- - task_id: delete-subnet3
- region: central
- type: subnet
- action:
- target: preparation@subnet3@id
- method: delete
- retries: 3
- - task_id: delete-subnet4
- region: central
- type: subnet
- action:
- target: preparation@subnet4@id
- method: delete
- retries: 3
- depend: [delete-vm3]
- - task_id: delete-net1
- region: central
- type: network
- action:
- target: preparation@net1@id
- method: delete
- depend: [delete-subnet1]
- - task_id: delete-net2
- region: central
- type: network
- action:
- target: preparation@net2@id
- method: delete
- depend: [delete-subnet2]
- - task_id: delete-net3
- region: central
- type: network
- action:
- target: preparation@net3@id
- method: delete
- depend: [delete-subnet3]
- - task_id: delete-net4
- region: central
- type: network
- action:
- target: preparation@net4@id
- method: delete
- depend: [delete-subnet4]
- - task_id: delete-ext-subnet
- region: central
- type: subnet
- action:
- target: preparation@ext-subnet@id
- method: delete
- depend: [delete-router]
- - task_id: delete-ext-net
- region: central
- type: network
- action:
- target: preparation@ext-net@id
- method: delete
- depend: [delete-ext-subnet]
-- task_set_id: clean-check
- tasks:
- - task_id: check-no-routers1
- region: region1
- type: router
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-routers2
- region: region2
- type: router
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-networks1
- region: region1
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-networks2
- region: region2
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-jobs
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
diff --git a/tricircle/tempestplugin/smoke_test.sh b/tricircle/tempestplugin/smoke_test.sh
deleted file mode 100644
index 701628cf..00000000
--- a/tricircle/tempestplugin/smoke_test.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash -xe
-
-DEST=$BASE/new
-DEVSTACK_DIR=$DEST/devstack
-source $DEVSTACK_DIR/openrc admin admin
-unset OS_REGION_NAME
-
-echo "Start to run single gateway topology test"
-python run_yaml_test.py single_gw_topology_test.yaml "$OS_AUTH_URL" "$OS_TENANT_NAME" "$OS_USERNAME" "$OS_PASSWORD" "$OS_PROJECT_DOMAIN_ID" "$OS_USER_DOMAIN_ID"
-if [ $? != 0 ]; then
- die $LINENO "Smoke test fails, error in single gateway topology test"
-fi
-echo "Start to run multi gateway topology test"
-python run_yaml_test.py multi_gw_topology_test.yaml "$OS_AUTH_URL" "$OS_TENANT_NAME" "$OS_USERNAME" "$OS_PASSWORD" "$OS_PROJECT_DOMAIN_ID" "$OS_USER_DOMAIN_ID"
-if [ $? != 0 ]; then
- die $LINENO "Smoke test fails, error in multi gateway topology test"
-fi
-echo "Start to run trunk test"
-python run_yaml_test.py trunk_test.yaml "$OS_AUTH_URL" "$OS_TENANT_NAME" "$OS_USERNAME" "$OS_PASSWORD" "$OS_PROJECT_DOMAIN_ID" "$OS_USER_DOMAIN_ID"
-if [ $? != 0 ]; then
- die $LINENO "Smoke test fails, error in trunk test"
-fi
-echo "Start to run service function chain test"
-python run_yaml_test.py sfc_test.yaml "$OS_AUTH_URL" "$OS_TENANT_NAME" "$OS_USERNAME" "$OS_PASSWORD" "$OS_PROJECT_DOMAIN_ID" "$OS_USER_DOMAIN_ID"
-if [ $? != 0 ]; then
- die $LINENO "Smoke test fails, error in service function chain test"
-fi
-echo "Start to run qos policy function test"
-python run_yaml_test.py qos_policy_rule_test.yaml "$OS_AUTH_URL" "$OS_TENANT_NAME" "$OS_USERNAME" "$OS_PASSWORD" "$OS_PROJECT_DOMAIN_ID" "$OS_USER_DOMAIN_ID"
-if [ $? != 0 ]; then
- die $LINENO "Smoke test fails, error in qos service function test"
-fi
-echo "Start to test port delete when vm create in other region"
-python run_yaml_test.py port_delete_with_vm_create.yaml "$OS_AUTH_URL" "$OS_TENANT_NAME" "$OS_USERNAME" "$OS_PASSWORD" "$OS_PROJECT_DOMAIN_ID" "$OS_USER_DOMAIN_ID"
-if [ $? != 0 ]; then
- die $LINENO "Smoke test fails, error in port delete test when create vm in other region"
-fi
-echo "Start to test port delete when vm create in other region with shadow port"
-python run_yaml_test.py port_delete_with_vm_create_shadow.yaml "$OS_AUTH_URL" "$OS_TENANT_NAME" "$OS_USERNAME" "$OS_PASSWORD" "$OS_PROJECT_DOMAIN_ID" "$OS_USER_DOMAIN_ID"
-if [ $? != 0 ]; then
- die $LINENO "Smoke test fails, error in port delete test when create vm in other region with shadow port"
-fi
diff --git a/tricircle/tempestplugin/task_runner.py b/tricircle/tempestplugin/task_runner.py
deleted file mode 100644
index 92da0af8..00000000
--- a/tricircle/tempestplugin/task_runner.py
+++ /dev/null
@@ -1,467 +0,0 @@
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import itertools
-import logging
-import time
-import traceback
-import yaml
-
-from openstack import connection
-
-from tricircle.tests.network_sdk import network_service
-from tricircle.tests.tricircle_sdk import multiregion_network_service
-
-LOG = logging.getLogger(__name__)
-logging.basicConfig(level=logging.INFO)
-
-SLEEP_INTERVAL = 20
-
-
-class DummyRunner(object):
- class DummyResource(object):
- def __init__(self, _id):
- self.id = _id
-
- def __getattr__(self, item):
- return item
-
- def __init__(self):
- self.id_pool = {}
-
- def _get_id(self, _type):
- if _type not in self.id_pool:
- self.id_pool[_type] = 0
- self.id_pool[_type] += 1
- return '%s%d_id' % (_type, self.id_pool[_type])
-
- def create(self, region, _type, params):
- _id = self._get_id(_type)
- msg = 'create %s with id %s in %s, params: %s' % (
- _type, _id, region, params)
- LOG.info(msg)
- return self.DummyResource(_id)
-
- def action(self, region, _type, target, method, params):
- msg = '%s %s with id %s in %s, params: %s' % (
- method, _type, target, region, params)
- LOG.info(msg)
-
- def query(self, region, _type, get_one, params):
- if get_one:
- return self.DummyResource(self._get_id(_type))
- return []
-
- def validate(self, region, _type, predicate, conditions, params):
- msg = 'validate %s, conditions: %s' % (_type, conditions)
- LOG.info(msg)
-
-
-class SDKRunner(object):
- region_map = {'central': 'CentralRegion',
- 'region1': 'RegionOne',
- 'region2': 'RegionTwo'}
- serv_reslist_map = {
- 'network_sdk': ['network', 'subnet', 'port', 'router', 'fip', 'trunk',
- 'flow_classifier', 'port_pair', 'port_pair_group',
- 'port_chain', 'qos_policy', 'qos_bandwidth_limit_rule',
- 'qos_dscp_marking_rule', 'qos_minimum_bandwidth_rule'],
- 'compute': ['server'],
- 'image': ['image'],
- 'tricircle_sdk': ['job']}
- res_alias_map = {
- 'fip': 'ip'}
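-    # query() builds list calls by appending 's' to the type name, hence the
-    # deliberately odd singular 'qos_policie' below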
- type_plural_map = {
- 'qos_policy': 'qos_policie'}
-
- def __init__(self, auth_url, project, username, password,
- project_domain_id, user_domain_id):
- self.res_serv_map = {}
- for serv in self.serv_reslist_map:
- for res in self.serv_reslist_map[serv]:
- self.res_serv_map[res] = serv
-
- self.connection_map = {}
- auth = {
- 'auth_url': auth_url,
- 'project_name': project,
- 'user_domain_name': 'default',
- 'project_domain_name': 'default',
- 'username': username,
- 'password': password,
- 'project_domain_id': project_domain_id,
- 'user_domain_id': user_domain_id}
-
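-        # build one openstacksdk connection per region; every region registers the
-        # extended network service, and the central region also registers the
-        # Tricircle multi-region service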
- for region in ('CentralRegion', 'RegionOne', 'RegionTwo'):
- extra_services = []
- if region == 'CentralRegion':
- serv = multiregion_network_service.MultiregionNetworkService(
- version='v1')
- extra_services.append(serv)
- net_serv = network_service.NetworkService(version='v2')
- extra_services.append(net_serv)
- conn = connection.Connection(region_name=region,
- auth=auth,
- extra_services=extra_services)
- conn.config.config['network_sdk_service_type'] = 'network'
- conn.config.config['tricircle_sdk_service_type'] = 'tricircle'
- conn.config.config['network_sdk_api_version'] = 'v2'
- conn.config.config['tricircle_sdk_api_version'] = 'v1'
- for service in extra_services:
- conn.add_service(service)
- self.connection_map[region] = conn
-
- def create(self, region, _type, params):
- conn = self.connection_map[self.region_map[region]]
- serv = self.res_serv_map[_type]
- _type = self.res_alias_map.get(_type, _type)
- desc = getattr(conn, serv)
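-        # the service attribute may be a plain proxy or a descriptor depending on
-        # the SDK version; resolve it to a proxy object in either case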
- try:
- proxy = desc.__get__(conn, '')
- except Exception:
- proxy = desc
- return getattr(proxy, 'create_%s' % _type)(**params)
-
- def action(self, region, _type, target, method, params):
- conn = self.connection_map[self.region_map[region]]
- serv = self.res_serv_map[_type]
- _type = self.res_alias_map.get(_type, _type)
- desc = getattr(conn, serv)
- try:
- proxy = desc.__get__(conn, '')
- except Exception:
- proxy = desc
- if method in ('update', 'delete'):
- method = '%s_%s' % (method, _type)
- getattr(proxy, method)(target, **params)
-
- def query(self, region, _type, get_one, params):
- conn = self.connection_map[self.region_map[region]]
- serv = self.res_serv_map[_type]
- _type = self.res_alias_map.get(_type, _type)
- desc = getattr(conn, serv)
- try:
- proxy = desc.__get__(conn, '')
- except Exception:
- proxy = desc
- _type = self.type_plural_map.get(_type, _type)
- _list = list(getattr(proxy, '%ss' % _type)(**params))
- if get_one:
- return _list[0]
- return _list
-
- def validate(self, region, _type, predicate, conditions, params):
- def validate_value(actual, expected):
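-            # lists are compared as unordered collections (via permutations), dicts
-            # recursively, and strings may use '*' as a leading/trailing wildcard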
- if isinstance(expected, list):
- actual_len = len(actual)
- expected_len = len(expected)
- if actual_len != expected_len:
- return False
- for actual_list in itertools.permutations(actual, actual_len):
- for expected_list in itertools.permutations(expected,
- expected_len):
- match = True
- for i, actual_ele in enumerate(actual_list):
- if not validate_value(actual_ele,
- expected_list[i]):
- match = False
- break
- if match:
- return True
- return False
- elif isinstance(expected, dict):
- for k in expected:
- if not validate_value(actual[k], expected[k]):
- return False
- return True
- elif isinstance(expected, str):
- tokens = expected.split('*')
- if tokens[0] == '' and tokens[-1] == '':
- return actual.find(tokens[1]) != -1
- elif tokens[0] == '':
- return actual.endswith(tokens[-1])
- elif tokens[-1] == '':
- return actual.startswith(tokens[0])
- return actual == expected
- else:
- return actual == expected
-
- def validate_any_condition(results, condition):
- for result in results:
- if all(validate_value(
- getattr(result, key),
- value) for (key, value) in condition.items()):
- return True
- return False
-
- def validate_all_condition(results, condition):
- for result in results:
- if not all(validate_value(
- getattr(result, key),
- value) for (key, value) in condition.items()):
- return False
- return True
-
- results = self.query(region, _type, False, params)
- if predicate == 'any':
- for condition in conditions:
- if not validate_any_condition(results, condition):
- raise Exception(
-                        'Validation fails, actual results: %s, '
- 'expected results: %s' % (results, condition))
- elif predicate == 'all':
- for condition in conditions:
- if not validate_all_condition(results, condition):
- raise Exception(
-                        'Validation fails, actual results: %s, '
- 'expected results: %s' % (results, condition))
-
-
-class RunnerEngine(object):
- def __init__(self, yaml_path, runner):
- self.task_set_map = {}
- self.task_set_id_list = []
- self.runner = runner
-
- with open(yaml_path) as f:
- data = yaml.safe_load(f)
- self._parse_data(data)
-
- def _validate_task(self, task):
- def collect_require_from_dict(requires, _dict):
- for v in _dict.values():
- if isinstance(v, list):
- collect_require_from_list(requires, v)
- elif isinstance(v, dict):
- collect_require_from_dict(requires, v)
- elif not isinstance(v, str):
- continue
- elif '@' in v:
- requires.append(v)
-
- def collect_require_from_list(requires, _list):
- for v in _list:
- if isinstance(v, list):
- collect_require_from_list(requires, v)
- elif isinstance(v, dict):
- collect_require_from_dict(requires, v)
- elif not isinstance(v, str):
- continue
- elif '@' in v:
- requires.append(v)
-
- for field in ('task_id', 'region', 'type'):
- if field not in task:
- raise Exception('Required field %s not set' % field)
- for sub_section, fields in [('action', ['target', 'method']),
- ('query', ['get_one']),
- ('validate', ['predicate', 'condition'])]:
- if sub_section in task:
- for field in fields:
- if field not in task[sub_section]:
- raise Exception('Required field %s for %s '
- 'not set' % (field, sub_section))
-
- requires = []
- if 'params' in task:
- collect_require_from_dict(requires, task['params'])
- if 'validate' in task:
- collect_require_from_dict(requires, task['validate'])
- if 'action' in task:
- requires.append(task['action']['target'])
- depend = task.get('depend', [])
- for value in requires:
- tokens = value.split('@')
- if len(tokens) == 2 and tokens[0] not in depend:
- raise Exception(
- 'Depend list not complete for %s: %s not in %s' % (
- task['task_id'], tokens[0], depend))
- elif len(tokens) == 3:
- task_set_id, task_id = tokens[:2]
- if task_set_id not in self.task_set_map:
- raise Exception(
- 'Depend task set %s for %s not found' % (
- task_set_id, task['task_id']))
- task_map, _, _ = self.task_set_map[task_set_id]
- if task_id not in task_map:
- raise Exception(
- 'Depend task %s for %s not found' % (
- task_id, task['task_id']))
-
- @staticmethod
- def _parse_dependency(depend_map):
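-        # topological sort: repeatedly emit ids whose dependency set is empty; a pass
-        # that makes no progress means the dependency graph contains a cycle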
- depend_map = copy.deepcopy(depend_map)
- ordered_list = []
- while len(depend_map):
- pop_list = []
- for _id in depend_map:
- if not depend_map[_id]:
- ordered_list.append(_id)
- pop_list.append(_id)
- for _id in pop_list:
- depend_map.pop(_id)
- for depend in depend_map.values():
- for _id in pop_list:
- if _id in depend:
- depend.remove(_id)
- if not pop_list:
- raise Exception('Unresolved dependency, '
-                                'remaining: %s' % depend_map.keys())
- return ordered_list
-
- def _parse_data(self, data):
- task_set_depend_map = {}
- task_set_tasks_map = {}
- for task_set in data:
- task_set_id = task_set['task_set_id']
- self.task_set_id_list.append(task_set_id)
- task_set_depend_map[task_set_id] = set(
- task_set.get('depend', []))
- task_set_tasks_map[task_set_id] = task_set['tasks']
- ordered_task_set_list = self._parse_dependency(task_set_depend_map)
- for task_set_id in ordered_task_set_list:
- task_map = {}
- task_depend_map = {}
- for task in task_set_tasks_map[task_set_id]:
- task_map[task['task_id']] = task
- task_depend_map[task['task_id']] = set(task.get('depend', []))
- self._validate_task(task)
- ordered_task_list = self._parse_dependency(task_depend_map)
- self.task_set_map[task_set_id] = (task_map, ordered_task_list,
- task_set_depend_map[task_set_id])
-
- @staticmethod
- def _fill_depend_field_in_list(_list, task_result_map,
- depend_task_result_map):
- if not _list:
- return
- for i, e in enumerate(_list):
- if isinstance(e, list):
- RunnerEngine._fill_depend_field_in_list(e, task_result_map,
- depend_task_result_map)
- elif isinstance(e, dict):
- RunnerEngine._fill_depend_filed_in_dict(e, task_result_map,
- depend_task_result_map)
- if not isinstance(e, str):
- continue
- tokens = e.split('@')
- if len(tokens) == 2:
- task_id, task_filed = tokens
- _list[i] = getattr(task_result_map[task_id], task_filed)
- elif len(tokens) == 3:
- task_set_id, task_id, task_filed = tokens
- _list[i] = getattr(
- depend_task_result_map[task_set_id][task_id], task_filed)
-
- @staticmethod
- def _fill_depend_filed_in_dict(_dict, task_result_map,
- depend_task_result_map):
- if not _dict:
- return
- for k, v in _dict.items():
- if isinstance(v, list):
- RunnerEngine._fill_depend_field_in_list(v, task_result_map,
- depend_task_result_map)
- elif isinstance(v, dict):
- RunnerEngine._fill_depend_filed_in_dict(v, task_result_map,
- depend_task_result_map)
- if not isinstance(v, str):
- continue
- tokens = v.split('@')
- if len(tokens) == 2:
- task_id, task_filed = tokens
- _dict[k] = getattr(task_result_map[task_id], task_filed)
- elif len(tokens) == 3:
- task_set_id, task_id, task_filed = tokens
- _dict[k] = getattr(
- depend_task_result_map[task_set_id][task_id], task_filed)
-
- @staticmethod
- def _fill_depend_field(params, task_result_map, depend_task_result_map):
- RunnerEngine._fill_depend_filed_in_dict(params, task_result_map,
- depend_task_result_map)
-
- @staticmethod
- def _retry(task_id, retry_num, func, *args):
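-        # run func up to retry_num + 1 times, sleeping between attempts and
-        # re-raising only after the last failure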
- run_time = retry_num + 1
- for i in range(run_time):
- try:
- func(*args)
- break
- except Exception:
- if i == run_time - 1:
- raise
- else:
- time.sleep(SLEEP_INTERVAL)
- LOG.info('Redo failed task %s', task_id)
-
- def run_tasks(self, task_set_id, depend_task_set_result={}):
- if task_set_id not in self.task_set_map:
- raise Exception('Task set %s not found' % task_set_id)
- (task_map, ordered_task_list,
- task_set_depend) = self.task_set_map[task_set_id]
- for set_id in task_set_depend:
- if set_id not in depend_task_set_result:
- raise Exception('Task set %s fails, reason: result for depend '
- 'task set %s not given' % (task_set_id,
- set_id))
- task_result_map = {}
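-        # dispatch each task by kind: 'action' invokes a method on an existing
-        # resource, 'query' fetches results, 'validate' asserts on query results,
-        # and anything else creates a new resource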
- for task_id in ordered_task_list:
- task = task_map[task_id]
- params = task.get('params', {})
- self._fill_depend_field(params, task_result_map,
- depend_task_set_result)
- try:
- if 'action' in task:
- self._fill_depend_field(task['action'], task_result_map,
- depend_task_set_result)
- self._retry(task_id, task['action'].get('retries', 0),
- self.runner.action, task['region'],
- task['type'], task['action']['target'],
- task['action']['method'], params)
- elif 'query' in task:
- result = self.runner.query(
- task['region'], task['type'],
- task['query']['get_one'], params)
- task_result_map[task_id] = result
- elif 'validate' in task:
- self._fill_depend_field(task['validate'], task_result_map,
- depend_task_set_result)
- self._retry(task_id, task['validate'].get('retries', 0),
- self.runner.validate, task['region'],
- task['type'], task['validate']['predicate'],
- task['validate']['condition'], params)
- else:
- result = self.runner.create(task['region'],
- task['type'], params)
- task_result_map[task_id] = result
- LOG.info('Task %s done\n' % task_id)
- except Exception:
- error_msg = 'Task %s fails, reason: %s' % (
- task_id, traceback.format_exc())
- return task_result_map, error_msg
- return task_result_map, None
-
- def run_task_sets(self):
- task_set_result_map = {}
- for task_set_id in self.task_set_id_list:
- _, _, task_set_depend = self.task_set_map[task_set_id]
- depend_task_set_result = dict(
- [(_id, task_set_result_map[_id]) for _id in task_set_depend])
- task_result_map, error_msg = self.run_tasks(
- task_set_id, depend_task_set_result)
- if error_msg:
- return error_msg
- task_set_result_map[task_set_id] = task_result_map
diff --git a/tricircle/tempestplugin/tempest_network.sh b/tricircle/tempestplugin/tempest_network.sh
deleted file mode 100755
index 7c62e9d9..00000000
--- a/tricircle/tempestplugin/tempest_network.sh
+++ /dev/null
@@ -1,268 +0,0 @@
-# tempest.api.network.admin.test_dhcp_agent_scheduler.DHCPAgentSchedulersTestJSON.test_add_remove_network_from_dhcp_agent[id-a0856713-6549-470c-a656-e97c8df9a14d]
-# tempest.api.network.admin.test_dhcp_agent_scheduler.DHCPAgentSchedulersTestJSON.test_list_dhcp_agent_hosting_network[id-5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d]
-# tempest.api.network.admin.test_dhcp_agent_scheduler.DHCPAgentSchedulersTestJSON.test_list_networks_hosted_by_one_dhcp[id-30c48f98-e45d-4ffb-841c-b8aad57c7587]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_create_external_network[id-462be770-b310-4df9-9c42-773217e4c8b1]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_delete_external_networks_with_floating_ip[id-82068503-2cf2-4ed4-b3be-ecb89432e4bb]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_list_external_networks[id-39be4c9b-a57e-4ff9-b7c7-b218e209dfcc]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_show_external_networks_attribute[id-2ac50ab2-7ebd-4e27-b3ce-a9e399faaea2]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_update_external_network[id-4db5417a-e11c-474d-a361-af00ebef57c5]
-# tempest.api.network.admin.test_external_networks_negative.ExternalNetworksAdminNegativeTestJSON.test_create_port_with_precreated_floatingip_as_fixed_ip[id-d402ae6c-0be0-4d8e-833b-a738895d98d0,negative]
-# tempest.api.network.admin.test_floating_ips_admin_actions.FloatingIPAdminTestJSON.test_create_list_show_floating_ip_with_tenant_id_by_admin[id-32727cc3-abe2-4485-a16e-48f2d54c14f2]
-# tempest.api.network.admin.test_floating_ips_admin_actions.FloatingIPAdminTestJSON.test_list_floating_ips_from_admin_and_nonadmin[id-64f2100b-5471-4ded-b46c-ddeeeb4f231b]
-# tempest.api.network.admin.test_negative_quotas.QuotasNegativeTest.test_network_quota_exceeding[id-644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf]
-# tempest.api.network.admin.test_quotas.QuotasTest.test_quotas[id-2390f766-836d-40ef-9aeb-e810d78207fb]
-# tempest.api.network.admin.test_routers_dvr.RoutersTestDVR.test_centralized_router_creation[id-8a0a72b4-7290-4677-afeb-b4ffe37bc352]
-# tempest.api.network.admin.test_routers_dvr.RoutersTestDVR.test_centralized_router_update_to_dvr[id-acd43596-c1fb-439d-ada8-31ad48ae3c2e]
-# tempest.api.network.admin.test_routers_dvr.RoutersTestDVR.test_distributed_router_creation[id-08a2a0a8-f1e4-4b34-8e30-e522e836c44e]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairIpV6TestJSON.test_create_list_port_with_address_pair[id-86c3529b-1231-40de-803c-00e40882f043]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairIpV6TestJSON.test_update_port_with_address_pair[id-9599b337-272c-47fd-b3cf-509414414ac4]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairIpV6TestJSON.test_update_port_with_cidr_address_pair[id-4d6d178f-34f6-4bff-a01c-0a2f8fe909e4]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairIpV6TestJSON.test_update_port_with_multiple_ip_mac_address_pair[id-b3f20091-6cd5-472b-8487-3516137df933]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairTestJSON.test_create_list_port_with_address_pair[id-86c3529b-1231-40de-803c-00e40882f043]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairTestJSON.test_update_port_with_address_pair[id-9599b337-272c-47fd-b3cf-509414414ac4]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairTestJSON.test_update_port_with_cidr_address_pair[id-4d6d178f-34f6-4bff-a01c-0a2f8fe909e4]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairTestJSON.test_update_port_with_multiple_ip_mac_address_pair[id-b3f20091-6cd5-472b-8487-3516137df933]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful[id-4ab211a0-276f-4552-9070-51e27f58fecf]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_fixedips[id-51a5e97f-f02e-4e4e-9a17-a69811d300e3]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_fixedips_duplicate[id-57b8302b-cba9-4fbb-8835-9168df029051]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_fixedips_outrange[id-98244d88-d990-4570-91d4-6b25d70d08af]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_router[id-e98f65db-68f4-4330-9fea-abd8c5192d4d]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_64_subnets[id-4256c61d-c538-41ea-9147-3c450c36669e]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_invalid_options[id-81f18ef6-95b5-4584-9966-10d480b7496a]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_stateless_eui64[id-e5517e62-6f16-430d-a672-f80875493d4c]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_stateless_no_ra[id-ae2f4a5d-03ff-4c42-a3b0-ce2fcb7ea832]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_stateless_no_ra_no_dhcp[id-21635b6f-165a-4d42-bf49-7d195e47342f]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_two_subnets[id-4544adf7-bb5f-4bdc-b769-b3e77026cef2]
-# tempest.api.network.test_extensions.ExtensionsTestJSON.test_list_show_extensions[id-ef28c7e6-e646-4979-9d67-deb207bc5564,smoke]
-# tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsIpV6TestJSON.test_create_list_port_with_extra_dhcp_options[id-d2c17063-3767-4a24-be4f-a23dbfa133c9]
-# tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsIpV6TestJSON.test_update_show_port_with_extra_dhcp_options[id-9a6aebf4-86ee-4f47-b07a-7f7232c55607]
-# tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsTestJSON.test_create_list_port_with_extra_dhcp_options[id-d2c17063-3767-4a24-be4f-a23dbfa133c9]
-# tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsTestJSON.test_update_show_port_with_extra_dhcp_options[id-9a6aebf4-86ee-4f47-b07a-7f7232c55607]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_floating_ip_specifying_a_fixed_ip_address[id-36de4bd0-f09c-43e3-a8e1-1decc1ffd3a5,smoke]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_list_show_update_delete_floating_ip[id-62595970-ab1c-4b7f-8fcc-fddfe55e8718,smoke]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_update_floatingip_with_port_multiple_ip_address[id-45c4c683-ea97-41ef-9c51-5e9802f2f3d7]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_floating_ip_delete_port[id-e1f6bffd-442f-4668-b30e-df13f2705e77]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_floating_ip_update_different_router[id-1bb2f731-fe5a-4b8c-8409-799ade1bed4d]
-# tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_associate_floatingip_port_ext_net_unreachable[id-6b3b8797-6d43-4191-985c-c48b773eb429,negative]
-# tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_create_floatingip_in_private_network[id-50b9aeb4-9f0b-48ee-aa31-fa955a48ff54,negative]
-# tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_create_floatingip_with_port_ext_net_unreachable[id-22996ea8-4a81-4b27-b6e1-fa5df92fa5e8,negative]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_create_delete_metering_label_rule_with_filters[id-f4d547cd-3aee-408f-bf36-454f8825e045]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_create_delete_metering_label_with_filters[id-ec8e15ff-95d0-433b-b8a6-b466bddb1e50]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_list_metering_label_rules[id-cc832399-6681-493b-9d79-0202831a1281]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_list_metering_labels[id-e2fb2f8c-45bf-429a-9f17-171c70444612]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_show_metering_label[id-30abb445-0eea-472e-bd02-8649f54a5968]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_show_metering_label_rule[id-b7354489-96ea-41f3-9452-bace120fb4a7]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_create_delete_metering_label_rule_with_filters[id-f4d547cd-3aee-408f-bf36-454f8825e045]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_create_delete_metering_label_with_filters[id-ec8e15ff-95d0-433b-b8a6-b466bddb1e50]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_list_metering_label_rules[id-cc832399-6681-493b-9d79-0202831a1281]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_list_metering_labels[id-e2fb2f8c-45bf-429a-9f17-171c70444612]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_show_metering_label[id-30abb445-0eea-472e-bd02-8649f54a5968]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_show_metering_label_rule[id-b7354489-96ea-41f3-9452-bace120fb4a7]
-# tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network[id-d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port[id-48037ff2-e889-4c3b-b86a-8e3f34d2d060,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet[id-8936533b-c0aa-4f29-8e53-6cc873aec489,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsTest.test_bulk_create_delete_network[id-d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsTest.test_bulk_create_delete_port[id-48037ff2-e889-4c3b-b86a-8e3f34d2d060,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsTest.test_bulk_create_delete_subnet[id-8936533b-c0aa-4f29-8e53-6cc873aec489,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_default_gw[id-ebb4fd95-524f-46af-83c1-0305b239338f]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_gw[id-e41a4888-65a6-418c-a095-f7c2ef4ad59a]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_list_subnet_with_no_gw64_one_network[id-a9653883-b2a4-469b-8c3c-4518430a7e55]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_slaac_subnet_with_ports[id-88554555-ebf8-41ef-9300-4926d45e06e9]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_stateless_subnet_with_ports[id-2de6ab5a-fcf0-4144-9813-f91a940291f1]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_default_gw[id-ebb4fd95-524f-46af-83c1-0305b239338f]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_gw[id-e41a4888-65a6-418c-a095-f7c2ef4ad59a]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_v6_attributes_slaac[id-176b030f-a923-4040-a755-9dc94329e60c]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_v6_attributes_stateful[id-da40cd1b-a833-4354-9a85-cd9b8a3b74ca]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_v6_attributes_stateless[id-7d410310-8c86-4902-adf9-865d08e31adb]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_list_subnet_with_no_gw64_one_network[id-a9653883-b2a4-469b-8c3c-4518430a7e55]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-# tempest.api.network.test_networks.NetworksTest.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-# tempest.api.network.test_networks.NetworksTest.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-# tempest.api.network.test_networks.NetworksTest.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-# tempest.api.network.test_networks.NetworksTest.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-# tempest.api.network.test_networks.NetworksTest.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-# tempest.api.network.test_networks.NetworksTest.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_create_port_on_non_existent_network[id-13d3b106-47e6-4b9b-8d53-dae947f092fe,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_delete_non_existent_network[id-03795047-4a94-4120-a0a1-bd376e36fd4e,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_delete_non_existent_port[id-49ec2bbd-ac2e-46fd-8054-798e679ff894,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_delete_non_existent_subnet[id-a176c859-99fb-42ec-a208-8a85b552a239,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_show_non_existent_network[id-9293e937-824d-42d2-8d5b-e985ea67002a,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_show_non_existent_port[id-a954861d-cbfd-44e8-b0a9-7fab111f235d,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_show_non_existent_subnet[id-d746b40c-5e09-4043-99f7-cba1be8b70df,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_update_non_existent_network[id-98bfe4e3-574e-4012-8b17-b2647063de87,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_update_non_existent_port[id-cf8eef21-4351-4f53-adcd-cc5cb1e76b92,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_update_non_existent_subnet[id-1cc47884-ac52-4415-a31c-e7ce5474a868,negative]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_create_port_binding_ext_attr[id-8e8569c1-9ac7-44db-8bc1-f5fb2814f29b]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_list_ports_binding_ext_attr[id-1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_show_port_binding_ext_attr[id-b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_update_port_binding_ext_attr[id-6f6c412c-711f-444d-8502-0ac30fbf5dd5]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsTestJSON.test_create_port_binding_ext_attr[id-8e8569c1-9ac7-44db-8bc1-f5fb2814f29b]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsTestJSON.test_list_ports_binding_ext_attr[id-1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsTestJSON.test_show_port_binding_ext_attr[id-b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsTestJSON.test_update_port_binding_ext_attr[id-6f6c412c-711f-444d-8502-0ac30fbf5dd5]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_bulk_port[id-67f1b811-f8db-43e2-86bd-72c074d4a42c]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups[id-4179dcb9-1382-4ced-84fe-1b91c54f5735,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_show_delete_port_user_defined_mac[id-13e95171-6cbd-489c-9d7c-3f9c58215c18]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_port_with_second_ip[id-63aeadd4-3b49-427f-a3b1-19ca81f06270]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports_fields[id-ff7f117f-f034-4e0e-abff-ccef05c454b4]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_port_list_filter_by_ip[id-e7fe260b-1e79-4dd3-86d9-bec6a7959fc5]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_port_list_filter_by_router_id[id-5ad01ed0-0e6e-4c5d-8194-232801b15c72]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port_fields[id-45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_security_group_and_extra_attributes[id-58091b66-4ff4-4cc1-a549-05d60c7acd1a]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_two_security_groups_and_extra_attributes[id-edf6766d-3d40-4621-bc6e-2521a44c257d]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_bulk_port[id-67f1b811-f8db-43e2-86bd-72c074d4a42c]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_port_with_no_securitygroups[id-4179dcb9-1382-4ced-84fe-1b91c54f5735,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_show_delete_port_user_defined_mac[id-13e95171-6cbd-489c-9d7c-3f9c58215c18]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_update_port_with_second_ip[id-63aeadd4-3b49-427f-a3b1-19ca81f06270]
-# tempest.api.network.test_ports.PortsTestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_list_ports_fields[id-ff7f117f-f034-4e0e-abff-ccef05c454b4]
-# tempest.api.network.test_ports.PortsTestJSON.test_port_list_filter_by_ip[id-e7fe260b-1e79-4dd3-86d9-bec6a7959fc5]
-# tempest.api.network.test_ports.PortsTestJSON.test_port_list_filter_by_router_id[id-5ad01ed0-0e6e-4c5d-8194-232801b15c72]
-# tempest.api.network.test_ports.PortsTestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_show_port_fields[id-45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd]
-# tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes[id-58091b66-4ff4-4cc1-a549-05d60c7acd1a]
-# tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes[id-edf6766d-3d40-4621-bc6e-2521a44c257d]
-# tempest.api.network.test_routers.DvrRoutersTest.test_convert_centralized_router[id-644d7a4a-01a1-4b68-bb8d-0c0042cb1729]
-# tempest.api.network.test_routers.DvrRoutersTest.test_create_distributed_router[id-141297aa-3424-455d-aa8d-f2d95731e00a]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces[id-802c73c9-c937-4cef-824b-2191e24a6aab,smoke]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id[id-2b7d2f37-6748-4d78-92e5-1d590234f0d5,smoke]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id[id-b42e6e39-2e37-49cc-a6f4-8467e940900a,smoke]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_create_router_setting_project_id[id-e54dd3a3-4352-4921-b09d-44369ae17397]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_create_router_with_default_snat_value[id-847257cc-6afd-4154-b8fb-af49f5670ce8]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_create_router_with_snat_explicit[id-ea74068d-09e9-4fd7-8995-9b6a1ace920f]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router[id-f64403e2-8483-4b34-8ccd-b09a87bcc68c,smoke]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_router_interface_port_update_with_fixed_ip[id-96522edf-b4b5-45d9-8443-fa11c26e6eff]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_delete_extra_route[id-c86ac3a8-50bd-4b00-a6b8-62af84a0765c]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_admin_state[id-a8902683-c788-4246-95c7-ad9c6d63a4d9]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_reset_gateway_without_snat[id-f2faf994-97f4-410b-a831-9bc977b64374]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_set_gateway[id-6cc285d8-46bf-4f36-9b1a-783e3008ba79]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_set_gateway_with_snat_explicit[id-b386c111-3b21-466d-880c-5e72b01e1a33]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_set_gateway_without_snat[id-96536bc7-8262-4fb2-9967-5c46940fa279]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_unset_gateway[id-ad81b7ee-4f81-407b-a19c-17e623f763e8]
-# tempest.api.network.test_routers.RoutersTest.test_add_multiple_router_interfaces[id-802c73c9-c937-4cef-824b-2191e24a6aab,smoke]
-# tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_port_id[id-2b7d2f37-6748-4d78-92e5-1d590234f0d5,smoke]
-# tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_subnet_id[id-b42e6e39-2e37-49cc-a6f4-8467e940900a,smoke]
-# tempest.api.network.test_routers.RoutersTest.test_create_router_setting_project_id[id-e54dd3a3-4352-4921-b09d-44369ae17397]
-# tempest.api.network.test_routers.RoutersTest.test_create_router_with_default_snat_value[id-847257cc-6afd-4154-b8fb-af49f5670ce8]
-# tempest.api.network.test_routers.RoutersTest.test_create_router_with_snat_explicit[id-ea74068d-09e9-4fd7-8995-9b6a1ace920f]
-# tempest.api.network.test_routers.RoutersTest.test_create_show_list_update_delete_router[id-f64403e2-8483-4b34-8ccd-b09a87bcc68c,smoke]
-# tempest.api.network.test_routers.RoutersTest.test_router_interface_port_update_with_fixed_ip[id-96522edf-b4b5-45d9-8443-fa11c26e6eff]
-# tempest.api.network.test_routers.RoutersTest.test_update_delete_extra_route[id-c86ac3a8-50bd-4b00-a6b8-62af84a0765c]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_admin_state[id-a8902683-c788-4246-95c7-ad9c6d63a4d9]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_reset_gateway_without_snat[id-f2faf994-97f4-410b-a831-9bc977b64374]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_set_gateway[id-6cc285d8-46bf-4f36-9b1a-783e3008ba79]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_set_gateway_with_snat_explicit[id-b386c111-3b21-466d-880c-5e72b01e1a33]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_set_gateway_without_snat[id-96536bc7-8262-4fb2-9967-5c46940fa279]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_unset_gateway[id-ad81b7ee-4f81-407b-a19c-17e623f763e8]
-# tempest.api.network.test_routers_negative.DvrRoutersNegativeTest.test_router_create_tenant_distributed_returns_forbidden[id-4990b055-8fc7-48ab-bba7-aa28beaad0b9,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_add_router_interfaces_on_overlapping_subnets_returns_400[id-957751a3-3c68-4fa2-93b6-eb52ea10db6e,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_delete_non_existent_router_returns_404[id-c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_router_add_gateway_invalid_network_returns_404[id-37a94fc0-a834-45b9-bd23-9a81d2fd1e22,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_router_add_gateway_net_not_external_returns_400[id-11836a18-0b15-4327-a50b-f0d9dc66bddd,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_router_remove_interface_in_use_returns_409[id-04df80f9-224d-47f5-837a-bf23e33d1c20,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_show_non_existent_router_returns_404[id-c2a70d72-8826-43a7-8208-0209e6360c47,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_update_non_existent_router_returns_404[id-b23d1569-8b0c-4169-8d4b-6abd34fad5c7,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_add_router_interfaces_on_overlapping_subnets_returns_400[id-957751a3-3c68-4fa2-93b6-eb52ea10db6e,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_delete_non_existent_router_returns_404[id-c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_router_add_gateway_invalid_network_returns_404[id-37a94fc0-a834-45b9-bd23-9a81d2fd1e22,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_router_add_gateway_net_not_external_returns_400[id-11836a18-0b15-4327-a50b-f0d9dc66bddd,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_router_remove_interface_in_use_returns_409[id-04df80f9-224d-47f5-837a-bf23e33d1c20,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_show_non_existent_router_returns_404[id-c2a70d72-8826-43a7-8208-0209e6360c47,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_update_non_existent_router_returns_404[id-b23d1569-8b0c-4169-8d4b-6abd34fad5c7,negative]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802,smoke]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_additional_args[id-87dfbcf9-1849-43ea-b1e4-efa3eeae9f71]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_icmp_type_code[id-c9463db8-b44d-4f52-b6c0-8dbda99f26ce]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_protocol_integer_value[id-0a307599-6655-4220-bebc-fd70c64f2290]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_remote_group_id[id-c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_remote_ip_prefix[id-16459776-5da2-4634-bce4-4b55ee3ec188]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9,smoke]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686,smoke]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802,smoke]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_additional_args[id-87dfbcf9-1849-43ea-b1e4-efa3eeae9f71]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code[id-c9463db8-b44d-4f52-b6c0-8dbda99f26ce]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_protocol_integer_value[id-0a307599-6655-4220-bebc-fd70c64f2290]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_group_id[id-c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_ip_prefix[id-16459776-5da2-4634-bce4-4b55ee3ec188]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9,smoke]
-# tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686,smoke]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_additional_default_security_group_fails[id-2323061e-9fbf-4eb0-b547-7e8fafc90849,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_duplicate_security_group_rule_fails[id-8fde898f-ce88-493b-adc9-4e4692879fc5,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_bad_ethertype[id-5666968c-fff3-40d6-9efc-df1c8bd01abb,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_bad_protocol[id-981bdc22-ce48-41ed-900a-73148b583958,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_bad_remote_ip_prefix[id-5f8daf69-3c5f-4aaa-88c9-db1d66f68679,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_invalid_ports[id-0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_non_existent_remote_groupid[id-4bf786fd-2f02-443c-9716-5b98e159a49a,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_non_existent_security_group[id-be308db6-a7cf-4d5c-9baf-71bafd73f35e,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_remote_ip_and_group[id-b5c4b247-6b02-435b-b088-d10d45650881,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_wrong_ip_prefix_version[id-7607439c-af73-499e-bf64-f687fd12a842,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_delete_non_existent_security_group[id-1f1bb89d-5664-4956-9fcd-83ee0fa603df,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_show_non_existent_security_group[id-424fd5c3-9ddc-486a-b45f-39bf0c820fc6,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_show_non_existent_security_group_rule[id-4c094c09-000b-4e41-8100-9617600c02a6,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_additional_default_security_group_fails[id-2323061e-9fbf-4eb0-b547-7e8fafc90849,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_duplicate_security_group_rule_fails[id-8fde898f-ce88-493b-adc9-4e4692879fc5,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_ethertype[id-5666968c-fff3-40d6-9efc-df1c8bd01abb,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_protocol[id-981bdc22-ce48-41ed-900a-73148b583958,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_remote_ip_prefix[id-5f8daf69-3c5f-4aaa-88c9-db1d66f68679,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_invalid_ports[id-0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_remote_groupid[id-4bf786fd-2f02-443c-9716-5b98e159a49a,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_security_group[id-be308db6-a7cf-4d5c-9baf-71bafd73f35e,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_remote_ip_and_group[id-b5c4b247-6b02-435b-b088-d10d45650881,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_delete_non_existent_security_group[id-1f1bb89d-5664-4956-9fcd-83ee0fa603df,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group[id-424fd5c3-9ddc-486a-b45f-39bf0c820fc6,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group_rule[id-4c094c09-000b-4e41-8100-9617600c02a6,negative]
-# tempest.api.network.test_service_type_management.ServiceTypeManagementTestJSON.test_service_provider_list[id-2cbbeea9-f010-40f6-8df5-4eaa0c918ea6]
-# tempest.api.network.test_subnetpools_extensions.SubnetPoolsTestJSON.test_create_list_show_update_delete_subnetpools[id-62595970-ab1c-4b7f-8fcc-fddfe55e9811,smoke]
diff --git a/tricircle/tempestplugin/tempest_scenario.sh b/tricircle/tempestplugin/tempest_scenario.sh
deleted file mode 100755
index 2915c477..00000000
--- a/tricircle/tempestplugin/tempest_scenario.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-# tempest.scenario.test_aggregates_basic_ops.TestAggregatesBasicOps.test_aggregate_basic_ops[compute,id-cb2b4c4f-0c7c-4164-bdde-6285b302a081]
-# tempest.scenario.test_baremetal_basic_ops.BaremetalBasicOps.test_baremetal_server_ops[baremetal,compute,id-549173a5-38ec-42bb-b0e2-c8b9f4a08943,image,network]
-# tempest.scenario.test_dashboard_basic_ops.TestDashboardBasicOps.test_basic_scenario[dashboard,id-4f8851b1-0e69-482b-b63b-84c6e76f6c80]
-# tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup[compute,id-cbc752ed-b716-4717-910f-956cce965722,image,volume]
-# tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks[compute,id-79165fb4-5534-4b9d-8429-97ccffb8f86e,image,volume]
-# tempest.scenario.test_minimum_basic.TestMinimumBasicScenario.test_minimum_basic_scenario[compute,id-bdbb5441-9204-419d-a225-b4fdbfb1a1a8,image,network,volume]
-# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_pause_unpause[compute,id-2b2642db-6568-4b35-b812-eceed3fa20ce,network]
-# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_reboot[compute,id-7b6860c2-afa3-4846-9522-adeb38dfbe08,network]
-# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_rebuild[compute,id-88a529c2-1daa-4c85-9aec-d541ba3eb699,network]
-# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_resize[compute,id-719eb59d-2f42-4b66-b8b1-bb1254473967,network]
-# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_stop_start[compute,id-61f1aa9a-1573-410e-9054-afa557cab021,network,stress]
-# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_suspend_resume[compute,id-5cdf9499-541d-4923-804e-b9a60620a7f0,network]
-# tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_connectivity_between_vms_on_different_networks[compute,id-1546850e-fbaa-42f5-8b5f-03d8a6a95f15,network]
-# tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_hotplug_nic[compute,id-c5adff73-e961-41f1-b4a9-343614f18cfa,network]
-# tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke]
-# tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_port_security_macspoofing_port[compute,id-7c0bb1a2-d053-49a4-98f9-ca1a1d849f63,network]
-# tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_preserve_preexisting_port[compute,id-759462e1-8535-46b0-ab3a-33aa45c55aaa,network]
-# tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_router_rescheduling[compute,id-2e788c46-fb3f-4ac9-8f82-0561555bea73,network]
-# tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_subnet_details[compute,id-d8bb918e-e2df-48b2-97cd-b73c95450980,network]
-# tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_update_instance_port_admin_state[compute,id-f5dfcc22-45fd-409f-954c-5bd500d7890b,network]
-# tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_update_router_admin_state[compute,id-04b9fe4e-85e8-4aea-b937-ea93885ac59f,network]
-# tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os[compute,id-d7e1f858-187c-45a6-89c9-bdafde619a9f,network,slow]
-# tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os[compute,id-76f26acd-9688-42b4-bc3e-cd134c4cb09e,network,slow]
-# tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless[compute,id-cf1c4425-766b-45b8-be35-e2959728eb00,network]
-# tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac[compute,id-9178ad42-10e4-47e9-8987-e02b170cc5cd,network]
-# tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os[compute,id-b6399d76-4438-4658-bcf5-0d6c8584fde2,network,slow]
-# tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless[compute,id-7ab23f41-833b-4a16-a7c9-5b42fe6d4123,network,slow]
-# tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac[compute,id-dec222b1-180c-4098-b8c5-cc1b8342d611,network,slow]
-# tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os[compute,id-2c92df61-29f0-4eaa-bee3-7c65bef62a43,network,slow]
-# tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download[id-916c7111-cb1f-44b2-816d-8f760e4ea910,object_storage]
-# tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_basic_ops[id-b920faf1-7b8a-4657-b9fe-9c4512bfb381,object_storage]
-# tempest.scenario.test_object_storage_telemetry_middleware.TestObjectStorageTelemetry.test_swift_middleware_notifies[id-6d6b88e5-3e38-41bc-b34a-79f713a6cb84,object_storage,telemetry]
-# tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_cross_tenant_traffic[compute,id-e79f879e-debb-440c-a7e4-efeda05b6848,network]
-# tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_in_tenant_traffic[compute,id-63163892-bbf6-4249-aa12-d5ea1f8f421b,network]
-# tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_multiple_security_groups[compute,id-d2f77418-fcc4-439d-b935-72eca704e293,network]
-# tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_port_security_disable_security_group[compute,id-7c811dcc-263b-49a3-92d2-1b4d8405f50c,network]
-# tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_port_update_new_security_group[compute,id-f4d556d7-1526-42ad-bafb-6bebf48568f6,network]
-# tempest.scenario.test_server_advanced_ops.TestServerAdvancedOps.test_resize_volume_backed_server_confirm[compute,id-e6c28180-7454-4b59-b188-0257af08a63b,volume]
-# tempest.scenario.test_server_advanced_ops.TestServerAdvancedOps.test_server_sequence_suspend_resume[compute,id-949da7d5-72c8-4808-8802-e3d70df98e2c]
-# tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops[compute,id-7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba,network,smoke]
-# tempest.scenario.test_server_multinode.TestServerMultinode.test_schedule_to_all_nodes[compute,id-9cecbe35-b9d4-48da-a37e-7ce70aa43d30,network,smoke]
-# tempest.scenario.test_shelve_instance.TestShelveInstance.test_shelve_instance[compute,id-1164e700-0af0-4a4c-8792-35909a88743c,image,network]
-# tempest.scenario.test_shelve_instance.TestShelveInstance.test_shelve_volume_backed_instance[compute,id-c1b6318c-b9da-490b-9c67-9339b627271f,image,network,volume]
-# tempest.scenario.test_snapshot_pattern.TestSnapshotPattern.test_snapshot_pattern[compute,id-608e604b-1d63-4a82-8e3e-91bc665c90b4,image,network]
-# tempest.scenario.test_stamp_pattern.TestStampPattern.test_stamp_pattern[compute,id-10fd234a-515c-41e5-b092-8323060598c5,image,network,volume]
-# tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_create_ebs_image_and_check_boot[compute,id-36c34c67-7b54-4b59-b188-02a2f458a63b,image,volume]
-# tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern[compute,id-557cd2c2-4eb8-4dce-98be-f86765ff311b,image,smoke,volume]
-# tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_create_ebs_image_and_check_boot[compute,id-36c34c67-7b54-4b59-b188-02a2f458a63b,image,volume]
-# tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern[compute,id-557cd2c2-4eb8-4dce-98be-f86765ff311b,image,smoke,volume]
diff --git a/tricircle/tempestplugin/trunk_test.yaml b/tricircle/tempestplugin/trunk_test.yaml
deleted file mode 100644
index 82e8beff..00000000
--- a/tricircle/tempestplugin/trunk_test.yaml
+++ /dev/null
@@ -1,296 +0,0 @@
-- task_set_id: preparation
- tasks:
- - task_id: image1
- type: image
- region: region1
- query:
- get_one: true
- - task_id: net1
- region: central
- type: network
- params:
- name: net1
- provider_network_type: vlan
- - task_id: subnet1
- region: central
- type: subnet
- depend: [net1]
- params:
- name: subnet1
- ip_version: 4
- cidr: 10.0.1.0/24
- network_id: net1@id
- - task_id: net2
- region: central
- type: network
- params:
- name: net2
- provider_network_type: vlan
- - task_id: subnet2
- region: central
- type: subnet
- depend: [net2]
- params:
- name: subnet2
- ip_version: 4
- cidr: 10.0.2.0/24
- network_id: net2@id
- - task_id: p1
- region: central
- type: port
- depend:
- - net1
- - subnet1
- params:
- name: p1
- network_id: net1@id
- - task_id: p2
- region: central
- type: port
- depend:
- - net2
- - subnet2
- params:
- name: p2
- network_id: net2@id
- - task_id: vm1
- region: region1
- type: server
- depend:
- - p1
- - image1
- - trunk1
- params:
- flavor_id: 1
- image_id: image1@id
- name: vm1
- networks:
- - port: p1@id
- - task_id: trunk1
- region: central
- depend:
- - p1
- - p2
- - net2
- type: trunk
- params:
- name: trunk1
- port_id: p1@id
- sub_ports:
- - port_id: p2@id
- segmentation_type: vlan
- segmentation_id: net2@provider_segmentation_id
-- task_set_id: wait-for-job
- tasks:
- # ensure server is active and thus sync_trunk job is registered
- - task_id: check-servers
- region: region1
- type: server
- validate:
- predicate: any
- retries: 10
- condition:
- - status: ACTIVE
- name: vm1
- - task_id: check-job
- region: central
- type: job
- depend: [check-servers]
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
-- task_set_id: check
- depend: [preparation]
- tasks:
- - task_id: check-ports
- region: region1
- type: port
- validate:
- predicate: any
- condition:
- - name: p1
- fixed_ips:
- - ip_address: 10.0.1*
- - name: p2
- fixed_ips:
- - ip_address: 10.0.2*
- - task_id: check-trunks
- region: region1
- type: trunk
- validate:
- predicate: any
- condition:
- - name: trunk1
- port_id: preparation@p1@id
- sub_ports:
- - port_id: preparation@p2@id
- segmentation_type: vlan
- segmentation_id: preparation@net2@provider_segmentation_id
-- task_set_id: remove-subports
- depend: [preparation]
- tasks:
- - task_id: remove-subport
- region: central
- type: trunk
- action:
- target: preparation@trunk1@id
- method: remove_subports
- params:
- subports:
- - port_id: preparation@p2@id
- - task_id: check-job
- region: central
- type: job
- depend: [remove-subport]
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
- - task_id: check-remove-subport
- region: region1
- type: trunk
- depend: [check-job]
- validate:
- predicate: any
- condition:
- - name: trunk1
- port_id: preparation@p1@id
- sub_ports: []
-- task_set_id: add-subports
- depend: [preparation]
- tasks:
- - task_id: add-subport
- region: central
- type: trunk
- action:
- target: preparation@trunk1@id
- method: add_subports
- params:
- subports:
- - port_id: preparation@p2@id
- segmentation_type: vlan
- segmentation_id: preparation@net2@provider_segmentation_id
- - task_id: check-job
- region: central
- type: job
- depend: [add-subport]
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
- - task_id: check-add-subport
- region: region1
- type: trunk
- depend: [check-job]
- validate:
- predicate: any
- condition:
- - name: trunk1
- port_id: preparation@p1@id
- sub_ports:
- - port_id: preparation@p2@id
- segmentation_type: vlan
- segmentation_id: preparation@net2@provider_segmentation_id
-- task_set_id: clean
- depend: [preparation]
- tasks:
- - task_id: delete-server
- region: region1
- type: server
- action:
- target: preparation@vm1@id
- method: delete
- - task_id: delete-trunk
- region: central
- type: trunk
- depend: [delete-server]
- action:
- target: preparation@trunk1@id
- method: delete
- retries: 3
- - task_id: delete-p1
- region: central
- type: port
- depend: [delete-trunk]
- action:
- target: preparation@p1@id
- method: delete
- - task_id: delete-p2
- region: central
- type: port
- depend: [delete-trunk]
- action:
- target: preparation@p2@id
- method: delete
- - task_id: delete-subnet1
- region: central
- type: subnet
- depend: [delete-p1]
- action:
- target: preparation@subnet1@id
- method: delete
- retries: 3
- - task_id: delete-subnet2
- region: central
- type: subnet
- depend: [delete-p2]
- action:
- target: preparation@subnet2@id
- method: delete
- retries: 3
- - task_id: delete-net1
- region: central
- type: network
- depend: [delete-subnet1]
- action:
- target: preparation@net1@id
- method: delete
- - task_id: delete-net2
- region: central
- type: network
- depend: [delete-subnet2]
- action:
- target: preparation@net2@id
- method: delete
-- task_set_id: clean-check
- tasks:
- - task_id: check-no-trunks1
- region: region1
- type: trunk
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-trunks2
- region: region2
- type: trunk
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-networks1
- region: region1
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-no-networks2
- region: region2
- type: network
- validate:
- predicate: all
- condition:
- - name: invalid-name
- - task_id: check-jobs
- region: central
- type: job
- validate:
- predicate: all
- retries: 10
- condition:
- - status: SUCCESS
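
The YAML above drives the tempest-plugin task runner: task sets execute in dependency order, and values such as "net1@id" or "preparation@net2@provider_segmentation_id" are references that the runner resolves against the results of earlier tasks. Below is a minimal sketch of how such references could be resolved; the `results` layout, the `resolve` helper and the `current_set` default are illustrative assumptions, not the plugin's actual implementation.

```python
# Illustrative sketch only: resolve "task@field" or "task_set@task@field"
# style references against previously collected task results. The real
# runner in tricircle/tempestplugin may differ; all names here are assumed.
def resolve(ref, results, current_set='preparation'):
    """Resolve a reference like 'net1@id' or 'preparation@p2@id'."""
    parts = ref.split('@')
    if len(parts) == 2:
        # reference to a task in the current task set, e.g. "net1@id"
        task_id, field = parts
        return results[current_set][task_id][field]
    # reference to a task in another task set, e.g. "preparation@p1@id"
    task_set_id, task_id, field = parts
    return results[task_set_id][task_id][field]


# example with made-up results from the "preparation" task set
results = {'preparation': {'net1': {'id': 'net-uuid-1'},
                           'p1': {'id': 'port-uuid-1'}}}
assert resolve('net1@id', results) == 'net-uuid-1'
assert resolve('preparation@p1@id', results) == 'port-uuid-1'
```
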
diff --git a/tricircle/tests/__init__.py b/tricircle/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/base.py b/tricircle/tests/base.py
deleted file mode 100644
index 6cc4741b..00000000
--- a/tricircle/tests/base.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.conf import common as n_conf
-from oslo_config import cfg
-from oslotest import base
-
-
-CONFLICT_OPT_NAMES = [
- 'api_extensions_path',
- 'bind_port',
- 'bind_host',
- 'allow_pagination',
- 'allow_sorting',
- 'pagination_max_limit',
-]
-
-
-class TestCase(base.BaseTestCase):
- """Test case base class for all unit tests."""
- def setUp(self):
-        # neutron has configuration options such as "api_extensions_path",
-        # "bind_port" and "bind_host" which conflict with tricircle
-        # configuration options, so unregister these options before
-        # running tricircle tests
- for opt in n_conf.core_opts:
- if opt.name in CONFLICT_OPT_NAMES:
- cfg.CONF.unregister_opt(opt)
- super(TestCase, self).setUp()
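
Unit test modules build on this base class so that the conflicting neutron core options are gone before tricircle registers its own versions. A minimal, hypothetical usage sketch follows; the re-registered `bind_port` option is only for illustration, not an actual tricircle option.

```python
# Hypothetical usage sketch of the base TestCase above; the option
# registered here is illustrative only.
from oslo_config import cfg

from tricircle.tests import base


class TestConflictingOption(base.TestCase):
    def setUp(self):
        super(TestConflictingOption, self).setUp()
        # safe now: neutron's conflicting core options were unregistered,
        # so registering our own 'bind_port' does not clash
        cfg.CONF.register_opt(cfg.PortOpt('bind_port', default=19999))

    def test_bind_port_default(self):
        self.assertEqual(19999, cfg.CONF.bind_port)
```
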
diff --git a/tricircle/tests/functional/__init__.py b/tricircle/tests/functional/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/functional/api/__init__.py b/tricircle/tests/functional/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/functional/api/controllers/__init__.py b/tricircle/tests/functional/api/controllers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/functional/api/controllers/test_job.py b/tricircle/tests/functional/api/controllers/test_job.py
deleted file mode 100755
index 0f3a967a..00000000
--- a/tricircle/tests/functional/api/controllers/test_job.py
+++ /dev/null
@@ -1,783 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-from mock import patch
-from oslo_config import cfg
-from oslo_config import fixture as fixture_config
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
-from six.moves import xrange
-
-import pecan
-from pecan.configuration import set_config
-from pecan.testing import load_test_app
-
-from tricircle.api import app
-from tricircle.common import constants
-from tricircle.common import context
-from tricircle.common import policy
-from tricircle.common import xrpcapi
-from tricircle.db import api as db_api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.tests import base
-
-
-OPT_GROUP_NAME = 'keystone_authtoken'
-cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
-
-
-def fake_admin_context():
- context_paras = {'is_admin': True}
- return context.Context(**context_paras)
-
-
-def fake_non_admin_context():
- context_paras = {}
- return context.Context(**context_paras)
-
-
-class API_FunctionalTest(base.TestCase):
-
- def setUp(self):
- super(API_FunctionalTest, self).setUp()
-
- self.addCleanup(set_config, {}, overwrite=True)
-
- cfg.CONF.clear()
- cfg.CONF.register_opts(app.common_opts)
-
- self.CONF = self.useFixture(fixture_config.Config()).conf
-
- self.CONF.set_override('auth_strategy', 'noauth')
- self.CONF.set_override('tricircle_db_connection', 'sqlite://')
-
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
-
- self.context = context.get_admin_context()
-
- policy.populate_default_rules()
-
- self.app = self._make_app()
-
- def _make_app(self, enable_acl=False):
- self.config = {
- 'app': {
- 'root': 'tricircle.api.controllers.root.RootController',
- 'modules': ['tricircle.api'],
- 'enable_acl': enable_acl,
- },
- }
-
- return load_test_app(self.config)
-
- def tearDown(self):
- super(API_FunctionalTest, self).tearDown()
- cfg.CONF.unregister_opts(app.common_opts)
- pecan.set_config({}, overwrite=True)
- core.ModelBase.metadata.drop_all(core.get_engine())
- policy.reset()
-
-
-class TestAsyncJobController(API_FunctionalTest):
-    """Functional tests for the async job controller."""
-
- def setUp(self):
- super(TestAsyncJobController, self).setUp()
- self.job_resource_map = constants.job_resource_map
- self.all_job_types = list(self.job_resource_map.keys())
-
- def fake_new_job(context, project_id, type, resource_id):
- raise Exception
-
- def fake_invoke_method(self, context, project_id, method, type, id):
- db_api.new_job(context, project_id, type, id)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_post_no_input(self):
- job = self._prepare_job_element(constants.JT_CONFIGURE_ROUTE)
-
- jobs = [
- # missing job
- {
- "job_xxx": job,
- "expected_error": 400
- },
- ]
-
- for test_job in jobs:
- response = self.app.post_json(
- '/v1.0/jobs',
- dict(job_xxx=test_job['job_xxx']),
- expect_errors=True)
-
- self.assertEqual(response.status_int,
- test_job['expected_error'])
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- @patch.object(db_api, 'new_job',
- new=fake_new_job)
- def test_post_exception(self):
- job = self._prepare_job_element(constants.JT_CONFIGURE_ROUTE)
-
- jobs = [
- {
- "job": job,
- "expected_error": 500
- },
- ]
- self._test_and_check(jobs)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_post_invalid_input(self):
- for job_type in self.all_job_types:
- job = self._prepare_job_element(job_type)
-
- # wrong job type parameter: no job type is provided
- job_1 = copy.deepcopy(job)
- job_1.pop('type')
-
- # wrong job type parameter: job type is empty
- job_2 = copy.deepcopy(job)
- job_2['type'] = ''
-
- # wrong job type parameter: job type is wrong
- job_3 = copy.deepcopy(job)
- job_3['type'] = job['type'] + '_1'
-
- # wrong resource parameter: no resource is provided
- job_4 = copy.deepcopy(job)
- job_4.pop('resource')
-
- # wrong resource parameter: lack of necessary resource
- job_5 = copy.deepcopy(job)
- job_5['resource'].popitem()
-
- # wrong resource parameter: redundant resource
- job_6 = copy.deepcopy(job)
- job_6['resource']['fake_resource'] = 'fake_resource'
-
- # wrong project id parameter: no project id is provided
- job_7 = copy.deepcopy(job)
- job_7.pop('project_id')
-
- # wrong project id parameter: project id is empty
- job_8 = copy.deepcopy(job)
- job_8['project_id'] = ''
-
- # wrong project id parameter: project is not the
- # owner of resource
- job_9 = copy.deepcopy(job)
- job_9['project_id'] = uuidutils.generate_uuid()
-
- jobs = [
- {
- "job": job_1,
- "expected_error": 400
- },
- {
- "job": job_2,
- "expected_error": 400
- },
- {
- "job": job_3,
- "expected_error": 400
- },
- {
- "job": job_4,
- "expected_error": 400
- },
- {
- "job": job_5,
- "expected_error": 400
- },
- {
- "job": job_6,
- "expected_error": 400
- },
- {
- "job": job_7,
- "expected_error": 400
- },
- {
- "job": job_8,
- "expected_error": 400
- },
- {
- "job": job_9,
- "expected_error": 400
- },
- ]
-
- self._test_and_check(jobs)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- @patch.object(xrpcapi.XJobAPI, 'invoke_method',
- new=fake_invoke_method)
- def test_post_job(self):
- for job_type in self.all_job_types:
- job = self._prepare_job_element(job_type)
-
- jobs = [
- # create an entirely new job
- {
- "job": job,
- "expected_error": 200
- },
-                # the target job already exists in the job table with status
-                # NEW, so this newer job will be picked up by the job handler.
- {
- "job": job,
- "expected_error": 200
- },
- ]
-
- self._test_and_check(jobs)
-
- @patch.object(xrpcapi.XJobAPI, 'invoke_method',
- new=fake_invoke_method)
- @patch.object(context, 'extract_context_from_environ')
- def test_get_one_and_get_all(self, mock_context):
- self.context.project_id = "fake_project_id"
- mock_context.return_value = self.context
-
- all_job_ids = {}
- all_job_project_ids = {}
- index = 0
- for job_type in self.all_job_types:
- if index == 0:
- # the first job has a project ID that differs from
- # context.project_id
- job = self._prepare_job_element(job_type)
- else:
- job = self._prepare_job_element(job_type,
- self.context.project_id)
-
- job = {"job": job, "expected_error": 200}
-
- back_jobid = self._test_and_obtain_id(job)
-
- all_job_ids[index] = back_jobid
- all_job_project_ids[job_type] = job['job']['project_id']
-
- index = index + 1
- service_uris = ['jobs', 'jobs/detail']
- amount_of_all_jobs = len(self.all_job_types)
- # with no filters all jobs are returned
- for service_uri in service_uris:
- response_1 = self.app.get('/v1.0/%(service_uri)s' % {
- 'service_uri': service_uri})
- return_jobs_1 = response_1.json
-
- self.assertEqual(amount_of_all_jobs - 1,
- len(return_jobs_1['jobs']))
- self.assertIn('status', response_1)
- self.assertIn('resource', response_1)
- self.assertIn('project_id', response_1)
- self.assertIn('id', response_1)
- self.assertIn('timestamp', response_1)
- self.assertIn('type', response_1)
-
- self.assertNotIn('extra_id', response_1)
- self.assertNotIn('resource_id', response_1)
-
- # use job status filter
- response_2 = self.app.get('/v1.0/jobs?status=new')
- return_jobs_2 = response_2.json
-
- self.assertEqual(amount_of_all_jobs - 1, len(return_jobs_2['jobs']))
-
- response = self.app.get('/v1.0/jobs?status=fail')
- return_jobs_3 = response.json
-
- self.assertEqual(0, len(return_jobs_3['jobs']))
-
- amount_of_fail_jobs = int(amount_of_all_jobs / 3)
- for i in xrange(amount_of_fail_jobs):
- db_api.finish_job(self.context,
- all_job_ids[i], False,
- timeutils.utcnow())
-
- amount_of_succ_jobs = int(amount_of_all_jobs / 3)
- for i in xrange(amount_of_succ_jobs):
- db_api.finish_job(self.context,
- all_job_ids[amount_of_fail_jobs + i], True,
- timeutils.utcnow())
-
- for service_uri in service_uris:
- response = self.app.get('/v1.0/%(service_uri)s?status=fail' % {
- 'service_uri': service_uri})
- return_jobs = response.json
-
- self.assertEqual(amount_of_fail_jobs - 1, len(return_jobs['jobs']))
-
- response = self.app.get('/v1.0/%(service_uri)s?status=success'
- '' % {'service_uri': service_uri})
- return_jobs = response.json
-
- self.assertEqual(amount_of_succ_jobs, len(return_jobs['jobs']))
-
-            # a project ID filter in the URL query string is ignored; only
-            # the project ID the user is authorized for is used as the
-            # filter.
- response = self.app.get(
- '/v1.0/%(service_uri)s' % {'service_uri': service_uri})
- return_job = response.json
-
- response1 = self.app.get(
- '/v1.0/%(service_uri)s?project_id=%(project_id)s' % {
- 'service_uri': service_uri,
- 'project_id': uuidutils.generate_uuid()})
- return_job1 = response1.json
-
- response2 = self.app.get(
- '/v1.0/%(service_uri)s?project_id=%(project_id)s' % {
- 'service_uri': service_uri,
- 'project_id': 'fake_project_id'})
- return_job2 = response2.json
-
- self.assertEqual(len(return_job2['jobs']),
- len(return_job1['jobs']))
- self.assertEqual(len(return_job['jobs']),
- len(return_job2['jobs']))
-
- # use job type filter
- count = 1
- for job_type in self.all_job_types:
- response = self.app.get('/v1.0/%(service_uri)s?type=%(type)s'
- '' % {'service_uri': service_uri,
- 'type': job_type})
- return_job = response.json
- if count == 1:
- self.assertEqual(0, len(return_job['jobs']))
- else:
- self.assertEqual(1, len(return_job['jobs']))
- count += 1
-
- # combine job type and job status filter
- for i in xrange(1, amount_of_all_jobs):
- if i < amount_of_fail_jobs:
- # this aims to test service "/v1.0/jobs/{id}"
- response_1 = self.app.get('/v1.0/jobs/%(id)s' % {
- 'id': all_job_ids[i]})
- return_job_1 = response_1.json
-
- response_2 = self.app.get(
- '/v1.0/%(service_uri)s?'
- 'type=%(type)s&'
- 'status=%(status)s' % {
- 'service_uri': service_uri,
- 'type': return_job_1['job']['type'],
- 'status': 'fail'})
-
- return_job_2 = response_2.json
-
- self.assertEqual(1, len(return_job_2['jobs']))
-
- elif ((i >= amount_of_fail_jobs
- ) and (i < amount_of_fail_jobs + amount_of_succ_jobs)):
-                    # these jobs are set to 'success' and moved to the job
-                    # log, so their job ids are not stored in all_job_ids
- job_type = self.all_job_types[i]
- response = self.app.get(
- '/v1.0/%(service_uri)s?'
- 'type=%(type)s&status=%(status)s' % {
- 'service_uri': service_uri,
- 'type': job_type,
- 'status': 'success'})
-
- return_job = response.json
-
- self.assertEqual(1, len(return_job['jobs']))
-
- response_2 = self.app.get(
- '/v1.0/%(service_uri)s?status=%(status)s'
- '&type=%(type)s' % {
- 'service_uri': service_uri,
- 'status': "success-x",
- 'type': job_type})
- return_job_2 = response_2.json
- self.assertEqual(0, len(return_job_2['jobs']))
-
- else:
- response_1 = self.app.get('/v1.0/jobs/%(id)s' % {
- 'id': all_job_ids[i]})
- return_job_1 = response_1.json
-
- response_2 = self.app.get(
- '/v1.0/%(service_uri)s?'
- 'type=%(type)s&status=%(status)s' % {
- 'service_uri': service_uri,
- 'type': return_job_1['job']['type'],
- 'status': 'new'})
-
- return_job_2 = response_2.json
-
- self.assertEqual(1, len(return_job_2['jobs']))
-
- response_3 = self.app.get(
- '/v1.0/%(service_uri)s?status=%(status)s'
- '&type=%(type)s' % {
- 'service_uri': service_uri,
- 'status': "new-x",
- 'type': return_job_1['job']['type']})
- return_job_3 = response_3.json
- self.assertEqual(0, len(return_job_3['jobs']))
-
-            # using an unsupported filter raises a 400 error
- response = self.app.get('/v1.0/%(service_uri)s?'
- 'fake_filter=%(fake_filter)s'
- '' % {'service_uri': service_uri,
- 'fake_filter': "fake_filter"},
- expect_errors=True)
-
- self.assertEqual(response.status_int, 400)
-
-            # using an invalid filter value returns an empty result set
- response = self.app.get('/v1.0/%(service_uri)s?status=%(status)s'
- '' % {'service_uri': service_uri,
- 'status': "new-x"})
- return_job = response.json
- self.assertEqual(0, len(return_job['jobs']))
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_get_job_schemas(self):
- response = self.app.get('/v1.0/jobs/schemas')
- return_job_schemas = response.json
-
- job_schemas = []
- for job_type in self.all_job_types:
- job = {}
- resource = []
- for resource_type, resource_id in (
- self.job_resource_map[job_type]):
- resource.append(resource_id)
- job['resource'] = resource
- job['type'] = job_type
- job_schemas.append(job)
-
- self.assertEqual(job_schemas, return_job_schemas['schemas'])
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- @patch.object(xrpcapi.XJobAPI, 'invoke_method',
- new=fake_invoke_method)
- def test_delete_job(self):
-
- for job_type in self.all_job_types:
- job = self._prepare_job_element(job_type)
-
- jobs = [
- {
- "job": job,
- "expected_error": 200
- },
- ]
-
- self._test_and_check(jobs)
-
- response = self.app.get('/v1.0/jobs')
- return_job = response.json
-
- jobs = return_job['jobs']
-
- # delete a new job
- for job in jobs:
- response_1 = self.app.delete(
- '/v1.0/jobs/%(id)s' % {'id': job['id']},
- expect_errors=True)
- return_value_1 = response_1.json
-
- self.assertEqual(response_1.status_int, 200)
- self.assertEqual(return_value_1, {})
-
- response_2 = self.app.get('/v1.0/jobs')
- return_job_2 = response_2.json
- self.assertEqual(0, len(return_job_2['jobs']))
-
- response_3 = self.app.delete('/v1.0/jobs/123', expect_errors=True)
- self.assertEqual(response_3.status_int, 404)
-
- # delete a running job
- job_type_4 = constants.JT_NETWORK_UPDATE
- job_4 = self._prepare_job_element(job_type_4)
- resource_id_4 = '#'.join([job_4['resource'][resource_id]
- for resource_type, resource_id
- in self.job_resource_map[job_type_4]])
- job_running_4 = db_api.register_job(self.context,
- job_4['project_id'],
- job_type_4,
- resource_id_4)
-
- self.assertEqual(constants.JS_Running, job_running_4['status'])
- response_4 = self.app.delete('/v1.0/jobs/%(id)s' % {
- 'id': job_running_4['id']}, expect_errors=True)
-
- self.assertEqual(response_4.status_int, 400)
-
- # delete a failed job
- job_type_5 = constants.JT_NETWORK_UPDATE
- job_5 = self._prepare_job_element(job_type_5)
-
- job_dict_5 = {
- "job": job_5,
- "expected_error": 200
- }
-
- response_5 = self.app.post_json('/v1.0/jobs',
- dict(job=job_dict_5['job']),
- expect_errors=True)
- return_job_5 = response_5.json
-
- self.assertEqual(response_5.status_int, 200)
-
- db_api.finish_job(self.context,
- return_job_5['job']['id'],
- False, timeutils.utcnow())
-
- job_fail_5 = db_api.get_job(self.context, return_job_5['job']['id'])
- self.assertEqual(constants.JS_Fail, job_fail_5['status'])
- response_6 = self.app.delete('/v1.0/jobs/%(id)s' % {
- 'id': return_job_5['job']['id']}, expect_errors=True)
-
- self.assertEqual(response_6.status_int, 200)
-
- # delete a successful job
- job_type_6 = constants.JT_NETWORK_UPDATE
- job_6 = self._prepare_job_element(job_type_6)
-
- job_dict_6 = {
- "job": job_6,
- "expected_error": 200
- }
-
- response_6 = self.app.post_json('/v1.0/jobs',
- dict(job=job_dict_6['job']),
- expect_errors=True)
- return_job_6 = response_6.json
-
- with self.context.session.begin():
- job_dict = {'status': constants.JS_Success,
- 'timestamp': timeutils.utcnow(),
- 'extra_id': uuidutils.generate_uuid()}
- core.update_resource(self.context, models.AsyncJob,
- return_job_6['job']['id'], job_dict)
-
- job_succ_6 = db_api.get_job(self.context, return_job_6['job']['id'])
- self.assertEqual(constants.JS_Success, job_succ_6['status'])
- response_7 = self.app.delete('/v1.0/jobs/%(id)s' % {
- 'id': return_job_6['job']['id']}, expect_errors=True)
-
- self.assertEqual(response_7.status_int, 200)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- @patch.object(xrpcapi.XJobAPI, 'invoke_method',
- new=fake_invoke_method)
- def test_redo_job(self):
-
- for job_type in self.all_job_types:
- job = self._prepare_job_element(job_type)
-
- jobs = [
- # create an entirely new job
- {
- "job": job,
- "expected_error": 200
- },
- ]
-
- self._test_and_check(jobs)
-
- response = self.app.get('/v1.0/jobs')
- return_job = response.json
-
- jobs = return_job['jobs']
-
- # redo a new job
- for job in jobs:
- response_1 = self.app.put('/v1.0/jobs/%(id)s' % {'id': job['id']},
- expect_errors=True)
-
- self.assertEqual(response_1.status_int, 200)
-
- response_2 = self.app.put('/v1.0/jobs/123', expect_errors=True)
- self.assertEqual(response_2.status_int, 404)
-
- # redo a running job
- job_type_3 = constants.JT_NETWORK_UPDATE
- job_3 = self._prepare_job_element(job_type_3)
- resource_id_3 = '#'.join([job_3['resource'][resource_id]
- for resource_type, resource_id
- in self.job_resource_map[job_type_3]])
- job_running_3 = db_api.register_job(self.context,
- job_3['project_id'],
- job_type_3,
- resource_id_3)
-
- self.assertEqual(constants.JS_Running, job_running_3['status'])
- response_3 = self.app.put('/v1.0/jobs/%(id)s' % {
- 'id': job_running_3['id']}, expect_errors=True)
-
- self.assertEqual(response_3.status_int, 400)
-
- # redo a failed job
- job_type_4 = constants.JT_NETWORK_UPDATE
- job_4 = self._prepare_job_element(job_type_4)
-
- job_dict_4 = {
- "job": job_4,
- "expected_error": 200
- }
-
- response_4 = self.app.post_json('/v1.0/jobs',
- dict(job=job_dict_4['job']),
- expect_errors=True)
- return_job_4 = response_4.json
-
- self.assertEqual(response_4.status_int, 200)
-
- db_api.finish_job(self.context,
- return_job_4['job']['id'],
- False, timeutils.utcnow())
-
- job_fail_4 = db_api.get_job(self.context, return_job_4['job']['id'])
- self.assertEqual(constants.JS_Fail, job_fail_4['status'])
- response_5 = self.app.put('/v1.0/jobs/%(id)s' % {
- 'id': return_job_4['job']['id']}, expect_errors=True)
-
- self.assertEqual(response_5.status_int, 200)
-
- # redo a successful job
- job_type_6 = constants.JT_NETWORK_UPDATE
- job_6 = self._prepare_job_element(job_type_6)
-
- job_dict_6 = {
- "job": job_6,
- "expected_error": 200
- }
-
- response_6 = self.app.post_json('/v1.0/jobs',
- dict(job=job_dict_6['job']),
- expect_errors=True)
- return_job_6 = response_6.json
-
- with self.context.session.begin():
- job_dict = {'status': constants.JS_Success,
- 'timestamp': timeutils.utcnow(),
- 'extra_id': uuidutils.generate_uuid()}
- core.update_resource(self.context, models.AsyncJob,
- return_job_6['job']['id'], job_dict)
-
- job_succ_6 = db_api.get_job(self.context, return_job_6['job']['id'])
- self.assertEqual(constants.JS_Success, job_succ_6['status'])
- response_7 = self.app.put('/v1.0/jobs/%(id)s' % {
- 'id': return_job_6['job']['id']}, expect_errors=True)
-
- self.assertEqual(response_7.status_int, 400)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_non_admin_context)
- def test_non_admin_action(self):
- job_type = constants.JT_NETWORK_UPDATE
- job = self._prepare_job_element(job_type)
-
- jobs = [
- {
- "job": job,
- "expected_error": 403
- },
- ]
- self._test_and_check(jobs)
-
- response_1 = self.app.get('/v1.0/jobs/1234567890',
- expect_errors=True)
- self.assertEqual(response_1.status_int, 403)
-
- response_2 = self.app.get('/v1.0/jobs',
- expect_errors=True)
- self.assertEqual(response_2.status_int, 403)
-
- response_3 = self.app.delete('/v1.0/jobs/1234567890',
- expect_errors=True)
- self.assertEqual(response_3.status_int, 403)
-
- response_4 = self.app.put('/v1.0/jobs/1234567890',
- expect_errors=True)
- self.assertEqual(response_4.status_int, 403)
-
- def _test_and_check(self, jobs):
-
- for test_job in jobs:
- response = self.app.post_json(
- '/v1.0/jobs', dict(job=test_job['job']),
- expect_errors=True)
- self.assertEqual(response.status_int, test_job['expected_error'])
-
- def _test_and_obtain_id(self, job):
- response = self.app.post_json(
- '/v1.0/jobs', dict(job=job['job']),
- expect_errors=True)
- self.assertEqual(response.status_int, job['expected_error'])
-
- back_job = response.json
- return back_job['job']['id']
-
- def _prepare_job_element(self, job_type, project_id=None):
-        # to create a job we need three elements: job type, job resource
-        # and project id. If the project_id parameter is not None, the
-        # resource and job are created for that project; otherwise they
-        # are created for an entirely new project.
- if project_id is None:
- project_id = uuidutils.generate_uuid()
- job = {}
- job['resource'] = {}
- job['type'] = job_type
-
-        # these two job types need no resource routing. We only need to make
-        # sure job['resource']['project_id'] equals job['project_id'], which
-        # is consistent with job_primary_resource_map in common/constants.py
- if job_type in (constants.JT_SEG_RULE_SETUP,
- constants.JT_RESOURCE_RECYCLE):
- job['resource']['project_id'] = project_id
- else:
- for resource_type, resource_id in self.job_resource_map[job_type]:
- job['resource'][resource_id] = uuidutils.generate_uuid()
-
- self._create_resource_for_project(job, project_id)
- job['project_id'] = project_id
-
- return job
-
- def _create_resource_for_project(self, job, project_id):
-        # create a resource routing for project ${project_id}
- pod_id = uuidutils.generate_uuid()
-
- resource_type, resource_id = (
- constants.job_primary_resource_map[job['type']])
- routing = db_api.create_resource_mapping(
- self.context, job['resource'][resource_id],
- job['resource'][resource_id], pod_id, project_id,
- resource_type)
- self.assertIsNotNone(routing)
-
- def _validate_error_code(self, res, code):
- self.assertEqual(res[list(res.keys())[0]]['code'], code)
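
The functional tests above always post the same body shape to the job API: a single `job` object carrying `type`, `project_id` and a `resource` mapping whose keys come from `constants.job_resource_map`. The sketch below shows that payload shape; the type string, resource key and UUIDs are illustrative placeholders, not values taken from the tests.

```python
# Illustrative payload shape for POST /v1.0/jobs as exercised above.
# The concrete job type string and resource key depend on
# constants.job_resource_map; the values below are placeholders.
job_body = {
    "job": {
        "type": "<job type from constants.job_resource_map>",
        "project_id": "05d2fb6cb1df4c17a2c08eb4e3c87eb4",
        "resource": {
            "<resource id key>": "0129cc3f-d0cd-4b23-b757-1b6e38a2a69f",
        },
    }
}
# app.post_json('/v1.0/jobs', job_body) returns the created job, and
# response.json['job']['id'] identifies it for later GET/PUT/DELETE calls.
```
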
diff --git a/tricircle/tests/functional/api/controllers/test_pod.py b/tricircle/tests/functional/api/controllers/test_pod.py
deleted file mode 100644
index 7b12da01..00000000
--- a/tricircle/tests/functional/api/controllers/test_pod.py
+++ /dev/null
@@ -1,480 +0,0 @@
-# Copyright (c) 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from mock import patch
-import pecan
-from pecan.configuration import set_config
-from pecan.testing import load_test_app
-
-from oslo_config import cfg
-from oslo_config import fixture as fixture_config
-import oslo_db.exception as db_exc
-
-from tricircle.api import app
-from tricircle.common import context
-from tricircle.common import policy
-from tricircle.db import core
-from tricircle.tests import base
-
-
-OPT_GROUP_NAME = 'keystone_authtoken'
-cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
-
-
-def fake_admin_context():
- context_paras = {'is_admin': True}
- return context.Context(**context_paras)
-
-
-def fake_non_admin_context():
- context_paras = {}
- return context.Context(**context_paras)
-
-
-class API_FunctionalTest(base.TestCase):
-
- def setUp(self):
- super(API_FunctionalTest, self).setUp()
-
- self.addCleanup(set_config, {}, overwrite=True)
-
- cfg.CONF.clear()
- cfg.CONF.register_opts(app.common_opts)
-
- self.CONF = self.useFixture(fixture_config.Config()).conf
-
- self.CONF.set_override('auth_strategy', 'noauth')
- self.CONF.set_override('tricircle_db_connection', 'sqlite://')
-
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
-
- self.context = context.get_admin_context()
-
- policy.populate_default_rules()
-
- self.app = self._make_app()
-
- def _make_app(self, enable_acl=False):
- self.config = {
- 'app': {
- 'root': 'tricircle.api.controllers.root.RootController',
- 'modules': ['tricircle.api'],
- 'enable_acl': enable_acl,
- 'errors': {
- 400: '/error',
- '__force_dict__': True
- }
- },
- }
-
- return load_test_app(self.config)
-
- def tearDown(self):
- super(API_FunctionalTest, self).tearDown()
- cfg.CONF.unregister_opts(app.common_opts)
- pecan.set_config({}, overwrite=True)
- core.ModelBase.metadata.drop_all(core.get_engine())
- policy.reset()
-
-
-class TestPodController(API_FunctionalTest):
- """Test version listing on root URI."""
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_post_no_input(self):
- pods = [
- # missing pod
- {
- "pod_xxx":
- {
- "dc_name": "dc1",
- "pod_az_name": "az1"
- },
- "expected_error": 400
- }]
-
- for test_pod in pods:
- response = self.app.post_json(
- '/v1.0/pods',
- dict(pod_xxx=test_pod['pod_xxx']),
- expect_errors=True)
-
- self.assertEqual(response.status_int,
- test_pod['expected_error'])
-
- def fake_create_ag_az(context, ag_name, az_name):
- raise db_exc.DBDuplicateEntry
-
- def fake_create_ag_az_exp(context, ag_name, az_name):
- raise Exception
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- @patch.object(core, 'create_resource',
- new=fake_create_ag_az_exp)
- def test_post_exception(self):
- pods = [
- {
- "pod":
- {
- "region_name": "Pod1",
- "pod_az_name": "az1",
- "dc_name": "dc1",
- "az_name": "AZ1"
- },
- "expected_error": 500
- },
- ]
-
- self._test_and_check(pods)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_post_invalid_input(self):
-
- pods = [
-
-            # missing region_name and az_name
- {
- "pod":
- {
- "dc_name": "dc1",
- "pod_az_name": "az1"
- },
- "expected_error": 422
- },
-
-            # missing region_name
- {
- "pod":
- {
- "pod_az_name": "az1",
- "dc_name": "dc1",
- "az_name": "az1"
- },
- "expected_error": 422
- },
-
-            # missing region_name, empty az_name
- {
- "pod":
- {
- "pod_az_name": "az1",
- "dc_name": "dc1",
- "az_name": "",
- },
- "expected_error": 422
- },
-
-            # empty region_name, missing az_name
- {
- "pod":
- {
- "region_name": "",
- "pod_az_name": "az1",
- "dc_name": "dc1"
- },
- "expected_error": 422
- },
-
- # az & pod == ""
- {
- "pod":
- {
- "region_name": "",
- "pod_az_name": "az1",
- "dc_name": "dc1",
- "az_name": ""
- },
- "expected_error": 422
- },
-
-            # empty region_name
- {
- "pod":
- {
- "region_name": "",
- "pod_az_name": "az1",
- "dc_name": "dc1",
- "az_name": "az1"
-
- },
- "expected_error": 422
- }
-
- ]
-
- self._test_and_check(pods)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_post_duplicate_top_region(self):
-
- pods = [
-
-            # create TopRegion for the first time
- {
- "pod":
- {
- "region_name": "TopRegion",
- "pod_az_name": "az1",
- "dc_name": "dc1"
- },
- "expected_error": 200
- },
-
- {
- "pod":
- {
- "region_name": "TopRegion2",
- "pod_az_name": "",
- "dc_name": "dc1"
- },
- "expected_error": 409
- },
-
- ]
-
- self._test_and_check(pods)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_post_duplicate_pod(self):
-
- pods = [
-
- {
- "pod":
- {
- "region_name": "Pod1",
- "pod_az_name": "az1",
- "dc_name": "dc1",
- "az_name": "AZ1"
- },
- "expected_error": 200
- },
-
- {
- "pod":
- {
- "region_name": "Pod1",
- "pod_az_name": "az2",
- "dc_name": "dc2",
- "az_name": "AZ1"
- },
- "expected_error": 409
- },
-
- ]
-
- self._test_and_check(pods)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_post_pod_duplicate_top_region(self):
-
- pods = [
-
-            # create TopRegion for the first time
- {
- "pod":
- {
- "region_name": "TopRegion",
- "pod_az_name": "az1",
- "dc_name": "dc1"
- },
- "expected_error": 200
- },
-
- {
- "pod":
- {
- "region_name": "TopRegion",
- "pod_az_name": "az2",
- "dc_name": "dc2",
- "az_name": "AZ1"
- },
- "expected_error": 409
- },
-
- ]
-
- self._test_and_check(pods)
-
- def _test_and_check(self, pods):
-
- for test_pod in pods:
- response = self.app.post_json(
- '/v1.0/pods',
- dict(pod=test_pod['pod']),
- expect_errors=True)
-
- self.assertEqual(response.status_int,
- test_pod['expected_error'])
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_get_all(self):
-
- pods = [
-
-            # create TopRegion for the first time
- {
- "pod":
- {
- "region_name": "TopRegion",
- "pod_az_name": "",
- "dc_name": "dc1",
- "az_name": ""
- },
- "expected_error": 200
- },
-
- {
- "pod":
- {
- "region_name": "Pod1",
- "pod_az_name": "az1",
- "dc_name": "dc2",
- "az_name": "AZ1"
- },
- "expected_error": 200
- },
-
- {
- "pod":
- {
- "region_name": "Pod2",
- "pod_az_name": "az1",
- "dc_name": "dc2",
- "az_name": "AZ1"
- },
- "expected_error": 200
- },
-
- ]
-
- self._test_and_check(pods)
-
- response = self.app.get('/v1.0/pods')
-
- self.assertEqual(response.status_int, 200)
- self.assertIn('TopRegion', response)
- self.assertIn('Pod1', response)
- self.assertIn('Pod2', response)
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_admin_context)
- def test_get_delete_one(self):
-
- pods = [
-
- {
- "pod":
- {
- "region_name": "Pod1",
- "pod_az_name": "az1",
- "dc_name": "dc2",
- "az_name": "AZ1"
- },
- "expected_error": 200,
- },
-
- {
- "pod":
- {
- "region_name": "Pod2",
- "pod_az_name": "az1",
- "dc_name": "dc2",
- "az_name": "AZ1"
- },
- "expected_error": 200,
- },
-
- {
- "pod":
- {
- "region_name": "Pod3",
- "pod_az_name": "az1",
- "dc_name": "dc2",
- "az_name": "AZ2"
- },
- "expected_error": 200,
- },
-
- ]
-
- self._test_and_check(pods)
-
- response = self.app.get('/v1.0/pods')
- self.assertEqual(response.status_int, 200)
-
- return_pods = response.json
-
- for ret_pod in return_pods['pods']:
-
- _id = ret_pod['pod_id']
- single_ret = self.app.get('/v1.0/pods/' + str(_id))
-
- self.assertEqual(single_ret.status_int, 200)
-
- one_pod_ret = single_ret.json
- get_one_pod = one_pod_ret['pod']
-
- self.assertEqual(get_one_pod['pod_id'],
- ret_pod['pod_id'])
-
- self.assertEqual(get_one_pod['region_name'],
- ret_pod['region_name'])
-
- self.assertEqual(get_one_pod['pod_az_name'],
- ret_pod['pod_az_name'])
-
- self.assertEqual(get_one_pod['dc_name'],
- ret_pod['dc_name'])
-
- self.assertEqual(get_one_pod['az_name'],
- ret_pod['az_name'])
-
- @patch.object(context, 'extract_context_from_environ',
- new=fake_non_admin_context)
- def test_non_admin_action(self):
-
- pods = [
- {
- "pod":
- {
- "region_name": "Pod1",
- "pod_az_name": "az1",
- "dc_name": "dc2",
- "az_name": "AZ1"
- },
- "expected_error": 401,
- },
- ]
- self._test_and_check(pods)
-
- response = self.app.get('/v1.0/pods/1234567890',
- expect_errors=True)
- self.assertEqual(response.status_int, 401)
-
- response = self.app.get('/v1.0/pods',
- expect_errors=True)
- self.assertEqual(response.status_int, 401)
-
- response = self.app.delete('/v1.0/pods/1234567890',
- expect_errors=True)
- self.assertEqual(response.status_int, 401)
diff --git a/tricircle/tests/functional/api/controllers/test_root.py b/tricircle/tests/functional/api/controllers/test_root.py
deleted file mode 100644
index db071521..00000000
--- a/tricircle/tests/functional/api/controllers/test_root.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright (c) 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pecan
-from pecan.configuration import set_config
-from pecan.testing import load_test_app
-
-from oslo_config import cfg
-from oslo_config import fixture as fixture_config
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-
-from tricircle.api import app
-from tricircle.tests import base
-
-
-OPT_GROUP_NAME = 'keystone_authtoken'
-cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
-
-
-class API_FunctionalTest(base.TestCase):
-
- def setUp(self):
- super(API_FunctionalTest, self).setUp()
-
- self.addCleanup(set_config, {}, overwrite=True)
-
- cfg.CONF.register_opts(app.common_opts)
-
- self.CONF = self.useFixture(fixture_config.Config()).conf
-
- self.CONF.set_override('auth_strategy', 'noauth')
-
- self.app = self._make_app()
-
- def _make_app(self, enable_acl=False):
- self.config = {
- 'app': {
- 'root': 'tricircle.api.controllers.root.RootController',
- 'modules': ['tricircle.api'],
- 'enable_acl': enable_acl,
- 'errors': {
- 400: '/error',
- '__force_dict__': True
- }
- },
- }
-
- return load_test_app(self.config)
-
- def tearDown(self):
- super(API_FunctionalTest, self).tearDown()
- cfg.CONF.unregister_opts(app.common_opts)
- pecan.set_config({}, overwrite=True)
-
-
-class TestRootController(API_FunctionalTest):
- """Test version listing on root URI."""
-
- def test_get(self):
- response = self.app.get('/')
- self.assertEqual(response.status_int, 200)
- json_body = jsonutils.loads(response.body)
- versions = json_body.get('versions')
- self.assertEqual(1, len(versions))
- self.assertEqual(versions[0]["id"], "v1.0")
-
- def _test_method_returns_405(self, method):
- api_method = getattr(self.app, method)
- response = api_method('/', expect_errors=True)
- self.assertEqual(response.status_int, 405)
-
- def test_post(self):
- self._test_method_returns_405('post')
-
- def test_put(self):
- self._test_method_returns_405('put')
-
- def test_patch(self):
- self._test_method_returns_405('patch')
-
- def test_delete(self):
- self._test_method_returns_405('delete')
-
- def test_head(self):
- self._test_method_returns_405('head')
-
-
-class TestV1Controller(API_FunctionalTest):
-
- def test_get(self):
- response = self.app.get('/v1.0')
- self.assertEqual(response.status_int, 200)
- json_body = jsonutils.loads(response.body)
- version = json_body.get('version')
- self.assertEqual(version, "1.0")
-
- def _test_method_returns_405(self, method):
- api_method = getattr(self.app, method)
- response = api_method('/v1.0', expect_errors=True)
- self.assertEqual(response.status_int, 405)
-
- def test_post(self):
- self._test_method_returns_405('post')
-
- def test_put(self):
- self._test_method_returns_405('put')
-
- def test_patch(self):
- self._test_method_returns_405('patch')
-
- def test_delete(self):
- self._test_method_returns_405('delete')
-
- def test_head(self):
- self._test_method_returns_405('head')
-
-
-class TestErrors(API_FunctionalTest):
-
- def test_404(self):
- response = self.app.get('/fake_path', expect_errors=True)
- self.assertEqual(response.status_int, 404)
-
- def test_bad_method(self):
- response = self.app.patch('/v1.0/123',
- expect_errors=True)
- self.assertEqual(response.status_int, 404)
-
-
-class TestRequestID(API_FunctionalTest):
-
- def test_request_id(self):
- response = self.app.get('/')
- self.assertIn('x-openstack-request-id', response.headers)
- self.assertTrue(
- response.headers['x-openstack-request-id'].startswith('req-'))
- id_part = response.headers['x-openstack-request-id'].split('req-')[1]
- self.assertTrue(uuidutils.is_uuid_like(id_part))
-
-
-class TestKeystoneAuth(API_FunctionalTest):
-
- def setUp(self):
- super(API_FunctionalTest, self).setUp()
-
- self.addCleanup(set_config, {}, overwrite=True)
-
- cfg.CONF.register_opts(app.common_opts)
-
- self.CONF = self.useFixture(fixture_config.Config()).conf
-
- cfg.CONF.set_override('auth_strategy', 'keystone')
-
- self.app = self._make_app()
-
- def test_auth_enforced(self):
- response = self.app.get('/', expect_errors=True)
- self.assertEqual(response.status_int, 401)
diff --git a/tricircle/tests/network_sdk/__init__.py b/tricircle/tests/network_sdk/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/network_sdk/network_service.py b/tricircle/tests/network_sdk/network_service.py
deleted file mode 100644
index a923a243..00000000
--- a/tricircle/tests/network_sdk/network_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack import service_description
-from openstack import service_filter
-
-from tricircle.tests.network_sdk.v2 import _proxy
-
-
-class NetworkService(service_description.ServiceDescription):
- """The network service."""
-
- valid_versions = [service_filter.ValidVersion('v2', 'v2.0')]
- proxy_class = _proxy.Proxy
-
- def __init__(self, version=None):
- """Create a network service."""
- super(NetworkService, self).__init__(service_type='network_sdk')
diff --git a/tricircle/tests/network_sdk/v2/__init__.py b/tricircle/tests/network_sdk/v2/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/network_sdk/v2/_proxy.py b/tricircle/tests/network_sdk/v2/_proxy.py
deleted file mode 100644
index bef4ee97..00000000
--- a/tricircle/tests/network_sdk/v2/_proxy.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack.network.v2 import _proxy
-
-import tricircle.tests.network_sdk.v2.flow_classifier as _fc
-import tricircle.tests.network_sdk.v2.port_chain as _pc
-import tricircle.tests.network_sdk.v2.port_pair as _pp
-import tricircle.tests.network_sdk.v2.port_pair_group as _ppg
-import tricircle.tests.network_sdk.v2.trunk as _trunk
-
-
-class Proxy(_proxy.Proxy):
- # trunk
- def create_trunk(self, **attrs):
- return self._create(_trunk.Trunk, **attrs)
-
- def delete_trunk(self, trunk, ignore_missing=True):
- self._delete(_trunk.Trunk, trunk, ignore_missing=ignore_missing)
-
- def update_trunk(self, trunk, **attrs):
- return self._update(_trunk.Trunk, trunk, **attrs)
-
- def trunks(self, **query):
- return self._list(_trunk.Trunk, **query)
-
- def add_subports(self, trunk, subports=[]):
- trunk = self._get_resource(_trunk.Trunk, trunk)
- body = {'sub_ports': subports}
- return trunk.add_subports(self, **body)
-
- def remove_subports(self, trunk, subports=[]):
- trunk = self._get_resource(_trunk.Trunk, trunk)
- body = {'sub_ports': subports}
- return trunk.remove_subports(self, **body)
-
- # port pair
- def create_port_pair(self, **attrs):
- return self._create(_pp.PortPair, **attrs)
-
- def delete_port_pair(self, pp, ignore_missing=True):
- self._delete(_pp.PortPair, pp, ignore_missing=ignore_missing)
-
- def update_port_pair(self, pp, **attrs):
- return self._update(_pp.PortPair, pp, **attrs)
-
- def port_pairs(self, **query):
- return self._list(_pp.PortPair, **query)
-
- # port pair group
- def create_port_pair_group(self, **attrs):
- return self._create(_ppg.PortPairGroup, **attrs)
-
- def delete_port_pair_group(self, ppg, ignore_missing=True):
- self._delete(_ppg.PortPairGroup, ppg, ignore_missing=ignore_missing)
-
- def update_port_pair_group(self, ppg, **attrs):
- return self._update(_ppg.PortPairGroup, ppg, **attrs)
-
- def port_pair_groups(self, **query):
- return self._list(_ppg.PortPairGroup, **query)
-
- # port chain
- def create_port_chain(self, **attrs):
- return self._create(_pc.PortChain, **attrs)
-
- def delete_port_chain(self, pc, ignore_missing=True):
- self._delete(_pc.PortChain, pc, ignore_missing=ignore_missing)
-
- def update_port_chain(self, pc, **attrs):
- return self._update(_pc.PortChain, pc, **attrs)
-
- def port_chains(self, **query):
- return self._list(_pc.PortChain, **query)
-
- # flow classifier
- def create_flow_classifier(self, **attrs):
- return self._create(_fc.FlowClassifier, **attrs)
-
- def delete_flow_classifier(self, fc, ignore_missing=True):
- self._delete(_fc.FlowClassifier, fc, ignore_missing=ignore_missing)
-
- def update_flow_classifier(self, fc, **attrs):
- return self._update(_fc.FlowClassifier, fc, **attrs)
-
- def flow_classifiers(self, **query):
- return self._list(_fc.FlowClassifier, **query)
diff --git a/tricircle/tests/network_sdk/v2/flow_classifier.py b/tricircle/tests/network_sdk/v2/flow_classifier.py
deleted file mode 100644
index cf40ba27..00000000
--- a/tricircle/tests/network_sdk/v2/flow_classifier.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack import resource
-
-
-class FlowClassifier(resource.Resource):
- resource_key = 'flow_classifier'
- resources_key = 'flow_classifiers'
- base_path = '/sfc/flow_classifiers'
-
- allow_create = True
- allow_get = True
- allow_update = True
- allow_delete = True
- allow_list = True
-
- _query_mapping = resource.QueryParameters('name')
-
- name = resource.Body('name')
- description = resource.Body('description')
-    ethertype = resource.Body('ethertype')
- protocol = resource.Body('protocol')
- source_port_range_min = resource.Body('source_port_range_min')
- source_port_range_max = resource.Body('source_port_range_max')
- destination_port_range_min = resource.Body('destination_port_range_min')
- destination_port_range_max = resource.Body('destination_port_range_max')
- source_ip_prefix = resource.Body('source_ip_prefix')
- destination_ip_prefix = resource.Body('destination_ip_prefix')
- logical_source_port = resource.Body('logical_source_port')
- logical_destination_port = resource.Body('logical_destination_port')
- l7_parameters = resource.Body('l7_parameters', type=dict)
diff --git a/tricircle/tests/network_sdk/v2/port_chain.py b/tricircle/tests/network_sdk/v2/port_chain.py
deleted file mode 100644
index a7ab67e9..00000000
--- a/tricircle/tests/network_sdk/v2/port_chain.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack import resource
-
-
-class PortChain(resource.Resource):
- resource_key = 'port_chain'
- resources_key = 'port_chains'
- base_path = '/sfc/port_chains'
-
- allow_create = True
- allow_get = True
- allow_update = True
- allow_delete = True
- allow_list = True
-
- _query_mapping = resource.QueryParameters('name')
-
- name = resource.Body('name')
- description = resource.Body('description')
- port_pair_groups = resource.Body('port_pair_groups', type=list)
- flow_classifiers = resource.Body('flow_classifiers', type=list)
- chain_parameters = resource.Body('chain_parameters', type=dict)
- chain_id = resource.Body('chain_id')
diff --git a/tricircle/tests/network_sdk/v2/port_pair.py b/tricircle/tests/network_sdk/v2/port_pair.py
deleted file mode 100644
index a78e7373..00000000
--- a/tricircle/tests/network_sdk/v2/port_pair.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack import resource
-
-
-class PortPair(resource.Resource):
- resource_key = 'port_pair'
- resources_key = 'port_pairs'
- base_path = '/sfc/port_pairs'
-
- allow_create = True
- allow_get = True
- allow_update = True
- allow_delete = True
- allow_list = True
-
- _query_mapping = resource.QueryParameters('name')
-
- name = resource.Body('name')
- description = resource.Body('description')
- ingress = resource.Body('ingress')
- egress = resource.Body('egress')
- service_function_parameters = resource.Body('service_function_parameters',
- type=dict)
diff --git a/tricircle/tests/network_sdk/v2/port_pair_group.py b/tricircle/tests/network_sdk/v2/port_pair_group.py
deleted file mode 100644
index 990da55b..00000000
--- a/tricircle/tests/network_sdk/v2/port_pair_group.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack import resource
-
-
-class PortPairGroup(resource.Resource):
- resource_key = 'port_pair_group'
- resources_key = 'port_pair_groups'
- base_path = '/sfc/port_pair_groups'
-
- allow_create = True
- allow_get = True
- allow_update = True
- allow_delete = True
- allow_list = True
-
- _query_mapping = resource.QueryParameters('name')
-
- name = resource.Body('name')
- description = resource.Body('description')
- port_pairs = resource.Body('port_pairs', type=list)
- port_pair_group_parameters = resource.Body('port_pair_group_parameters',
- type=dict)
diff --git a/tricircle/tests/network_sdk/v2/trunk.py b/tricircle/tests/network_sdk/v2/trunk.py
deleted file mode 100644
index a93d7057..00000000
--- a/tricircle/tests/network_sdk/v2/trunk.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack import resource
-from openstack import utils
-
-
-class Trunk(resource.Resource):
- resource_key = 'trunk'
- resources_key = 'trunks'
- base_path = '/trunks'
-
- allow_create = True
- allow_get = True
- allow_update = True
- allow_delete = True
- allow_list = True
-
- status = resource.Body('status')
- name = resource.Body('name')
- port_id = resource.Body('port_id')
- sub_ports = resource.Body('sub_ports', type=list)
-
- def add_subports(self, session, **body):
- url = utils.urljoin(self.base_path, self.id, 'add_subports')
- resp = session.put(url, endpoint_filter={'service_type': 'network'},
- json=body)
- return resp.json()
-
- def remove_subports(self, session, **body):
- url = utils.urljoin(self.base_path, self.id, 'remove_subports')
- resp = session.put(url, endpoint_filter={'service_type': 'network'},
- json=body)
- return resp.json()
diff --git a/tricircle/tests/tricircle_sdk/__init__.py b/tricircle/tests/tricircle_sdk/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/tricircle_sdk/multiregion_network_service.py b/tricircle/tests/tricircle_sdk/multiregion_network_service.py
deleted file mode 100644
index aad1bcaa..00000000
--- a/tricircle/tests/tricircle_sdk/multiregion_network_service.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack import service_description
-from openstack import service_filter
-
-from tricircle.tests.tricircle_sdk.v1 import _proxy
-
-
-class MultiregionNetworkService(service_description.ServiceDescription):
- valid_versions = [service_filter.ValidVersion('v1')]
- proxy_class = _proxy.Proxy
-
- def __init__(self, version=None):
- # TODO(zhiyuan) register a proper service type in keystone
- super(MultiregionNetworkService, self).__init__(
- service_type='tricircle_sdk')
diff --git a/tricircle/tests/tricircle_sdk/v1/__init__.py b/tricircle/tests/tricircle_sdk/v1/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/tricircle_sdk/v1/_proxy.py b/tricircle/tests/tricircle_sdk/v1/_proxy.py
deleted file mode 100644
index 1f808903..00000000
--- a/tricircle/tests/tricircle_sdk/v1/_proxy.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack import proxy
-
-from tricircle.tests.tricircle_sdk.v1 import job
-
-
-class Proxy(proxy.BaseProxy):
- def jobs(self, **query):
- return self._list(job.Job, **query)
diff --git a/tricircle/tests/tricircle_sdk/v1/job.py b/tricircle/tests/tricircle_sdk/v1/job.py
deleted file mode 100644
index 662b4a29..00000000
--- a/tricircle/tests/tricircle_sdk/v1/job.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack import resource as resource2
-
-
-class Job(resource2.Resource):
- resource_key = 'job'
- resources_key = 'jobs'
- base_path = '/jobs'
-
- allow_list = True
- allow_get = True
-
- resource = resource2.Body('resource', type=dict)
- type = resource2.Body('type')
- timestamp = resource2.Body('timestamp')
- project_id = resource2.Body('project_id')
- status = resource2.Body('status')
diff --git a/tricircle/tests/unit/__init__.py b/tricircle/tests/unit/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/unit/api/__init__.py b/tricircle/tests/unit/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/unit/api/controllers/__init__.py b/tricircle/tests/unit/api/controllers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/unit/api/controllers/test_job.py b/tricircle/tests/unit/api/controllers/test_job.py
deleted file mode 100755
index e512e8fe..00000000
--- a/tricircle/tests/unit/api/controllers/test_job.py
+++ /dev/null
@@ -1,755 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import mock
-from mock import patch
-from oslo_config import cfg
-import oslo_db.exception as db_exc
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
-import re
-from six.moves import xrange
-import time
-
-import pecan
-
-from tricircle.api import app
-from tricircle.api.controllers import job
-from tricircle.common import constants
-from tricircle.common import context
-from tricircle.common import policy
-from tricircle.common import xrpcapi
-from tricircle.db import api as db_api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.tests import base
-
-
-class FakeRPCAPI(xrpcapi.XJobAPI):
- def invoke_method(self, ctxt, project_id, method, _type, id):
- db_api.new_job(ctxt, project_id, _type, id)
-
-
-class FakeAsyncJobController(job.AsyncJobController):
- def __init__(self):
- self.xjob_handler = FakeRPCAPI()
-
-
-class FakeResponse(object):
- def __new__(cls, code=500):
- cls.status = code
- cls.status_code = code
- return super(FakeResponse, cls).__new__(cls)
-
-
-def mock_db_test_stub(i):
- if i == 0:
- raise db_exc.DBDeadlock
-
-
-class AsyncJobControllerTest(base.TestCase):
- def setUp(self):
- super(AsyncJobControllerTest, self).setUp()
-
- cfg.CONF.clear()
- cfg.CONF.register_opts(app.common_opts)
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- self.controller = FakeAsyncJobController()
- self.context = context.get_admin_context()
- self.job_resource_map = constants.job_resource_map
- policy.populate_default_rules()
-
- @patch.object(db_api, 'db_test_stub', new=mock_db_test_stub)
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(context, 'extract_context_from_environ')
- def test_post(self, mock_context):
- mock_context.return_value = self.context
-
- # cover all job types
- for job_type in self.job_resource_map.keys():
- job = self._prepare_job_element(job_type)
-
- kw_job = {'job': job}
-
- # failure case, only admin can create the job
- self.context.is_admin = False
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
-
- # failure case, request body not found
- kw_job_1 = {'job_1': job}
- res = self.controller.post(**kw_job_1)
- self._validate_error_code(res, 400)
-
- # failure case, wrong job type parameter
- job_type_backup = job.pop('type')
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 400)
-
- job['type'] = ''
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 400)
-
- job['type'] = job_type_backup + '_1'
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 400)
-
- job['type'] = job_type_backup
-
- # failure case, wrong resource parameter
- job_resource_backup = job.pop('resource')
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 400)
-
- job['resource'] = copy.deepcopy(job_resource_backup)
- job['resource'].popitem()
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 400)
-
- fake_resource = 'fake_resource'
- job['resource'][fake_resource] = fake_resource
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 400)
-
- job['resource'] = job_resource_backup
-
- # failure case, wrong project id parameter
- project_id_backup = job.pop('project_id')
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 400)
-
- job['project_id'] = ''
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 400)
-
- job['project_id'] = uuidutils.generate_uuid()
- res = self.controller.post(**kw_job)
- self._validate_error_code(res, 400)
-
- job['project_id'] = project_id_backup
-
-            # successful case, create an entirely new job. Because the job
-            # status returned from the controller has been formatted, we
-            # validate not only the database records but also the return
-            # value of the controller.
- job_1 = self.controller.post(**kw_job)['job']
- job_in_db_1 = db_api.get_job(self.context, job_1['id'])
- self.assertEqual(job_type, job_in_db_1['type'])
- self.assertEqual(job['project_id'], job_in_db_1['project_id'])
- self.assertEqual(constants.JS_New, job_in_db_1['status'])
-
- self.assertEqual('NEW', job_1['status'])
- self.assertEqual(len(constants.job_resource_map[job['type']]),
- len(job_1['resource']))
- self.assertFalse('resource_id' in job_1)
- self.assertFalse('extra_id' in job_1)
- db_api.delete_job(self.context, job_1['id'])
-
-            # successful case, the target job already exists in the job table
-            # with status NEW, so the newer job will be picked up by the
-            # job handler.
- job_2 = self.controller.post(**kw_job)['job']
- job_in_db_2 = db_api.get_job(self.context, job_2['id'])
- job_3 = self.controller.post(**kw_job)['job']
- job_in_db_3 = db_api.get_job(self.context, job_3['id'])
-
- self.assertEqual(job_type, job_in_db_2['type'])
- self.assertEqual(job['project_id'], job_in_db_2['project_id'])
- self.assertEqual(constants.JS_New, job_in_db_2['status'])
-
- self.assertEqual('NEW', job_2['status'])
- self.assertEqual(len(constants.job_resource_map[job['type']]),
- len(job_2['resource']))
- self.assertFalse('resource_id' in job_2)
- self.assertFalse('extra_id' in job_2)
-
- self.assertEqual(job_type, job_in_db_3['type'])
- self.assertEqual(job['project_id'], job_in_db_3['project_id'])
- self.assertEqual(constants.JS_New, job_in_db_3['status'])
-
- self.assertEqual('NEW', job_3['status'])
- self.assertEqual(len(constants.job_resource_map[job['type']]),
- len(job_3['resource']))
- self.assertFalse('resource_id' in job_3)
- self.assertFalse('extra_id' in job_3)
-
- db_api.finish_job(self.context, job_3['id'], False,
- timeutils.utcnow())
- db_api.delete_job(self.context, job_3['id'])
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(context, 'extract_context_from_environ')
- def test_get_one_and_get_all(self, mock_context):
- self.context.project_id = uuidutils.generate_uuid()
- mock_context.return_value = self.context
-
- # failure case, only admin can list the job's info
- self.context.is_admin = False
- res = self.controller.get_one("schemas")
- self._validate_error_code(res, 403)
- res = self.controller.get_one("detail")
- self._validate_error_code(res, 403)
- res = self.controller.get_one(uuidutils.generate_uuid())
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
-
- # failure case, parameter error
- res = self.controller.get_one("schemas_1")
- self._validate_error_code(res, 404)
-
- res = self.controller.get_one(uuidutils.generate_uuid())
- self._validate_error_code(res, 404)
-
- # successful case, set id="schemas" to get job schemas
- job_schemas_2 = self.controller.get_one("schemas")
- job_schemas_3 = []
- for job_type in self.job_resource_map.keys():
- job = {}
- resource = []
- for resource_type, resource_id in self.job_resource_map[job_type]:
- resource.append(resource_id)
- job['resource'] = resource
- job['type'] = job_type
- job_schemas_3.append(job)
-
- self.assertEqual(job_schemas_3, job_schemas_2['schemas'])
-
- # successful case, set id="detail" to get all jobs.
- # first, we need to create jobs in job table.
- amount_of_all_jobs = len(self.job_resource_map.keys())
- all_job_ids = {}
- index = 0
- for job_type in self.job_resource_map.keys():
- job = self._prepare_job_element(job_type)
-            # for test convenience, all jobs have the same project ID
- job['project_id'] = self.context.project_id
-
- resource_id = '#'.join([job['resource'][resource_id]
- for resource_type, resource_id
- in self.job_resource_map[job_type]])
- job_1 = db_api.new_job(self.context,
- job['project_id'], job_type,
- resource_id)
- all_job_ids[index] = job_1['id']
- index = index + 1
- time.sleep(1)
-
-            # validate that get_one(id=job_id) works when id is a job ID
- job_2 = self.controller.get_one(job_1['id'])['job']
- self.assertEqual(job_1['type'], job_2['type'])
- self.assertEqual(job_1['project_id'], job_2['project_id'])
- self.assertEqual("NEW", job_2['status'])
-
- jobs_1 = self.controller.get_one("detail")
- self.assertEqual(amount_of_all_jobs, len(jobs_1['jobs']))
-
-        # create jobs in the job log table, in order to validate that
-        # get_one(id=detail) can also get the jobs from the job log
- amount_of_succ_jobs = int(len(all_job_ids) / 2)
- for i in xrange(amount_of_succ_jobs):
- db_api.finish_job(self.context, all_job_ids[i], True,
- timeutils.utcnow())
- time.sleep(1)
-
- jobs_2 = self.controller.get_one("detail")
- self.assertEqual(amount_of_all_jobs, len(jobs_2['jobs']))
-
- job_status_filter_1 = {'status': 'success'}
- jobs_3 = self.controller.get_one("detail", **job_status_filter_1)
- self.assertEqual(amount_of_succ_jobs, len(jobs_3['jobs']))
-
- # set marker in job log
- res = self.controller.get_all(marker=jobs_3['jobs'][0]['id'],
- limit=amount_of_succ_jobs)
- self.assertEqual(amount_of_succ_jobs - 1, len(res['jobs']))
-
- job_status_filter_2 = {'status': 'new'}
- amount_of_new_jobs = amount_of_all_jobs - amount_of_succ_jobs
- jobs_4 = self.controller.get_one("detail", **job_status_filter_2)
- self.assertEqual(amount_of_new_jobs, len(jobs_4['jobs']))
-
- # set marker in job
- res = self.controller.get_all(marker=jobs_4['jobs'][0]['id'],
- limit=amount_of_new_jobs)
- self.assertEqual(amount_of_new_jobs, len(res['jobs']))
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(context, 'extract_context_from_environ')
- def test_get_all_jobs_with_pagination(self, mock_context):
- self.context.project_id = uuidutils.generate_uuid()
- mock_context.return_value = self.context
-
- # map job type to project id for later project id filter validation.
- job_project_id_map = {}
- amount_of_all_jobs = len(self.job_resource_map.keys())
- amount_of_running_jobs = 3
- count = 1
-
- # cover all job types.
- for job_type in self.job_resource_map.keys():
- job = self._prepare_job_element(job_type)
- if count > 1:
-                # for test convenience, all jobs except the first use
-                # context.project_id, so only the first job has a
-                # project ID different from context.project_id
- job['project_id'] = self.context.project_id
-
- job_project_id_map[job_type] = job['project_id']
-
- resource_id = '#'.join([job['resource'][resource_id]
- for resource_type, resource_id
- in self.job_resource_map[job_type]])
- if count <= amount_of_running_jobs:
- db_api.register_job(self.context,
- job['project_id'], job_type,
- resource_id)
-                # because jobs are sorted by timestamp, without a time delay
-                # all jobs would be created at the same time and
-                # paginate_query could not distinguish them
- time.sleep(1)
- else:
- db_api.new_job(self.context,
- job['project_id'], job_type,
- resource_id)
- time.sleep(1)
- count = count + 1
-
- # query the jobs with several kinds of filters.
- # supported filters: project id, job status, job type.
- job_status_filter_1 = {'status': 'new'}
- job_status_filter_2 = {'status': 'fail'}
- job_status_filter_3 = {'status': 'running'}
- invalid_filter = {'status': "new-x"}
- unsupported_filter = {'fake_filter': "fake_filter"}
- count = 1
- for job_type in self.job_resource_map.keys():
- job_type_filter_1 = {'type': job_type}
- job_type_filter_2 = {'type': job_type + '_1'}
-
- # failure case, only admin can list the jobs
- self.context.is_admin = False
- res = self.controller.get_all()
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
-
-            # test that when the client specifies a project ID filter
-            # different from the one in the context, the filter is ignored
-            # and the project ID from the context is used instead.
- filter1 = {'project_id': uuidutils.generate_uuid()}
- res1 = self.controller.get_all(**filter1)
-
- filter2 = {'project_id': self.context.project_id}
- res2 = self.controller.get_all(**filter2)
- self.assertEqual(len(res2['jobs']), len(res1['jobs']))
-
- res3 = self.controller.get_all()
-            # there is one job whose project ID is different from
-            # context.project_id. As the list operation only retrieves
-            # jobs whose project ID equals context.project_id, this
-            # special job entry won't be retrieved.
- self.assertEqual(len(res3['jobs']), len(res2['jobs']))
-
- # successful case, filter by job type
- jobs_job_type_filter_1 = self.controller.get_all(
- **job_type_filter_1)
- if count == 1:
- self.assertEqual(0, len(jobs_job_type_filter_1['jobs']))
- else:
- self.assertEqual(1, len(jobs_job_type_filter_1['jobs']))
-
- jobs_job_type_filter_2 = self.controller.get_all(
- **job_type_filter_2)
- self.assertEqual(0, len(jobs_job_type_filter_2['jobs']))
-
- # successful case, filter by job status and job type
- if count <= amount_of_running_jobs:
- all_filters = dict(list(job_status_filter_3.items()) +
- list(job_type_filter_1.items()))
- jobs_all_filters = self.controller.get_all(**all_filters)
- if count == 1:
- self.assertEqual(0, len(jobs_all_filters['jobs']))
- else:
- self.assertEqual(1, len(jobs_all_filters['jobs']))
- else:
- all_filters = dict(list(job_status_filter_1.items()) +
- list(job_type_filter_1.items()))
- jobs_all_filters = self.controller.get_all(**all_filters)
- self.assertEqual(1, len(jobs_all_filters['jobs']))
-
- # successful case, contradictory filter
- contradict_filters = dict(list(job_status_filter_2.items()) +
- list((job_type_filter_2.items())))
- jobs_contradict_filters = self.controller.get_all(
- **contradict_filters)
- self.assertEqual(0, len(jobs_contradict_filters['jobs']))
- count = count + 1
-
- # failure case, unsupported filter
- res = self.controller.get_all(**unsupported_filter)
- self._validate_error_code(res, 400)
-
- # successful case, invalid filter
- jobs_invalid_filter = self.controller.get_all(**invalid_filter)
- self.assertEqual(0, len(jobs_invalid_filter['jobs']))
-
- # successful case, list jobs without filters
- jobs_empty_filters = self.controller.get_all()
- self.assertEqual(amount_of_all_jobs - 1,
- len(jobs_empty_filters['jobs']))
-
- # successful case, filter by job status
- jobs_job_status_filter_1 = self.controller.get_all(
- **job_status_filter_1)
- self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
- len(jobs_job_status_filter_1['jobs']))
-
- jobs_job_status_filter_2 = self.controller.get_all(
- **job_status_filter_2)
- self.assertEqual(0, len(jobs_job_status_filter_2['jobs']))
-
- jobs_job_status_filter_3 = self.controller.get_all(
- **job_status_filter_3)
- self.assertEqual(amount_of_running_jobs - 1,
- len(jobs_job_status_filter_3['jobs']))
-
- # test for paginate query
- job_paginate_no_filter_1 = self.controller.get_all()
- self.assertEqual(amount_of_all_jobs - 1,
- len(job_paginate_no_filter_1['jobs']))
-
- # no limit no marker
- job_paginate_filter_1 = {'status': 'new'}
- jobs_paginate_filter_1 = self.controller.get_all(
- **job_paginate_filter_1)
- self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
- len(jobs_paginate_filter_1['jobs']))
-
- # failed cases, unsupported limit type
- job_paginate_filter_2 = {'limit': '2test'}
- res = self.controller.get_all(**job_paginate_filter_2)
- self._validate_error_code(res, 400)
-
- # successful cases
- job_paginate_filter_4 = {'status': 'new', 'limit': '2'}
- res = self.controller.get_all(**job_paginate_filter_4)
- self.assertEqual(2, len(res['jobs']))
-
- job_paginate_filter_5 = {'status': 'new', 'limit': 2}
- res = self.controller.get_all(**job_paginate_filter_5)
- self.assertEqual(2, len(res['jobs']))
-
- job_paginate_filter_6 = {'status': 'running', 'limit': 1}
- res1 = self.controller.get_all(**job_paginate_filter_6)
-
- marker = res1['jobs'][0]['id']
- job_paginate_filter_7 = {'status': 'running', 'marker': marker}
- res2 = self.controller.get_all(**job_paginate_filter_7)
- self.assertEqual(amount_of_running_jobs - 1, len(res2['jobs']))
-
- job_paginate_filter_8 = {'status': 'new', 'limit': 3}
- res = self.controller.get_all(**job_paginate_filter_8)
- self.assertGreaterEqual(res['jobs'][0]['timestamp'],
- res['jobs'][1]['timestamp'])
- self.assertGreaterEqual(res['jobs'][1]['timestamp'],
- res['jobs'][2]['timestamp'])
-
- # unsupported marker type
- res = self.controller.get_all(marker=None)
- self.assertEqual(amount_of_all_jobs - 1, len(res['jobs']))
-
- res = self.controller.get_all(marker='-123')
- self._validate_error_code(res, 400)
-
- # marker not in job table and job log table
- job_paginate_filter_9 = {'marker': uuidutils.generate_uuid()}
- res = self.controller.get_all(**job_paginate_filter_9)
- self._validate_error_code(res, 400)
-
- # test marker and limit
- limit = 2
- pt = r'/v1.0/jobs\?limit=\w+&marker=([\w-]+)'
- job_paginate_filter = {'status': 'new', 'limit': limit}
- res = self.controller.get_all(**job_paginate_filter)
- while 'jobs_links' in res:
- m = re.match(pt, res['jobs_links'][0]['href'])
- marker = m.group(1)
- self.assertEqual(limit, len(res['jobs']))
- job_paginate_filter = {'status': 'new', 'limit': limit,
- 'marker': marker}
- res = self.controller.get_all(**job_paginate_filter)
-
- job_paginate_filter_10 = {'status': 'running'}
- res = self.controller.get_all(**job_paginate_filter_10)
- self.assertEqual(amount_of_running_jobs - 1, len(res['jobs']))
- # add some rows to job log table
- for i in xrange(amount_of_running_jobs - 1):
- db_api.finish_job(self.context, res['jobs'][i]['id'], True,
- timeutils.utcnow())
- time.sleep(1)
- res_success_log = db_api.list_jobs_from_log(self.context, None)
- self.assertEqual(amount_of_running_jobs - 1, len(res_success_log))
-
- res_in_job = db_api.list_jobs(self.context, None)
- self.assertEqual(amount_of_all_jobs - (amount_of_running_jobs - 1),
- len(res_in_job))
-
- job_paginate_filter_11 = {'limit': 2}
- res = self.controller.get_all(**job_paginate_filter_11)
- self.assertIsNotNone(res['jobs_links'][0]['href'])
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(pecan, 'response', new=mock.Mock)
- @patch.object(context, 'extract_context_from_environ')
- def test_delete(self, mock_context):
- mock_context.return_value = self.context
-
- # cover all job types.
-        # each iteration of the 'for' loop adds one item to the job log
-        # table; the count variable tracks the running total of job log
-        # entries.
- count = 1
- for job_type in self.job_resource_map.keys():
- job = self._prepare_job_element(job_type)
-
- resource_id = '#'.join([job['resource'][resource_id]
- for resource_type, resource_id
- in self.job_resource_map[job_type]])
-
- # failure case, only admin can delete the job
- job_1 = db_api.new_job(self.context, job['project_id'],
- job_type,
- resource_id)
- self.context.is_admin = False
- res = self.controller.delete(job_1['id'])
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
- db_api.delete_job(self.context, job_1['id'])
-
- # failure case, job not found
- res = self.controller.delete(-123)
- self._validate_error_code(res, 404)
-
- # failure case, delete a running job
- job_2 = db_api.register_job(self.context,
- job['project_id'],
- job_type, resource_id)
- job = db_api.get_job(self.context, job_2['id'])
- res = self.controller.delete(job_2['id'])
- self._validate_error_code(res, 400)
-
- # finish the job and delete it
- db_api.finish_job(self.context, job_2['id'], False,
- timeutils.utcnow())
- db_api.delete_job(self.context, job_2['id'])
-
-            # successful case, delete a successful job. A successful job in
-            # the job log can't be deleted; here the successful job is still
-            # in the job table.
- job_3 = self._prepare_job_element(job_type)
- resource_id_3 = '#'.join([job_3['resource'][resource_id_3]
- for resource_type_3, resource_id_3
- in self.job_resource_map[job_type]])
-
- job_4 = db_api.new_job(self.context,
- job_3['project_id'],
- job_type, resource_id_3)
-
- with self.context.session.begin():
- job_dict = {'status': constants.JS_Success,
- 'timestamp': timeutils.utcnow(),
- 'extra_id': uuidutils.generate_uuid()}
- core.update_resource(self.context, models.AsyncJob,
- job_4['id'], job_dict)
-
- job_4_succ = db_api.get_job(self.context, job_4['id'])
- self.controller.delete(job_4['id'])
-
- filters_job_4 = [
- {'key': 'type', 'comparator': 'eq',
- 'value': job_4_succ['type']},
- {'key': 'status', 'comparator': 'eq',
- 'value': job_4_succ['status']},
- {'key': 'resource_id', 'comparator': 'eq',
- 'value': job_4_succ['resource_id']},
- {'key': 'extra_id', 'comparator': 'eq',
- 'value': job_4_succ['extra_id']}]
- self.assertEqual(0, len(db_api.list_jobs(self.context,
- filters_job_4)))
- self.assertEqual(count,
- len(db_api.list_jobs_from_log(self.context)))
- count = count + 1
-
- # successful case, delete a new job
- job_5 = db_api.new_job(self.context,
- job['project_id'], job_type,
- resource_id)
- self.controller.delete(job_5['id'])
-
- filters_job_5 = [
- {'key': 'type', 'comparator': 'eq', 'value': job_5['type']},
- {'key': 'status', 'comparator': 'eq',
- 'value': job_5['status']},
- {'key': 'resource_id', 'comparator': 'eq',
- 'value': job_5['resource_id']},
- {'key': 'extra_id', 'comparator': 'eq',
- 'value': job_5['extra_id']}]
- self.assertEqual(0, len(db_api.list_jobs(self.context,
- filters_job_5)))
-
- # successful case, delete a failed job
- job_6 = db_api.new_job(self.context,
- job['project_id'], job_type,
- resource_id)
- db_api.finish_job(self.context, job_6['id'], False,
- timeutils.utcnow())
- job_6_failed = db_api.get_job(self.context, job_6['id'])
- self.controller.delete(job_6['id'])
- filters_job_6 = [
- {'key': 'type', 'comparator': 'eq',
- 'value': job_6_failed['type']},
- {'key': 'status', 'comparator': 'eq',
- 'value': job_6_failed['status']},
- {'key': 'resource_id', 'comparator': 'eq',
- 'value': job_6_failed['resource_id']},
- {'key': 'extra_id', 'comparator': 'eq',
- 'value': job_6_failed['extra_id']}]
- self.assertEqual(0, len(db_api.list_jobs(self.context,
- filters_job_6)))
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(pecan, 'response', new=mock.Mock)
- @patch.object(context, 'extract_context_from_environ')
- def test_put(self, mock_context):
- mock_context.return_value = self.context
-
- # cover all job types
- for job_type in self.job_resource_map.keys():
- job = self._prepare_job_element(job_type)
-
- resource_id = '#'.join([job['resource'][resource_id]
- for resource_type, resource_id
- in self.job_resource_map[job_type]])
-
- # failure case, only admin can redo the job
- job_1 = db_api.new_job(self.context,
- job['project_id'],
- job_type, resource_id)
- self.context.is_admin = False
- res = self.controller.put(job_1['id'])
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
- db_api.delete_job(self.context, job_1['id'])
-
- # failure case, job not found
- res = self.controller.put(-123)
- self._validate_error_code(res, 404)
-
- # failure case, redo a running job
- job_2 = db_api.register_job(self.context,
- job['project_id'],
- job_type, resource_id)
- res = self.controller.put(job_2['id'])
- self._validate_error_code(res, 400)
- db_api.finish_job(self.context, job_2['id'], False,
- timeutils.utcnow())
- db_api.delete_job(self.context, job_2['id'])
-
- # failure case, redo a successful job
- job_3 = self._prepare_job_element(job_type)
-
- resource_id_3 = '#'.join([job_3['resource'][resource_id_3]
- for resource_type_3, resource_id_3
- in self.job_resource_map[job_type]])
-
- job_4 = db_api.new_job(self.context,
- job_3['project_id'],
- job_type, resource_id_3)
- with self.context.session.begin():
- job_dict = {'status': constants.JS_Success,
- 'timestamp': timeutils.utcnow(),
- 'extra_id': uuidutils.generate_uuid()}
- core.update_resource(self.context, models.AsyncJob,
- job_4['id'], job_dict)
-
- res = self.controller.put(job_4['id'])
- self._validate_error_code(res, 400)
- db_api.finish_job(self.context, job_4['id'], True,
- timeutils.utcnow())
-
- # successful case, redo a failed job
- job_5 = db_api.new_job(self.context,
- job['project_id'],
- job_type, resource_id)
- db_api.finish_job(self.context, job_5['id'], False,
- timeutils.utcnow())
- self.controller.put(job_5['id'])
-
- db_api.delete_job(self.context, job_5['id'])
-
- # successful case, redo a new job
- job_6 = db_api.new_job(self.context,
- job['project_id'],
- job_type, resource_id)
- self.controller.put(job_6['id'])
-
- db_api.delete_job(self.context, job_6['id'])
-
- def _prepare_job_element(self, job_type):
- # in order to create a job, we need three elements: job type,
- # job resource and project id.
- job = {}
- job['resource'] = {}
- job['type'] = job_type
-
- for resource_type, resource_id in self.job_resource_map[job_type]:
- job['resource'][resource_id] = uuidutils.generate_uuid()
-
- job['project_id'] = self._prepare_project_id_for_job(job)
-
- return job
-
- def _prepare_project_id_for_job(self, job):
-        # prepare the project id for job creation; at this point the job
-        # parameter contains the job type and job resource information.
- job_type = job['type']
- if job_type in (constants.JT_SEG_RULE_SETUP,
- constants.JT_RESOURCE_RECYCLE):
- project_id = job['resource']['project_id']
- else:
- project_id = uuidutils.generate_uuid()
- pod_id = uuidutils.generate_uuid()
-
- resource_type, resource_id = (
- constants.job_primary_resource_map[job_type])
- routing = db_api.create_resource_mapping(
- self.context, job['resource'][resource_id],
- job['resource'][resource_id], pod_id, project_id,
- resource_type)
- self.assertIsNotNone(routing)
-
- return project_id
-
- def _validate_error_code(self, res, code):
- self.assertEqual(res[list(res.keys())[0]]['code'], code)
-
- def tearDown(self):
- cfg.CONF.unregister_opts(app.common_opts)
- core.ModelBase.metadata.drop_all(core.get_engine())
-
- super(AsyncJobControllerTest, self).tearDown()
diff --git a/tricircle/tests/unit/api/controllers/test_pod.py b/tricircle/tests/unit/api/controllers/test_pod.py
deleted file mode 100644
index e84bebb5..00000000
--- a/tricircle/tests/unit/api/controllers/test_pod.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) 2015 Huawei Tech. Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from mock import patch
-import six
-import unittest
-
-import pecan
-
-from tricircle.api.controllers import pod
-from tricircle.common import context
-from tricircle.common import policy
-from tricircle.db import core
-from tricircle.db import models
-
-
-class PodsControllerTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- self.controller = pod.PodsController()
- self.context = context.get_admin_context()
- policy.populate_default_rules()
-
- @patch.object(context, 'extract_context_from_environ')
- def test_post_top_pod(self, mock_context):
- mock_context.return_value = self.context
- kw = {'pod': {'region_name': 'TopPod', 'az_name': ''}}
- pod_id = self.controller.post(**kw)['pod']['pod_id']
-
- with self.context.session.begin():
- pod = core.get_resource(self.context, models.Pod, pod_id)
- self.assertEqual('TopPod', pod['region_name'])
- self.assertEqual('', pod['az_name'])
- pods = core.query_resource(self.context, models.Pod,
- [{'key': 'region_name',
- 'comparator': 'eq',
- 'value': 'TopPod'}], [])
- self.assertEqual(1, len(pods))
-
- @patch.object(context, 'extract_context_from_environ')
- def test_post_bottom_pod(self, mock_context):
- mock_context.return_value = self.context
- kw = {'pod': {'region_name': 'BottomPod', 'az_name': 'TopAZ'}}
- pod_id = self.controller.post(**kw)['pod']['pod_id']
-
- with self.context.session.begin():
- pod = core.get_resource(self.context, models.Pod, pod_id)
- self.assertEqual('BottomPod', pod['region_name'])
- self.assertEqual('TopAZ', pod['az_name'])
- pods = core.query_resource(self.context, models.Pod,
- [{'key': 'region_name',
- 'comparator': 'eq',
- 'value': 'BottomPod'}], [])
- self.assertEqual(1, len(pods))
-
- @patch.object(context, 'extract_context_from_environ')
- def test_get_one(self, mock_context):
- mock_context.return_value = self.context
- kw = {'pod': {'region_name': 'TopPod', 'az_name': ''}}
- pod_id = self.controller.post(**kw)['pod']['pod_id']
-
- pod = self.controller.get_one(pod_id)
- self.assertEqual('TopPod', pod['pod']['region_name'])
- self.assertEqual('', pod['pod']['az_name'])
-
- @patch.object(context, 'extract_context_from_environ')
- def test_get_all(self, mock_context):
- mock_context.return_value = self.context
- kw1 = {'pod': {'region_name': 'TopPod', 'az_name': ''}}
- kw2 = {'pod': {'region_name': 'BottomPod', 'az_name': 'TopAZ'}}
- self.controller.post(**kw1)
- self.controller.post(**kw2)
-
- pods = self.controller.get_all()
- actual = [(pod['region_name'],
- pod['az_name']) for pod in pods['pods']]
- expect = [('TopPod', ''), ('BottomPod', 'TopAZ')]
- six.assertCountEqual(self, expect, actual)
-
- @patch.object(pecan, 'response', new=mock.Mock)
- @patch.object(context, 'extract_context_from_environ')
- def test_delete(self, mock_context):
- mock_context.return_value = self.context
- kw = {'pod': {'region_name': 'BottomPod', 'az_name': 'TopAZ'}}
- pod_id = self.controller.post(**kw)['pod']['pod_id']
- self.controller.delete(pod_id)
-
- with self.context.session.begin():
- pods = core.query_resource(self.context, models.Pod,
- [{'key': 'region_name',
- 'comparator': 'eq',
- 'value': 'BottomPod'}], [])
- self.assertEqual(0, len(pods))
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- policy.reset()
diff --git a/tricircle/tests/unit/api/controllers/test_routing.py b/tricircle/tests/unit/api/controllers/test_routing.py
deleted file mode 100644
index ee4d5aad..00000000
--- a/tricircle/tests/unit/api/controllers/test_routing.py
+++ /dev/null
@@ -1,440 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from mock import patch
-from oslo_utils import uuidutils
-import six
-from six.moves import xrange
-
-from oslo_config import cfg
-import pecan
-
-from tricircle.api import app
-from tricircle.api.controllers import pod
-from tricircle.api.controllers import routing
-from tricircle.common import context
-from tricircle.common import policy
-from tricircle.db import api as db_api
-from tricircle.db import core
-from tricircle.tests import base
-
-
-class FakeResponse(object):
- def __new__(cls, code=500):
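- # store the code on the class itself so every reference to the
- # patched pecan.response sees the latest status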
- cls.status = code
- cls.status_code = code
- return super(FakeResponse, cls).__new__(cls)
-
-
-class RoutingControllerTest(base.TestCase):
- def setUp(self):
- super(RoutingControllerTest, self).setUp()
-
- cfg.CONF.clear()
- cfg.CONF.register_opts(app.common_opts)
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- self.controller = routing.RoutingController()
- self.context = context.get_admin_context()
- policy.populate_default_rules()
-
- def _validate_error_code(self, res, code):
- self.assertEqual(res[list(res.keys())[0]]['code'], code)
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(context, 'extract_context_from_environ')
- def test_post(self, mock_context):
- mock_context.return_value = self.context
-
- kw_routing = self._prepare_routing_element('subnet')
- id = self.controller.post(**kw_routing)['routing']['id']
- routing = db_api.get_resource_routing(self.context, id)
-
- self.assertEqual('subnet', routing['resource_type'])
-
- routings = db_api.list_resource_routings(self.context,
- [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value':
- 'subnet'
- },
- ])
- self.assertEqual(1, len(routings))
-
- # failure case, only admin can create resource routing
- self.context.is_admin = False
- kw_routing = self._prepare_routing_element('subnet')
- res = self.controller.post(**kw_routing)
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
-
- # failure case, request body not found
- kw_routing1 = {'route':
- {'top_id': uuidutils.generate_uuid(),
- 'bottom_id': uuidutils.generate_uuid(),
- }}
- res = self.controller.post(**kw_routing1)
- self._validate_error_code(res, 400)
-
- # failure case, top_id is not given
- kw_routing2 = self._prepare_routing_element('router')
- kw_routing2['routing'].pop('top_id')
- res = self.controller.post(**kw_routing2)
- self._validate_error_code(res, 400)
-
- # failure case, top_id is empty
- kw_routing3 = self._prepare_routing_element('router')
- kw_routing3['routing'].update({'top_id': ''})
- res = self.controller.post(**kw_routing3)
- self._validate_error_code(res, 400)
-
- # failure case, top_id is given value 'None'
- kw_routing4 = self._prepare_routing_element('security_group')
- kw_routing4['routing'].update({'top_id': None})
- res = self.controller.post(**kw_routing4)
- self._validate_error_code(res, 400)
-
- # failure case, wrong resource type
- kw_routing6 = self._prepare_routing_element('server')
- res = self.controller.post(**kw_routing6)
- self._validate_error_code(res, 400)
-
- # failure case, the resource routing already exists
- kw_routing7 = self._prepare_routing_element('router')
- self.controller.post(**kw_routing7)
- res = self.controller.post(**kw_routing7)
- self._validate_error_code(res, 409)
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(context, 'extract_context_from_environ')
- def test_get_one(self, mock_context):
- mock_context.return_value = self.context
-
- kw_routing = self._prepare_routing_element('port')
- id = self.controller.post(**kw_routing)['routing']['id']
-
- routing = self.controller.get_one(id)
- self.assertEqual('port', routing['routing']['resource_type'])
-
- # failure case, only admin can get resource routing
- self.context.is_admin = False
- res = self.controller.get_one(id)
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
-
- # failure case, resource routing not found
- res = self.controller.get_one(-123)
- self._validate_error_code(res, 404)
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(context, 'extract_context_from_environ')
- def test_get_routings_with_pagination(self, mock_context):
- self.context.project_id = uuidutils.generate_uuid()
-
- mock_context.return_value = self.context
-
- # when neither pagination nor filters are applied to the list
- # operation, all of the routings are retrieved.
- count = 1
- total_routings = 4
- for resource_type in ('subnet', 'router', 'security_group', 'network'):
- kw_routing = self._prepare_routing_element(resource_type)
- # for test convenience, the first routing has a different
- # project ID from later ones.
- if count > 1:
- kw_routing['routing']['project_id'] = self.context.project_id
- self.controller.post(**kw_routing)
- count += 1
-
- routings = self.controller.get_all()
- ids = [routing['id']
- for key, values in six.iteritems(routings)
- for routing in values]
- self.assertEqual([4, 3, 2], ids)
-
- for filter_name in ('router', 'security_group', 'network'):
- filters = {'resource_type': filter_name}
- routings = self.controller.get_all(**filters)
- items = [routing['resource_type']
- for key, values in six.iteritems(routings)
- for routing in values]
- self.assertEqual(1, len(items))
-
- # test when pagination limit varies in range [1, total_routings+1)
- for i in xrange(1, total_routings + 1):
- routings = []
- total_pages = 0
-
- routing = self.controller.get_all(limit=i)
- total_pages += 1
- routings.extend(routing['routings'])
-
- while 'routings_links' in routing:
- link = routing['routings_links'][0]['href']
- _, marker_dict = link.split('&')
- # link is like '/v1.0/routings?limit=1&marker=1', after split,
- # marker_dict is a string like 'marker=1'.
- _, marker_value = marker_dict.split('=')
- routing = self.controller.get_all(limit=i, marker=marker_value)
- if len(routing['routings']) > 0:
- total_pages += 1
- routings.extend(routing['routings'])
- # assert that the total number of pages decreases as the limit
- # increases. Because the first routing has a different project ID
- # and cannot be retrieved under the current project's admin role,
- # the number of routings actually retrievable is total_routings-1.
- pages = int((total_routings - 1) / i)
- if (total_routings - 1) % i:
- pages += 1
- self.assertEqual(pages, total_pages)
- self.assertEqual(total_routings - 1, len(routings))
-
- for i in xrange(total_routings - 1):
- self.assertEqual(total_routings - i, routings[i]['id'])
-
- set1 = set(['router', 'security_group', 'network'])
- set2 = set([routing1['resource_type'] for routing1 in routings])
- self.assertEqual(set1, set2)
-
- # test cases when pagination and filters are used
- routings = self.controller.get_all(resource_type='network', limit=1)
- self.assertEqual(1, len(routings['routings']))
-
- routings = self.controller.get_all(resource_type='subnet', limit=2)
- self.assertEqual(0, len(routings['routings']))
-
- # apply a filter that matches none of the retrieved routings;
- # they are all filtered out and the method returns [].
- kw_filter2 = {'resource_type': 'port2'}
- routings = self.controller.get_all(**kw_filter2)
-
- # test cases when limit from client is abnormal
- routings = self.controller.get_all(limit=0)
- self.assertEqual(total_routings - 1, len(routings['routings']))
-
- routings = self.controller.get_all(limit=-1)
- self.assertEqual(total_routings - 1, len(routings['routings']))
-
- res = self.controller.get_all(limit='20x')
- self._validate_error_code(res, 400)
-
- # test cases when pagination limit from client is greater than
- # max limit
- pagination_max_limit_backup = cfg.CONF.pagination_max_limit
- cfg.CONF.set_override('pagination_max_limit', 2)
- routings = self.controller.get_all(limit=3)
- self.assertEqual(2, len(routings['routings']))
- cfg.CONF.set_override('pagination_max_limit',
- pagination_max_limit_backup)
-
- # test case when the last page is reached, then there is no link
- # to the next page
- routings = self.controller.get_all(limit=2, marker=3)
- self.assertNotIn('routings_links', routings)
-
- # test cases when marker is abnormal
- res = self.controller.get_all(limit=2, marker=-1)
- self._validate_error_code(res, 400)
-
- res = self.controller.get_all(limit=2, marker=0)
- self._validate_error_code(res, 400)
-
- res = self.controller.get_all(limit=2, marker="last")
- self._validate_error_code(res, 400)
-
- # failure case, use an unsupported filter type
- kw_filter3 = {'resource': 'port'}
- res = self.controller.get_all(**kw_filter3)
- self._validate_error_code(res, 400)
-
- kw_filter4 = {'pod_id': "pod_id_1",
- 'resource': 'port'}
- res = self.controller.get_all(**kw_filter4)
- self._validate_error_code(res, 400)
-
- # failure case, id can't be converted to an integer
- kw_filter5 = {'id': '4s'}
- res = self.controller.get_all(**kw_filter5)
- self._validate_error_code(res, 400)
-
- # when the client specifies a project ID filter that differs
- # from the project ID in the context, the filter is ignored
- # and the project ID from the context is used instead.
- res = self.controller.get_all()
- kw_filter6 = {'project_id': uuidutils.generate_uuid()}
- res1 = self.controller.get_all(**kw_filter6)
-
- kw_filter7 = {'project_id': self.context.project_id}
- res2 = self.controller.get_all(**kw_filter7)
- self.assertEqual(len(res2['routings']), len(res1['routings']))
- self.assertEqual(len(res['routings']), len(res2['routings']))
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(context, 'extract_context_from_environ')
- def test_get_all_non_admin(self, mock_context):
- mock_context.return_value = self.context
-
- kw_routing1 = self._prepare_routing_element('subnet')
- self.controller.post(**kw_routing1)
-
- # failure case, only admin can show all resource routings
- self.context.is_admin = False
- res = self.controller.get_all()
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(pecan, 'response', new=mock.Mock)
- @patch.object(context, 'extract_context_from_environ')
- def test_delete(self, mock_context):
- mock_context.return_value = self.context
-
- # prepare the foreign key: pod_id
- kw_pod = {'pod': {'region_name': 'pod1', 'az_name': 'az1'}}
- pod_id = pod.PodsController().post(**kw_pod)['pod']['pod_id']
-
- # a variable used for later test
- project_id = uuidutils.generate_uuid()
-
- kw_routing = {'routing':
- {'top_id': '09fd7cc9-d169-4b5a-88e8-436ecf4d0bfe',
- 'bottom_id': 'dc80f9de-abb7-4ec6-ab7a-94f8fd1e20ef',
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': 'subnet'
- }}
-
- routing = self.controller.post(**kw_routing)
- id = routing['routing']['id']
- res = self.controller.delete(id)
- self.assertEqual(200, res.status)
-
- routings = db_api.list_resource_routings(self.context,
- [{'key': 'top_id',
- 'comparator': 'eq',
- 'value': '09fd7cc9-d'
- '169-4b5a-88e8-436ecf4d0bfe'
- },
- {'key': 'pod_id',
- 'comparator': 'eq',
- 'value': pod_id
- }])
- self.assertEqual(0, len(routings))
-
- # failure case, only admin can delete resource routing
- self.context.is_admin = False
- res = self.controller.delete(id)
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
-
- # failure case, resource routing not found
- res = self.controller.delete(-123)
- self._validate_error_code(res, 404)
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(pecan, 'response', new=mock.Mock)
- @patch.object(context, 'extract_context_from_environ')
- def test_put(self, mock_context):
- mock_context.return_value = self.context
-
- body = self._prepare_routing_element('subnet')
-
- # both bottom_id and resource type have been changed
- body_update1 = {'routing':
- {'bottom_id': uuidutils.generate_uuid(),
- 'resource_type': 'port'
- }}
-
- id = self.controller.post(**body)['routing']['id']
- routing = self.controller.put(id, **body_update1)
-
- self.assertEqual('port',
- routing['routing']['resource_type'])
- self.assertEqual(body_update1['routing']['bottom_id'],
- routing['routing']['bottom_id'])
-
- # failure case, only admin can update resource routing
- self.context.is_admin = False
- res = self.controller.put(id, **body_update1)
- self._validate_error_code(res, 403)
-
- self.context.is_admin = True
-
- # failure case, request body not found
- body_update2 = {'route':
- {'bottom_id': uuidutils.generate_uuid(),
- 'resource_type': 'port'
- }}
- res = self.controller.put(id, **body_update2)
- self._validate_error_code(res, 400)
-
- # failure case, wrong resource type
- body_update3 = {'routing':
- {'resource_type': 'volume'}}
- res = self.controller.put(id, **body_update3)
- self._validate_error_code(res, 400)
-
- # failure case, the value to be updated is empty
- body_update4 = {'routing':
- {'top_id': ''}}
- res = self.controller.put(id, **body_update4)
- self._validate_error_code(res, 400)
-
- # failure case, the value to be updated is None
- body_update5 = {'routing':
- {'top_id': None}}
- res = self.controller.put(id, **body_update5)
- self._validate_error_code(res, 400)
-
- # failure case, the value to be updated is not appropriate
- res = self.controller.put(-123, **body_update1)
- self._validate_error_code(res, 404)
-
- # failure case, the pod referenced by the new pod_id
- # must exist in the pod table
-
- # a variable used for later test
- new_pod_id = uuidutils.generate_uuid()
- body_update6 = {'routing': {'pod_id': new_pod_id}}
- res = self.controller.put(id, **body_update6)
- self._validate_error_code(res, 400)
-
- def _prepare_routing_element(self, resource_type):
- """Prepare routing fields except id
-
- :return: A Dictionary with top_id, bottom_id, pod_id,
- project_id, resource_type
- """
-
- fake_routing = {
- 'routing': {
- 'top_id': uuidutils.generate_uuid(),
- 'bottom_id': uuidutils.generate_uuid(),
- 'pod_id': uuidutils.generate_uuid(),
- 'project_id': uuidutils.generate_uuid(),
- 'resource_type': resource_type,
- }
- }
-
- return fake_routing
-
- def tearDown(self):
- cfg.CONF.unregister_opts(app.common_opts)
- core.ModelBase.metadata.drop_all(core.get_engine())
-
- super(RoutingControllerTest, self).tearDown()
diff --git a/tricircle/tests/unit/cmd/__init__.py b/tricircle/tests/unit/cmd/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/unit/cmd/test_cmd.py b/tricircle/tests/unit/cmd/test_cmd.py
deleted file mode 100644
index 00205a12..00000000
--- a/tricircle/tests/unit/cmd/test_cmd.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) 2018 NEC, Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from mock import patch
-import sys
-import unittest
-
-from oslo_config import cfg
-from oslo_service import service
-from tricircle.api import app
-from tricircle.cmd import api
-from tricircle.cmd import xjob
-from tricircle.xjob import xservice
-
-
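-# stub for ProcessLauncher.wait so the command's main() returns immediately in tests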
-def fake_wait(self):
- return
-
-
-class TestXjobCmd(unittest.TestCase):
- def setUp(self):
- super(TestXjobCmd, self).setUp()
- sys.argv = ['tricircle-cmd']
- cfg.CONF.reset()
- cfg.CONF.unregister_opts(xservice.common_opts)
- cfg.CONF.unregister_opts(app.common_opts)
-
- @patch.object(service.ProcessLauncher, 'wait', new=fake_wait)
- @mock.patch('tricircle.xjob.xservice.create_service')
- @mock.patch('oslo_service.service.ProcessLauncher.launch_service')
- def test_xjob_main(self, launch_service, create_service):
- xjob.main()
- launch_service.assert_called_once_with(
- create_service.return_value, workers=1)
-
- @patch.object(service.ProcessLauncher, 'wait', new=fake_wait)
- @mock.patch('tricircle.api.app.setup_app')
- @mock.patch('oslo_service.wsgi.Server')
- @mock.patch('oslo_service.service.ProcessLauncher.launch_service')
- def test_api_main(self, launch_service, wsgi_server, setup_app):
- api.main()
- wsgi_server.assert_called_once_with(mock.ANY, 'Tricircle Admin_API',
- setup_app.return_value,
- mock.ANY, mock.ANY)
- launch_service.assert_called_once_with(
- wsgi_server.return_value, workers=1)
-
- def tearDown(self):
- cfg.CONF.reset()
- cfg.CONF.unregister_opts(xservice.common_opts)
- cfg.CONF.unregister_opts(app.common_opts)
diff --git a/tricircle/tests/unit/cmd/test_status.py b/tricircle/tests/unit/cmd/test_status.py
deleted file mode 100644
index 2f1eebed..00000000
--- a/tricircle/tests/unit/cmd/test_status.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (c) 2018 NEC, Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import unittest
-
-from oslo_upgradecheck.upgradecheck import Code
-
-from tricircle.cmd import status
-
-
-class TestUpgradeChecks(unittest.TestCase):
-
- def setUp(self):
- super(TestUpgradeChecks, self).setUp()
- self.cmd = status.Checks()
-
- def test__check_placeholder(self):
- check_result = self.cmd._check_placeholder()
- self.assertEqual(
- Code.SUCCESS, check_result.code)
diff --git a/tricircle/tests/unit/common/__init__.py b/tricircle/tests/unit/common/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/unit/common/test_client.py b/tricircle/tests/unit/common/test_client.py
deleted file mode 100644
index 0e6dbe2d..00000000
--- a/tricircle/tests/unit/common/test_client.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import unittest
-import uuid
-
-import mock
-from mock import patch
-from oslo_config import cfg
-
-import keystoneclient.v3.client as k_client
-
-from tricircle.common import client
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.common import resource_handle
-from tricircle.db import api
-from tricircle.db import core
-
-
-FAKE_AZ = 'fake_az'
-FAKE_RESOURCE = 'fake_res'
-FAKE_SITE_ID = 'fake_pod_id'
-FAKE_SITE_NAME = 'fake_region_name'
-FAKE_SERVICE_ID = 'fake_service_id'
-FAKE_SERVICE_NAME = 'fake_service_name'
-FAKE_TYPE = 'fake_type'
-FAKE_URL = 'http://127.0.0.1:12345'
-FAKE_URL_INVALID = 'http://127.0.0.1:23456'
-FAKE_RESOURCES = [{'name': 'res1'}, {'name': 'res2'}]
-
-
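-# minimal stand-ins for keystoneclient manager and resource objects (list()/to_dict())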
-class _List(object):
- def __init__(self, eles):
- self.eles = eles
-
- def list(self):
- return self.eles
-
-
-class _Dict(object):
- def __init__(self, ele):
- self.ele = ele
-
- def to_dict(self):
- return self.ele
-
-
-class FakeKeystoneClient(object):
- def __init__(self, **kwargs):
- _services = kwargs['services']
- _endpoints = kwargs['endpoints']
- self.services = _List([_Dict(_service) for _service in _services])
- self.endpoints = _List([_Dict(_endpoint) for _endpoint in _endpoints])
-
-
-class FakeException(Exception):
- pass
-
-
-class FakeClient(object):
- def __init__(self, url):
- self.endpoint = url
-
- def list_fake_res(self, search_opts):
- # make sure endpoint is correctly set
- if self.endpoint != FAKE_URL:
- raise FakeException()
- if not search_opts:
- return [res for res in FAKE_RESOURCES]
- else:
- return [res for res in FAKE_RESOURCES if (
- res['name'] == search_opts['name'])]
-
- def create_fake_res(self, name):
- if self.endpoint != FAKE_URL:
- raise FakeException()
- FAKE_RESOURCES.append({'name': name})
- return {'name': name}
-
- def delete_fake_res(self, name):
- if self.endpoint != FAKE_URL:
- raise FakeException()
- try:
- FAKE_RESOURCES.remove({'name': name})
- except ValueError:
- pass
-
- def action_fake_res(self, name, rename):
- if self.endpoint != FAKE_URL:
- raise FakeException()
- for res in FAKE_RESOURCES:
- if res['name'] == name:
- res['name'] = rename
- break
-
-
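-# resource handle wired to FakeClient so no real service endpoint is needed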
-class FakeResHandle(resource_handle.ResourceHandle):
- def _get_client(self, cxt):
- return FakeClient(self.endpoint_url)
-
- def handle_list(self, cxt, resource, filters):
- try:
- cli = self._get_client(cxt)
- return cli.list_fake_res(
- resource_handle._transform_filters(filters))
- except FakeException:
- raise exceptions.EndpointNotAvailable(FAKE_TYPE, cli.endpoint)
-
- def handle_create(self, cxt, resource, name):
- try:
- cli = self._get_client(cxt)
- return cli.create_fake_res(name)
- except FakeException:
- raise exceptions.EndpointNotAvailable(FAKE_TYPE, cli.endpoint)
-
- def handle_delete(self, cxt, resource, name):
- try:
- cli = self._get_client(cxt)
- cli.delete_fake_res(name)
- except FakeException:
- raise exceptions.EndpointNotAvailable(FAKE_TYPE, cli.endpoint)
-
- def handle_action(self, cxt, resource, action, name, rename):
- try:
- cli = self._get_client(cxt)
- cli.action_fake_res(name, rename)
- except FakeException:
- raise exceptions.EndpointNotAvailable(FAKE_TYPE, cli.endpoint)
-
-
-class ClientTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- # enforce foreign key constraint for sqlite
- core.get_engine().execute('pragma foreign_keys=on')
- self.context = context.Context()
-
- pod_dict = {
- 'pod_id': FAKE_SITE_ID,
- 'region_name': FAKE_SITE_NAME,
- 'az_name': FAKE_AZ
- }
- config_dict = {
- 'service_id': FAKE_SERVICE_ID,
- 'pod_id': FAKE_SITE_ID,
- 'service_type': FAKE_TYPE,
- 'service_url': FAKE_URL
- }
- api.create_pod(self.context, pod_dict)
- api.create_cached_endpoints(self.context, config_dict)
-
- global FAKE_RESOURCES
- FAKE_RESOURCES = [{'name': 'res1'}, {'name': 'res2'}]
-
- cfg.CONF.set_override(name='top_region_name', override=FAKE_SITE_NAME,
- group='client')
- self.client = client.Client()
- self.client.resource_service_map[FAKE_RESOURCE] = FAKE_TYPE
- self.client.operation_resources_map['list'].add(FAKE_RESOURCE)
- self.client.operation_resources_map['create'].add(FAKE_RESOURCE)
- self.client.operation_resources_map['delete'].add(FAKE_RESOURCE)
- self.client.operation_resources_map['action'].add(FAKE_RESOURCE)
- self.client.service_handle_map[FAKE_TYPE] = FakeResHandle(None)
-
- def test_list(self):
- resources = self.client.list_resources(
- FAKE_RESOURCE, self.context, [])
- self.assertEqual(resources, [{'name': 'res1'}, {'name': 'res2'}])
-
- def test_list_with_filters(self):
- resources = self.client.list_resources(
- FAKE_RESOURCE, self.context, [{'key': 'name',
- 'comparator': 'eq',
- 'value': 'res2'}])
- self.assertEqual(resources, [{'name': 'res2'}])
-
- def test_create(self):
- resource = self.client.create_resources(FAKE_RESOURCE, self.context,
- 'res3')
- self.assertEqual(resource, {'name': 'res3'})
- resources = self.client.list_resources(FAKE_RESOURCE, self.context)
- self.assertEqual(resources, [{'name': 'res1'}, {'name': 'res2'},
- {'name': 'res3'}])
-
- def test_delete(self):
- self.client.delete_resources(FAKE_RESOURCE, self.context, 'res1')
- resources = self.client.list_resources(FAKE_RESOURCE, self.context)
- self.assertEqual(resources, [{'name': 'res2'}])
-
- def test_action(self):
- self.client.action_resources(FAKE_RESOURCE, self.context,
- 'rename', 'res1', 'res3')
- resources = self.client.list_resources(FAKE_RESOURCE, self.context)
- self.assertEqual(resources, [{'name': 'res3'}, {'name': 'res2'}])
-
- def test_list_create_endpoint_not_found(self):
- cfg.CONF.set_override(name='auto_refresh_endpoint', override=False,
- group='client')
- # delete the configuration so endpoint cannot be found
- api.delete_cached_endpoints(self.context, FAKE_SERVICE_ID)
- resources = self.client.list_resources(FAKE_RESOURCE, self.context)
- # list_resources returns [] by default
- self.assertEqual(resources, [])
- resource = self.client.create_resources(FAKE_RESOURCE, self.context,
- 'res3')
- # create_resources returns None by default
- self.assertIsNone(resource)
-
- def test_resource_not_supported(self):
- # no such resource
- self.assertRaises(exceptions.ResourceNotSupported,
- self.client.list_resources,
- 'no_such_resource', self.context, [])
- # remove "create" entry for FAKE_RESOURCE
- self.client.operation_resources_map['create'].remove(FAKE_RESOURCE)
- # operation not supported
- self.assertRaises(exceptions.ResourceNotSupported,
- self.client.create_resources,
- FAKE_RESOURCE, self.context, [])
-
- def test_list_endpoint_not_found(self):
- cfg.CONF.set_override(name='auto_refresh_endpoint', override=False,
- group='client')
- # delete the configuration so endpoint cannot be found
- api.delete_cached_endpoints(self.context, FAKE_SERVICE_ID)
-
- # list returns empty list when endpoint not found
- resources = self.client.list_resources(
- FAKE_RESOURCE, self.context, [])
- self.assertEqual(resources, [])
-
- def test_list_endpoint_not_found_retry(self):
- cfg.CONF.set_override(name='auto_refresh_endpoint', override=True,
- group='client')
- # delete the configuration so endpoint cannot be found
- api.delete_cached_endpoints(self.context, FAKE_SERVICE_ID)
-
- self.client._get_admin_token = mock.Mock()
- self.client._get_admin_project_id = mock.Mock()
- self.client._get_endpoint_from_keystone = mock.Mock()
-
- self.client._get_endpoint_from_keystone.return_value = {}
- resources = self.client.list_resources(
- FAKE_RESOURCE, self.context, [])
- # retry but endpoint still not found
- self.assertEqual(resources, [])
-
- self.client._get_endpoint_from_keystone.return_value = {
- FAKE_SITE_NAME: {FAKE_TYPE: FAKE_URL}
- }
- resources = self.client.list_resources(
- FAKE_RESOURCE, self.context, [])
- self.assertEqual(resources, [{'name': 'res1'}, {'name': 'res2'}])
-
- def test_list_endpoint_not_valid(self):
- cfg.CONF.set_override(name='auto_refresh_endpoint', override=False,
- group='client')
- update_dict = {'service_url': FAKE_URL_INVALID}
- # update url to an invalid one
- api.update_cached_endpoints(self.context,
- FAKE_SERVICE_ID,
- update_dict)
-
- # auto refresh set to False, directly raise exception
- self.assertRaises(exceptions.EndpointNotAvailable,
- self.client.list_resources,
- FAKE_RESOURCE, self.context, [])
-
- def test_list_endpoint_not_valid_retry(self):
- cfg.CONF.set_override(name='auto_refresh_endpoint', override=True,
- group='client')
- update_dict = {'service_url': FAKE_URL_INVALID}
- # update url to an invalid one
- api.update_cached_endpoints(self.context,
- FAKE_SERVICE_ID,
- update_dict)
-
- self.client._get_admin_token = mock.Mock()
- self.client._get_admin_project_id = mock.Mock()
- self.client._get_endpoint_from_keystone = mock.Mock()
- self.client._get_endpoint_from_keystone.return_value = {}
- # retry but still endpoint not updated
- self.assertRaises(exceptions.EndpointNotAvailable,
- self.client.list_resources,
- FAKE_RESOURCE, self.context, [])
-
- self.client._get_endpoint_from_keystone.return_value = {
- FAKE_SITE_NAME: {FAKE_TYPE: FAKE_URL}
- }
- resources = self.client.list_resources(
- FAKE_RESOURCE, self.context, [])
- self.assertEqual(resources, [{'name': 'res1'}, {'name': 'res2'}])
-
- @patch.object(k_client, 'Client')
- def test_get_endpoint_from_keystone(self, mock_client):
- services = [{'id': FAKE_SERVICE_ID,
- 'name': FAKE_SERVICE_NAME},
- {'id': 'another_fake_service_id',
- 'name': 'another_fake_service_name'}]
- endpoints = [{'interface': 'public',
- 'region': FAKE_SITE_NAME,
- 'service_id': FAKE_SERVICE_ID,
- 'url': FAKE_URL},
- {'interface': 'admin',
- 'region': FAKE_SITE_NAME,
- 'service_id': FAKE_SERVICE_ID,
- 'url': FAKE_URL_INVALID}]
- mock_client.return_value = FakeKeystoneClient(services=services,
- endpoints=endpoints)
- endpoint_map = self.client._get_endpoint_from_keystone(self.context)
- # only public endpoint is saved
- self.assertEqual(endpoint_map,
- {FAKE_SITE_NAME: {FAKE_SERVICE_NAME: FAKE_URL}})
-
- @patch.object(uuid, 'uuid4')
- @patch.object(api, 'create_cached_endpoints')
- @patch.object(api, 'update_cached_endpoints')
- def test_update_endpoint_from_keystone(self, update_mock, create_mock,
- uuid_mock):
- self.client._get_admin_token = mock.Mock()
- self.client._get_endpoint_from_keystone = mock.Mock()
- self.client._get_endpoint_from_keystone.return_value = {
- FAKE_SITE_NAME: {FAKE_TYPE: FAKE_URL,
- 'another_fake_type': 'http://127.0.0.1:34567'},
- 'not_registered_pod': {FAKE_TYPE: FAKE_URL}
- }
- uuid_mock.return_value = 'another_fake_service_id'
-
- self.client.update_endpoint_from_keystone(self.context)
- update_dict = {'service_url': FAKE_URL}
- create_dict = {'service_id': 'another_fake_service_id',
- 'pod_id': FAKE_SITE_ID,
- 'service_type': 'another_fake_type',
- 'service_url': 'http://127.0.0.1:34567'}
- # the pod that is not registered is skipped
- update_mock.assert_called_once_with(
- self.context, FAKE_SERVICE_ID, update_dict)
- create_mock.assert_called_once_with(self.context, create_dict)
-
- def test_get_endpoint(self):
- cfg.CONF.set_override(name='auto_refresh_endpoint', override=False,
- group='client')
- url = self.client.get_endpoint(self.context, FAKE_SITE_ID, FAKE_TYPE)
- self.assertEqual(url, FAKE_URL)
-
- @patch.object(FakeClient, 'list_fake_res')
- def test_resource_handle_endpoint_unavailable(self, mock_list):
- handle = FakeResHandle(None)
- handle.endpoint_url = FAKE_URL
- mock_list.side_effect = FakeException
- self.assertRaises(exceptions.EndpointNotAvailable,
- handle.handle_list, self.context, FAKE_RESOURCE, [])
- # endpoint_url will not be set to None
- self.assertIsNotNone(handle.endpoint_url)
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
diff --git a/tricircle/tests/unit/common/test_exception.py b/tricircle/tests/unit/common/test_exception.py
deleted file mode 100644
index 99be2ce7..00000000
--- a/tricircle/tests/unit/common/test_exception.py
+++ /dev/null
@@ -1,114 +0,0 @@
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-import unittest
-
-from tricircle.common import exceptions
-
-
-class TricircleExceptionTestCase(unittest.TestCase):
- def test_default_error_msg(self):
- class FakeTricircleException(exceptions.TricircleException):
- message = "default message"
-
- exc = FakeTricircleException()
- self.assertEqual('default message', six.text_type(exc))
-
- def test_error_msg(self):
- self.assertEqual('test',
- six.text_type(exceptions.TricircleException('test')))
-
- def test_default_error_msg_with_kwargs(self):
- class FakeTricircleException(exceptions.TricircleException):
- message = "default message: %(code)s"
-
- exc = FakeTricircleException(code=500)
- self.assertEqual('default message: 500', six.text_type(exc))
-
- def test_error_msg_exception_with_kwargs(self):
- class FakeTricircleException(exceptions.TricircleException):
- message = "default message: %(misspelled_code)s"
-
- exc = FakeTricircleException(code=500)
- self.assertEqual('default message: %(misspelled_code)s',
- six.text_type(exc))
-
- def test_default_error_code(self):
- class FakeTricircleException(exceptions.TricircleException):
- code = 404
-
- exc = FakeTricircleException()
- self.assertEqual(404, exc.kwargs['code'])
-
- def test_error_code_from_kwarg(self):
- class FakeTricircleException(exceptions.TricircleException):
- code = 500
-
- exc = FakeTricircleException(code=404)
- self.assertEqual(404, exc.kwargs['code'])
-
- def test_error_msg_is_exception_to_string(self):
- msg = 'test message'
- exc1 = Exception(msg)
- exc2 = exceptions.TricircleException(exc1)
- self.assertEqual(msg, exc2.msg)
-
- def test_exception_kwargs_to_string(self):
- msg = 'test message'
- exc1 = Exception(msg)
- exc2 = exceptions.TricircleException(kwarg1=exc1)
- self.assertEqual(msg, exc2.kwargs['kwarg1'])
-
- def test_message_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
- message = 'FakeCinderException: %(message)s'
-
- exc = FakeTricircleException(message='message')
- self.assertEqual('FakeCinderException: message', six.text_type(exc))
-
- def test_message_and_kwarg_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
- message = 'Error %(code)d: %(message)s'
-
- exc = FakeTricircleException(message='message', code=404)
- self.assertEqual('Error 404: message', six.text_type(exc))
-
- def test_message_is_exception_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
- message = 'Exception: %(message)s'
-
- msg = 'test message'
- exc1 = Exception(msg)
- exc2 = FakeTricircleException(message=exc1)
- self.assertEqual('Exception: test message', six.text_type(exc2))
-
- def test_no_message_input_exception_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
- message = 'Error: %(message)s'
-
- exc = FakeTricircleException()
- out_message = six.text_type(exc)
- self.assertEqual('Error: None', out_message)
-
- def test_no_kwarg_input_exception_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
- message = 'No Kwarg Error: %(why)s, %(reason)s'
-
- exc = FakeTricircleException(why='why')
- out_message = six.text_type(exc)
- self.assertEqual('No Kwarg Error: %(why)s, %(reason)s', out_message)
diff --git a/tricircle/tests/unit/common/test_httpclient.py b/tricircle/tests/unit/common/test_httpclient.py
deleted file mode 100644
index ea231104..00000000
--- a/tricircle/tests/unit/common/test_httpclient.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from mock import patch
-
-import unittest
-
-from tricircle.common import constants as cons
-from tricircle.common import context
-from tricircle.common import httpclient as hclient
-
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
-
-
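-# look up the cached endpoint for a pod and service type, or return '' when not found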
-def fake_get_pod_service_endpoint(ctx, region_name, st):
-
- pod = api.get_pod_by_name(ctx, region_name)
- if pod:
- f = [{'key': 'pod_id', 'comparator': 'eq',
- 'value': pod['pod_id']},
- {'key': 'service_type', 'comparator': 'eq',
- 'value': st}]
- pod_services = api.list_cached_endpoints(
- ctx,
- filters=f,
- sorts=[])
-
- if len(pod_services) != 1:
- return ''
-
- return pod_services[0]['service_url']
-
- return ''
-
-
-class HttpClientTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- # enforce foreign key constraint for sqlite
- core.get_engine().execute('pragma foreign_keys=on')
- self.context = context.Context()
-
- def test_get_version_from_url(self):
- url = 'http://127.0.0.1:9696/v2.0/networks'
- ver = hclient.get_version_from_url(url)
- self.assertEqual(ver, 'v2.0')
-
- url = 'http://127.0.0.1:9696/v2.0/'
- ver = hclient.get_version_from_url(url)
- self.assertEqual(ver, 'v2.0')
-
- url = 'http://127.0.0.1:9696/v2.0/'
- ver = hclient.get_version_from_url(url)
- self.assertEqual(ver, 'v2.0')
-
- url = 'https://127.0.0.1:9696/v2.0/'
- ver = hclient.get_version_from_url(url)
- self.assertEqual(ver, 'v2.0')
-
- url = 'https://127.0.0.1/v2.0/'
- ver = hclient.get_version_from_url(url)
- self.assertEqual(ver, 'v2.0')
-
- url = 'https://127.0.0.1/'
- ver = hclient.get_version_from_url(url)
- self.assertEqual(ver, '')
-
- url = 'https://127.0.0.1/sss/'
- ver = hclient.get_version_from_url(url)
- self.assertEqual(ver, 'sss')
-
- url = ''
- ver = hclient.get_version_from_url(url)
- self.assertEqual(ver, '')
-
- url = 'sss/networks'
- ver = hclient.get_version_from_url(url)
- self.assertEqual(ver, 'sss')
-
- def test_get_bottom_url(self):
- b_endpoint = 'http://127.0.0.1:9696/v2.0/networks'
- t_url = 'http://127.0.0.1:9696/v2.0/networks'
- t_ver = hclient.get_version_from_url(t_url)
- b_ver = hclient.get_version_from_url(b_endpoint)
-
- self.assertEqual(t_ver, 'v2.0')
- self.assertEqual(b_ver, 'v2.0')
-
- t_url_1 = 'http://127.0.0.1:9696/sss/v2.0/networks'
- b_url = hclient.get_bottom_url(t_ver, t_url_1, b_ver, b_endpoint)
- self.assertEqual(b_url, '')
-
- t_url_1 = 'v2.0/networks'
- b_url = hclient.get_bottom_url(t_ver, t_url_1, b_ver, b_endpoint)
- self.assertEqual(b_url, 'http://127.0.0.1:9696/v2.0/networks')
-
- b_url = hclient.get_bottom_url(t_ver, t_url, '', b_endpoint)
- self.assertEqual(b_url, 'http://127.0.0.1:9696/networks')
-
- t_url_1 = 'http://127.0.0.1:9696/v2.0/networks?qqq=123&sss=456'
- b_url = hclient.get_bottom_url(t_ver, t_url_1, b_ver, b_endpoint)
- self.assertEqual(b_url,
- 'http://127.0.0.1:9696/v2.0/networks?qqq=123&sss=456')
-
- t_url_1 = 'http://127.0.0.1:9696/v2.0/networks?' \
- 'qqq=123&availability_zone=456'
- b_url = hclient.get_bottom_url(t_ver, t_url_1, b_ver, b_endpoint)
- self.assertEqual(b_url,
- 'http://127.0.0.1:9696/v2.0/networks?qqq=123')
-
- b_url = hclient.get_bottom_url(t_ver, t_url, b_ver, b_endpoint)
- self.assertEqual(b_url,
- 'http://127.0.0.1:9696/v2.0/networks')
-
- b_endpoint = 'http://127.0.0.1:9696/v2.0'
- b_ver = hclient.get_version_from_url(b_endpoint)
- self.assertEqual(b_ver, 'v2.0')
-
- b_url = hclient.get_bottom_url(t_ver, t_url, b_ver, b_endpoint)
- self.assertEqual(b_url,
- 'http://127.0.0.1:9696/v2.0/networks')
-
- b_endpoint = 'http://127.0.0.1:9696/v2.0/'
- b_ver = hclient.get_version_from_url(b_endpoint)
- self.assertEqual(b_ver, 'v2.0')
-
- b_url = hclient.get_bottom_url(t_ver, t_url, b_ver, b_endpoint)
- self.assertEqual(b_url,
- 'http://127.0.0.1:9696/v2.0/networks')
-
- b_endpoint = 'http://127.0.0.1:9696/v2.0/'
- b_ver = hclient.get_version_from_url(b_endpoint)
- self.assertEqual(b_ver, 'v2.0')
-
- b_url = hclient.get_bottom_url(t_ver, t_url, b_ver, b_endpoint)
- self.assertEqual(b_url,
- 'http://127.0.0.1:9696/v2.0/networks')
-
- @patch.object(hclient, 'get_pod_service_endpoint',
- new=fake_get_pod_service_endpoint)
- def test_get_pod_service_ctx(self):
- pod_dict = {
- 'pod_id': 'fake_pod_id',
- 'region_name': 'fake_region_name',
- 'az_name': 'fake_az'
- }
-
- config_dict = {
- 'service_id': 'fake_service_id',
- 'pod_id': 'fake_pod_id',
- 'service_type': cons.ST_NEUTRON,
- 'service_url': 'http://127.0.0.1:9696/v2.0/networks'
- }
- t_url = 'http://127.0.0.1:9696/v2.0/networks'
- api.create_pod(self.context, pod_dict)
- api.create_cached_endpoints(self.context, config_dict)
-
- b_url = 'http://127.0.0.1:9696/v2.0/networks'
-
- b_endpoint = hclient.get_pod_service_endpoint(self.context,
- pod_dict['region_name'],
- cons.ST_NEUTRON)
- self.assertEqual(b_endpoint, config_dict['service_url'])
-
- b_ctx = hclient.get_pod_service_ctx(self.context,
- t_url,
- pod_dict['region_name'],
- cons.ST_NEUTRON)
- self.assertEqual(b_ctx['t_ver'], 'v2.0')
- self.assertEqual(b_ctx['t_url'], t_url)
- self.assertEqual(b_ctx['b_ver'], 'v2.0')
- self.assertEqual(b_ctx['b_url'], b_url)
-
- # wrong pod name
- b_ctx = hclient.get_pod_service_ctx(self.context,
- t_url,
- pod_dict['region_name'] + '1',
- cons.ST_NEUTRON)
- self.assertEqual(b_ctx['t_ver'], 'v2.0')
- self.assertEqual(b_ctx['t_url'], t_url)
- self.assertEqual(b_ctx['b_ver'], '')
- self.assertEqual(b_ctx['b_url'], '')
-
- # wrong service_type
- b_ctx = hclient.get_pod_service_ctx(self.context,
- t_url,
- pod_dict['region_name'],
- cons.ST_NEUTRON + '1')
- self.assertEqual(b_ctx['t_ver'], 'v2.0')
- self.assertEqual(b_ctx['t_url'], t_url)
- self.assertEqual(b_ctx['b_ver'], '')
- self.assertEqual(b_ctx['b_url'], '')
-
- @patch.object(hclient, 'get_pod_service_endpoint',
- new=fake_get_pod_service_endpoint)
- def test_get_pod_and_endpoint_by_name(self):
- pod_dict = {
- 'pod_id': 'fake_pod_id',
- 'region_name': 'fake_region_name',
- 'az_name': 'fake_az'
- }
- api.create_pod(self.context, pod_dict)
-
- pod = api.get_pod_by_name(self.context, pod_dict['region_name'] + '1')
- self.assertIsNone(pod)
-
- pod = api.get_pod_by_name(self.context, pod_dict['region_name'])
- self.assertEqual(pod['pod_id'], pod_dict['pod_id'])
- self.assertEqual(pod['region_name'], pod_dict['region_name'])
- self.assertEqual(pod['az_name'], pod_dict['az_name'])
-
- config_dict = {
- 'service_id': 'fake_service_id',
- 'pod_id': 'fake_pod_id',
- 'service_type': cons.ST_NEUTRON,
- 'service_url': 'http://127.0.0.1:9696/v2.0/networks'
- }
- api.create_cached_endpoints(self.context, config_dict)
-
- endpoint = hclient.get_pod_service_endpoint(
- self.context,
- pod_dict['region_name'],
- config_dict['service_type'])
- self.assertEqual(endpoint, config_dict['service_url'])
-
- endpoint = hclient.get_pod_service_endpoint(
- self.context,
- 'x_region_name',
- config_dict['service_type'])
- self.assertEqual(endpoint, '')
-
- def test_get_res_routing_ref(self):
- t_url = 'http://127.0.0.1:9696/v2.0/networks'
-
- self.assertIsNone(hclient.get_res_routing_ref(
- self.context, 'fake_pod_id', t_url, s_type=cons.ST_NEUTRON))
-
- pod_dict = {
- 'pod_id': 'fake_pod_id',
- 'region_name': 'fake_region_name',
- 'az_name': 'fake_az'
- }
- api.create_pod(self.context, pod_dict)
- routes = [
- {
- 'top_id': 'top_id',
- 'bottom_id': 'bottom_id',
- 'pod_id': 'fake_pod_id',
- 'project_id': 'test_project_id',
- 'resource_type': 'network'
- },
- ]
-
- with self.context.session.begin():
- for route in routes:
- core.create_resource(
- self.context, models.ResourceRouting, route)
-
- config_dict = {
- 'service_id': 'fake_service_id',
- 'pod_id': 'fake_pod_id',
- 'service_type': cons.ST_NEUTRON,
- 'service_url': 'http://127.0.0.1:9696/v2.0/networks'
- }
- api.create_cached_endpoints(self.context, config_dict)
-
- s_ctx = {'t_ver': 'v2.0', 'b_ver': 'v2.0',
- 't_url': t_url, 'b_url': t_url}
- self.assertEqual(s_ctx, hclient.get_res_routing_ref(
- self.context, 'top_id', t_url, s_type=cons.ST_NEUTRON))
-
- def test_convert_header(self):
- header = {'header1': 'aaa', 'header2': 'bbb'}
- self.assertEqual(header,
- hclient.convert_header('v1.0', 'v1.0', header))
-
- header = {'header1': 'aaa', 'header2': None}
- expect_header = {'header1': 'aaa'}
- self.assertEqual(expect_header,
- hclient.convert_header('v1.0', 'v1.0', header))
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
diff --git a/tricircle/tests/unit/common/test_lock_handle.py b/tricircle/tests/unit/common/test_lock_handle.py
deleted file mode 100644
index 0c9c53c7..00000000
--- a/tricircle/tests/unit/common/test_lock_handle.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import unittest
-
-import neutronclient.common.exceptions as q_exceptions
-from oslo_utils import uuidutils
-
-from tricircle.common import constants
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.common import lock_handle
-from tricircle.db import api
-from tricircle.db import core
-
-
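-# module-level store acting as the fake bottom resource pool; cleared in tearDown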
-RES = []
-
-
-def list_resource(t_ctx, q_ctx, pod, ele, _type):
- for res in RES:
- if res['name'] == ele['id']:
- return [res]
- return []
-
-
-def create_resource(t_ctx, q_ctx, pod, body, _type):
- body['id'] = uuidutils.generate_uuid()
- RES.append(body)
- return body
-
-
-class LockHandleTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- self.project_id = 'project_id'
- self.t_ctx = context.Context()
- self.q_ctx = object()
-
- def _prepare_pod(self):
- return api.create_pod(self.t_ctx, {'pod_id': 'pod_id_1',
- 'region_name': 'pod_1',
- 'az_name': 'az_name_1'})
-
- def test_get_create_element_new(self):
- pod = self._prepare_pod()
- resource_id = 'fake_resource_id'
- _type = 'fake_resource'
- ele = {'id': resource_id}
- body = {'name': resource_id}
- is_new, b_resource_id = lock_handle.get_or_create_element(
- self.t_ctx, self.q_ctx, self.project_id, pod, ele, _type, body,
- list_resource, create_resource)
- self.assertTrue(is_new)
- self.assertEqual(b_resource_id, RES[0]['id'])
-
- def test_get_create_element_routing_valid(self):
- pod = self._prepare_pod()
- resource_id = 'fake_resource_id'
- _type = 'fake_resource'
- ele = {'id': resource_id}
- body = {'name': resource_id}
- lock_handle.get_or_create_element(
- self.t_ctx, self.q_ctx, self.project_id, pod, ele, _type, body,
- list_resource, create_resource)
- is_new, b_resource_id = lock_handle.get_or_create_element(
- self.t_ctx, self.q_ctx, self.project_id, pod, ele, _type, body,
- list_resource, create_resource)
- self.assertFalse(is_new)
- self.assertEqual(b_resource_id, RES[0]['id'])
-
- def test_get_create_element_routing_expire_resource_exist(self):
- pod = self._prepare_pod()
- resource_id = 'fake_resource_id'
- _type = 'fake_resource'
- ele = {'id': resource_id}
- body = {'name': resource_id}
- routing = api.create_resource_mapping(self.t_ctx, resource_id, None,
- pod['pod_id'], self.project_id,
- _type)
- api.update_resource_routing(self.t_ctx, routing['id'],
- {'created_at': constants.expire_time,
- 'updated_at': constants.expire_time})
-
- RES.append({'id': uuidutils.generate_uuid(),
- 'name': resource_id})
- is_new, b_resource_id = lock_handle.get_or_create_element(
- self.t_ctx, self.q_ctx, self.project_id, pod, ele, _type, body,
- list_resource, create_resource)
- self.assertTrue(is_new)
- self.assertEqual(b_resource_id, RES[0]['id'])
-
- def test_get_create_element_routing_expire_resource_missing(self):
- pod = self._prepare_pod()
- resource_id = 'fake_resource_id'
- _type = 'fake_resource'
- ele = {'id': resource_id}
- body = {'name': resource_id}
- routing = api.create_resource_mapping(self.t_ctx, resource_id, None,
- pod['pod_id'], self.project_id,
- _type)
- api.update_resource_routing(self.t_ctx, routing['id'],
- {'created_at': constants.expire_time,
- 'updated_at': constants.expire_time})
-
- is_new, b_resource_id = lock_handle.get_or_create_element(
- self.t_ctx, self.q_ctx, self.project_id, pod, ele, _type, body,
- list_resource, create_resource)
- self.assertTrue(is_new)
- self.assertEqual(b_resource_id, RES[0]['id'])
-
- def test_get_create_element_routing_conflict(self):
- pod = self._prepare_pod()
- resource_id = 'fake_resource_id'
- _type = 'fake_resource'
- ele = {'id': resource_id}
- body = {'name': resource_id}
- api.create_resource_mapping(self.t_ctx, resource_id, None,
- pod['pod_id'], self.project_id, _type)
- self.assertRaises(
- exceptions.RoutingCreateFail, lock_handle.get_or_create_element,
- self.t_ctx, self.q_ctx, self.project_id, pod, ele, _type, body,
- list_resource, create_resource)
-
- def test_get_create_element_create_fail(self):
- pod = self._prepare_pod()
- resource_id = 'fake_resource_id'
- _type = 'fake_resource'
- ele = {'id': resource_id}
- body = {'name': resource_id}
-
- def fake_create_resource(t_ctx, q_ctx, pod, body, _type):
- raise q_exceptions.ConnectionFailed()
-
- self.assertRaises(
- q_exceptions.ConnectionFailed, lock_handle.get_or_create_element,
- self.t_ctx, self.q_ctx, self.project_id, pod, ele, _type, body,
- list_resource, fake_create_resource)
- routing = api.get_bottom_id_by_top_id_region_name(
- self.t_ctx, resource_id, pod['region_name'], _type)
- self.assertIsNone(routing)
-
- def test_get_list_element_create_fail(self):
- pod = self._prepare_pod()
- resource_id = 'fake_resource_id'
- _type = 'fake_resource'
- ele = {'id': resource_id}
- body = {'name': resource_id}
- routing = api.create_resource_mapping(self.t_ctx, resource_id, None,
- pod['pod_id'], self.project_id,
- _type)
- api.update_resource_routing(self.t_ctx, routing['id'],
- {'created_at': constants.expire_time,
- 'updated_at': constants.expire_time})
-
- def fake_list_resource(t_ctx, q_ctx, pod, body, _type):
- raise q_exceptions.ConnectionFailed()
-
- self.assertRaises(
- q_exceptions.ConnectionFailed, lock_handle.get_or_create_element,
- self.t_ctx, self.q_ctx, self.project_id, pod, ele, _type, body,
- fake_list_resource, create_resource)
- # the original routing is not deleted
- routing = api.get_resource_routing(self.t_ctx, routing['id'])
- self.assertIsNone(routing['bottom_id'])
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- del RES[:]
diff --git a/tricircle/tests/unit/common/test_policy.py b/tricircle/tests/unit/common/test_policy.py
deleted file mode 100644
index 91c047bb..00000000
--- a/tricircle/tests/unit/common/test_policy.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2016 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import unittest
-
-from oslo_policy import policy as oslo_policy
-
-from tricircle.common import context
-from tricircle.common import policy
-
-
-class PolicyTestCase(unittest.TestCase):
- def setUp(self):
- super(PolicyTestCase, self).setUp()
- rules = oslo_policy.Rules.from_dict({
- "true": '@',
- "example:allowed": '@',
- "example:denied": "!",
- "example:my_file": "role:admin or "
- "project_id:%(project_id)s",
- "example:early_and_fail": "! and @",
- "example:early_or_success": "@ or !",
- "example:lowercase_admin": "role:admin or role:sysadmin",
- "example:uppercase_admin": "role:ADMIN or role:sysadmin",
- })
- policy.reset()
- policy.init()
- policy.set_rules(rules)
- self.context = context.Context(user_id='fake',
- tenant_id='fake',
- roles=['member'])
- self.target = None
-
- def test_enforce_nonexistent_action_throws(self):
- action = "example:non_exist"
- result = policy.enforce(self.context, action, self.target)
- self.assertFalse(result)
-
- def test_enforce_bad_action_throws(self):
- action = "example:denied"
- result = policy.enforce(self.context, action, self.target)
- self.assertFalse(result)
-
- def test_enforce_good_action(self):
- action = "example:allowed"
- result = policy.enforce(self.context, action, self.target)
- self.assertTrue(result)
-
- def test_templatized_enforcement(self):
- target_mine = {'project_id': 'fake'}
- target_not_mine = {'project_id': 'another'}
- action = "example:my_file"
- result = policy.enforce(self.context, action, target_mine)
- self.assertTrue(result)
- result = policy.enforce(self.context, action, target_not_mine)
- self.assertFalse(result)
-
- def test_early_AND_enforcement(self):
- action = "example:early_and_fail"
- result = policy.enforce(self.context, action, self.target)
- self.assertFalse(result)
-
- def test_early_OR_enforcement(self):
- action = "example:early_or_success"
- result = policy.enforce(self.context, action, self.target)
- self.assertTrue(result)
-
- def test_ignore_case_role_check(self):
- lowercase_action = "example:lowercase_admin"
- uppercase_action = "example:uppercase_admin"
- admin_context = context.Context(user_id='fake',
- tenant_id='fake',
- roles=['AdMiN'])
- result = policy.enforce(admin_context, lowercase_action, self.target)
- self.assertTrue(result)
- result = policy.enforce(admin_context, uppercase_action, self.target)
- self.assertTrue(result)
-
-
-class DefaultPolicyTestCase(unittest.TestCase):
-
- def setUp(self):
- super(DefaultPolicyTestCase, self).setUp()
-
- self.rules = oslo_policy.Rules.from_dict({
- "default": '',
- "example:exist": "!",
- })
-
- self._set_rules('default')
-
- self.context = context.Context(user_id='fake',
- tenant_id='fake')
-
- def _set_rules(self, default_rule):
- policy.reset()
- policy.init(rules=self.rules, default_rule=default_rule,
- use_conf=False)
-
- def test_policy_called(self):
- result = policy.enforce(self.context, "example:exist", {})
- self.assertFalse(result)
-
- def test_not_found_policy_calls_default(self):
- result = policy.enforce(self.context, "example:noexist", {})
- self.assertTrue(result)
-
- def test_default_not_found(self):
- self._set_rules("default_noexist")
- result = policy.enforce(self.context, "example:noexist", {})
- self.assertFalse(result)
diff --git a/tricircle/tests/unit/common/test_resource_handle.py b/tricircle/tests/unit/common/test_resource_handle.py
deleted file mode 100644
index 38ac6d45..00000000
--- a/tricircle/tests/unit/common/test_resource_handle.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from mock import patch
-import unittest
-
-import neutronclient.common.exceptions as q_exceptions
-
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.common import resource_handle
-
-
-class FakeHttpClient(object):
- endpoint_url = 'fake_endpoint_url'
-
-
-class FakeNeutronClient(object):
- def __init__(self):
- self.httpclient = FakeHttpClient()
-
- def create_network(self, body):
- pass
-
- def show_network(self, _id):
- pass
-
- def update_network(self, _id, body):
- pass
-
- def list_networks(self, **search_opts):
- pass
-
- def delete_network(self, _id):
- pass
-
- def remove_gateway_router(self, _id):
- pass
-
-
-class FakeResourceHandle(resource_handle.NeutronResourceHandle):
- def _get_client(self, cxt):
- return FakeNeutronClient()
-
-
-class ResourceHandleTest(unittest.TestCase):
- def setUp(self):
- self.context = context.Context()
- self.handle = FakeResourceHandle('fake_auth_url')
-
- @patch.object(FakeNeutronClient, 'create_network')
- def test_handle_create(self, mock_create):
- body = {'name': 'net1'}
- self.handle.handle_create(self.context, 'network', body)
- mock_create.assert_called_once_with(body)
-
- @patch.object(FakeNeutronClient, 'create_network')
- def test_handle_create_fail(self, mock_create):
- body = {'name': 'net1'}
- mock_create.side_effect = q_exceptions.ConnectionFailed
- self.assertRaises(exceptions.EndpointNotAvailable,
- self.handle.handle_create,
- self.context, 'network', body)
- self.assertIsNone(self.handle.endpoint_url)
-
- @patch.object(FakeNeutronClient, 'show_network')
- def test_handle_get(self, mock_get):
- fake_network_id = 'fake_network_id'
- self.handle.handle_get(self.context, 'network', fake_network_id)
- mock_get.assert_called_once_with(fake_network_id)
-
- @patch.object(FakeNeutronClient, 'show_network')
- def test_handle_get_fail(self, mock_get):
- fake_network_id = 'fake_network_id'
- mock_get.side_effect = q_exceptions.ConnectionFailed
- self.assertRaises(exceptions.EndpointNotAvailable,
- self.handle.handle_get,
- self.context, 'network', fake_network_id)
- self.assertIsNone(self.handle.endpoint_url)
- mock_get.side_effect = q_exceptions.NotFound
- ret = self.handle.handle_get(self.context, 'network', fake_network_id)
- self.assertIsNone(ret)
-
- @patch.object(FakeNeutronClient, 'update_network')
- def test_handle_update(self, mock_update):
- fake_network_id = 'fake_network_id'
- body = {'name': 'net2'}
- self.handle.handle_update(self.context, 'network',
- fake_network_id, body)
- mock_update.assert_called_once_with(fake_network_id, body)
-
- @patch.object(FakeNeutronClient, 'update_network')
- def test_handle_update_fail(self, mock_update):
- fake_network_id = 'fake_network_id'
- body = {'name': 'net2'}
- mock_update.side_effect = q_exceptions.ConnectionFailed
- self.assertRaises(exceptions.EndpointNotAvailable,
- self.handle.handle_update,
- self.context, 'network', fake_network_id, body)
- self.assertIsNone(self.handle.endpoint_url)
-
- @patch.object(FakeNeutronClient, 'list_networks')
- def test_handle_list(self, mock_list):
- self.handle.handle_list(self.context, 'network',
- [{'key': 'name', 'comparator': 'eq',
- 'value': 'net1'}])
- # resource_handle will transform the filter format
- mock_list.assert_called_once_with(name='net1')
-
- @patch.object(FakeNeutronClient, 'list_networks')
- def test_handle_list_fail(self, mock_list):
- mock_list.side_effect = q_exceptions.ConnectionFailed
- self.assertRaises(exceptions.EndpointNotAvailable,
- self.handle.handle_list, self.context, 'network',
- [{'key': 'name', 'comparator': 'eq',
- 'value': 'net1'}])
- self.assertIsNone(self.handle.endpoint_url)
-
- @patch.object(FakeNeutronClient, 'delete_network')
- def test_handle_delete(self, mock_delete):
- fake_network_id = 'fake_network_id'
- self.handle.handle_delete(self.context, 'network', fake_network_id)
- mock_delete.assert_called_once_with(fake_network_id)
-
- @patch.object(FakeNeutronClient, 'delete_network')
- def test_handle_delete_fail(self, mock_delete):
- fake_network_id = 'fake_network_id'
- mock_delete.side_effect = q_exceptions.ConnectionFailed
- self.assertRaises(exceptions.EndpointNotAvailable,
- self.handle.handle_delete,
- self.context, 'network', fake_network_id)
- self.assertIsNone(self.handle.endpoint_url)
- mock_delete.side_effect = q_exceptions.NotFound
- ret = self.handle.handle_delete(self.context, 'network',
- fake_network_id)
- self.assertIsNone(ret)
-
- @patch.object(FakeNeutronClient, 'remove_gateway_router')
- def test_handle_action(self, mock_action):
- fake_router_id = 'fake_router_id'
- self.handle.handle_action(self.context, 'router', 'remove_gateway',
- fake_router_id)
- mock_action.assert_called_once_with(fake_router_id)
-
- @patch.object(FakeNeutronClient, 'remove_gateway_router')
- def test_handle_action_fail(self, mock_action):
- fake_router_id = 'fake_router_id'
- mock_action.side_effect = q_exceptions.ConnectionFailed
- self.assertRaises(exceptions.EndpointNotAvailable,
- self.handle.handle_action, self.context, 'router',
- 'remove_gateway', fake_router_id)
- self.assertIsNone(self.handle.endpoint_url)
diff --git a/tricircle/tests/unit/common/test_utils.py b/tricircle/tests/unit/common/test_utils.py
deleted file mode 100644
index 809b2ff5..00000000
--- a/tricircle/tests/unit/common/test_utils.py
+++ /dev/null
@@ -1,105 +0,0 @@
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import pecan
-import unittest
-
-from oslo_config import cfg
-from tricircle.common import constants as cons
-from tricircle.common import exceptions
-from tricircle.common import utils
-
-
-class TricircleUtilsTestCase(unittest.TestCase):
- def test_bool_from_string(self):
- self.assertEqual(True, utils.bool_from_string('true'))
- self.assertEqual(False, utils.bool_from_string('false'))
- self.assertRaises(ValueError, utils.bool_from_string, 'a', strict=True)
- self.assertEqual(True, utils.bool_from_string('a', default=True))
-
- def test_check_string_length(self):
- self.assertIsNone(utils.check_string_length(
- 'test', 'name', max_len=255))
- self.assertRaises(exceptions.InvalidInput,
- utils.check_string_length,
- 11, 'name', max_len=255)
- self.assertRaises(exceptions.InvalidInput,
- utils.check_string_length,
- '', 'name', min_len=1)
- self.assertRaises(exceptions.InvalidInput,
- utils.check_string_length,
- 'a' * 256, 'name', max_len=255)
-
- def test_get_id_from_name(self):
- output = utils.get_id_from_name(
- cons.RT_NETWORK, 'name#77b0babc-f7e4-4c14-b250-1f18835a52c2')
- self.assertEqual('77b0babc-f7e4-4c14-b250-1f18835a52c2', output)
-
- output = utils.get_id_from_name(
- cons.RT_NETWORK, '77b0babc-f7e4-4c14-b250-1f18835a52c2')
- self.assertEqual('77b0babc-f7e4-4c14-b250-1f18835a52c2', output)
-
- output = utils.get_id_from_name(
- cons.RT_NETWORK, 'name@not_uuid')
- self.assertIsNone(output)
-
- output = utils.get_id_from_name(
- cons.RT_PORT, '77b0babc-f7e4-4c14-b250-1f18835a52c2')
- self.assertEqual('77b0babc-f7e4-4c14-b250-1f18835a52c2', output)
-
- output = utils.get_id_from_name(
- cons.RT_PORT, 'not_uuid')
- self.assertIsNone(output)
-
- @mock.patch.object(pecan, 'response')
- def test_format_error(self, mock_response):
- output = utils.format_error(401, 'this is error', 'MyError')
- self.assertEqual({'MyError': {
- 'message': 'this is error', 'code': 401
- }}, output)
-
- output = utils.format_error(400, 'this is error')
- self.assertEqual({'badRequest': {
- 'message': 'this is error', 'code': 400
- }}, output)
-
- output = utils.format_error(401, 'this is error')
- self.assertEqual({'Error': {
- 'message': 'this is error', 'code': 401
- }}, output)
-
- @mock.patch('tricircle.common.utils.format_error')
- def test_format_api_error(self, mock_format_error):
- output = utils.format_api_error(400, 'this is error')
- self.assertEqual(mock_format_error.return_value, output)
-
- @mock.patch('tricircle.common.utils.format_error')
- def test_format_nova_error(self, mock_format_error):
- output = utils.format_nova_error(400, 'this is error')
- self.assertEqual(mock_format_error.return_value, output)
-
- @mock.patch('tricircle.common.utils.format_error')
- def test_format_cinder_error(self, mock_format_error):
- output = utils.format_cinder_error(400, 'this is error')
- self.assertEqual(mock_format_error.return_value, output)
-
- def test_get_pagination_limit(self):
- setattr(cfg.CONF, 'pagination_max_limit', 1024)
- self.assertEqual(512, utils.get_pagination_limit(512))
- self.assertEqual(1024, utils.get_pagination_limit(2048))
- self.assertEqual(1024, utils.get_pagination_limit(-1))
diff --git a/tricircle/tests/unit/db/__init__.py b/tricircle/tests/unit/db/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/unit/db/test_api.py b/tricircle/tests/unit/db/test_api.py
deleted file mode 100644
index d3b01c6d..00000000
--- a/tricircle/tests/unit/db/test_api.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from six.moves import xrange
-import unittest
-
-from tricircle.common import context
-from tricircle.common import exceptions
-
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
-
-
-class APITest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- self.context = context.Context()
-
- def _create_pod(self, index, test_az_uuid):
- pod_body = {'pod_id': 'test_pod_uuid_%d' % index,
- 'region_name': 'test_pod_%d' % index,
- 'pod_az_name': 'test_pod_az_name_%d' % index,
- 'dc_name': 'test_dc_name_%d' % index,
- 'az_name': test_az_uuid,
- }
- api.create_pod(self.context, pod_body)
-
- def _create_resource_mappings(self):
- route1 = {
- 'top_id': 'top_uuid',
- 'pod_id': 'test_pod_uuid_0',
- 'resource_type': 'network'}
- route2 = {
- 'top_id': 'top_uuid',
- 'pod_id': 'test_pod_uuid_1',
- 'bottom_id': 'top_uuid',
- 'resource_type': 'network'}
- route3 = {
- 'top_id': 'top_uuid2',
- 'pod_id': 'test_pod_uuid_2',
- 'bottom_id': 'bottom_uuid_2',
- 'resource_type': 'port'}
-
- routes = [route1, route2, route3]
- with self.context.session.begin():
- for route in routes:
- core.create_resource(
- self.context, models.ResourceRouting, route)
-
- def test_get_bottom_id_by_top_id_region_name(self):
- self._create_pod(0, 'test_az_uuid_0')
- self._create_pod(1, 'test_az_uuid_1')
- self._create_pod(2, 'test_az_uuid_2')
- self._create_resource_mappings()
- region_name = 'test_pod_0'
- bottom_id = api.get_bottom_id_by_top_id_region_name(
- self.context, 'top_uuid', region_name, 'network')
- self.assertIsNone(bottom_id)
-
- region_name = 'test_pod_1'
- bottom_id = api.get_bottom_id_by_top_id_region_name(
- self.context, 'top_uuid', region_name, 'network')
- self.assertEqual(bottom_id, 'top_uuid')
-
- def test_get_bottom_mappings_by_top_id(self):
- self._create_pod(0, 'test_az_uuid_0')
- self._create_pod(1, 'test_az_uuid_1')
- self._create_pod(2, 'test_az_uuid_2')
- self._create_resource_mappings()
- mappings = api.get_bottom_mappings_by_top_id(self.context,
- 'top_uuid', 'network')
- self.assertEqual('test_pod_uuid_1', mappings[0][0]['pod_id'])
- self.assertEqual('top_uuid', mappings[0][1])
-
- def test_get_bottom_mappings_by_tenant_pod(self):
- for i in xrange(3):
- pod = {'pod_id': 'test_pod_uuid_%d' % i,
- 'region_name': 'test_pod_%d' % i,
- 'az_name': 'test_az_uuid_%d' % i}
- api.create_pod(self.context, pod)
- routes = [
- {
- 'route':
- {
- 'top_id': 'top_uuid',
- 'pod_id': 'test_pod_uuid_0',
- 'project_id': 'test_project_uuid_0',
- 'resource_type': 'port'
- },
- },
-
- {
- 'route':
- {
- 'top_id': 'top_uuid_0',
- 'bottom_id': 'top_uuid_0',
- 'pod_id': 'test_pod_uuid_0',
- 'project_id': 'test_project_uuid_0',
- 'resource_type': 'port'
- },
- },
-
- {
- 'route':
- {
- 'top_id': 'top_uuid_1',
- 'bottom_id': 'top_uuid_1',
- 'pod_id': 'test_pod_uuid_0',
- 'project_id': 'test_project_uuid_0',
- 'resource_type': 'port'
- },
- },
-
- {
- 'route':
- {
- 'top_id': 'top_uuid_2',
- 'bottom_id': 'top_uuid_2',
- 'pod_id': 'test_pod_uuid_0',
- 'project_id': 'test_project_uuid_1',
- 'resource_type': 'port'
- },
- },
-
- {
- 'route':
- {
- 'top_id': 'top_uuid_3',
- 'bottom_id': 'top_uuid_3',
- 'pod_id': 'test_pod_uuid_1',
- 'project_id': 'test_project_uuid_1',
- 'resource_type': 'port'
- },
- }
- ]
-
- with self.context.session.begin():
- for route in routes:
- core.create_resource(
- self.context, models.ResourceRouting, route['route'])
-
- routings = api.get_bottom_mappings_by_tenant_pod(
- self.context,
- 'test_project_uuid_0',
- 'test_pod_uuid_0',
- 'port'
- )
- self.assertEqual(len(routings), 2)
- self.assertEqual(routings['top_uuid_0']['top_id'], 'top_uuid_0')
- self.assertEqual(routings['top_uuid_1']['top_id'], 'top_uuid_1')
-
- routings = api.get_bottom_mappings_by_tenant_pod(
- self.context,
- 'test_project_uuid_1',
- 'test_pod_uuid_0',
- 'port'
- )
- self.assertEqual(len(routings), 1)
- self.assertEqual(routings['top_uuid_2']['top_id'], 'top_uuid_2')
- self.assertEqual(routings['top_uuid_2']['bottom_id'], 'top_uuid_2')
-
- routings = api.get_bottom_mappings_by_tenant_pod(
- self.context,
- 'test_project_uuid_1',
- 'test_pod_uuid_1',
- 'port'
- )
- self.assertEqual(len(routings), 1)
- self.assertEqual(routings['top_uuid_3']['top_id'], 'top_uuid_3')
- self.assertEqual(routings['top_uuid_3']['bottom_id'], 'top_uuid_3')
-
- def test_get_pod_by_top_id(self):
- self._create_pod(1, 'test_az_uuid1')
- self._create_pod(2, 'test_az_uuid2')
- routes = [
- {
- 'top_id': 'top_uuid_1',
- 'bottom_id': 'bottom_uuid_1',
- 'pod_id': 'test_pod_uuid_1',
- 'project_id': 'test_project_uuid_1',
- 'resource_type': 'port'
- },
- {
- 'top_id': 'top_uuid_2',
- 'bottom_id': 'bottom_uuid_2-1',
- 'pod_id': 'test_pod_uuid_1',
- 'project_id': 'test_project_uuid_1',
- 'resource_type': 'network'
- },
- {
- 'top_id': 'top_uuid_2',
- 'bottom_id': 'bottom_uuid_2-2',
- 'pod_id': 'test_pod_uuid_2',
- 'project_id': 'test_project_uuid_1',
- 'resource_type': 'network'
- },
- {
- 'top_id': 'top_uuid_3',
- 'bottom_id': '',
- 'pod_id': 'test_pod_uuid_1',
- 'project_id': 'test_project_uuid_1',
- 'resource_type': 'port'
- }
- ]
-
- with self.context.session.begin():
- for route in routes:
- core.create_resource(
- self.context, models.ResourceRouting, route)
- pod = api.get_pod_by_top_id(self.context, 'top_uuid_1')
- self.assertEqual(pod['pod_id'], 'test_pod_uuid_1')
- pod = api.get_pod_by_top_id(self.context, 'top_uuid_2')
- # more than one routing entry found, the method returns None
- self.assertIsNone(pod)
- pod = api.get_pod_by_top_id(self.context, 'top_uuid_3')
- # bottom_id is empty, method returns None
- self.assertIsNone(pod)
-
- def test_get_next_bottom_pod(self):
- next_pod = api.get_next_bottom_pod(self.context)
- self.assertIsNone(next_pod)
- pods = []
- for i in xrange(5):
- pod = {'pod_id': 'test_pod_uuid_%d' % i,
- 'region_name': 'test_pod_%d' % i,
- 'pod_az_name': 'test_pod_az_name_%d' % i,
- 'dc_name': 'test_dc_name_%d' % i,
- 'az_name': 'test_az_uuid_%d' % i,
- }
- api.create_pod(self.context, pod)
- pods.append(pod)
- next_pod = api.get_next_bottom_pod(self.context)
- self.assertEqual(next_pod, pods[0])
-
- next_pod = api.get_next_bottom_pod(
- self.context, current_pod_id='test_pod_uuid_2')
- self.assertEqual(next_pod, pods[3])
-
- next_pod = api.get_next_bottom_pod(
- self.context, current_pod_id='test_pod_uuid_4')
- self.assertIsNone(next_pod)
-
- def test_find_pod_by_az_or_region(self):
- self._create_pod(0, 'test_az_uuid1')
- self._create_pod(1, 'test_az_uuid1')
- self._create_pod(2, 'test_az_uuid2')
-
- az_region = None
- pod = api.find_pod_by_az_or_region(self.context, az_region)
- self.assertIsNone(pod)
-
- az_region = 'test_pod_3'
- self.assertRaises(exceptions.PodNotFound,
- api.find_pod_by_az_or_region,
- self.context, az_region)
-
- az_region = 'test_pod_0'
- pod = api.find_pod_by_az_or_region(self.context, az_region)
- self.assertEqual(pod['region_name'], az_region)
-
- az_region = 'test_az_uuid2'
- pod = api.find_pod_by_az_or_region(self.context, az_region)
- self.assertEqual(pod['az_name'], az_region)
-
- az_region = 'test_az_uuid1'
- self.assertRaises(exceptions.InvalidInput,
- api.find_pod_by_az_or_region,
- self.context, az_region)
-
- def test_get_top_pod(self):
- self._create_pod(0, '')
- self._create_pod(1, 'test_az_uuid1')
- self._create_pod(2, 'test_az_uuid2')
-
- pod = api.get_top_pod(self.context)
- self.assertEqual(pod['region_name'], 'test_pod_0')
- self.assertEqual(pod['az_name'], '')
-
- def test_get_pod_by_region(self):
- self._create_pod(0, 'test_az_uuid1')
- self._create_pod(1, 'test_az_uuid1')
- self._create_pod(2, 'test_az_uuid2')
-
- region_name = 'test_pod_3'
- pod = api.get_pod_by_name(self.context, region_name)
- self.assertIsNone(pod)
-
- region_name = 'test_pod_0'
- pod = api.get_pod_by_name(self.context, region_name)
- self.assertEqual(pod['region_name'], region_name)
-
- def test_get_pod(self):
- self._create_pod(0, 'test_az_uuid1')
- self._create_pod(1, 'test_az_uuid1')
- self._create_pod(2, 'test_az_uuid2')
-
- pod_id = 'test_pod_uuid_0'
- pod = api.get_pod(self.context, pod_id)
- self.assertEqual(pod['pod_id'], pod_id)
-
- pod_id = 'test_pod_uuid_3'
- self.assertRaises(
- exceptions.ResourceNotFound, api.get_pod, self.context, pod_id)
-
- def test_delete_mappings_by_bottom_id(self):
- self._create_pod(0, 'test_az_uuid_0')
- self._create_pod(1, 'test_az_uuid_1')
- self._create_pod(2, 'test_az_uuid_2')
- self._create_resource_mappings()
- bottom_id = 'bottom_uuid_1'
- api.delete_mappings_by_bottom_id(self.context, bottom_id)
-
- filters = [{'key': 'bottom_id', 'comparator': 'eq',
- 'value': bottom_id}]
- routing = core.query_resource(
- self.context, models.ResourceRouting, filters, [])
- self.assertEqual(len(routing), 0)
-
- def test_delete_mappings_by_top_id(self):
- self._create_pod(0, 'test_az_uuid_0')
- self._create_pod(1, 'test_az_uuid_1')
- self._create_pod(2, 'test_az_uuid_2')
- self._create_resource_mappings()
- top_id = 'top_uuid'
-
- api.delete_mappings_by_top_id(self.context, top_id,
- pod_id='test_pod_uuid_0')
- mappings = api.get_bottom_mappings_by_top_id(
- self.context, top_id, 'network')
- # entry in pod_uuid_0 is deleted, entry in pod_uuid_1 is left
- self.assertEqual(1, len(mappings))
- self.assertEqual('test_pod_uuid_1', mappings[0][0]['pod_id'])
-
- api.delete_mappings_by_top_id(self.context, top_id)
- mappings = api.get_bottom_mappings_by_top_id(
- self.context, top_id, 'network')
- self.assertEqual(0, len(mappings))
-
- def test_update_pod(self):
- self._create_pod(0, 'test_az_uuid_0')
- pod_body = {
- 'region_name': 'test_pod_1',
- 'pod_az_name': 'test_pod_az_name_1',
- 'dc_name': 'test_dc_name_1',
- 'az_name': 'test_az_uuid_1'}
-
- updated_pod = api.update_pod(self.context, 'test_pod_uuid_0', pod_body)
- self.assertEqual(updated_pod['region_name'], 'test_pod_1')
- self.assertEqual(updated_pod['pod_az_name'], 'test_pod_az_name_1')
- self.assertEqual(updated_pod['dc_name'], 'test_dc_name_1')
- self.assertEqual(updated_pod['az_name'], 'test_az_uuid_1')
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
diff --git a/tricircle/tests/unit/db/test_models.py b/tricircle/tests/unit/db/test_models.py
deleted file mode 100644
index be0d899d..00000000
--- a/tricircle/tests/unit/db/test_models.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import datetime
-import inspect
-import unittest
-
-import oslo_db.exception
-import sqlalchemy as sql
-
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
-
-
-def _get_field_value(column):
- """Get field value for resource creating
-
- returning None indicates that not setting this field in resource dict
- """
- if column.nullable:
- # just skip nullable column
- return None
- if isinstance(column.type, sql.Text):
- return 'fake_text'
- elif isinstance(column.type, sql.Enum):
- return column.type.enums[0]
- elif isinstance(column.type, sql.String):
- return 'fake_str'
- elif isinstance(column.type, sql.Integer):
- return 1
- elif isinstance(column.type, sql.Float):
- return 1.0
- elif isinstance(column.type, sql.Boolean):
- return True
- elif isinstance(column.type, sql.DateTime):
- return datetime.datetime.utcnow()
- else:
- return None
-
-
-def _construct_resource_dict(resource_class):
- ret_dict = {}
- for field in inspect.getmembers(resource_class):
- if field[0] in resource_class.attributes:
- field_value = _get_field_value(field[1])
- if field_value is not None:
- ret_dict[field[0]] = field_value
- return ret_dict
-
-
-def _sort_model_by_foreign_key(resource_class_list):
- """Apply topology sorting to obey foreign key constraints"""
- relation_map = {}
- table_map = {}
- # {table: (set(depend_on_table), set(depended_by_table))}
- for resource_class in resource_class_list:
- table = resource_class.__tablename__
- if table not in relation_map:
- relation_map[table] = (set(), set())
- if table not in table_map:
- table_map[table] = resource_class
- for field in inspect.getmembers(resource_class):
- if field[0] in resource_class.attributes:
- f_keys = field[1].foreign_keys
- for f_key in f_keys:
- f_table = f_key.column.table.name
- # just skip self reference
- if table == f_table:
- continue
- relation_map[table][0].add(f_table)
- if f_table not in relation_map:
- relation_map[f_table] = (set(), set())
- relation_map[f_table][1].add(table)
-
- sorted_list = []
- total = len(relation_map)
-
- while len(sorted_list) < total:
- candidate_table = None
- for table in relation_map:
- # no depend-on table
- if not relation_map[table][0]:
- candidate_table = table
- sorted_list.append(candidate_table)
- for _table in relation_map[table][1]:
- relation_map[_table][0].remove(table)
- break
- del relation_map[candidate_table]
-
- return [table_map[table] for table in sorted_list]
-
-
-class ModelsTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- self.context = context.Context()
-
- def test_obj_to_dict(self):
- pod = {'pod_id': 'test_pod_uuid',
- 'region_name': 'test_pod',
- 'pod_az_name': 'test_pod_az_name',
- 'dc_name': 'test_dc_name',
- 'az_name': 'test_az_uuid'}
- pod_obj = models.Pod.from_dict(pod)
- for attr in pod_obj.attributes:
- self.assertEqual(getattr(pod_obj, attr), pod[attr])
-
- def test_create(self):
- pod = {'pod_id': 'test_pod_uuid',
- 'region_name': 'test_pod',
- 'pod_az_name': 'test_pod_az_name',
- 'dc_name': 'test_dc_name',
- 'az_name': 'test_az_uuid'}
- pod_ret = api.create_pod(self.context, pod)
- self.assertEqual(pod_ret, pod)
-
- configuration = {
- 'service_id': 'test_config_uuid',
- 'pod_id': 'test_pod_uuid',
- 'service_type': 'nova',
- 'service_url': 'http://test_url'
- }
- config_ret = api.create_cached_endpoints(self.context,
- configuration)
- self.assertEqual(config_ret, configuration)
-
- def test_update(self):
- pod = {'pod_id': 'test_pod_uuid',
- 'region_name': 'test_pod',
- 'az_name': 'test_az1_uuid'}
- api.create_pod(self.context, pod)
- update_dict = {'pod_id': 'fake_uuid',
- 'region_name': 'test_pod2',
- 'az_name': 'test_az2_uuid'}
- ret = api.update_pod(self.context, 'test_pod_uuid', update_dict)
- # primary key value will not be updated
- self.assertEqual(ret['pod_id'], 'test_pod_uuid')
- self.assertEqual(ret['region_name'], 'test_pod2')
- self.assertEqual(ret['az_name'], 'test_az2_uuid')
-
- def test_delete(self):
- pod = {'pod_id': 'test_pod_uuid',
- 'region_name': 'test_pod',
- 'az_name': 'test_az_uuid'}
- api.create_pod(self.context, pod)
- api.delete_pod(self.context, 'test_pod_uuid')
- self.assertRaises(exceptions.ResourceNotFound, api.get_pod,
- self.context, 'test_pod_uuid')
-
- def test_query(self):
- pod1 = {'pod_id': 'test_pod1_uuid',
- 'region_name': 'test_pod1',
- 'pod_az_name': 'test_pod_az_name1',
- 'dc_name': 'test_dc_name1',
- 'az_name': 'test_az1_uuid'}
- pod2 = {'pod_id': 'test_pod2_uuid',
- 'region_name': 'test_pod2',
- 'pod_az_name': 'test_pod_az_name2',
- 'dc_name': 'test_dc_name1',
- 'az_name': 'test_az2_uuid'}
- api.create_pod(self.context, pod1)
- api.create_pod(self.context, pod2)
- filters = [{'key': 'region_name',
- 'comparator': 'eq',
- 'value': 'test_pod2'}]
- pods = api.list_pods(self.context, filters)
- self.assertEqual(len(pods), 1)
- self.assertEqual(pods[0], pod2)
- filters = [{'key': 'region_name',
- 'comparator': 'eq',
- 'value': 'test_pod3'}]
- pods = api.list_pods(self.context, filters)
- self.assertEqual(len(pods), 0)
-
- def test_sort(self):
- pod1 = {'pod_id': 'test_pod1_uuid',
- 'region_name': 'test_pod1',
- 'pod_az_name': 'test_pod_az_name1',
- 'dc_name': 'test_dc_name1',
- 'az_name': 'test_az1_uuid'}
- pod2 = {'pod_id': 'test_pod2_uuid',
- 'region_name': 'test_pod2',
- 'pod_az_name': 'test_pod_az_name2',
- 'dc_name': 'test_dc_name1',
- 'az_name': 'test_az2_uuid'}
- pod3 = {'pod_id': 'test_pod3_uuid',
- 'region_name': 'test_pod3',
- 'pod_az_name': 'test_pod_az_name3',
- 'dc_name': 'test_dc_name1',
- 'az_name': 'test_az3_uuid'}
- pods = [pod1, pod2, pod3]
- for pod in pods:
- api.create_pod(self.context, pod)
- pods = api.list_pods(self.context,
- sorts=[(models.Pod.pod_id, False)])
- self.assertEqual(pods, [pod3, pod2, pod1])
-
- def test_resources(self):
- """Create all the resources to test model definition"""
- try:
- model_list = []
- for _, model_class in inspect.getmembers(models):
- if inspect.isclass(model_class) and (
- issubclass(model_class, core.ModelBase)):
- model_list.append(model_class)
- for model_class in _sort_model_by_foreign_key(model_list):
- create_dict = _construct_resource_dict(model_class)
- with self.context.session.begin():
- core.create_resource(
- self.context, model_class, create_dict)
- except Exception as e:
- msg = str(e)
- self.fail('test_resources raised Exception unexpectedly %s' % msg)
-
- def test_resource_routing_unique_key(self):
- pod = {'pod_id': 'test_pod1_uuid',
- 'region_name': 'test_pod1',
- 'az_name': 'test_az1_uuid'}
- api.create_pod(self.context, pod)
- routing = {'top_id': 'top_uuid',
- 'pod_id': 'test_pod1_uuid',
- 'resource_type': 'port'}
- with self.context.session.begin():
- core.create_resource(self.context, models.ResourceRouting, routing)
- self.assertRaises(oslo_db.exception.DBDuplicateEntry,
- core.create_resource,
- self.context, models.ResourceRouting, routing)
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
diff --git a/tricircle/tests/unit/network/__init__.py b/tricircle/tests/unit/network/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/unit/network/test_central_plugin.py b/tricircle/tests/unit/network/test_central_plugin.py
deleted file mode 100644
index 4ab9ad7e..00000000
--- a/tricircle/tests/unit/network/test_central_plugin.py
+++ /dev/null
@@ -1,2814 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import copy
-import mock
-from mock import patch
-import netaddr
-import six
-from six.moves import xrange
-import unittest
-
-from neutron_lib.api.definitions import portbindings
-from neutron_lib.api.definitions import provider_net
-import neutron_lib.constants as q_constants
-from neutron_lib.db import utils as db_utils
-import neutron_lib.exceptions as q_lib_exc
-from neutron_lib.exceptions import availability_zone as az_exc
-from neutron_lib.plugins import constants as plugin_constants
-from neutron_lib.plugins import directory
-
-import neutron.conf.common as q_config
-from neutron.db import db_base_plugin_v2
-from neutron.db import ipam_pluggable_backend
-from neutron.db import models_v2
-from neutron.db import rbac_db_models as rbac_db
-import neutron.objects.base as base_object
-from neutron.services.qos.drivers import manager as q_manager
-from neutron.services.trunk import plugin as trunk_plugin
-
-from neutron.plugins.ml2 import managers as n_managers
-
-from neutron.ipam import driver
-from neutron.ipam import exceptions as ipam_exc
-from neutron.ipam import requests
-import neutron.ipam.utils as ipam_utils
-
-from neutron import manager
-import neutronclient.common.exceptions as q_exceptions
-
-from oslo_config import cfg
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-
-from tricircle.common import client
-from tricircle.common import constants
-from tricircle.common import context
-from tricircle.common import exceptions as t_exceptions
-from tricircle.common.i18n import _
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-import tricircle.network.central_plugin as plugin
-from tricircle.network import central_qos_plugin
-from tricircle.network import helper
-from tricircle.network import qos_driver
-from tricircle.tests.unit.network import test_central_trunk_plugin
-from tricircle.tests.unit.network import test_qos
-from tricircle.tests.unit.network import test_security_groups
-import tricircle.tests.unit.utils as test_utils
-from tricircle.xjob import xmanager
-
-
-_resource_store = test_utils.get_resource_store()
-TOP_NETS = _resource_store.TOP_NETWORKS
-TOP_SUBNETS = _resource_store.TOP_SUBNETS
-TOP_PORTS = _resource_store.TOP_PORTS
-TOP_ROUTERS = _resource_store.TOP_ROUTERS
-TOP_ROUTERPORTS = _resource_store.TOP_ROUTERPORTS
-TOP_IPALLOCATIONS = _resource_store.TOP_IPALLOCATIONS
-TOP_VLANALLOCATIONS = _resource_store.TOP_ML2_VLAN_ALLOCATIONS
-TOP_VXLANALLOCATIONS = _resource_store.TOP_ML2_VXLAN_ALLOCATIONS
-TOP_FLATALLOCATIONS = _resource_store.TOP_ML2_FLAT_ALLOCATIONS
-TOP_SEGMENTS = _resource_store.TOP_NETWORKSEGMENTS
-TOP_FLOATINGIPS = _resource_store.TOP_FLOATINGIPS
-TOP_SGS = _resource_store.TOP_SECURITYGROUPS
-TOP_SG_RULES = _resource_store.TOP_SECURITYGROUPRULES
-TOP_POLICIES = _resource_store.TOP_QOS_POLICIES
-TOP_POLICY_RULES = _resource_store.TOP_QOS_BANDWIDTH_LIMIT_RULES
-BOTTOM1_NETS = _resource_store.BOTTOM1_NETWORKS
-BOTTOM1_SUBNETS = _resource_store.BOTTOM1_SUBNETS
-BOTTOM1_PORTS = _resource_store.BOTTOM1_PORTS
-BOTTOM1_SGS = _resource_store.BOTTOM1_SECURITYGROUPS
-BOTTOM1_FIPS = _resource_store.BOTTOM1_FLOATINGIPS
-BOTTOM1_ROUTERS = _resource_store.BOTTOM1_ROUTERS
-BOTTOM1_POLICIES = _resource_store.BOTTOM1_QOS_POLICIES
-BOTTOM1_POLICY_RULES = _resource_store.BOTTOM1_QOS_BANDWIDTH_LIMIT_RULES
-BOTTOM2_NETS = _resource_store.BOTTOM2_NETWORKS
-BOTTOM2_SUBNETS = _resource_store.BOTTOM2_SUBNETS
-BOTTOM2_PORTS = _resource_store.BOTTOM2_PORTS
-BOTTOM2_SGS = _resource_store.BOTTOM2_SECURITYGROUPS
-BOTTOM2_FIPS = _resource_store.BOTTOM2_FLOATINGIPS
-BOTTOM2_ROUTERS = _resource_store.BOTTOM2_ROUTERS
-BOTTOM2_POLICIES = _resource_store.BOTTOM2_QOS_POLICIES
-BOTTOM2_POLICY_RULES = _resource_store.BOTTOM2_QOS_BANDWIDTH_LIMIT_RULES
-TOP_TRUNKS = _resource_store.TOP_TRUNKS
-TOP_SUBPORTS = _resource_store.TOP_SUBPORTS
-BOTTOM1_TRUNKS = _resource_store.BOTTOM1_TRUNKS
-BOTTOM2_TRUNKS = _resource_store.BOTTOM2_TRUNKS
-BOTTOM1_SUBPORTS = _resource_store.BOTTOM1_SUBPORTS
-BOTTOM2_SUBPORTS = _resource_store.BOTTOM2_SUBPORTS
-TEST_TENANT_ID = test_utils.TEST_TENANT_ID
-FakeNeutronContext = test_utils.FakeNeutronContext
-
-
-def _fill_external_gateway_info(router):
- ext_gw_info = None
- for router_port in TOP_ROUTERPORTS:
- if router_port['router_id'] == router['id']:
- ext_gw_info = {
- 'network_id': router_port['port']['network_id'],
- 'external_fixed_ips': [
- {'subnet_id': ip["subnet_id"],
- 'ip_address': ip["ip_address"]}
- for ip in router_port['port']['fixed_ips']]}
- break
-
- router['external_gateway_info'] = ext_gw_info
- return router
-
-
-def _transform_az(network):
- az_hints_key = 'availability_zone_hints'
- if az_hints_key in network:
- ret = test_utils.DotDict(network)
- az_str = network[az_hints_key]
- ret[az_hints_key] = jsonutils.loads(az_str) if az_str else []
- return ret
- return network
-
-
-class FakeIpamSubnet(driver.Subnet):
- def __init__(self, subnet):
- self._subnet = subnet
-
- def allocate(self, address_request):
- pass
-
- def deallocate(self, address):
- pass
-
- def get_details(self):
- return requests.SpecificSubnetRequest(self._subnet['tenant_id'],
- self._subnet['id'],
- self._subnet['cidr'],
- self._subnet['gateway'],
- self._subnet['pools'])
-
-
-class FakeNetworkRBAC(object):
- def __init__(self, **kwargs):
- self.__tablename__ = 'networkrbacs'
- self.project_id = kwargs['tenant_id']
- self.id = uuidutils.generate_uuid()
- self.target_tenant = kwargs['target_tenant']
- self.action = kwargs['action']
- network = kwargs['network']
- self.object_id = network['id']
-
- def _as_dict(self):
- return {'project_id': self.project_id,
- 'id': self.id,
- 'target_tenant': self.target_tenant,
- 'action': self.action,
- 'object': self.object_id}
-
-
-class FakePool(driver.Pool):
- def allocate_subnet(self, subnet_request):
- if isinstance(subnet_request, requests.SpecificSubnetRequest):
- subnet_info = {'id': subnet_request.subnet_id,
- 'tenant_id': subnet_request.tenant_id,
- 'cidr': subnet_request.subnet_cidr,
- 'gateway': subnet_request.gateway_ip,
- 'pools': subnet_request.allocation_pools}
- return FakeIpamSubnet(subnet_info)
- prefix = self._subnetpool.prefixes[0]
- subnet = next(prefix.subnet(subnet_request.prefixlen))
- gateway = subnet.network + 1
- pools = ipam_utils.generate_pools(subnet.cidr,
- gateway)
- subnet_info = {'id': subnet_request.subnet_id,
- 'tenant_id': subnet_request.tenant_id,
- 'cidr': subnet.cidr,
- 'gateway': gateway,
- 'pools': pools}
- return FakeIpamSubnet(subnet_info)
-
- def get_subnet(self, subnet_id):
- for subnet in TOP_SUBNETS:
- if subnet['id'] == subnet_id:
- return FakeIpamSubnet(subnet)
- raise q_lib_exc.SubnetNotFound(subnet_id=id)
-
- def get_allocator(self, subnet_ids):
- return driver.SubnetGroup()
-
- def update_subnet(self, subnet_request):
- pools = []
- for subnet in TOP_SUBNETS:
- if subnet['id'] == subnet_request.subnet_id:
- for request_pool in subnet_request.allocation_pools:
- pool = {'start': str(request_pool._start),
- 'end': str(request_pool._end)}
- pools.append(pool)
- subnet['allocation_pools'] = pools
- return FakeIpamSubnet(subnet_request)
-
- raise ipam_exc.InvalidSubnetRequest(
- reason=_("updated subnet id not found"))
-
- def remove_subnet(self, subnet_id):
- pass
-
-
-class FakeNeutronClient(test_utils.FakeNeutronClient):
- _resource = 'port'
- ports_path = ''
-
-
-class FakeClient(test_utils.FakeClient):
- def __init__(self, region_name=None):
- super(FakeClient, self).__init__(region_name)
- self.client = FakeNeutronClient(self.region_name)
-
- def get_native_client(self, resource, ctx):
- return self.client
-
- def _get_connection(self):
- # only for mock purpose
- pass
-
- def _allocate_ip(self, port_body):
- subnet_list = self._res_map[self.region_name]['subnet']
- for subnet in subnet_list:
- if subnet['network_id'] == port_body['port']['network_id']:
- cidr = subnet['cidr']
- ip = cidr[:cidr.rindex('.')] + '.5'
- return {'subnet_id': subnet['id'],
- 'ip_address': ip}
-
- def create_resources(self, _type, ctx, body):
- self._get_connection()
- if _type == 'port':
- res_list = self._res_map[self.region_name][_type]
- subnet_ips_map = {}
- for res in res_list:
- fixed_ips = res.get('fixed_ips', [])
- for fixed_ip in fixed_ips:
- if fixed_ip['subnet_id'] not in subnet_ips_map:
- subnet_ips_map[fixed_ip['subnet_id']] = set()
- subnet_ips_map[fixed_ip['subnet_id']].add(
- fixed_ip['ip_address'])
- fixed_ips = body[_type].get('fixed_ips', [])
- for fixed_ip in fixed_ips:
- for subnet in self._res_map[self.region_name]['subnet']:
- ip_range = netaddr.IPNetwork(subnet['cidr'])
- ip = netaddr.IPAddress(fixed_ip['ip_address'])
- if ip in ip_range:
- fixed_ip['subnet_id'] = subnet['id']
- break
- if 'subnet_id' not in fixed_ip:
- # we still cannot find the proper subnet because this is a
- # copy port. The local plugin will create the missing
- # subnet for this port, but FakeClient won't, so we just
- # skip the ip address check
- continue
- if fixed_ip['ip_address'] in subnet_ips_map.get(
- fixed_ip['subnet_id'], set()):
- raise q_exceptions.IpAddressInUseClient()
- if 'device_id' not in body[_type]:
- body[_type]['device_id'] = ''
- if 'fixed_ips' not in body[_type]:
- body[_type]['fixed_ips'] = [self._allocate_ip(body)]
- if _type == 'subnet':
- if 'gateway_ip' not in body[_type]:
- cidr = body[_type]['cidr']
- body[_type]['gateway_ip'] = cidr[:cidr.rindex('.')] + '.1'
- if _type == 'qos_policy':
- body['policy']['id'] = uuidutils.generate_uuid()
- elif 'id' not in body[_type]:
- body[_type]['id'] = uuidutils.generate_uuid()
- return super(FakeClient, self).create_resources(_type, ctx, body)
-
- def list_networks(self, ctx, filters=None):
- networks = self.list_resources('network', ctx, filters)
- if self.region_name != 'top':
- return networks
- ret_list = []
- for network in networks:
- ret_list.append(_transform_az(network))
- return ret_list
-
- def get_networks(self, ctx, net_id):
- return self.get_resource(constants.RT_NETWORK, ctx, net_id)
-
- def delete_networks(self, ctx, net_id):
- self.delete_resources('network', ctx, net_id)
-
- def update_networks(self, ctx, net_id, network):
- self.update_resources('network', ctx, net_id, network)
-
- def list_subnets(self, ctx, filters=None):
- return self.list_resources('subnet', ctx, filters)
-
- def get_subnets(self, ctx, subnet_id):
- return self.get_resource(constants.RT_SUBNET, ctx, subnet_id)
-
- def delete_subnets(self, ctx, subnet_id):
- self.delete_resources('subnet', ctx, subnet_id)
-
- def update_ports(self, ctx, port_id, body):
- self.update_resources('port', ctx, port_id, body)
-
- def update_subnets(self, ctx, subnet_id, body):
- updated = self.update_resources('subnet', ctx, subnet_id, body)
- if not updated:
- raise ipam_exc.InvalidSubnetRequest(
- reason=_("updated subnet id not found"))
-
- def create_ports(self, ctx, body):
- if 'ports' in body:
- ret = []
- for port in body['ports']:
- ret.append(self.create_resources('port', ctx, {'port': port}))
- return ret
- return self.create_resources('port', ctx, body)
-
- def list_ports(self, ctx, filters=None):
- filter_dict = {}
- filters = filters or []
- for query_filter in filters:
- key = query_filter['key']
- # when querying ports, "fields" is passed in the query string to
- # ask the server to only return necessary fields, which can reduce
- # the data being transferred. In test, we just return all the
- # fields since there's no need to optimize
- if key != 'fields':
- value = query_filter['value']
- filter_dict[key] = value
- return self.client.get('', filter_dict)['ports']
-
- def get_ports(self, ctx, port_id):
- return self.client.get(
- '', params={'id': [port_id]})['ports'][0]
-
- def delete_ports(self, ctx, port_id):
- self.delete_resources('port', ctx, port_id)
- index = -1
- for i, allocation in enumerate(TOP_IPALLOCATIONS):
- if allocation['port_id'] == port_id:
- index = i
- break
- if index != -1:
- del TOP_IPALLOCATIONS[index]
-
- def dhcp_allocate_ip(self, subnets):
- fixed_ips = []
- for subnet in subnets:
- fixed_ips.append({'subnet_id': subnet['id'],
- 'ip_address': '10.0.0.1'})
- return fixed_ips
-
- def add_gateway_routers(self, ctx, *args, **kwargs):
- router_id, body = args
- try:
- t_name = constants.bridge_port_name % (TEST_TENANT_ID, router_id)
- t_client = FakeClient()
- t_ports = t_client.list_ports(
- ctx, [{'key': 'name', 'comparator': 'eq', 'value': t_name}])
- b_id = t_ports[0]['id'] if t_ports else uuidutils.generate_uuid()
- host_id = 'host1' if self.region_name == 'pod_1' else 'host_2'
- if not body.get('external_fixed_ips'):
- net_id = body['network_id']
- subnets = self.list_subnets(ctx,
- [{'key': 'network_id',
- 'comparator': 'eq',
- 'value': net_id}])
- body['external_fixed_ips'] = self.dhcp_allocate_ip(subnets)
-
- self.create_ports(ctx, {'port': {
- 'admin_state_up': True,
- 'id': b_id,
- 'name': '',
- 'network_id': body['network_id'],
- 'fixed_ips': body['external_fixed_ips'],
- 'mac_address': '',
- 'device_id': router_id,
- 'device_owner': 'network:router_gateway',
- 'binding:vif_type': portbindings.VIF_TYPE_OVS,
- 'binding:host_id': host_id
- }})
- except q_exceptions.IpAddressInUseClient:
- # just skip if the gateway port is already there
- pass
-
- def add_interface_routers(self, ctx, *args, **kwargs):
- self._get_connection()
-
- router_id, body = args
- if 'port_id' in body:
- for port in self._res_map[self.region_name]['port']:
- if port['id'] == body['port_id']:
- port['device_id'] = router_id
- port['device_owner'] = 'network:router_interface'
- else:
- subnet_id = body['subnet_id']
- subnet = self.get_subnets(ctx, subnet_id)
- self.create_ports(ctx, {'port': {
- 'tenant_id': subnet['tenant_id'],
- 'admin_state_up': True,
- 'id': uuidutils.generate_uuid(),
- 'name': '',
- 'network_id': subnet['network_id'],
- 'fixed_ips': [
- {'subnet_id': subnet_id,
- 'ip_address': subnet['gateway_ip']}
- ],
- 'mac_address': '',
- 'device_id': router_id,
- 'device_owner': 'network:router_interface'
- }})
-
- def remove_interface_routers(self, ctx, *args, **kwargs):
- # only for mock purpose
- pass
-
- def get_routers(self, ctx, router_id):
- router = self.get_resource(constants.RT_ROUTER, ctx, router_id)
- return _fill_external_gateway_info(router)
-
- def list_routers(self, ctx, filters=None):
- return self.list_resources('router', ctx, filters)
-
- def delete_routers(self, ctx, router_id):
- self.delete_resources('router', ctx, router_id)
-
- def action_routers(self, ctx, action, *args, **kwargs):
- # divide into three functions for test purpose
- if action == 'add_interface':
- return self.add_interface_routers(ctx, *args, **kwargs)
- elif action == 'add_gateway':
- return self.add_gateway_routers(ctx, *args, **kwargs)
- elif action == 'remove_interface':
- return self.remove_interface_routers(ctx, *args, **kwargs)
-
- def _is_bridge_network_attached(self):
- pass
-
- def create_floatingips(self, ctx, body):
- fip = self.create_resources('floatingip', ctx, body)
- for key in ['fixed_port_id']:
- if key not in fip:
- fip[key] = None
- return fip
-
- def list_floatingips(self, ctx, filters=None):
- fips = self.list_resources('floatingip', ctx, filters)
- for fip in fips:
- if 'port_id' not in fip:
- fip['port_id'] = fip.get('fixed_port_id', None)
- return fips
-
- def update_floatingips(self, ctx, _id, body):
- pass
-
- def delete_floatingips(self, ctx, _id):
- self.delete_resources('floatingip', ctx, _id)
-
- @staticmethod
- def _compare_rule(rule1, rule2):
- for key in ('direction', 'remote_ip_prefix', 'protocol', 'ethertype',
- 'port_range_max', 'port_range_min'):
- if rule1[key] != rule2[key]:
- return False
- return True
-
- def create_security_group_rules(self, ctx, body):
- sg_id = body['security_group_rules'][0]['security_group_id']
- res_list = self._res_map[self.region_name]['security_group']
- for sg in res_list:
- if sg['id'] == sg_id:
- target_sg = sg
- new_rules = copy.copy(body['security_group_rules'])
- match_found = False
- for new_rule in new_rules:
- for rule in target_sg['security_group_rules']:
- if self._compare_rule(rule, new_rule):
- match_found = True
- break
- if not match_found:
- new_rule['id'] = uuidutils.generate_uuid()
- if match_found:
- raise q_exceptions.Conflict()
- target_sg['security_group_rules'].extend(body['security_group_rules'])
-
- def delete_security_group_rules(self, ctx, rule_id):
- res_list = self._res_map[self.region_name]['security_group']
- for sg in res_list:
- for rule in sg['security_group_rules']:
- if rule['id'] == rule_id:
- sg['security_group_rules'].remove(rule)
- return
-
- def get_security_groups(self, ctx, sg_id):
- sg = self.get_resource(constants.RT_SG, ctx, sg_id)
- # need to do a deep copy because we will traverse the security
- # group's 'security_group_rules' field and make changes to the
- # group
- return copy.deepcopy(sg)
-
- def get_security_group(self, ctx, _id, fields=None, tenant_id=None):
- pass
-
- def delete_security_groups(self, ctx, sg_id):
- res_list = self._res_map[self.region_name]['security_group']
- for sg in res_list:
- if sg['id'] == sg_id:
- res_list.remove(sg)
-
- def get_qos_policies(self, ctx, policy_id):
- rules = {'rules': []}
- rule_list = \
- self._res_map[self.region_name]['qos_bandwidth_limit_rules']
- for rule in rule_list:
- if rule['qos_policy_id'] == policy_id:
- rules['rules'].append(rule)
-
- res_list = self._res_map[self.region_name]['qos_policy']
- for policy in res_list:
- if policy['id'] == policy_id:
- policy['rules'] = rules['rules']
- return policy
-
- def update_qos_policies(self, ctx, policy_id, body):
- self.update_resources('policy', ctx, policy_id, body)
-
- def delete_qos_policies(self, ctx, policy_id):
- self.delete_resources('policy', ctx, policy_id)
-
- def list_bandwidth_limit_rules(self, ctx, filters):
- policy_id = filters[0].get("value")
- if self.region_name == 'top':
- res_list = \
- self._res_map[self.region_name]['qos_bandwidth_limit_rules']
- else:
- res_list = self._res_map[self.region_name]['qos_policy']
- for policy in res_list:
- if policy['id'] == policy_id:
- res_list = policy.get('rules', [])
-
- ret_rules = []
- for rule in res_list:
- if rule['qos_policy_id'] == policy_id:
- ret_rules.append(rule)
-
- return ret_rules
-
- def list_dscp_marking_rules(self, ctx, filters):
- return []
-
- def list_minimum_bandwidth_rules(self, ctx, filters):
- return []
-
- def create_bandwidth_limit_rules(self, ctx, policy_id, body):
- res_list = self._res_map[self.region_name]['qos_policy']
- for policy in res_list:
- if policy['id'] == policy_id:
- rule_id = uuidutils.generate_uuid()
- body['bandwidth_limit_rule']['id'] = rule_id
- body['bandwidth_limit_rule']['qos_policy_id'] = policy_id
- policy['rules'].append(body['bandwidth_limit_rule'])
- return body
-
- raise q_exceptions.Conflict()
-
- def create_dscp_marking_rules(self, ctx, policy_id, body):
- pass
-
- def create_minimum_bandwidth_rules(self, ctx, policy_id, body):
- pass
-
- def delete_bandwidth_limit_rules(self, ctx, combined_id):
- (rule_id, policy_id) = combined_id.split('#')
- res_list = self._res_map[self.region_name]['qos_policy']
- for policy in res_list:
- if policy['id'] == policy_id:
- for rule in policy['rules']:
- if rule['id'] == rule_id:
- policy['rules'].remove(rule)
- return
- raise q_exceptions.Conflict()
-
- def delete_dscp_marking_rules(self, ctx, combined_id):
- pass
-
- def delete_minimum_bandwidth_rules(self, ctx, combined_id):
- pass
-
- def list_security_groups(self, ctx, sg_filters):
- return self.list_resources('security_group', ctx, sg_filters)
-
-
-def update_floatingip_dict(fip_dict, update_dict):
- for field in ('subnet_id', 'floating_ip_address', 'tenant_id'):
- update_dict.pop(field, None)
-
- def _update():
- if type(fip_dict) == test_utils.DotDict:
- fip_dict.update(update_dict)
- else:
- for key in update_dict:
- setattr(fip_dict, key, update_dict[key])
-
- if not update_dict.get('port_id'):
- update_dict['fixed_port_id'] = None
- update_dict['fixed_ip_address'] = None
- update_dict['router_id'] = None
- _update()
- if 'tenant_id' not in fip_dict.keys():
- fip_dict['tenant_id'] = TEST_TENANT_ID
- update_dict['tenant_id'] = TEST_TENANT_ID
- return fip_dict
- for port in TOP_PORTS:
- if port['id'] != update_dict['port_id']:
- continue
- update_dict['fixed_port_id'] = port['id']
- update_dict[
- 'fixed_ip_address'] = port['fixed_ips'][0]['ip_address']
- for router_port in TOP_ROUTERPORTS:
- for _port in TOP_PORTS:
- if _port['id'] != router_port['port_id']:
- continue
- if _port['network_id'] == port['network_id']:
- update_dict['router_id'] = router_port['router_id']
-
- _update()
- if 'tenant_id' not in fip_dict.keys():
- fip_dict['tenant_id'] = TEST_TENANT_ID
- update_dict['tenant_id'] = TEST_TENANT_ID
- return fip_dict
-
-
-def update_floatingip(self, context, _id, floatingip):
- for fip in TOP_FLOATINGIPS:
- if fip['id'] != _id:
- continue
- update_floatingip_dict(fip, floatingip['floatingip'])
-
-
-def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
- return update_floatingip_dict(floatingip_db, fip)
-
-
-class FakeBaseXManager(xmanager.XManager):
- def __init__(self, fake_plugin):
- self.clients = {constants.TOP: client.Client()}
- self.job_handles = {
- constants.JT_CONFIGURE_ROUTE: self.configure_route,
- constants.JT_ROUTER_SETUP: self.setup_bottom_router,
- constants.JT_PORT_DELETE: self.delete_server_port}
- self.helper = FakeHelper(fake_plugin)
-
- def _get_client(self, region_name=None):
- return FakeClient(region_name)
-
- def setup_bottom_router(self, ctx, payload):
- (b_pod_id,
- t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#')
-
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, t_net_id, constants.RT_NETWORK)
- b_pods = [mapping[0] for mapping in mappings]
- for b_pod in b_pods:
- resource_id = '%s#%s#%s' % (b_pod['pod_id'],
- t_router_id, t_net_id)
- _payload = {constants.JT_ROUTER_SETUP: resource_id}
- super(FakeBaseXManager,
- self).setup_bottom_router(ctx, _payload)
- else:
- super(FakeBaseXManager, self).setup_bottom_router(ctx, payload)
-
-
-class FakeXManager(FakeBaseXManager):
- def __init__(self, fake_plugin):
- super(FakeXManager, self).__init__(fake_plugin)
- self.xjob_handler = FakeBaseRPCAPI(fake_plugin)
-
-
-class FakeBaseRPCAPI(object):
- def __init__(self, fake_plugin):
- self.xmanager = FakeBaseXManager(fake_plugin)
-
- def configure_route(self, ctxt, project_id, router_id):
- pass
-
- def update_network(self, ctxt, project_id, network_id, pod_id):
- combine_id = '%s#%s' % (pod_id, network_id)
- self.xmanager.update_network(
- ctxt, payload={constants.JT_NETWORK_UPDATE: combine_id})
-
- def update_subnet(self, ctxt, project_id, subnet_id, pod_id):
- combine_id = '%s#%s' % (pod_id, subnet_id)
- self.xmanager.update_subnet(
- ctxt, payload={constants.JT_SUBNET_UPDATE: combine_id})
-
- def configure_security_group_rules(self, ctxt, project_id):
- pass
-
- def setup_shadow_ports(self, ctxt, project_id, pod_id, net_id):
- pass
-
- def create_qos_policy(self, ctxt, project_id, policy_id, pod_id,
- res_type, res_id=None):
- combine_id = '%s#%s#%s#%s' % (pod_id, policy_id, res_type, res_id)
- self.xmanager.create_qos_policy(
- ctxt, payload={constants.JT_QOS_CREATE: combine_id})
-
- def update_qos_policy(self, ctxt, project_id, policy_id, pod_id):
- combine_id = '%s#%s' % (pod_id, policy_id)
- self.xmanager.update_qos_policy(
- ctxt, payload={constants.JT_QOS_UPDATE: combine_id})
-
- def delete_qos_policy(self, ctxt, project_id, policy_id, pod_id):
- combine_id = '%s#%s' % (pod_id, policy_id)
- self.xmanager.delete_qos_policy(
- ctxt, payload={constants.JT_QOS_DELETE: combine_id})
-
- def sync_qos_policy_rules(self, ctxt, project_id, policy_id):
- self.xmanager.sync_qos_policy_rules(
- ctxt, payload={constants.JT_SYNC_QOS_RULE: policy_id})
-
-
-class FakeRPCAPI(FakeBaseRPCAPI):
- def __init__(self, fake_plugin):
- self.xmanager = FakeXManager(fake_plugin)
-
- def setup_bottom_router(self, ctxt, project_id, net_id, router_id, pod_id):
- combine_id = '%s#%s#%s' % (pod_id, router_id, net_id)
- self.xmanager.setup_bottom_router(
- ctxt, payload={constants.JT_ROUTER_SETUP: combine_id})
-
- def delete_server_port(self, ctxt, project_id, port_id, pod_id):
- pass
-
- def configure_security_group_rules(self, ctxt, project_id):
- self.xmanager.configure_security_group_rules(
- ctxt, payload={constants.JT_SEG_RULE_SETUP: project_id})
-
- def setup_shadow_ports(self, ctxt, project_id, pod_id, net_id):
- combine_id = '%s#%s' % (pod_id, net_id)
- self.xmanager.setup_shadow_ports(
- ctxt, payload={constants.JT_SHADOW_PORT_SETUP: combine_id})
-
-
-class FakeHelper(helper.NetworkHelper):
- def _get_client(self, region_name=None):
- return FakeClient(region_name)
-
- def _prepare_top_element_by_call(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- if not q_ctx:
- q_ctx = FakeNeutronContext()
- return super(FakeHelper, self)._prepare_top_element_by_call(
- t_ctx, q_ctx, project_id, pod, ele, _type, body)
-
- def _get_top_element(self, t_ctx, q_ctx, _type, _id):
- if not q_ctx:
- q_ctx = FakeNeutronContext()
- return super(FakeHelper, self)._get_top_element(
- t_ctx, q_ctx, _type, _id)
-
-
-class FakeExtensionManager(n_managers.ExtensionManager):
- def __init__(self):
- super(FakeExtensionManager, self).__init__()
-
-
-class FakeTricircleQoSDriver(qos_driver.TricircleQoSDriver):
- def __init__(self, name, vif_types, vnic_types,
- supported_rules,
- requires_rpc_notifications):
- super(FakeTricircleQoSDriver, self).__init__(
- name, vif_types, vnic_types, supported_rules,
- requires_rpc_notifications)
- self.xjob_handler = FakeRPCAPI(self)
-
- @staticmethod
- def create():
- return FakeTricircleQoSDriver(
- name='tricircle',
- vif_types=qos_driver.VIF_TYPES,
- vnic_types=portbindings.VNIC_TYPES,
- supported_rules=qos_driver.SUPPORTED_RULES,
- requires_rpc_notifications=False)
-
-
-class FakeQosServiceDriverManager(q_manager.QosServiceDriverManager):
- def __init__(self):
- self._drivers = [FakeTricircleQoSDriver.create()]
- self.rpc_notifications_required = False
-
-
-class FakePlugin(plugin.TricirclePlugin,
- central_qos_plugin.TricircleQosPlugin):
- def __init__(self):
- self.set_ipam_backend()
- self.helper = FakeHelper(self)
- self.xjob_handler = FakeRPCAPI(self)
- self.type_manager = test_utils.FakeTypeManager()
- self.extension_manager = FakeExtensionManager()
- self.extension_manager.initialize()
- self.driver_manager = FakeQosServiceDriverManager()
-
- def _get_client(self, region_name):
- return FakeClient(region_name)
-
- def create_network(self, context, network):
- # Neutron has been updated to use the new enginefacade, so we no
- # longer call update_network in TricirclePlugin.create_network to
- # update AZ info. The new context manager updates AZ info after the
- # context exits, but since we don't simulate that process, we
- # override this method to insert AZ info ourselves
- net = super(FakePlugin, self).create_network(context, network)
- if 'availability_zone_hints' not in network['network']:
- return net
- for _net in TOP_NETS:
- if _net['id'] == net['id']:
- _net['availability_zone_hints'] = jsonutils.dumps(
- network['network']['availability_zone_hints'])
- return net
-
- def _make_network_dict(self, network, fields=None,
- process_extensions=True, context=None):
- network = _transform_az(network)
- if 'project_id' in network:
- network['tenant_id'] = network['project_id']
- return network
-
- def _make_subnet_dict(self, ori_subnet, fields=None, context=None):
- if hasattr(ori_subnet, 'to_dict'):
- subnet = ori_subnet.to_dict()
- elif hasattr(ori_subnet, '_as_dict'):
- subnet = ori_subnet._as_dict()
- else:
- subnet = ori_subnet
- if 'ipv6_ra_mode' not in subnet:
- subnet['ipv6_ra_mode'] = None
- if 'ipv6_address_mode' not in subnet:
- subnet['ipv6_address_mode'] = None
- if isinstance(subnet.get('gateway_ip'), netaddr.ip.IPAddress):
- subnet['gateway_ip'] = str(subnet['gateway_ip'])
- if 'project_id' in subnet:
- subnet['tenant_id'] = subnet['project_id']
- return subnet
-
- def _make_port_dict(self, ori_port, fields=None, process_extensions=True):
- if not isinstance(ori_port, dict):
- port = ori_port._as_dict()
- port['fixed_ips'] = ori_port.get('fixed_ips')
- else:
- port = ori_port
- if 'project_id' in port:
- port['tenant_id'] = port['project_id']
- if port.get('fixed_ips'):
- if isinstance(port['fixed_ips'][0], dict):
- return port
- else:
- for i, fixed_ip in enumerate(port['fixed_ips']):
- port['fixed_ips'][i] = {
- 'subnet_id': fixed_ip['subnet_id'],
- 'ip_address': fixed_ip['ip_address']}
- return port
- # if fixed_ips is empty, we first try to load it from the ip allocations
- for allocation in TOP_IPALLOCATIONS:
- if allocation['port_id'] == port['id']:
- ret = {}
- for key, value in six.iteritems(port):
- if key == 'fixed_ips':
- ret[key] = [{'subnet_id': allocation['subnet_id'],
- 'ip_address': allocation['ip_address']}]
- else:
- ret[key] = value
- if 'project_id' in ret:
- ret['tenant_id'] = ret['project_id']
- return ret
- return port
-
- def _make_security_group_dict(self, security_group, fields=None):
- return security_group
-
- def _get_port_security_group_bindings(self, context, filters):
- return None
-
-
-def fake_get_context_from_neutron_context(q_context):
- return context.get_db_context()
-
-
-def fake_get_client(self, region_name):
- return FakeClient(region_name)
-
-
-def fake_make_network_dict(self, network, fields=None,
- process_extensions=True, context=None):
- return network
-
-
-def fake_make_subnet_dict(self, subnet, fields=None, context=None):
- return subnet
-
-
-def fake_make_router_dict(self, router, fields=None, process_extensions=True):
- return _fill_external_gateway_info(router)
-
-
-def fake_generate_ip(subnet):
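- # hand out the next free address in the subnet: take the highest host
- # suffix already recorded in TOP_IPALLOCATIONS for this subnet and add
- # one (assumes a dotted /24-style cidr string)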
- suffix = 1
- for allocation in TOP_IPALLOCATIONS:
- if allocation['subnet_id'] == subnet['id']:
- ip = allocation['ip_address']
- current_suffix = int(ip[ip.rindex('.') + 1:])
- if current_suffix >= suffix:
- suffix = current_suffix
- suffix += 1
- cidr = subnet['cidr']
- new_ip = cidr[:cidr.rindex('.') + 1] + ('%d' % suffix)
- return {'ip_address': new_ip, 'subnet_id': subnet['id']}
-
-
-def fake_allocate_ips_for_port(self, context, port):
- if 'fixed_ips' in port['port'] and (
- port['port'][
- 'fixed_ips'] is not q_constants.ATTR_NOT_SPECIFIED):
- return port['port']['fixed_ips']
- for subnet in TOP_SUBNETS:
- if subnet['network_id'] == port['port']['network_id']:
- allocation = fake_generate_ip(subnet)
- # save allocation so we can retrieve it in make_port_dict
- TOP_IPALLOCATIONS.append(models_v2.IPAllocation(
- network_id=subnet['network_id'],
- port_id=port['port']['id'],
- ip_address=allocation['ip_address'],
- subnet_id=allocation['subnet_id']))
- return [allocation]
-
-
-def fake_update_ips_for_port(self, context, port, host,
- original_ips, new_ips, mac):
- # NOTE: remove this mock after we support ip updating
- return ipam_pluggable_backend.IpamPluggableBackend.Changes(
- add=[], original=[], remove=[])
-
-
-@classmethod
-def fake_get_instance(cls, subnet_pool, context):
- return FakePool(subnet_pool, context)
-
-
-def fake_get_plugin(alias=plugin_constants.CORE):
- if alias == 'trunk':
- return test_central_trunk_plugin.FakePlugin()
- return FakePlugin()
-
-
-def fake_filter_non_model_columns(data, model):
- return data
-
-
-@classmethod
-def fake_load_obj(cls, context, db_obj, fields=None):
- return db_obj
-
-
-class FakeTrunkPlugin(object):
-
- def get_trunk_subports(self, context, filters):
- return None
-
-
-class PluginTest(unittest.TestCase,
- test_security_groups.TricircleSecurityGroupTestMixin,
- test_qos.TricircleQosTestMixin):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- cfg.CONF.register_opts(q_config.core_opts)
- cfg.CONF.register_opts(plugin.tricircle_opts)
- plugin_path = \
- 'tricircle.tests.unit.network.test_central_plugin.FakePlugin'
- cfg.CONF.set_override('core_plugin', plugin_path)
- cfg.CONF.set_override('enable_api_gateway', True)
- self.context = context.Context()
- self.save_method = manager.NeutronManager._get_default_service_plugins
- manager.NeutronManager._get_default_service_plugins = mock.Mock()
- manager.NeutronManager._get_default_service_plugins.return_value = []
- xmanager.IN_TEST = True
-
- phynet = 'bridge'
- phynet2 = 'bridge2'
- vlan_min, vlan_max = 2000, 2001
- vxlan_min, vxlan_max = 20001, 20002
- cfg.CONF.set_override('type_drivers', ['local', 'vlan'],
- group='tricircle')
- cfg.CONF.set_override('tenant_network_types', ['local', 'vlan'],
- group='tricircle')
- cfg.CONF.set_override('network_vlan_ranges',
- ['%s:%d:%d' % (phynet, vlan_min, vlan_max),
- '%s:%d:%d' % (phynet2, vlan_min, vlan_max)],
- group='tricircle')
- cfg.CONF.set_override('bridge_network_type', 'vlan',
- group='tricircle')
- cfg.CONF.set_override('default_region_for_external_network',
- 'pod_1', group='tricircle')
- for vlan in (vlan_min, vlan_max):
- TOP_VLANALLOCATIONS.append(
- test_utils.DotDict({'physical_network': phynet,
- 'vlan_id': vlan, 'allocated': False}))
- for vxlan in (vxlan_min, vxlan_max):
- TOP_VXLANALLOCATIONS.append(
- test_utils.DotDict({'vxlan_vni': vxlan, 'allocated': False}))
-
- def fake_get_plugin(alias=plugin_constants.CORE):
- if alias == 'trunk':
- return FakeTrunkPlugin()
- return FakePlugin()
- # from neutron_lib.plugins import directory
- # directory.get_plugin = fake_get_plugin
-
- def _basic_pod_route_setup(self):
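- # register two bottom pods plus the top pod and add one port routing
- # entry per bottom pod; most tests build on this baseline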
- pod1 = {'pod_id': 'pod_id_1',
- 'region_name': 'pod_1',
- 'az_name': 'az_name_1'}
- pod2 = {'pod_id': 'pod_id_2',
- 'region_name': 'pod_2',
- 'az_name': 'az_name_2'}
- pod3 = {'pod_id': 'pod_id_0',
- 'region_name': 'top_pod',
- 'az_name': ''}
- for pod in (pod1, pod2, pod3):
- db_api.create_pod(self.context, pod)
- route1 = {
- 'top_id': 'top_id_1',
- 'pod_id': 'pod_id_1',
- 'bottom_id': 'bottom_id_1',
- 'resource_type': 'port'}
- route2 = {
- 'top_id': 'top_id_2',
- 'pod_id': 'pod_id_2',
- 'bottom_id': 'bottom_id_2',
- 'resource_type': 'port'}
- with self.context.session.begin():
- core.create_resource(self.context, models.ResourceRouting, route1)
- core.create_resource(self.context, models.ResourceRouting, route2)
-
- def _basic_port_setup(self):
- TOP_PORTS.extend([{'id': 'top_id_0', 'name': 'top',
- 'fixed_ips': [models_v2.IPAllocation(
- port_id='top_id_0', ip_address='10.0.0.1',
- subnet_id='top_subnet_id',
- network_id='top_net_id')]},
- {'id': 'top_id_1', 'name': 'top',
- 'tenant_id': 'project_id'},
- {'id': 'top_id_2', 'name': 'top'},
- {'id': 'top_id_3', 'name': 'top'}])
- BOTTOM1_PORTS.append({'id': 'bottom_id_1', 'name': 'bottom'})
- BOTTOM2_PORTS.append({'id': 'bottom_id_2', 'name': 'bottom'})
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- @patch.object(plugin.TricirclePlugin, '_get_client',
- new=fake_get_client)
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_port')
- def test_get_port(self, mock_plugin_method):
- self._basic_pod_route_setup()
- self._basic_port_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- fake_plugin.get_port(neutron_context, 'top_id_0')
- port1 = fake_plugin.get_port(neutron_context, 'top_id_1')
- port2 = fake_plugin.get_port(neutron_context, 'top_id_2')
- fake_plugin.get_port(neutron_context, 'top_id_3')
-
- self.assertEqual({'id': 'top_id_1', 'name': 'bottom',
- 'qos_policy_id': None}, port1)
- self.assertEqual({'id': 'top_id_2', 'name': 'bottom',
- 'qos_policy_id': None}, port2)
- calls = [mock.call(neutron_context, 'top_id_0', None),
- mock.call().__setitem__('qos_policy_id', None),
- mock.call(neutron_context, 'top_id_3', None),
- mock.call().__setitem__('qos_policy_id', None)]
- mock_plugin_method.assert_has_calls(calls)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- @patch.object(plugin.TricirclePlugin, '_get_client',
- new=fake_get_client)
- def test_get_ports_pagination(self):
- self._basic_pod_route_setup()
- self._basic_port_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- ports1 = fake_plugin.get_ports(neutron_context, limit=1)
- ports2 = fake_plugin.get_ports(neutron_context, limit=1,
- marker=ports1[-1]['id'])
- ports3 = fake_plugin.get_ports(neutron_context, limit=1,
- marker=ports2[-1]['id'])
- ports4 = fake_plugin.get_ports(neutron_context, limit=1,
- marker=ports3[-1]['id'])
- ports = []
- expected_ports = [{'id': 'top_id_0', 'name': 'top',
- 'qos_policy_id': None,
- 'fixed_ips': [{'subnet_id': 'top_subnet_id',
- 'ip_address': '10.0.0.1'}]},
- {'id': 'top_id_1', 'name': 'bottom',
- 'qos_policy_id': None},
- {'id': 'top_id_2', 'name': 'bottom',
- 'qos_policy_id': None},
- {'id': 'top_id_3', 'name': 'top',
- 'qos_policy_id': None}]
- for _ports in (ports1, ports2, ports3, ports4):
- ports.extend(_ports)
- six.assertCountEqual(self, expected_ports, ports)
-
- ports = fake_plugin.get_ports(neutron_context)
- six.assertCountEqual(self, expected_ports, ports)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- @patch.object(plugin.TricirclePlugin, '_get_client',
- new=fake_get_client)
- def test_get_ports_filters(self):
- self._basic_pod_route_setup()
- self._basic_port_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- ports1 = fake_plugin.get_ports(neutron_context,
- filters={'id': ['top_id_0']})
- ports2 = fake_plugin.get_ports(neutron_context,
- filters={'id': ['top_id_1']})
- ports3 = fake_plugin.get_ports(neutron_context,
- filters={'id': ['top_id_4']})
- self.assertEqual([{'id': 'top_id_0', 'name': 'top',
- 'qos_policy_id': None,
- 'fixed_ips': [{'subnet_id': 'top_subnet_id',
- 'ip_address': '10.0.0.1'}]}], ports1)
- self.assertEqual([{'id': 'top_id_1', 'name': 'bottom',
- 'qos_policy_id': None}], ports2)
- self.assertEqual([], ports3)
-
- TOP_ROUTERS.append({'id': 'router_id'})
- b_routers_list = [BOTTOM1_ROUTERS, BOTTOM2_ROUTERS]
- b_ports_list = [BOTTOM1_PORTS, BOTTOM2_PORTS]
- for i in range(1, 3):
- router_id = 'router_%d_id' % i
- b_routers_list[i - 1].append({'id': router_id})
- route = {
- 'top_id': 'router_id',
- 'pod_id': 'pod_id_%d' % i,
- 'bottom_id': router_id,
- 'resource_type': 'router'}
- with self.context.session.begin():
- core.create_resource(self.context,
- models.ResourceRouting, route)
- # find port and add device_id
- for port in b_ports_list[i - 1]:
- port_id = 'bottom_id_%d' % i
- if port['id'] == port_id:
- port['device_id'] = router_id
- ports = fake_plugin.get_ports(neutron_context,
- filters={'device_id': ['router_id']})
- expected = [{'id': 'top_id_1', 'name': 'bottom',
- 'qos_policy_id': None, 'device_id': 'router_id'},
- {'id': 'top_id_2', 'name': 'bottom',
- 'qos_policy_id': None, 'device_id': 'router_id'}]
- six.assertCountEqual(self, expected, ports)
-
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'delete_port')
- @patch.object(FakeRPCAPI, 'delete_server_port')
- def test_delete_port(self, mock_client_method, mock_plugin_method,
- mock_context_method):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- tricircle_context = context.get_db_context()
- mock_context_method.return_value = tricircle_context
- project_id = 'project_id'
-
- (t_net_id, t_subnet_id,
- b_net_id, b_subnet_id) = self._prepare_network_subnet(
- project_id, tricircle_context, 'pod_1', 1)
- t_port_id1, _ = self._prepare_port_test(
- project_id, tricircle_context, 'pod_1', 1, t_net_id, b_net_id,
- t_subnet_id, b_subnet_id)
- t_port_id2, _ = self._prepare_port_test(
- project_id, tricircle_context, 'pod_1', 2, t_net_id, b_net_id,
- t_subnet_id, b_subnet_id)
-
- fake_plugin.delete_port(neutron_context, t_port_id1)
- fake_plugin.delete_port(neutron_context, t_port_id2)
-
- plugin_calls = [mock.call(neutron_context, t_port_id1),
- mock.call(neutron_context, t_port_id2)]
- client_calls = [
- mock.call(tricircle_context, project_id, t_port_id1, 'pod_id_1'),
- mock.call(tricircle_context, project_id, t_port_id2, 'pod_id_1')]
- mock_plugin_method.assert_has_calls(plugin_calls)
- mock_client_method.assert_has_calls(client_calls)
-
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'create_network')
- def test_network_az_region(self, mock_create, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- tricircle_context = context.get_db_context()
- mock_context.return_value = tricircle_context
-
- net_id = uuidutils.generate_uuid()
- network = {'network': {
- 'id': net_id, 'name': 'net_az', 'tenant_id': TEST_TENANT_ID,
- 'admin_state_up': True, 'shared': False,
- 'availability_zone_hints': ['az_name_1', 'pod_2']}}
- mock_create.return_value = {'id': net_id, 'name': 'net_az'}
- ret_net = fake_plugin.create_network(neutron_context, network)
- self.assertEqual(['az_name_1', 'pod_2'],
- ret_net['availability_zone_hints'])
-
- net_id = uuidutils.generate_uuid()
- err_network = {'network': {
- 'id': net_id, 'name': 'net_az', 'tenant_id': TEST_TENANT_ID,
- 'availability_zone_hints': ['az_name_1', 'az_name_3']}}
- mock_create.return_value = {'id': net_id, 'name': 'net_az'}
- self.assertRaises(az_exc.AvailabilityZoneNotFound,
- fake_plugin.create_network,
- neutron_context, err_network)
-
- net_id = uuidutils.generate_uuid()
- err_network = {'network': {
- 'id': net_id, 'name': 'net_az', 'tenant_id': TEST_TENANT_ID,
- 'availability_zone_hints': ['pod_1', 'pod_3']}}
- mock_create.return_value = {'id': net_id, 'name': 'net_az'}
- self.assertRaises(az_exc.AvailabilityZoneNotFound,
- fake_plugin.create_network,
- neutron_context, err_network)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- tricircle_context = context.get_db_context()
- mock_context.return_value = tricircle_context
-
- network = {'network': {
- 'id': uuidutils.generate_uuid(), 'name': 'net_az',
- 'tenant_id': TEST_TENANT_ID,
- 'admin_state_up': True, 'shared': False,
- 'availability_zone_hints': ['az_name_1', 'az_name_2']}}
- fake_plugin.create_network(neutron_context, network)
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(rbac_db, 'NetworkRBAC', new=FakeNetworkRBAC)
- def test_convert_az2region_for_nets(self, mock_context):
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- t_ctx = context.get_db_context()
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- mock_context.return_value = t_ctx
-
- az_hints = []
- region_names = []
- t_net_id, _, _, _ = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 1, az_hints=az_hints)
- net_filter = {'id': [t_net_id]}
- top_net = fake_plugin.get_networks(neutron_context, net_filter)
- six.assertCountEqual(self, top_net[0]['availability_zone_hints'],
- region_names)
-
- az_hints = '["az_name_1", "az_name_2"]'
- region_names = ['pod_1', 'pod_2']
- t_net_id, _, _, _ = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 2, az_hints=az_hints)
- net_filter = {'id': [t_net_id]}
- top_net = fake_plugin.get_networks(neutron_context, net_filter)
- six.assertCountEqual(self, top_net[0]['availability_zone_hints'],
- region_names)
-
- az_hints = '["pod_1", "pod_2"]'
- region_names = ['pod_1', 'pod_2']
- t_net_id, _, _, _ = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 3, az_hints=az_hints)
- net_filter = {'id': [t_net_id]}
- top_net = fake_plugin.get_networks(neutron_context, net_filter)
- six.assertCountEqual(self, top_net[0]['availability_zone_hints'],
- region_names)
-
- az_hints = '["pod_1", "az_name_2"]'
- region_names = ['pod_1', 'pod_2']
- t_net_id, _, _, _ = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 4, az_hints=az_hints)
- net_filter = {'id': [t_net_id]}
- top_net = fake_plugin.get_networks(neutron_context, net_filter)
- six.assertCountEqual(self, top_net[0]['availability_zone_hints'],
- region_names)
-
- pod4 = {'pod_id': 'pod_id_4',
- 'region_name': 'pod_4',
- 'az_name': 'az_name_1'}
- db_api.create_pod(self.context, pod4)
- az_hints = '["pod_1", "az_name_1"]'
- region_names = ['pod_1', 'pod_4']
- t_net_id, _, _, _ = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 5, az_hints=az_hints)
- net_filter = {'id': [t_net_id]}
- top_net = fake_plugin.get_networks(neutron_context, net_filter)
- six.assertCountEqual(self, top_net[0]['availability_zone_hints'],
- region_names)
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(rbac_db, 'NetworkRBAC', new=FakeNetworkRBAC)
- def test_update_network(self, mock_context):
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- t_ctx = context.get_db_context()
- t_net_id, _, b_net_id, _ = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 1)
- fake_plugin = FakePlugin()
- fake_client = FakeClient('pod_1')
- neutron_context = FakeNeutronContext()
- mock_context.return_value = t_ctx
-
- update_body = {
- 'network': {
- 'name': 'new_name',
- 'description': 'new_description',
- 'admin_state_up': True,
- 'shared': True}
- }
- fake_plugin.update_network(neutron_context, t_net_id, update_body)
-
- top_net = fake_plugin.get_network(neutron_context, t_net_id)
- self.assertEqual(top_net['name'], update_body['network']['name'])
- self.assertEqual(top_net['description'],
- update_body['network']['description'])
- self.assertEqual(top_net['admin_state_up'],
- update_body['network']['admin_state_up'])
- self.assertTrue(top_net['shared'])
-
- bottom_net = fake_client.get_networks(t_ctx, b_net_id)
- # name is set to top resource id, which is used by lock_handle to
- # retrieve bottom/local resources that have been created but not
- # registered in the resource routing table, so it's not allowed to
- # be updated
- self.assertEqual(bottom_net['name'], t_net_id)
- self.assertEqual(bottom_net['description'],
- update_body['network']['description'])
- self.assertEqual(bottom_net['admin_state_up'],
- update_body['network']['admin_state_up'])
- self.assertTrue(bottom_net['shared'])
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(rbac_db, 'NetworkRBAC', new=FakeNetworkRBAC)
- def test_update_network_external_attr(self, mock_context):
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- t_ctx = context.get_db_context()
- t_net_id, _, _, _ = self._prepare_network_subnet(tenant_id, t_ctx,
- 'pod_1', 1)
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- mock_context.return_value = t_ctx
-
- update_body = {
- 'network': {
- 'router:external': True
- }
- }
- self.assertRaises(q_lib_exc.InvalidInput, fake_plugin.update_network,
- neutron_context, t_net_id, update_body)
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(rbac_db, 'NetworkRBAC', new=FakeNetworkRBAC)
- def test_update_network_provider_attrs(self, mock_context):
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- t_ctx = context.get_admin_context()
- t_net_id, _, _, _ = self._prepare_network_subnet(tenant_id, t_ctx,
- 'pod_1', 1)
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- mock_context.return_value = t_ctx
-
- provider_attrs = {'provider:network_type': 'vlan',
- 'provider:physical_network': 'br-vlan',
- 'provider:segmentation_id': 1234}
-
- for key, value in provider_attrs.items():
- update_body = {
- 'network': {
- key: value
- }
- }
- self.assertRaises(q_lib_exc.InvalidInput,
- fake_plugin.update_network,
- neutron_context, t_net_id, update_body)
-
- @staticmethod
- def _prepare_sg_test(project_id, ctx, pod_name):
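- # build matching top and bottom default security groups (each with a
- # single ingress rule) and register a routing entry between them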
- t_sg_id = uuidutils.generate_uuid()
- t_rule_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
- b_rule_id = uuidutils.generate_uuid()
- t_sg = {
- 'id': t_sg_id,
- 'name': 'default',
- 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [
- {'security_group_id': t_sg_id,
- 'id': t_rule_id,
- 'tenant_id': project_id,
- 'remote_group_id': t_sg_id,
- 'direction': 'ingress',
- 'remote_ip_prefix': '10.0.0.0/24',
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'}
- ]
- }
- TOP_PORTS.append(test_utils.DotDict(t_sg))
-
- b_sg = {
- 'id': b_sg_id,
- 'name': 'default',
- 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [
- {'security_group_id': b_sg_id,
- 'id': b_rule_id,
- 'tenant_id': project_id,
- 'remote_group_id': b_sg_id,
- 'direction': 'ingress',
- 'remote_ip_prefix': '10.0.0.0/24',
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'}
- ]
- }
- if pod_name == 'pod_1':
- BOTTOM1_PORTS.append(test_utils.DotDict(b_sg))
- else:
- BOTTOM2_PORTS.append(test_utils.DotDict(b_sg))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_sg_id,
- 'bottom_id': b_sg_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_SG})
-
- return t_sg_id, b_sg_id
-
- @staticmethod
- def _prepare_port_test(tenant_id, ctx, pod_name, index, t_net_id,
- b_net_id, t_subnet_id, b_subnet_id, add_ip=True,
- vif_type=portbindings.VIF_TYPE_UNBOUND,
- device_onwer='compute:None'):
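- # create a top port and its bottom counterpart in the given pod,
- # optionally with a fixed ip from the top subnet, and register a
- # port routing entry; returns both port ids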
- t_port_id = uuidutils.generate_uuid()
- b_port_id = uuidutils.generate_uuid()
-
- if add_ip:
- ip_address = ''
- for subnet in TOP_SUBNETS:
- if subnet['id'] == t_subnet_id:
- ip_address = subnet['cidr'].replace('.0/24',
- '.%d' % (index + 4))
-
- t_port = {
- 'id': t_port_id,
- 'name': 'top_port_%d' % index,
- 'description': 'old_top_description',
- 'extra_dhcp_opts': [],
- 'device_owner': device_onwer,
- 'security_groups': [],
- 'device_id': '68f46ee4-d66a-4c39-bb34-ac2e5eb85470',
- 'admin_state_up': True,
- 'network_id': t_net_id,
- 'tenant_id': tenant_id,
- 'mac_address': 'fa:16:3e:cd:76:40',
- 'binding:vif_type': vif_type,
- 'project_id': 'project_id',
- 'binding:host_id': 'zhiyuan-5',
- 'status': 'ACTIVE'
- }
- if add_ip:
- t_port.update({'fixed_ips': [{'subnet_id': t_subnet_id,
- 'ip_address': ip_address}]})
- TOP_PORTS.append(test_utils.DotDict(t_port))
-
- b_port = {
- 'id': b_port_id,
- 'name': b_port_id,
- 'description': 'old_bottom_description',
- 'security_groups': [],
- 'device_id': '68f46ee4-d66a-4c39-bb34-ac2e5eb85470',
- 'admin_state_up': True,
- 'network_id': b_net_id,
- 'tenant_id': tenant_id,
- 'device_owner': 'compute:None',
- 'extra_dhcp_opts': [],
- 'mac_address': 'fa:16:3e:cd:76:40',
- 'binding:vif_type': vif_type,
- 'project_id': 'tenant_id',
- 'binding:host_id': 'zhiyuan-5',
- 'status': 'ACTIVE'
- }
- if add_ip:
- b_port.update({'fixed_ips': [{'subnet_id': b_subnet_id,
- 'ip_address': ip_address}]})
-
- if pod_name == 'pod_1':
- BOTTOM1_PORTS.append(test_utils.DotDict(b_port))
- else:
- BOTTOM2_PORTS.append(test_utils.DotDict(b_port))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_port_id,
- 'bottom_id': b_port_id,
- 'pod_id': pod_id,
- 'project_id': tenant_id,
- 'resource_type': constants.RT_PORT})
-
- return t_port_id, b_port_id
-
- @staticmethod
- def _prepare_trunk_test(project_id, ctx, pod_name, index, t_net_id,
- b_net_id, t_subnet_id, b_subnet_id):
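- # create a top trunk with one subport and its bottom counterpart,
- # then register a trunk routing entry; returns both trunk dicts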
- t_trunk_id = uuidutils.generate_uuid()
- b_trunk_id = uuidutils.generate_uuid()
- t_parent_port_id = uuidutils.generate_uuid()
- t_sub_port_id, _ = PluginTest._prepare_port_test(
- project_id, ctx, pod_name, index, t_net_id,
- b_net_id, t_subnet_id, b_subnet_id)
-
- t_subport = {
- 'segmentation_type': 'vlan',
- 'port_id': t_sub_port_id,
- 'segmentation_id': 164,
- 'trunk_id': t_trunk_id}
-
- t_trunk = {
- 'id': t_trunk_id,
- 'name': 'top_trunk_%d' % index,
- 'status': 'DOWN',
- 'description': 'created',
- 'admin_state_up': True,
- 'port_id': t_parent_port_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'sub_ports': [t_subport]
- }
- TOP_TRUNKS.append(test_utils.DotDict(t_trunk))
- TOP_SUBPORTS.append(test_utils.DotDict(t_subport))
-
- b_subport = {
- 'segmentation_type': 'vlan',
- 'port_id': t_sub_port_id,
- 'segmentation_id': 164,
- 'trunk_id': b_trunk_id}
-
- b_trunk = {
- 'id': b_trunk_id,
- 'name': 'top_trunk_%d' % index,
- 'status': 'UP',
- 'description': 'created',
- 'admin_state_up': True,
- 'port_id': t_parent_port_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'sub_ports': [b_subport]
- }
-
- if pod_name == 'pod_1':
- BOTTOM1_SUBPORTS.append(test_utils.DotDict(t_subport))
- BOTTOM1_TRUNKS.append(test_utils.DotDict(b_trunk))
- else:
- BOTTOM2_SUBPORTS.append(test_utils.DotDict(t_subport))
- BOTTOM2_TRUNKS.append(test_utils.DotDict(b_trunk))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_trunk_id,
- 'bottom_id': b_trunk_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_TRUNK})
-
- return t_trunk, b_trunk
-
- @staticmethod
- def _prepare_network_subnet(project_id, ctx, region_name, index,
- enable_dhcp=True, az_hints=None,
- network_type=constants.NT_LOCAL):
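- # create (or reuse, looked up by name) a top network/subnet pair and
- # its bottom counterpart in the given region, then register network
- # and subnet routing entries; returns the four ids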
- t_client = FakeClient()
- t_net_name = 'top_net_%d' % index
- t_nets = t_client.list_networks(ctx, [{'key': 'name',
- 'comparator': 'eq',
- 'value': t_net_name}])
- if not t_nets:
- t_net_id = uuidutils.generate_uuid()
- t_subnet_id = uuidutils.generate_uuid()
- t_net = {
- 'id': t_net_id,
- 'name': 'top_net_%d' % index,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'description': 'description',
- 'admin_state_up': False,
- 'shared': False,
- 'provider:network_type': network_type,
- 'availability_zone_hints': az_hints
- }
- t_subnet = {
- 'id': t_subnet_id,
- 'network_id': t_net_id,
- 'name': 'top_subnet_%d' % index,
- 'ip_version': 4,
- 'cidr': '10.0.%d.0/24' % index,
- 'allocation_pools': [],
- 'enable_dhcp': True,
- 'gateway_ip': '10.0.%d.1' % index,
- 'ipv6_address_mode': q_constants.IPV6_SLAAC,
- 'ipv6_ra_mode': q_constants.IPV6_SLAAC,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'description': 'description',
- 'host_routes': [],
- 'dns_nameservers': [],
- 'segment_id': 'b85fd910-e483-4ef1-bdf5-b0f747d0b0d5'
- }
- TOP_NETS.append(test_utils.DotDict(t_net))
- TOP_SUBNETS.append(test_utils.DotDict(t_subnet))
- else:
- t_net_id = t_nets[0]['id']
- t_subnet_name = 'top_subnet_%d' % index
- t_subnets = t_client.list_subnets(ctx, [{'key': 'name',
- 'comparator': 'eq',
- 'value': t_subnet_name}])
- t_subnet_id = t_subnets[0]['id']
-
- b_net_id = t_net_id
- b_subnet_id = t_subnet_id
- b_net = {
- 'id': b_net_id,
- 'name': t_net_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'description': 'description',
- 'admin_state_up': False,
- 'shared': False
- }
- b_subnet = {
- 'id': b_subnet_id,
- 'network_id': b_net_id,
- 'name': t_subnet_id,
- 'ip_version': 4,
- 'cidr': '10.0.%d.0/24' % index,
- 'allocation_pools': [],
- 'enable_dhcp': enable_dhcp,
- 'gateway_ip': '10.0.%d.25' % index,
- 'ipv6_address_mode': '',
- 'ipv6_ra_mode': '',
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'description': 'description',
- 'host_routes': [],
- 'dns_nameservers': [],
- 'segment_id': 'b85fd910-e483-4ef1-bdf5-b0f747d0b0d5'
- }
- if region_name == 'pod_1':
- BOTTOM1_NETS.append(test_utils.DotDict(b_net))
- BOTTOM1_SUBNETS.append(test_utils.DotDict(b_subnet))
- else:
- BOTTOM2_NETS.append(test_utils.DotDict(b_net))
- BOTTOM2_SUBNETS.append(test_utils.DotDict(b_subnet))
-
- pod_id = 'pod_id_1' if region_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_net_id,
- 'bottom_id': b_net_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_NETWORK})
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_subnet_id,
- 'bottom_id': b_subnet_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_SUBNET})
- return t_net_id, t_subnet_id, b_net_id, b_subnet_id
-
- @staticmethod
- def _prepare_port(project_id, ctx, region_name, index, extra_attrs={}):
- t_client = FakeClient()
- t_net_name = 'top_net_%d' % index
- t_nets = t_client.list_networks(ctx, [{'key': 'name',
- 'comparator': 'eq',
- 'value': t_net_name}])
- t_subnet_name = 'top_subnet_%d' % index
- t_subnets = t_client.list_subnets(ctx, [{'key': 'name',
- 'comparator': 'eq',
- 'value': t_subnet_name}])
-
- t_port_id = uuidutils.generate_uuid()
- b_port_id = t_port_id
- ip_suffix = index if region_name == 'pod_1' else 100 + index
- t_port = {
- 'id': b_port_id,
- 'network_id': t_nets[0]['id'],
- 'device_id': 'vm%d_id' % index,
- 'device_owner': 'compute:None',
- 'fixed_ips': [{'subnet_id': t_subnets[0]['id'],
- 'ip_address': '10.0.%d.%d' % (index, ip_suffix)}],
- 'mac_address': 'fa:16:3e:d4:%02x:%02x' % (index, ip_suffix),
- 'security_groups': [],
- 'tenant_id': project_id,
- 'project_id': project_id
- }
- t_port.update(extra_attrs)
- # resource ids in top and bottom pod are the same
- b_port = {
- 'id': t_port_id,
- 'network_id': t_nets[0]['id'],
- 'device_id': 'vm%d_id' % index,
- 'device_owner': 'compute:None',
- 'fixed_ips': [{'subnet_id': t_subnets[0]['id'],
- 'ip_address': '10.0.%d.%d' % (index, ip_suffix)}],
- 'mac_address': 'fa:16:3e:d4:%02x:%02x' % (index, ip_suffix),
- 'security_groups': [],
- 'tenant_id': project_id,
- 'project_id': project_id
- }
- b_port.update(extra_attrs)
- TOP_PORTS.append(test_utils.DotDict(t_port))
- if region_name == 'pod_1':
- BOTTOM1_PORTS.append(test_utils.DotDict(b_port))
- else:
- BOTTOM2_PORTS.append(test_utils.DotDict(b_port))
-
- pod_id = 'pod_id_1' if region_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_port_id,
- 'bottom_id': t_port_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_PORT})
- return t_port_id, b_port_id
-
- def _prepare_router(self, project_id, router_az_hints=None):
- t_router_id = uuidutils.generate_uuid()
- t_router = {
- 'id': t_router_id,
- 'name': 'top_router',
- 'distributed': False,
- 'tenant_id': project_id,
- 'attached_ports': test_utils.DotList(),
- 'extra_attributes': {
- 'availability_zone_hints': router_az_hints
- }
- }
- TOP_ROUTERS.append(test_utils.DotDict(t_router))
- return t_router_id
-
- def _prepare_router_test(self, tenant_id, ctx, region_name, index,
- router_az_hints=None, net_az_hints=None,
- create_new_router=False,
- network_type=constants.NT_LOCAL):
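- # prepare a network/subnet pair and a top router, reusing the first
- # top router unless create_new_router is set; returns net, subnet,
- # router and bottom ids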
- (t_net_id, t_subnet_id, b_net_id,
- b_subnet_id) = self._prepare_network_subnet(
- tenant_id, ctx, region_name, index, az_hints=net_az_hints,
- network_type=network_type)
- if create_new_router or len(TOP_ROUTERS) == 0:
- t_router_id = self._prepare_router(tenant_id, router_az_hints)
- else:
- t_router_id = TOP_ROUTERS[0]['id']
-
- return t_net_id, t_subnet_id, t_router_id, b_net_id, b_subnet_id
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(base_object.NeutronDbObject, '_load_object',
- new=fake_load_obj)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_subnet(self, mock_context):
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- neutron_context = FakeNeutronContext()
- t_ctx = context.get_db_context()
- _, t_subnet_id, _, b_subnet_id = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 1)
-
- fake_plugin = FakePlugin()
- fake_client = FakeClient('pod_1')
- mock_context.return_value = t_ctx
- update_body = {
- 'subnet':
- {'name': 'new_name',
- 'description': 'new_description',
- 'allocation_pools': [{"start": "10.0.1.10",
- "end": "10.0.1.254"}],
- 'gateway_ip': '10.0.1.2',
- 'host_routes': [{"nexthop": "10.1.0.1",
- "destination": "10.1.0.0/24"},
- {"nexthop": "10.2.0.1",
- "destination": "10.2.0.0/24"}],
- 'dns_nameservers': ['114.114.114.114', '8.8.8.8']}
- }
- body_copy = copy.deepcopy(update_body)
- fake_plugin.update_subnet(neutron_context, t_subnet_id, update_body)
- top_subnet = fake_plugin.get_subnet(neutron_context, t_subnet_id)
- self.assertEqual(top_subnet['name'], body_copy['subnet']['name'])
- self.assertEqual(top_subnet['description'],
- body_copy['subnet']['description'])
- self.assertEqual(top_subnet['allocation_pools'],
- body_copy['subnet']['allocation_pools'])
- six.assertCountEqual(self, top_subnet['host_routes'],
- body_copy['subnet']['host_routes'])
- six.assertCountEqual(self, top_subnet['dns_nameservers'],
- body_copy['subnet']['dns_nameservers'])
- self.assertEqual(top_subnet['gateway_ip'],
- body_copy['subnet']['gateway_ip'])
-
- bottom_subnet = fake_client.get_subnets(t_ctx, b_subnet_id)
- # name is set to top resource id, which is used by lock_handle to
- # retrieve bottom/local resources that have been created but not
- # registered in the resource routing table, so it's not allowed
- # to be updated
- self.assertEqual(bottom_subnet['name'], b_subnet_id)
- self.assertEqual(bottom_subnet['description'],
- body_copy['subnet']['description'])
- bottom_allocation_pools = [{'start': '10.0.1.2', 'end': '10.0.1.2'},
- {'start': '10.0.1.10', 'end': '10.0.1.24'},
- {'start': '10.0.1.26', 'end': '10.0.1.254'}]
- six.assertCountEqual(self,
- bottom_subnet['allocation_pools'],
- bottom_allocation_pools)
- six.assertCountEqual(self,
- bottom_subnet['host_routes'],
- body_copy['subnet']['host_routes'])
- six.assertCountEqual(self,
- bottom_subnet['dns_nameservers'],
- body_copy['subnet']['dns_nameservers'])
- # gateway ip is kept as the original gateway ip because it is
- # reserved by the top pod, so it's not allowed to be updated
- self.assertEqual(bottom_subnet['gateway_ip'], '10.0.1.25')
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_subnet_enable_disable_dhcp(self, mock_context):
-
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- neutron_context = FakeNeutronContext()
- t_ctx = context.get_db_context()
- _, t_subnet_id, _, b_subnet_id = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 1, enable_dhcp=False)
-
- fake_plugin = FakePlugin()
- fake_client = FakeClient('pod_1')
- mock_context.return_value = t_ctx
-
- self.assertEqual(0, len(TOP_PORTS))
- self.assertEqual(0, len(BOTTOM1_PORTS))
-
- update_body = {
- 'subnet':
- {'enable_dhcp': True}
- }
- body_copy = copy.deepcopy(update_body)
- # from disable dhcp to enable dhcp, create a new dhcp port
- fake_plugin.update_subnet(neutron_context, t_subnet_id, update_body)
- top_subnet = fake_plugin.get_subnet(neutron_context, t_subnet_id)
- self.assertEqual(top_subnet['enable_dhcp'],
- body_copy['subnet']['enable_dhcp'])
- self.assertEqual(1, len(TOP_PORTS))
-
- bottom_subnet = fake_client.get_subnets(t_ctx, b_subnet_id)
- self.assertEqual(bottom_subnet['enable_dhcp'],
- body_copy['subnet']['enable_dhcp'])
-
- update_body = {
- 'subnet':
- {'enable_dhcp': False}
- }
- body_copy = copy.deepcopy(update_body)
- # when dhcp goes from enabled to disabled, the dhcp port created
- # previously is kept reserved
- fake_plugin.update_subnet(neutron_context, t_subnet_id, update_body)
- top_subnet = fake_plugin.get_subnet(neutron_context, t_subnet_id)
- self.assertEqual(top_subnet['enable_dhcp'],
- body_copy['subnet']['enable_dhcp'])
- self.assertEqual(1, len(TOP_PORTS))
-
- bottom_subnet = fake_client.get_subnets(t_ctx, b_subnet_id)
- self.assertEqual(bottom_subnet['enable_dhcp'],
- body_copy['subnet']['enable_dhcp'])
-
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(db_utils, 'filter_non_model_columns',
- new=fake_filter_non_model_columns)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_port(self, mock_context):
- project_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- (t_net_id, t_subnet_id,
- b_net_id, b_subnet_id) = self._prepare_network_subnet(
- project_id, self.context, 'pod_1', 1)
- neutron_context = FakeNeutronContext()
- fake_plugin = FakePlugin()
- fake_client = FakeClient()
- mock_context.return_value = self.context
-
- t_pod = {'pod_id': 'pod_id_top', 'region_name': 'top-region',
- 'az_name': ''}
- db_api.create_pod(self.context, t_pod)
-
- body_port = {
- 'port': {
- 'name': 'interface_top-region_port-1',
- 'description': 'top_description',
- 'extra_dhcp_opts': [],
- 'security_groups': [],
- 'device_id': 'reserved_gateway_port',
- 'admin_state_up': True,
- 'network_id': t_net_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- }
- }
-
- port = fake_plugin.create_port(neutron_context, body_port)
- t_gw_ports = fake_client.list_resources(
- 'port', None, [{'key': 'name', 'comparator': 'eq',
- 'value': 'interface_top-region_port-1'}])
- self.assertEqual(t_gw_ports[0]['id'], port['id'])
-
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_update_ips_for_port', new=fake_update_ips_for_port)
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(db_utils, 'filter_non_model_columns',
- new=fake_filter_non_model_columns)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_port(self, mock_context):
- project_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- neutron_context = FakeNeutronContext()
- t_ctx = context.get_db_context()
- (t_net_id, t_subnet_id,
- b_net_id, b_subnet_id) = self._prepare_network_subnet(
- project_id, t_ctx, 'pod_1', 1)
- t_port_id, b_port_id = self._prepare_port_test(
- project_id, t_ctx, 'pod_1', 1, t_net_id, b_net_id,
- t_subnet_id, b_subnet_id)
- t_sg_id, _ = self._prepare_sg_test(project_id, t_ctx, 'pod_1')
-
- fake_plugin = FakePlugin()
- fake_client = FakeClient('pod_1')
- mock_context.return_value = t_ctx
-
- update_body = {
- 'port': {
- 'description': 'new_description',
- 'extra_dhcp_opts': [
- {"opt_value": "123.123.123.45",
- "opt_name": "server-ip-address"},
- {"opt_value": "123.123.123.123",
- "opt_name": "tftp-server"}
- ],
- 'device_owner': 'compute:new',
- 'device_id': 'new_device_id',
- 'name': 'new_name',
- 'admin_state_up': False,
- 'mac_address': 'fa:16:3e:cd:76:bb',
- 'security_groups': [t_sg_id],
- 'allowed_address_pairs': [{"ip_address": "23.23.23.1",
- "mac_address": "fa:16:3e:c4:cd:3f"}]
- }
-
- }
- body_copy = copy.deepcopy(update_body)
- top_port = fake_plugin.update_port(
- neutron_context, t_port_id, update_body)
- self.assertEqual(top_port['name'], body_copy['port']['name'])
- self.assertEqual(top_port['description'],
- body_copy['port']['description'])
- self.assertEqual(top_port['extra_dhcp_opts'],
- body_copy['port']['extra_dhcp_opts'])
- self.assertEqual(top_port['device_owner'],
- body_copy['port']['device_owner'])
- self.assertEqual(top_port['device_id'],
- body_copy['port']['device_id'])
- self.assertEqual(top_port['admin_state_up'],
- body_copy['port']['admin_state_up'])
- self.assertEqual(top_port['mac_address'],
- body_copy['port']['mac_address'])
- self.assertEqual(top_port['security_groups'],
- body_copy['port']['security_groups'])
- self.assertEqual(top_port['allowed_address_pairs'][0],
- body_copy['port']['allowed_address_pairs'][0])
-
- bottom_port = fake_client.get_ports(t_ctx, b_port_id)
- # name is set to bottom resource id, which is used by lock_handle to
- # retrieve bottom/local resources that have been created but not
- # registered in the resource routing table, so it's not allowed
- # to be updated
- self.assertEqual(bottom_port['name'], b_port_id)
- self.assertEqual(bottom_port['description'],
- body_copy['port']['description'])
- self.assertEqual(bottom_port['extra_dhcp_opts'],
- body_copy['port']['extra_dhcp_opts'])
- self.assertEqual(bottom_port['device_owner'],
- body_copy['port']['device_owner'])
- self.assertEqual(bottom_port['device_id'],
- body_copy['port']['device_id'])
- self.assertEqual(bottom_port['admin_state_up'],
- body_copy['port']['admin_state_up'])
- self.assertEqual(bottom_port['mac_address'],
- body_copy['port']['mac_address'])
- self.assertEqual(bottom_port['security_groups'],
- body_copy['port']['security_groups'])
- self.assertEqual(bottom_port['allowed_address_pairs'][0],
- body_copy['port']['allowed_address_pairs'][0])
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(db_utils, 'filter_non_model_columns',
- new=fake_filter_non_model_columns)
- @patch.object(trunk_plugin.TrunkPlugin, 'get_trunk')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_port_trunk(self, mock_context, mock_get_trunk):
- project_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- t_ctx = context.get_db_context()
- (t_net_id, t_subnet_id,
- b_net_id, b_subnet_id) = self._prepare_network_subnet(
- project_id, t_ctx, 'pod_1', 1)
- t_port_id, b_port_id = self._prepare_port_test(
- project_id, t_ctx, 'pod_1', 1, t_net_id, b_net_id,
- t_subnet_id, b_subnet_id)
- t_trunk, b_trunk = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_1', 2, t_net_id,
- b_net_id, t_subnet_id, b_subnet_id)
-
- fake_plugin = FakePlugin()
- mock_context.return_value = t_ctx
-
- update_body = {
- 'port': {
- 'binding:profile': {
- constants.PROFILE_REGION: 'pod_1',
- constants.PROFILE_DEVICE: 'compute:new'
- },
- 'trunk_details': {'trunk_id': t_trunk['id'],
- 'sub_ports': []}
- }
-
- }
-
- body_copy = copy.deepcopy(update_body)
- q_ctx = test_central_trunk_plugin.FakeNeutronContext()
- mock_get_trunk.return_value = t_trunk
- top_port = fake_plugin.update_port(q_ctx, t_port_id, update_body)
- self.assertEqual(top_port['binding:profile'],
- body_copy['port']['binding:profile'])
- self.assertEqual(top_port['trunk_details'],
- body_copy['port']['trunk_details'])
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(db_utils, 'filter_non_model_columns',
- new=fake_filter_non_model_columns)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_port_mapping(self, mock_context):
- project_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- neutron_context = FakeNeutronContext()
- t_ctx = context.get_db_context()
- (t_net_id, t_subnet_id,
- b_net_id, b_subnet_id) = self._prepare_network_subnet(
- project_id, t_ctx, 'pod_1', 1)
- t_port_id, b_port_id = self._prepare_port_test(
- project_id, t_ctx, 'pod_1', 1, t_net_id, b_net_id,
- t_subnet_id, b_subnet_id)
-
- fake_plugin = FakePlugin()
- mock_context.return_value = t_ctx
-
- update_body = {
- 'port': {
- 'binding:profile': {
- constants.PROFILE_REGION: 'pod_1',
- constants.PROFILE_DEVICE: '',
- constants.PROFILE_STATUS: 'DOWN'
- }
- }
- }
- b_update_body = {'port': {'device_id': None}}
- fake_client = FakeClient('pod_1')
- fake_client.update_ports(t_ctx, b_port_id, b_update_body)
- fake_plugin.update_port(neutron_context, t_port_id, update_body)
- routing_resources = core.query_resource(
- t_ctx, models.ResourceRouting,
- [{'key': 'bottom_id', 'comparator': 'eq', 'value': b_port_id}], [])
- self.assertListEqual(routing_resources, [])
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(db_utils, 'filter_non_model_columns',
- new=fake_filter_non_model_columns)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_bound_port_mac(self, mock_context):
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- neutron_context = FakeNeutronContext()
- t_ctx = context.get_db_context()
- (t_net_id, t_subnet_id,
- b_net_id, b_subnet_id) = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 1)
- (t_port_id, b_port_id) = self._prepare_port_test(
- tenant_id, t_ctx, 'pod_1', 1, t_net_id, b_net_id,
- t_subnet_id, b_subnet_id,
- vif_type='ovs', device_onwer='compute:None')
-
- fake_plugin = FakePlugin()
- mock_context.return_value = t_ctx
- update_body = {
- 'port': {
- 'mac_address': 'fa:16:3e:cd:76:bb'
- }
- }
-
- self.assertRaises(q_lib_exc.PortBound, fake_plugin.update_port,
- neutron_context, t_port_id, update_body)
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(db_utils, 'filter_non_model_columns',
- new=fake_filter_non_model_columns)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_non_vm_port(self, mock_context):
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- t_ctx = context.get_db_context()
- neutron_context = FakeNeutronContext()
- mock_context.return_value = t_ctx
- (t_net_id, t_subnet_id,
- b_net_id, b_subnet_id) = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 1)
- fake_plugin = FakePlugin()
- fake_client = FakeClient('pod_1')
-
- non_vm_port_types = [q_constants.DEVICE_OWNER_ROUTER_INTF,
- q_constants.DEVICE_OWNER_ROUTER_GW,
- q_constants.DEVICE_OWNER_DHCP]
- for port_type in non_vm_port_types:
- (t_port_id, b_port_id) = self._prepare_port_test(
- tenant_id, t_ctx, 'pod_1', 1, t_net_id, b_net_id,
- t_subnet_id, b_subnet_id, add_ip=False, device_onwer=port_type)
- update_body = {
- 'port': {'binding:host_id': 'zhiyuan-6'}
- }
- body_copy = copy.deepcopy(update_body)
- top_port = fake_plugin.update_port(
- neutron_context, t_port_id, update_body)
- self.assertEqual(top_port['binding:host_id'],
- body_copy['port']['binding:host_id'])
- # for router interface, router gateway and dhcp ports we don't
- # directly update the bottom port, so the bottom port is unchanged
- bottom_port = fake_client.get_ports(t_ctx, b_port_id)
- self.assertEqual(bottom_port['binding:host_id'], 'zhiyuan-5')
-
- @patch.object(FakeRPCAPI, 'setup_shadow_ports')
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(db_utils, 'filter_non_model_columns',
- new=fake_filter_non_model_columns)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_vm_port(self, mock_context, mock_setup):
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- t_ctx = context.get_db_context()
- neutron_context = FakeNeutronContext()
- mock_context.return_value = t_ctx
- (t_net_id, t_subnet_id,
- b_net_id, b_subnet_id) = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 1, network_type=constants.NT_LOCAL)
- fake_plugin = FakePlugin()
-
- (t_port_id, b_port_id) = self._prepare_port_test(
- tenant_id, t_ctx, 'pod_1', 1, t_net_id, b_net_id,
- t_subnet_id, b_subnet_id)
- update_body = {
- 'port': {'binding:profile': {
- 'region': 'pod_1',
- 'host': 'fake_host',
- 'type': 'Open vSwitch agent',
- 'tunnel_ip': '192.168.1.101',
- 'device': 'compute: None'
- }}
- }
- fake_plugin.update_port(
- neutron_context, t_port_id, update_body)
- agents = core.query_resource(t_ctx, models.ShadowAgent, [], [])
- # we only create a shadow agent for vxlan networks
- self.assertEqual(len(agents), 0)
- self.assertFalse(mock_setup.called)
-
- client = FakeClient()
- # provider attributes are in fact not allowed to be updated, but in
- # the test we just change the network type for convenience
- client.update_networks(
- t_ctx, t_net_id,
- {'network': {'provider:network_type': constants.NT_VxLAN}})
- fake_plugin.update_port(
- neutron_context, t_port_id, update_body)
- agents = core.query_resource(t_ctx, models.ShadowAgent, [], [])
- self.assertEqual(len(agents), 1)
- self.assertEqual(agents[0]['type'], 'Open vSwitch agent')
- self.assertEqual(agents[0]['host'], 'fake_host')
- self.assertEqual(agents[0]['tunnel_ip'], '192.168.1.101')
- # we test the exact effect of setup_shadow_ports in
- # test_update_port_trigger_l2pop
- self.assertTrue(mock_setup.called)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_validation_router_net_location_match(self, mock_context):
- self._basic_pod_route_setup()
- pod4 = {'pod_id': 'pod_id_4',
- 'region_name': 'pod_4',
- 'az_name': 'az_name_2'}
- db_api.create_pod(self.context, pod4)
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
- tenant_id = TEST_TENANT_ID
-
- router_az_hints = '["pod_1"]'
- net_az_hints = '["pod_2"]'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 1, router_az_hints, net_az_hints, True)
- router = fake_plugin._get_router(q_ctx, t_router_id)
- net = fake_plugin.get_network(q_ctx, t_net_id)
- self.assertRaises(t_exceptions.RouterNetworkLocationMismatch,
- fake_plugin.validate_router_net_location_match,
- t_ctx, router, net)
-
- router_az_hints = '["pod_1"]'
- net_az_hints = '["pod_1", "az_name_2"]'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 2, router_az_hints, net_az_hints, True)
- router = fake_plugin._get_router(q_ctx, t_router_id)
- net = fake_plugin.get_network(q_ctx, t_net_id)
- is_local_router = helper.NetworkHelper.is_local_router(t_ctx, router)
- fake_plugin.validate_router_net_location_match(t_ctx, router, net)
- # for supporting multi-gateway l3 mode, we allow attaching a network
- # to a local router if the regions of the network include the region
- # of the router
- self.assertTrue(is_local_router)
-
- router_az_hints = '["az_name_1"]'
- net_az_hints = '["az_name_1", "pod_2"]'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 3, router_az_hints, net_az_hints, True)
- router = fake_plugin._get_router(q_ctx, t_router_id)
- net = fake_plugin.get_network(q_ctx, t_net_id)
- self.assertRaises(t_exceptions.RouterNetworkLocationMismatch,
- fake_plugin.validate_router_net_location_match,
- t_ctx, router, net)
-
- router_az_hints = '["pod_1"]'
- net_az_hints = None
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 4, router_az_hints, net_az_hints, True)
- router = fake_plugin._get_router(q_ctx, t_router_id)
- net = fake_plugin.get_network(q_ctx, t_net_id)
- is_local_router = helper.NetworkHelper.is_local_router(t_ctx, router)
- fake_plugin.validate_router_net_location_match(t_ctx, router, net)
- # for supporting multi-gateway l3 mode, we allow attaching a network
- # to a local router if the regions of the network include the region
- # of the router
- self.assertTrue(is_local_router)
-
- router_az_hints = None
- net_az_hints = '["pod_1", "az_name_2"]'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 5, router_az_hints, net_az_hints, True)
- router = fake_plugin._get_router(q_ctx, t_router_id)
- net = fake_plugin.get_network(q_ctx, t_net_id)
- is_local_router = helper.NetworkHelper.is_local_router(t_ctx, router)
- fake_plugin.validate_router_net_location_match(t_ctx, router, net)
- self.assertFalse(is_local_router)
-
- router_az_hints = None
- net_az_hints = None
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 6, router_az_hints, net_az_hints, True)
- router = fake_plugin._get_router(q_ctx, t_router_id)
- net = fake_plugin.get_network(q_ctx, t_net_id)
- is_local_router = helper.NetworkHelper.is_local_router(t_ctx, router)
- fake_plugin.validate_router_net_location_match(t_ctx, router, net)
- self.assertFalse(is_local_router)
-
- router_az_hints = '["pod_1"]'
- net_az_hints = '["pod_1"]'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 7, router_az_hints, net_az_hints, True)
- router = fake_plugin._get_router(q_ctx, t_router_id)
- net = fake_plugin.get_network(q_ctx, t_net_id)
- is_local_router = helper.NetworkHelper.is_local_router(t_ctx, router)
- fake_plugin.validate_router_net_location_match(t_ctx, router, net)
- self.assertTrue(is_local_router)
-
- router_az_hints = '["az_name_2"]'
- net_az_hints = '["az_name_2"]'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 8, router_az_hints, net_az_hints, True)
- router = fake_plugin._get_router(q_ctx, t_router_id)
- net = fake_plugin.get_network(q_ctx, t_net_id)
- is_local_router = helper.NetworkHelper.is_local_router(t_ctx, router)
- fake_plugin.validate_router_net_location_match(t_ctx, router, net)
- self.assertFalse(is_local_router)
-
- router_az_hints = '["pod_1", "az_name_2"]'
- net_az_hints = '["az_name_2"]'
- t_ctx.is_admin = True
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 9, router_az_hints, net_az_hints, True)
- router = fake_plugin._get_router(q_ctx, t_router_id)
- net = fake_plugin.get_network(q_ctx, t_net_id)
- is_local_router = helper.NetworkHelper.is_local_router(t_ctx, router)
- fake_plugin.validate_router_net_location_match(t_ctx, router, net)
- self.assertFalse(is_local_router)
-
- net_az_hints = '["pod_1"]'
- t_ctx.is_admin = True
- (t_net_id, t_subnet_id, b_net_id,
- b_subnet_id) = self._prepare_network_subnet(
- tenant_id, t_ctx, 'pod_1', 10, az_hints=net_az_hints)
-
- # add a use case: the router's extra_attributes attr does not exist
- # but the availability_zone_hints attr does
- t_router = {
- 'id': uuidutils.generate_uuid(),
- 'name': 'top_router',
- 'distributed': False,
- 'tenant_id': tenant_id,
- 'attached_ports': test_utils.DotList(),
- 'availability_zone_hints': ['pod_1']
- }
-
- net = fake_plugin.get_network(q_ctx, t_net_id)
- is_local_router = helper.NetworkHelper.is_local_router(t_ctx, t_router)
- fake_plugin.validate_router_net_location_match(t_ctx, router, net)
- self.assertTrue(is_local_router)
-
- def _prepare_interface_port(self, t_ctx, t_subnet_id, ip_suffix):
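-        # build a top-side router interface port on the given subnet, deriving
-        # its IP and MAC deterministically from the subnet CIDR and the suffix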
- t_client = FakeClient()
- t_subnet = t_client.get_subnets(t_ctx, t_subnet_id)
- t_net = t_client.get_networks(t_ctx, t_subnet['network_id'])
- t_port_id = uuidutils.generate_uuid()
- t_port = {
- 'id': t_port_id,
- 'network_id': t_net['id'],
- 'device_id': '',
- 'device_owner': '',
- 'fixed_ips': [{'subnet_id': t_subnet['id'],
- 'ip_address': '%s%d' % (
- t_subnet['cidr'][:-4], ip_suffix)}],
- 'mac_address': 'fa:16:3e:d4:%02x:%02x' % (
- int(t_subnet['cidr'].split('.')[2]), ip_suffix),
- 'security_groups': [],
- 'tenant_id': t_subnet['tenant_id']
- }
- TOP_PORTS.append(test_utils.DotDict(t_port))
- return t_port_id
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_external_network_no_az_pod(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_admin_context()
- mock_context.return_value = t_ctx
-
- # create external network without specifying az pod name
- body = {
- 'network': {
- 'name': 'ext-net',
- 'admin_state_up': True,
- 'shared': True,
- 'tenant_id': TEST_TENANT_ID,
- 'router:external': True,
- }
- }
-
- top_net = fake_plugin.create_network(q_ctx, body)
- for net in BOTTOM1_NETS:
- if net.get('router:external'):
- bottom_net = net
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, top_net['id'], constants.RT_NETWORK)
- self.assertEqual(mappings[0][1], bottom_net['id'])
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_external_network(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- body = {
- 'network': {
- 'name': 'ext-net',
- 'admin_state_up': True,
- 'shared': False,
- 'tenant_id': TEST_TENANT_ID,
- 'router:external': True,
- 'availability_zone_hints': ['pod_1']
- }
- }
- top_net = fake_plugin.create_network(q_ctx, body)
- for net in BOTTOM1_NETS:
- if net.get('router:external'):
- bottom_net = net
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, top_net['id'], constants.RT_NETWORK)
- self.assertEqual(mappings[0][1], bottom_net['id'])
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_flat_external_network(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- body = {
- 'network': {
- 'name': 'ext-net1',
- 'admin_state_up': True,
- 'shared': False,
- 'tenant_id': TEST_TENANT_ID,
- 'router:external': True,
- 'availability_zone_hints': ['pod_1'],
- provider_net.PHYSICAL_NETWORK: 'extern',
- provider_net.NETWORK_TYPE: 'flat'
- }
- }
- fake_plugin.create_network(q_ctx, body)
-        body['network']['name'] = 'ext-net2'
- body['network']['availability_zone_hints'] = ['pod_2']
- fake_plugin.create_network(q_ctx, body)
-        # we have ignored the FlatNetworkInUse exception, so only one
-        # allocation record is created, and both pods have one external network
- self.assertEqual(1, len(TOP_FLATALLOCATIONS))
- self.assertEqual(1, len(BOTTOM1_NETS))
- self.assertEqual(1, len(BOTTOM2_NETS))
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- def _prepare_external_net_router_test(self, q_ctx, fake_plugin,
- router_az_hints=None):
-
- tenant_id = TEST_TENANT_ID
- t_net_body = {
- 'name': 'ext_net',
- 'availability_zone_hints': ['pod_1'],
- 'tenant_id': tenant_id,
- 'router:external': True,
- 'admin_state_up': True,
- 'shared': False,
- }
- fake_plugin.create_network(q_ctx, {'network': t_net_body})
- t_net_id = TOP_NETS[0]['id']
-
- t_subnet_body = {
- 'network_id': t_net_id, # only one network created
- 'name': 'ext_subnet',
- 'ip_version': 4,
- 'cidr': '100.64.0.0/24',
- 'allocation_pools': [],
- 'enable_dhcp': False,
- 'gateway_ip': '100.64.0.1',
- 'dns_nameservers': '',
- 'host_routes': '',
- 'tenant_id': tenant_id
- }
- fake_plugin.create_subnet(q_ctx, {'subnet': t_subnet_body})
- t_subnet_id = TOP_SUBNETS[0]['id']
-
- t_router_id = uuidutils.generate_uuid()
- t_router = {
- 'id': t_router_id,
- 'name': 'router',
- 'distributed': False,
- 'tenant_id': tenant_id,
- 'attached_ports': test_utils.DotList(),
- 'extra_attributes': {
- 'availability_zone_hints': router_az_hints
- }
- }
-
- TOP_ROUTERS.append(test_utils.DotDict(t_router))
-        return t_net_id, t_subnet_id, t_router_id
-
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- def _create_network_with_plugin(self, q_ctx, fake_plugin, network):
- return fake_plugin.create_network(q_ctx, network)
-
- def _prepare_associate_floatingip_test(self, t_ctx, q_ctx, fake_plugin,
- router_az_hints=None,
- net_az_hints=None,
- js_net_az_hints=None):
- tenant_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 1, router_az_hints, js_net_az_hints)
- if not net_az_hints:
- net_az_hints = ['pod_2']
- net_body = {
- 'name': 'ext_net',
- 'admin_state_up': True,
- 'shared': False,
- 'tenant_id': tenant_id,
- 'router:external': True,
- 'availability_zone_hints': net_az_hints
- }
- e_net = self._create_network_with_plugin(q_ctx,
- fake_plugin,
- {'network': net_body})
- subnet_body = {
- 'network_id': e_net['id'],
- 'name': 'ext_subnet',
- 'ip_version': 4,
- 'cidr': '100.64.0.0/24',
- 'allocation_pools': [{'start': '100.64.0.2',
- 'end': '100.64.0.254'}],
- 'enable_dhcp': False,
- 'gateway_ip': '100.64.0.1',
- 'dns_nameservers': '',
- 'host_routes': '',
- 'tenant_id': tenant_id
- }
- e_subnet = fake_plugin.create_subnet(q_ctx, {'subnet': subnet_body})
- # set external gateway
- fake_plugin.update_router(
- q_ctx, t_router_id,
- {'router': {'external_gateway_info': {
- 'network_id': e_net['id'],
- 'enable_snat': False,
- 'external_fixed_ips': [{'subnet_id': e_subnet['id'],
- 'ip_address': '100.64.0.5'}]}}})
- # create floating ip
- fip_body = {'floating_network_id': e_net['id'],
- 'tenant_id': tenant_id,
- 'subnet_id': None,
- 'floating_ip_address': None}
- fip = fake_plugin.create_floatingip(q_ctx, {'floatingip': fip_body})
- # add router interface
- fake_plugin.add_router_interface(q_ctx, t_router_id,
- {'subnet_id': t_subnet_id})
- # create internal port
- t_port_id = uuidutils.generate_uuid()
- # now top id and bottom id are the same
- b_port_id = t_port_id
- t_port = {
- 'id': t_port_id,
- 'network_id': t_net_id,
- 'mac_address': 'fa:16:3e:96:41:03',
- 'fixed_ips': [{'subnet_id': t_subnet_id,
- 'ip_address': '10.0.0.4'}]
- }
- b_port = {
- 'id': b_port_id,
- 'name': t_port_id,
- 'network_id': db_api.get_bottom_id_by_top_id_region_name(
- t_ctx, t_net_id, 'pod_1', constants.RT_NETWORK),
- 'mac_address': 'fa:16:3e:96:41:03',
- 'device_id': None,
- 'fixed_ips': [
- {'subnet_id': db_api.get_bottom_id_by_top_id_region_name(
- t_ctx, t_subnet_id, 'pod_1', constants.RT_SUBNET),
- 'ip_address': '10.0.0.4'}],
- 'binding:host_id': 'host_1',
- 'binding:vif_type': 'ovs'
- }
- TOP_PORTS.append(t_port)
- BOTTOM1_PORTS.append(b_port)
- route = {'top_id': t_port_id,
- 'pod_id': 'pod_id_1',
- 'bottom_id': b_port_id,
- 'resource_type': constants.RT_PORT}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- return t_port_id, b_port_id, fip, e_net
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_policy(self, mock_context):
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- self._test_create_policy(fake_plugin, q_ctx, t_ctx)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_policy(self, mock_context):
- self._basic_pod_route_setup()
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- self._test_update_policy(fake_plugin, q_ctx, t_ctx, 'pod_id_1',
- BOTTOM1_POLICIES)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_delete_policy(self, mock_context):
- self._basic_pod_route_setup()
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- self._test_delete_policy(fake_plugin, q_ctx, t_ctx, 'pod_id_1',
- BOTTOM1_POLICIES)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_policy_rule(self, mock_context):
- self._basic_pod_route_setup()
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- self._test_create_policy_rule(fake_plugin, q_ctx, t_ctx, 'pod_id_1',
- BOTTOM1_POLICIES)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_delete_policy_rule(self, mock_context):
- self._basic_pod_route_setup()
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- self._test_delete_policy_rule(fake_plugin, q_ctx, t_ctx, 'pod_id_1',
- BOTTOM1_POLICIES)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_network_with_qos_policy(self, mock_context):
- self._basic_pod_route_setup()
- t_ctx = context.get_db_context()
- fake_plugin = FakePlugin()
- fake_client = FakeClient('pod_1')
- q_ctx = FakeNeutronContext()
- mock_context.return_value = t_ctx
-
- tenant_id = TEST_TENANT_ID
- net_id, _, _, _ = \
- self._prepare_network_subnet(tenant_id, t_ctx, 'pod_1', 1)
-
- self._test_update_network_with_qos_policy(fake_plugin, fake_client,
- q_ctx, t_ctx, 'pod_id_1',
- net_id, BOTTOM1_POLICIES)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_port_with_qos_policy(self, mock_context):
- project_id = TEST_TENANT_ID
- self._basic_pod_route_setup()
- q_ctx = FakeNeutronContext()
- fake_client = FakeClient('pod_1')
- t_ctx = context.get_db_context()
- fake_plugin = FakePlugin()
- mock_context.return_value = t_ctx
- (t_net_id, t_subnet_id,
- b_net_id, b_subnet_id) = self._prepare_network_subnet(
- project_id, t_ctx, 'pod_1', 1)
- t_port_id, b_port_id = self._prepare_port_test(
- project_id, t_ctx, 'pod_1', 1, t_net_id, b_net_id,
- t_subnet_id, b_subnet_id)
-
- self._test_update_port_with_qos_policy(fake_plugin, fake_client,
- q_ctx, t_ctx,
- 'pod_id_1', t_port_id,
- b_port_id, BOTTOM1_POLICIES)
-
- @patch.object(FakeBaseRPCAPI, 'setup_shadow_ports')
- @patch.object(FakeClient, 'update_ports')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_update_port_trigger_l2pop(self, mock_context, mock_update,
- mock_setup):
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- t_ctx.project_id = TEST_TENANT_ID
- mock_context.return_value = t_ctx
-
- self._basic_pod_route_setup()
- (t_net_id, _, _, _) = self._prepare_network_subnet(
- TEST_TENANT_ID, t_ctx, 'pod_1', 1, network_type=constants.NT_VxLAN)
- self._prepare_network_subnet(TEST_TENANT_ID, t_ctx, 'pod_2', 1,
- network_type=constants.NT_VxLAN)
-
- t_port_id1, b_port_id1 = self._prepare_port(
- TEST_TENANT_ID, t_ctx, 'pod_1', 1,
- {'binding:host_id': 'host1',
- 'binding:vif_type': portbindings.VIF_TYPE_OVS})
- update_body = {'port': {
- 'binding:profile': {
- constants.PROFILE_REGION: 'pod_1',
- constants.PROFILE_DEVICE: 'compute:None',
- constants.PROFILE_HOST: 'host1',
- constants.PROFILE_AGENT_TYPE: q_constants.AGENT_TYPE_OVS,
- constants.PROFILE_TUNNEL_IP: '192.168.1.101'}}}
- fake_plugin.update_port(q_ctx, t_port_id1, update_body)
-
- t_port_id2, b_port_id2 = self._prepare_port(
- TEST_TENANT_ID, t_ctx, 'pod_2', 1,
- {'binding:host_id': 'host2',
- 'binding:vif_type': portbindings.VIF_TYPE_OVS})
- update_body = {'port': {
- 'binding:profile': {
- constants.PROFILE_REGION: 'pod_2',
- constants.PROFILE_DEVICE: 'compute:None',
- constants.PROFILE_HOST: 'host2',
- constants.PROFILE_AGENT_TYPE: q_constants.AGENT_TYPE_OVS,
- constants.PROFILE_TUNNEL_IP: '192.168.1.102'}}}
- fake_plugin.update_port(q_ctx, t_port_id2, update_body)
-
- # shadow port is created
- client = FakeClient('pod_2')
- b_sd_port1 = client.list_ports(
- t_ctx, [{'key': 'name', 'comparator': 'eq',
- 'value': constants.shadow_port_name % t_port_id1}])[0]
- # shadow port is updated to active
- mock_update.assert_called_once_with(
- t_ctx, b_sd_port1['id'], {'port': {
- 'binding:profile': {constants.PROFILE_FORCE_UP: 'True'}}})
- # asynchronous job in pod_1 is registered
- mock_setup.assert_called_once_with(t_ctx, TEST_TENANT_ID,
- 'pod_id_1', t_net_id)
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- test_utils.get_resource_store().clean()
- cfg.CONF.unregister_opts(q_config.core_opts)
- xmanager.IN_TEST = False
diff --git a/tricircle/tests/unit/network/test_central_sfc_plugin.py b/tricircle/tests/unit/network/test_central_sfc_plugin.py
deleted file mode 100644
index c03dd769..00000000
--- a/tricircle/tests/unit/network/test_central_sfc_plugin.py
+++ /dev/null
@@ -1,1059 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import copy
-from mock import patch
-import six
-import unittest
-
-from networking_sfc.db import sfc_db
-from networking_sfc.services.flowclassifier import plugin as fc_plugin
-
-import neutron.conf.common as q_config
-from neutron.db import db_base_plugin_v2
-import neutron_lib.context as q_context
-from neutron_lib.plugins import directory
-from neutronclient.common import exceptions as client_exceptions
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from tricircle.common import client
-from tricircle.common import constants
-from tricircle.common import context
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-import tricircle.network.central_fc_driver as fc_driver
-from tricircle.network import central_plugin
-import tricircle.network.central_sfc_driver as sfc_driver
-import tricircle.network.central_sfc_plugin as sfc_plugin
-from tricircle.network import helper
-import tricircle.tests.unit.utils as test_utils
-from tricircle.xjob import xmanager
-
-
-_resource_store = test_utils.get_resource_store()
-TOP_PORTS = _resource_store.TOP_PORTS
-TOP_PORTPAIRS = _resource_store.TOP_SFC_PORT_PAIRS
-TOP_PORTPAIRGROUPS = _resource_store.TOP_SFC_PORT_PAIR_GROUPS
-TOP_PORTCHAINS = _resource_store.TOP_SFC_PORT_CHAINS
-TOP_FLOWCLASSIFIERS = _resource_store.TOP_SFC_FLOW_CLASSIFIERS
-TOP_CHAIN_GROUP_ASSOCS = _resource_store.TOP_SFC_CHAIN_GROUP_ASSOCIATIONS
-TOP_CHAIN_CLASSIFIER_ASSOCS = (
- _resource_store.TOP_SFC_CHAIN_CLASSIFIER_ASSOCIATIONS)
-BOTTOM1_PORTS = _resource_store.BOTTOM1_PORTS
-BOTTOM2_PORTS = _resource_store.BOTTOM2_PORTS
-BOTTOM1_PORTPAIRS = _resource_store.BOTTOM1_SFC_PORT_PAIRS
-BOTTOM2_PORTPAIRS = _resource_store.BOTTOM2_SFC_PORT_PAIRS
-BOTTOM1_PORTPAIRGROUPS = _resource_store.BOTTOM1_SFC_PORT_PAIR_GROUPS
-BOTTOM2_PORTPAIRGROUPS = _resource_store.BOTTOM2_SFC_PORT_PAIR_GROUPS
-BOTTOM1_PORTCHAINS = _resource_store.BOTTOM1_SFC_PORT_CHAINS
-BOTTOM2_PORTCHAINS = _resource_store.BOTTOM2_SFC_PORT_CHAINS
-BOTTOM1_FLOWCLASSIFIERS = _resource_store.BOTTOM1_SFC_FLOW_CLASSIFIERS
-BOTTOM2_FLOWCLASSIFIERS = _resource_store.BOTTOM2_SFC_FLOW_CLASSIFIERS
-TEST_TENANT_ID = test_utils.TEST_TENANT_ID
-DotDict = test_utils.DotDict
-
-
-class FakeNetworkHelper(helper.NetworkHelper):
- def __init__(self):
- super(FakeNetworkHelper, self).__init__()
-
- def _get_client(self, region_name=None):
- return FakeClient(region_name)
-
-
-class FakeBaseXManager(xmanager.XManager):
- def __init__(self):
- self.clients = {constants.TOP: client.Client()}
- self.helper = FakeNetworkHelper()
-
- def _get_client(self, region_name=None):
- return FakeClient(region_name)
-
- def sync_service_function_chain(self, ctx, payload):
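-        # the payload packs pod id, top port chain id and network id joined
-        # by '#'; when no pod is specified, fan the job out to every pod the
-        # network is mapped to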
- (b_pod_id, t_port_chain_id, net_id) = payload[
- constants.JT_SFC_SYNC].split('#')
-
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, net_id, constants.RT_NETWORK)
- b_pods = [mapping[0] for mapping in mappings]
- for b_pod in b_pods:
- payload = '%s#%s#%s' % (b_pod['pod_id'], t_port_chain_id,
- net_id)
- super(FakeBaseXManager, self).sync_service_function_chain(
- ctx, {constants.JT_SFC_SYNC: payload})
- else:
- super(FakeBaseXManager, self).sync_service_function_chain(
- ctx, payload)
-
-
-class FakeXManager(FakeBaseXManager):
- def __init__(self, fake_plugin):
- super(FakeXManager, self).__init__()
- self.xjob_handler = FakeBaseRPCAPI(fake_plugin)
-
-
-class FakeBaseRPCAPI(object):
- def __init__(self, fake_plugin):
- self.xmanager = FakeBaseXManager()
-
- def sync_service_function_chain(self, ctxt, project_id, portchain_id,
- net_id, pod_id):
- combine_id = '%s#%s#%s' % (pod_id, portchain_id, net_id)
- self.xmanager.sync_service_function_chain(
- ctxt,
- payload={constants.JT_SFC_SYNC: combine_id})
-
- def recycle_resources(self, ctx, project_id):
- self.xmanager.recycle_resources(ctx, payload={
- constants.JT_RESOURCE_RECYCLE: project_id})
-
-
-class FakeRPCAPI(FakeBaseRPCAPI):
- def __init__(self, fake_plugin):
- self.xmanager = FakeXManager(fake_plugin)
-
-
-class FakeClient(test_utils.FakeClient):
-
- def delete_resources(self, _type, ctx, _id):
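-        # emulate the server-side in-use checks: a port pair still bound to a
-        # group and a flow classifier still referenced by a port chain cannot
-        # be deleted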
- if _type == constants.RT_PORT_PAIR:
- pp = self.get_resource(constants.RT_PORT_PAIR, ctx, _id)
- if not pp:
- raise client_exceptions.NotFound()
- if pp['portpairgroup_id']:
- raise client_exceptions.Conflict(constants.STR_IN_USE)
- elif _type == constants.RT_FLOW_CLASSIFIER:
- pc_list = self._res_map[self.region_name][constants.RT_PORT_CHAIN]
- for pc in pc_list:
- if _id in pc['flow_classifiers']:
- raise client_exceptions.Conflict(constants.STR_IN_USE)
-
- return super(FakeClient, self).delete_resources(_type, ctx, _id)
-
- def create_resources(self, _type, ctx, body):
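-        # emulate the server-side uniqueness checks so the drivers' conflict
-        # handling is exercised: duplicate ingress ports, port pairs already
-        # grouped, duplicate logical source ports and flow classifiers
-        # already chained are all rejected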
- if _type == constants.RT_PORT_PAIR:
- pp_list = self._res_map[self.region_name][constants.RT_PORT_PAIR]
- for pp in pp_list:
- if body[_type]['ingress'] == pp['ingress']:
- raise client_exceptions.BadRequest(constants.STR_USED_BY)
- elif _type == constants.RT_PORT_PAIR_GROUP:
- ppg_list = self._res_map[self.region_name][
- constants.RT_PORT_PAIR_GROUP]
- for pp in body[_type]['port_pairs']:
- for ppg in ppg_list:
- if pp in ppg['port_pairs']:
- raise client_exceptions.Conflict(constants.STR_IN_USE)
- elif _type == constants.RT_FLOW_CLASSIFIER:
- fc_list = self._res_map[self.region_name][
- constants.RT_FLOW_CLASSIFIER]
- for fc in fc_list:
- if (body[_type]['logical_source_port'] ==
- fc['logical_source_port']):
- raise client_exceptions.BadRequest(
- constants.STR_CONFLICTS_WITH)
- elif _type == constants.RT_PORT_CHAIN:
- pc_list = self._res_map[self.region_name][constants.RT_PORT_CHAIN]
- for fc in body[_type]['flow_classifiers']:
- for pc in pc_list:
- if fc in pc['flow_classifiers']:
- raise client_exceptions.Conflict(constants.STR_IN_USE)
-
- return super(FakeClient, self).create_resources(_type, ctx, body)
-
- def get_port_chains(self, ctx, portchain_id):
- res = self.get_resource('port_chain', ctx, portchain_id)
- return copy.copy(res) if res else res
-
- def get_port_pair_groups(self, ctx, portpairgroup_id):
- res = self.get_resource('port_pair_group', ctx, portpairgroup_id)
- return copy.copy(res) if res else res
-
- def get_flow_classifiers(self, ctx, flowclassifier_id):
- res = self.get_resource('flow_classifier', ctx, flowclassifier_id)
- return copy.copy(res) if res else res
-
- def list_port_pairs(self, ctx, filters=None, _copy=True):
- portpairs = self.list_resources('port_pair', ctx, filters)
- portpairs_copy = [copy.copy(pp) for pp in portpairs]
- return portpairs_copy if _copy else portpairs
-
- def list_flow_classifiers(self, ctx, filters=None):
- return self.list_resources('flow_classifier', ctx, filters)
-
- def list_port_chains(self, ctx, filters=None):
- return self.list_resources('port_chain', ctx, filters)
-
- def list_port_pair_groups(self, ctx, filters=None):
- return self.list_resources('port_pair_group', ctx, filters)
-
- def update_port_pairs(self, ctx, portpair_id, body):
- return self.update_resources('port_pair', ctx,
- portpair_id, body)
-
- def update_port_pair_groups(self, ctx, portpairgroup_id, body):
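-        # detach all port pairs currently bound to the group before applying
-        # the update, so the update body fully defines the new membership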
- filters = [{'key': 'portpairgroup_id',
- 'comparator': 'eq',
- 'value': portpairgroup_id}]
- pps = self.list_port_pairs(ctx, filters, False)
- for pp in pps:
- pp['portpairgroup_id'] = None
- return self.update_resources('port_pair_group', ctx,
- portpairgroup_id, body)
-
- def update_flow_classifiers(self, ctx, flowclassifier_id, body):
- return self.update_resources('flow_classifier', ctx,
- flowclassifier_id, body)
-
- def update_port_chains(self, ctx, portchain_id, body):
- return self.update_resources('port_chain', ctx,
- portchain_id, body)
-
- def get_ports(self, ctx, port_id):
- return self.get_resource('port', ctx, port_id)
-
- def delete_port_chains(self, context, portchain_id):
- pc = self.get_resource('port_chain', context, portchain_id)
- if not pc:
- raise client_exceptions.NotFound()
- self.delete_resources('port_chain', context, portchain_id)
-
- def delete_port_pairs(self, context, portpair_id):
- pp = self.get_resource('port_pair', context, portpair_id)
- if not pp:
- raise client_exceptions.NotFound()
- pp = self.get_resource('port_pair', context, portpair_id)
- if pp and pp.get('portpairgroup_id'):
- raise client_exceptions.Conflict("in use")
- self.delete_resources('port_pair', context, portpair_id)
-
- def delete_port_pair_groups(self, context, portpairgroup_id):
- ppg = self.get_resource('port_pair_group', context, portpairgroup_id)
- if not ppg:
- raise client_exceptions.NotFound()
- for pc in BOTTOM1_PORTCHAINS:
- if portpairgroup_id in pc['port_pair_groups']:
- raise client_exceptions.Conflict("in use")
- self.delete_resources('port_pair_group', context, portpairgroup_id)
-
- def delete_flow_classifiers(self, context, flowclassifier_id):
- fc = self.get_resource('flow_classifier', context, flowclassifier_id)
- if not fc:
- raise client_exceptions.NotFound()
- for pc in BOTTOM1_PORTCHAINS:
- if flowclassifier_id in pc['flow_classifiers']:
- raise client_exceptions.Conflict("in use")
- self.delete_resources('flow_classifier', context, flowclassifier_id)
-
-
-class FakeNeutronContext(q_context.Context):
- def __init__(self):
- self._session = None
- self.is_admin = True
- self.is_advsvc = False
- self.tenant_id = TEST_TENANT_ID
-
- @property
- def session(self):
- if not self._session:
- self._session = FakeSession()
- return self._session
-
- def elevated(self):
- return self
-
-
-class FakeSession(test_utils.FakeSession):
-
- def _fill_port_chain_dict(self, port_chain, model_dict, fields=None):
- model_dict['port_pair_groups'] = [
- assoc['portpairgroup_id']
- for assoc in port_chain['chain_group_associations']]
- model_dict['flow_classifiers'] = [
- assoc['flowclassifier_id']
- for assoc in port_chain['chain_classifier_associations']]
-
- def add_hook(self, model_obj, model_dict):
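-        # when a port chain row is inserted, expose its group and classifier
-        # associations as plain id lists on the resulting dict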
- if model_obj.__tablename__ == 'sfc_port_chains':
- self._fill_port_chain_dict(model_obj, model_dict)
-
-
-class FakeDriver(object):
- def __init__(self, driver, name):
- self.obj = driver
- self.name = name
-
-
-class FakeSfcDriver(sfc_driver.TricircleSfcDriver):
- def __init__(self):
- self.xjob_handler = FakeRPCAPI(self)
- self.helper = helper.NetworkHelper(self)
-
- def _get_client(self, region_name):
- return FakeClient(region_name)
-
-
-class FakeFcDriver(fc_driver.TricircleFcDriver):
- def __init__(self):
- self.xjob_handler = FakeRPCAPI(self)
- self.helper = helper.NetworkHelper(self)
-
- def _get_client(self, region_name):
- return FakeClient(region_name)
-
-
-class FakeFcPlugin(fc_plugin.FlowClassifierPlugin):
- def __init__(self):
- super(FakeFcPlugin, self).__init__()
- self.driver_manager.ordered_drivers = [FakeDriver(
- FakeFcDriver(), "tricircle_fc")]
-
-
-class FakeSfcPlugin(sfc_plugin.TricircleSfcPlugin):
- def __init__(self):
- super(FakeSfcPlugin, self).__init__()
- self.driver_manager.ordered_drivers = [FakeDriver(
- FakeSfcDriver(), "tricircle_sfc")]
-
- def _get_client(self, region_name):
- return FakeClient(region_name)
-
- def get_port_pairs(self, context, filters=None):
- ret = []
- client = self._get_client('top')
- for key, values in six.iteritems(filters):
- for v in values:
- _filter = [{'key': key, 'comparator': 'eq', 'value': v}]
- res = client.list_resources('port_pair', context, _filter)
- if res:
- ret.extend(res)
- return ret
-
- def get_port_chain(self, context, id, fields=None):
- client = self._get_client('top')
- filter = [{'key': 'id', 'comparator': 'eq', 'value': id}]
- portchains = client.list_resources('port_chain', context, filter)
- if portchains:
- return portchains[0]
- return None
-
-
-def fake_get_context_from_neutron_context(q_context):
- ctx = context.get_db_context()
- ctx.project_id = q_context.project_id
- return ctx
-
-
-def fake_validate_pps_in_ppg(self, portpairs_list, id=None):
- pass
-
-
-def fake_make_port_pair_group_dict(self, port_pair_group, fields=None):
- port_pairs = port_pair_group.port_pairs
-    if port_pairs and isinstance(port_pairs[0], test_utils.DotDict):
- port_pair_group['port_pairs'] = [pp['id'] for pp in port_pairs]
- return port_pair_group
-
-
-def fake_make_port_chain_dict(self, port_chain, fields=None):
- port_chain['port_pair_groups'] = [assoc['portpairgroup_id'] for assoc in
- port_chain.chain_group_associations]
- port_chain['flow_classifiers'] = [assoc['flowclassifier_id'] for assoc in
- port_chain.chain_classifier_associations]
- return port_chain
-
-
-def fake_make_port_pair_dict(self, port_pair, fields=None):
- return port_pair
-
-
-class FakeCorePlugin(central_plugin.TricirclePlugin):
- def __init__(self):
- pass
-
- def get_port(self, ctx, _id):
- return self._get_port(ctx, _id)
-
- def _get_port(self, ctx, _id):
- top_client = FakeClient()
- _filters = [{'key': 'id', 'comparator': 'eq', 'value': _id}]
- return top_client.list_resources('port', ctx, _filters)[0]
-
-
-def fake_get_plugin(alias='core'):
- if alias == 'sfc':
- return FakeSfcPlugin()
- return FakeCorePlugin()
-
-
-class PluginTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- cfg.CONF.register_opts(q_config.core_opts)
- core.get_engine().execute('pragma foreign_keys=on')
- self.context = context.Context()
- xmanager.IN_TEST = True
- directory.get_plugin = fake_get_plugin
-
- def _basic_pod_setup(self):
- pod1 = {'pod_id': 'pod_id_1',
- 'region_name': 'pod_1',
- 'az_name': 'az_name_1'}
- pod2 = {'pod_id': 'pod_id_2',
- 'region_name': 'pod_2',
- 'az_name': 'az_name_2'}
- pod3 = {'pod_id': 'pod_id_0',
- 'region_name': 'top_pod',
- 'az_name': ''}
- for pod in (pod1, pod2, pod3):
- db_api.create_pod(self.context, pod)
-
- def _prepare_net_test(self, project_id, ctx, pod_name):
- t_net_id = uuidutils.generate_uuid()
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_net_id,
- 'bottom_id': t_net_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_NETWORK})
- return t_net_id
-
- def _prepare_port_test(self, tenant_id, ctx, pod_name, net_id):
- t_port_id = uuidutils.generate_uuid()
- t_port = {
- 'id': t_port_id,
- 'network_id': net_id
- }
- TOP_PORTS.append(DotDict(t_port))
- b_port = {
- 'id': t_port_id,
- 'network_id': net_id
- }
- if pod_name == 'pod_1':
- BOTTOM1_PORTS.append(DotDict(b_port))
- else:
- BOTTOM2_PORTS.append(DotDict(b_port))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_port_id,
- 'bottom_id': t_port_id,
- 'pod_id': pod_id,
- 'project_id': tenant_id,
- 'resource_type': constants.RT_PORT})
-
- return t_port_id
-
- def _update_port_pair_test(self, ppg_mappings, port_pairs):
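-        # bind each port pair to its port pair group according to the mapping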
- for pp_id, ppg_id in six.iteritems(ppg_mappings):
- for pp in port_pairs:
- if pp['id'] == pp_id:
- pp['portpairgroup_id'] = ppg_id
-
- def _prepare_port_pair_test(self, project_id, t_ctx, pod_name,
- index, ingress, egress, create_bottom,
- portpairgroup_id=None):
- t_pp_id = uuidutils.generate_uuid()
- b_pp_id = uuidutils.generate_uuid()
- top_pp = {
- 'id': t_pp_id,
- 'project_id': project_id,
- 'tenant_id': project_id,
- 'ingress': ingress,
- 'egress': egress,
- 'name': 'top_pp_%d' % index,
- 'service_function_parameters': {
- "weight": 1,
- "correlation": DotDict({'value': 'null'})},
- 'description': "description",
- 'portpairgroup_id': portpairgroup_id
- }
- TOP_PORTPAIRS.append(DotDict(top_pp))
- if create_bottom:
- btm_pp = {
- 'id': b_pp_id,
- 'project_id': project_id,
- 'tenant_id': project_id,
- 'ingress': ingress,
- 'egress': egress,
- 'name': 'btm_pp_%d' % index,
- 'service_function_parameters': {
- "weight": 1,
- "correlation": DotDict({'value': 'null'})},
- 'description': "description",
- 'portpairgroup_id': portpairgroup_id
- }
- if pod_name == 'pod_1':
- BOTTOM1_PORTPAIRS.append(DotDict(btm_pp))
- else:
- BOTTOM2_PORTPAIRS.append(DotDict(btm_pp))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(t_ctx, models.ResourceRouting,
- {'top_id': t_pp_id,
- 'bottom_id': b_pp_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_PORT_PAIR})
-
- return t_pp_id, b_pp_id
-
- def _prepare_port_pair_group_test(self, project_id, t_ctx, pod_name, index,
- t_pp_ids, create_bottom, b_pp_ids):
- t_ppg_id = uuidutils.generate_uuid()
- b_ppg_id = uuidutils.generate_uuid()
-
- t_client = FakeClient()
- b_client = FakeClient(pod_name)
- t_pps = [t_client.get_resource(
- 'port_pair', t_ctx, e) for e in t_pp_ids]
- if create_bottom:
- b_pps = [b_client.get_resource(
- 'port_pair', t_ctx, e) for e in b_pp_ids]
-
- top_ppg = {
- "group_id": 1,
- "description": "",
- "tenant_id": project_id,
- "port_pair_group_parameters": {"lb_fields": []},
- "port_pairs": t_pps,
- "project_id": project_id,
- "id": t_ppg_id,
- "name": 'top_ppg_%d' % index,
- "tap_enabled": False}
- TOP_PORTPAIRGROUPS.append(DotDict(top_ppg))
- if create_bottom:
- btm_ppg = {
- "group_id": 1,
- "description": "",
- "tenant_id": project_id,
- "port_pair_group_parameters": {"lb_fields": []},
- "port_pairs": b_pps,
- "project_id": project_id,
- "id": b_ppg_id,
- "name": 'btm_ppg_%d' % index,
- "tap_enabled": False}
- if pod_name == 'pod_1':
- BOTTOM1_PORTPAIRGROUPS.append(DotDict(btm_ppg))
- else:
- BOTTOM2_PORTPAIRGROUPS.append(DotDict(btm_ppg))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(t_ctx, models.ResourceRouting,
- {'top_id': t_ppg_id,
- 'bottom_id': b_ppg_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type':
- constants.RT_PORT_PAIR_GROUP})
-
- return t_ppg_id, b_ppg_id
-
- def _prepare_flow_classifier_test(self, project_id, t_ctx, pod_name,
- index, src_port_id, create_bottom):
- t_fc_id = uuidutils.generate_uuid()
- b_fc_id = uuidutils.generate_uuid()
-
- top_fc = {
- "source_port_range_min": None,
- "destination_ip_prefix": None,
- "protocol": None,
- "description": "",
- "l7_parameters": {},
- "source_port_range_max": None,
- "id": t_fc_id,
- "name": "t_fc_%s" % index,
- "ethertype": "IPv4",
- "tenant_id": project_id,
- "source_ip_prefix": "1.0.0.0/24",
- "logical_destination_port": None,
- "destination_port_range_min": None,
- "destination_port_range_max": None,
- "project_id": project_id,
- "logical_source_port": src_port_id}
-
- TOP_FLOWCLASSIFIERS.append(DotDict(top_fc))
- if create_bottom:
- btm_fc = {
- "source_port_range_min": None,
- "destination_ip_prefix": None,
- "protocol": None,
- "description": "",
- "l7_parameters": {},
- "source_port_range_max": None,
- "id": b_fc_id,
- "name": "b_fc_%s" % index,
- "ethertype": "IPv4",
- "tenant_id": project_id,
- "source_ip_prefix": "1.0.0.0/24",
- "logical_destination_port": None,
- "destination_port_range_min": None,
- "destination_port_range_max": None,
- "project_id": project_id,
- "logical_source_port": src_port_id}
- if pod_name == 'pod_1':
- BOTTOM1_FLOWCLASSIFIERS.append(DotDict(btm_fc))
- else:
- BOTTOM2_FLOWCLASSIFIERS.append(DotDict(btm_fc))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(t_ctx, models.ResourceRouting,
- {'top_id': t_fc_id,
- 'bottom_id': b_fc_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type':
- constants.RT_FLOW_CLASSIFIER})
-
- return t_fc_id, b_fc_id
-
- def _prepare_chain_group_assoc_test(self, chain_id, group_id):
- chain_group_assoc = {'portpairgroup_id': group_id,
- 'portchain_id': chain_id}
- TOP_CHAIN_GROUP_ASSOCS.append(DotDict(chain_group_assoc))
-
- def _prepare_chain_classifier_assoc_test(self,
- chain_id, flowclassifier_id):
- chain_classifier_assoc = {'flowclassifier_id': flowclassifier_id,
- 'portchain_id': chain_id}
- TOP_CHAIN_CLASSIFIER_ASSOCS.append(DotDict(chain_classifier_assoc))
-
- def _prepare_port_chain_test(self, project_id, t_ctx, pod_name,
- index, create_bottom, ids):
- t_pc_id = uuidutils.generate_uuid()
- b_pc_id = uuidutils.generate_uuid()
-
- top_pc = {
- "tenant_id": project_id,
- "name": "t_pc_%s" % index,
- "chain_parameters": {
- "symmetric": False, "correlation": "mpls"},
- "port_pair_groups": ids['t_ppg_id'],
- "flow_classifiers": ids['t_fc_id'],
- "project_id": project_id,
- "chain_id": 1,
- "description": "",
- "id": t_pc_id}
-
- TOP_PORTCHAINS.append(DotDict(top_pc))
- if create_bottom:
- btm_pc = {
- "tenant_id": project_id,
- "name": "b_pc_%s" % index,
- "chain_parameters": {
- "symmetric": False, "correlation": "mpls"},
- "port_pair_groups": ids['b_ppg_id'],
- "flow_classifiers": ids['b_fc_id'],
- "project_id": project_id,
- "chain_id": 1,
- "description": "",
- "id": b_pc_id}
- if pod_name == 'pod_1':
- BOTTOM1_PORTCHAINS.append(DotDict(btm_pc))
- else:
- BOTTOM2_PORTCHAINS.append(DotDict(btm_pc))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(t_ctx, models.ResourceRouting,
- {'top_id': t_pc_id,
- 'bottom_id': b_pc_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_PORT_CHAIN})
-
- return t_pc_id, b_pc_id
-
- def test_get_client(self):
- driver = fc_driver.TricircleFcDriver()
- t_client = driver._get_client('top')
- self.assertEqual(t_client.region_name, 'top')
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- @patch.object(directory, 'get_plugin', new=fake_get_plugin)
- def test_get_port(self):
- self._basic_pod_setup()
- project_id = TEST_TENANT_ID
- fake_plugin = FakeSfcPlugin()
- t_ctx = context.get_db_context()
- port_id = self._prepare_port_test(project_id, t_ctx, 'pod_1', None)
- port = fake_plugin._get_port(context, port_id)
- self.assertIsNotNone(port)
-
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_port',
- new=FakeCorePlugin.get_port)
- @patch.object(sfc_db.SfcDbPlugin, 'get_port_pairs',
- new=FakeSfcPlugin.get_port_pairs)
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_create_port_chain(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakeSfcPlugin()
-
- t_net_id = self._prepare_net_test(project_id, t_ctx, 'pod_1')
- ingress = self._prepare_port_test(project_id, t_ctx, 'pod_1', t_net_id)
- egress = self._prepare_port_test(project_id, t_ctx, 'pod_1', t_net_id)
- src_port_id = self._prepare_port_test(project_id,
- t_ctx, 'pod_1', t_net_id)
- t_pp1_id, _ = self._prepare_port_pair_test(
- project_id, t_ctx, 'pod_1', 0, ingress, egress, False)
- t_ppg1_id, _ = self._prepare_port_pair_group_test(
- project_id, t_ctx, 'pod_1', 0, [t_pp1_id], False, None)
- ppg1_mapping = {t_pp1_id: t_ppg1_id}
- self._update_port_pair_test(ppg1_mapping, TOP_PORTPAIRS)
- t_fc1_id, _ = self._prepare_flow_classifier_test(
- project_id, t_ctx, 'pod_1', 0, src_port_id, False)
- body = {"port_chain": {
- "tenant_id": project_id,
- "name": "pc1",
- "chain_parameters": {
- "symmetric": False, "correlation": "mpls"},
- "port_pair_groups": [t_ppg1_id],
- "flow_classifiers": [t_fc1_id],
- "project_id": project_id,
- "chain_id": 1,
- "description": ""}}
- t_pc1 = fake_plugin.create_port_chain(q_ctx, body)
- pp1_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_pp1_id, constants.RT_PORT_PAIR)
- ppg1_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_ppg1_id, constants.RT_PORT_PAIR_GROUP)
- fc1_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_fc1_id, constants.RT_FLOW_CLASSIFIER)
- pc1_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_pc1['id'], constants.RT_PORT_CHAIN)
- btm1_pp_ids = [btm_pp['id'] for btm_pp in BOTTOM1_PORTPAIRS]
- btm1_ppg_ids = [btm_ppg['id'] for btm_ppg in BOTTOM1_PORTPAIRGROUPS]
- btm1_fc_ids = [btm_fc['id'] for btm_fc in BOTTOM1_FLOWCLASSIFIERS]
- btm1_pc_ids = [btm_pc['id'] for btm_pc in BOTTOM1_PORTCHAINS]
- b_pp1_id = pp1_mappings[0][1]
- b_ppg1_id = ppg1_mappings[0][1]
- b_fc1_id = fc1_mappings[0][1]
- b_pc1_id = pc1_mappings[0][1]
- self.assertEqual([b_pp1_id], btm1_pp_ids)
- self.assertEqual([b_ppg1_id], btm1_ppg_ids)
- self.assertEqual([b_fc1_id], btm1_fc_ids)
- self.assertEqual([b_pc1_id], btm1_pc_ids)
-
-        # remove the top resources but keep the bottom ones, so recreating
-        # the chain conflicts with the stale bottom resources
- TOP_PORTCHAINS.pop()
- TOP_FLOWCLASSIFIERS.pop()
- TOP_PORTPAIRGROUPS.pop()
- TOP_PORTPAIRS.pop()
- b_ppg1_mapping = {b_pp1_id: b_ppg1_id}
- self._update_port_pair_test(b_ppg1_mapping, BOTTOM1_PORTPAIRS)
- db_api.create_recycle_resource(
- t_ctx, t_ppg1_id, constants.RT_PORT_PAIR_GROUP, q_ctx.project_id)
-
- t_pp2_id, _ = self._prepare_port_pair_test(
- project_id, t_ctx, 'pod_1', 0, ingress, egress, False)
- t_ppg2_id, _ = self._prepare_port_pair_group_test(
- project_id, t_ctx, 'pod_1', 0, [t_pp2_id], False, None)
- ppg2_mapping = {t_pp2_id: t_ppg2_id}
- self._update_port_pair_test(ppg2_mapping, TOP_PORTPAIRS)
- t_fc2_id, _ = self._prepare_flow_classifier_test(
- project_id, t_ctx, 'pod_1', 0, src_port_id, False)
- body2 = {"port_chain": {
- "tenant_id": project_id,
- "name": "pc1",
- "chain_parameters": {
- "symmetric": False, "correlation": "mpls"},
- "port_pair_groups": [t_ppg2_id],
- "flow_classifiers": [t_fc2_id],
- "project_id": project_id,
- "chain_id": 1,
- "description": ""}}
- t_pc2 = fake_plugin.create_port_chain(q_ctx, body2)
- pp2_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_pp2_id, constants.RT_PORT_PAIR)
- ppg2_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_ppg2_id, constants.RT_PORT_PAIR_GROUP)
- fc2_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_fc2_id, constants.RT_FLOW_CLASSIFIER)
- pc2_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_pc2['id'], constants.RT_PORT_CHAIN)
- btm1_pp_ids = [btm_pp['id'] for btm_pp in BOTTOM1_PORTPAIRS]
- btm1_ppg_ids = [btm_ppg['id'] for btm_ppg in BOTTOM1_PORTPAIRGROUPS]
- btm1_fc_ids = [btm_fc['id'] for btm_fc in BOTTOM1_FLOWCLASSIFIERS]
- btm1_pc_ids = [btm_pc['id'] for btm_pc in BOTTOM1_PORTCHAINS]
- b_pp2_id = pp2_mappings[0][1]
- b_ppg2_id = ppg2_mappings[0][1]
- b_fc2_id = fc2_mappings[0][1]
- b_pc2_id = pc2_mappings[0][1]
- self.assertEqual([b_pp2_id], btm1_pp_ids)
- self.assertEqual([b_ppg2_id], btm1_ppg_ids)
- self.assertEqual([b_fc2_id], btm1_fc_ids)
- self.assertEqual([b_pc2_id], btm1_pc_ids)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_delete_port_chain(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakeSfcPlugin()
- ids = {'t_ppg_id': [uuidutils.generate_uuid()],
- 'b_ppg_id': [uuidutils.generate_uuid()],
- 't_fc_id': [uuidutils.generate_uuid()],
- 'b_fc_id': [uuidutils.generate_uuid()]}
- t_pc_id1, _ = self._prepare_port_chain_test(
- project_id, t_ctx, 'pod_1', 0, True, ids)
-
- fake_plugin.delete_port_chain(q_ctx, t_pc_id1)
- pc_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_pc_id1, constants.RT_PORT_CHAIN)
- self.assertEqual(len(TOP_PORTCHAINS), 0)
- self.assertEqual(len(BOTTOM1_PORTCHAINS), 0)
- self.assertEqual(len(pc_mappings), 0)
-
- t_pc_id2, _ = self._prepare_port_chain_test(
- project_id, t_ctx, 'pod_1', 0, True, ids)
- BOTTOM1_PORTCHAINS.pop()
- fake_plugin.delete_port_chain(q_ctx, t_pc_id2)
- pc_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_pc_id2, constants.RT_PORT_CHAIN)
- self.assertEqual(len(TOP_PORTCHAINS), 0)
- self.assertEqual(len(pc_mappings), 0)
-
- @patch.object(sfc_db.SfcDbPlugin, '_make_port_pair_group_dict',
- new=fake_make_port_pair_group_dict)
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_delete_port_pair_group(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakeSfcPlugin()
-
- t_pp_id = uuidutils.generate_uuid()
- b_pp_id = uuidutils.generate_uuid()
-
- t_ppg_id1, _ = self._prepare_port_pair_group_test(
- project_id, t_ctx, 'pod_1', 0, [t_pp_id], True, [b_pp_id])
- fake_plugin.delete_port_pair_group(q_ctx, t_ppg_id1)
- ppg_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_ppg_id1, constants.RT_PORT_PAIR_GROUP)
- self.assertEqual(len(TOP_PORTPAIRGROUPS), 0)
- self.assertEqual(len(BOTTOM1_PORTPAIRGROUPS), 0)
- self.assertEqual(len(ppg_mappings), 0)
-
- t_ppg_id2, _ = self._prepare_port_pair_group_test(
- project_id, t_ctx, 'pod_1', 0, [t_pp_id], True, [b_pp_id])
- BOTTOM1_PORTPAIRGROUPS.pop()
- fake_plugin.delete_port_pair_group(q_ctx, t_ppg_id2)
- ppg_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_ppg_id2, constants.RT_PORT_PAIR_GROUP)
- self.assertEqual(len(TOP_PORTPAIRGROUPS), 0)
- self.assertEqual(len(ppg_mappings), 0)
-
- @patch.object(sfc_db.SfcDbPlugin, '_make_port_pair_dict',
- new=fake_make_port_pair_dict)
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_delete_port_pair(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakeSfcPlugin()
-
- ingress = uuidutils.generate_uuid()
- egress = uuidutils.generate_uuid()
- t_pp1_id, _ = self._prepare_port_pair_test(
- project_id, t_ctx, 'pod_1', 0, ingress, egress, True)
- fake_plugin.delete_port_pair(q_ctx, t_pp1_id)
- ppg_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_pp1_id, constants.RT_PORT_PAIR_GROUP)
- self.assertEqual(len(TOP_PORTPAIRS), 0)
- self.assertEqual(len(BOTTOM1_PORTPAIRS), 0)
- self.assertEqual(len(ppg_mappings), 0)
-
- t_pp2_id, _ = self._prepare_port_pair_test(
- project_id, t_ctx, 'pod_1', 0, ingress, egress, True)
- BOTTOM1_PORTPAIRS.pop()
- fake_plugin.delete_port_pair(q_ctx, t_pp2_id)
- ppg_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_pp2_id, constants.RT_PORT_PAIR_GROUP)
- self.assertEqual(len(TOP_PORTPAIRS), 0)
- self.assertEqual(len(ppg_mappings), 0)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_delete_flow_classifier(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakeFcPlugin()
-
- src_port_id = uuidutils.generate_uuid()
-
- t_fc_id1, _ = self._prepare_flow_classifier_test(
- project_id, t_ctx, 'pod_1', 0, src_port_id, True)
- fake_plugin.delete_flow_classifier(q_ctx, t_fc_id1)
- ppg_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_fc_id1, constants.RT_FLOW_CLASSIFIER)
- self.assertEqual(len(TOP_FLOWCLASSIFIERS), 0)
- self.assertEqual(len(BOTTOM1_FLOWCLASSIFIERS), 0)
- self.assertEqual(len(ppg_mappings), 0)
-
- t_fc_id2, _ = self._prepare_flow_classifier_test(
- project_id, t_ctx, 'pod_1', 0, src_port_id, True)
- BOTTOM1_FLOWCLASSIFIERS.pop()
- fake_plugin.delete_flow_classifier(q_ctx, t_fc_id2)
- ppg_mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_fc_id2, constants.RT_FLOW_CLASSIFIER)
- self.assertEqual(len(TOP_FLOWCLASSIFIERS), 0)
- self.assertEqual(len(ppg_mappings), 0)
-
- @patch.object(sfc_db.SfcDbPlugin, '_validate_pps_in_ppg',
- new=fake_validate_pps_in_ppg)
- @patch.object(sfc_db.SfcDbPlugin, '_make_port_chain_dict',
- new=fake_make_port_chain_dict)
- @patch.object(sfc_db.SfcDbPlugin, '_make_port_pair_group_dict',
- new=fake_make_port_pair_group_dict)
- @patch.object(sfc_db.SfcDbPlugin, '_make_port_pair_dict',
- new=fake_make_port_pair_dict)
- @patch.object(sfc_db.SfcDbPlugin, 'get_port_chain',
- new=FakeSfcPlugin.get_port_chain)
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_port',
- new=FakeCorePlugin.get_port)
- @patch.object(sfc_db.SfcDbPlugin, 'get_port_pairs',
- new=FakeSfcPlugin.get_port_pairs)
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_update_service_function_chain(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_sfc_plugin = FakeSfcPlugin()
- fake_fc_plugin = FakeFcPlugin()
-
- t_net_id = self._prepare_net_test(project_id, t_ctx, 'pod_1')
- src_port_id = self._prepare_port_test(project_id,
- t_ctx, 'pod_1', t_net_id)
- ingress1 = self._prepare_port_test(project_id, t_ctx,
- 'pod_1', t_net_id)
- egress1 = self._prepare_port_test(project_id, t_ctx, 'pod_1', t_net_id)
- t_pp1_id, b_pp1_id = self._prepare_port_pair_test(
- project_id, t_ctx, 'pod_1', 0, ingress1, egress1, True)
- t_ppg1_id, b_ppg1_id = self._prepare_port_pair_group_test(
- project_id, t_ctx, 'pod_1', 0,
- [t_pp1_id], True, [b_pp1_id])
- ppg_mapping = {t_pp1_id: t_ppg1_id}
- self._update_port_pair_test(ppg_mapping, TOP_PORTPAIRS)
- t_fc1_id, b_fc1_id = self._prepare_flow_classifier_test(
- project_id, t_ctx, 'pod_1', 0, src_port_id, True)
-
- ids = {'t_ppg_id': [t_ppg1_id],
- 'b_ppg_id': [b_ppg1_id],
- 't_fc_id': [t_fc1_id],
- 'b_fc_id': [b_fc1_id]}
-
- t_pc1_id, b_pc1_id = self._prepare_port_chain_test(
- project_id, t_ctx, 'pod_1', 0, True, ids)
- self._prepare_chain_group_assoc_test(t_pc1_id, t_ppg1_id)
- self._prepare_chain_classifier_assoc_test(t_pc1_id, t_fc1_id)
-
- pp_body = {'port_pair': {
- 'name': 'new_name',
- 'description': 'new_pp_description'}}
- fake_sfc_plugin.update_port_pair(q_ctx, t_pp1_id, pp_body)
- self.assertEqual(TOP_PORTPAIRS[0]['description'], 'new_pp_description')
- self.assertEqual(TOP_PORTPAIRS[0]['name'], 'new_name')
- self.assertEqual(BOTTOM1_PORTPAIRS[0]['description'],
- 'new_pp_description')
- self.assertEqual(BOTTOM1_PORTPAIRS[0]['name'], 'new_name')
-
- fc_body = {'flow_classifier': {
- 'name': 'new_name',
- 'description': 'new_fc_description'}}
- fake_fc_plugin.update_flow_classifier(q_ctx, t_fc1_id, fc_body)
- self.assertEqual(TOP_FLOWCLASSIFIERS[0]['name'], 'new_name')
- self.assertEqual(TOP_FLOWCLASSIFIERS[0]['description'],
- 'new_fc_description')
- self.assertEqual(BOTTOM1_FLOWCLASSIFIERS[0]['name'], 'new_name')
- self.assertEqual(BOTTOM1_FLOWCLASSIFIERS[0]['description'],
- 'new_fc_description')
-
- ingress2 = self._prepare_port_test(project_id, t_ctx,
- 'pod_1', t_net_id)
- egress2 = self._prepare_port_test(project_id, t_ctx, 'pod_1', t_net_id)
- t_pp2_id, b_pp2_id = self._prepare_port_pair_test(
- project_id, t_ctx, 'pod_1', 0, ingress2, egress2, True)
- ppg_body = {'port_pair_group': {
- 'name': 'new_name',
- 'description': 'new_ppg_description',
- 'port_pairs': [t_pp1_id, t_pp2_id]}}
- ppg_mapping = {t_pp2_id: t_ppg1_id}
- self._update_port_pair_test(ppg_mapping, TOP_PORTPAIRS)
-
- fake_sfc_plugin.update_port_pair_group(q_ctx, t_ppg1_id, ppg_body)
- self.assertEqual(TOP_PORTPAIRGROUPS[0]['name'], 'new_name')
- self.assertEqual(TOP_PORTPAIRGROUPS[0]['description'],
- 'new_ppg_description')
- self.assertEqual(TOP_PORTPAIRGROUPS[0]['port_pairs'],
- [t_pp1_id, t_pp2_id])
- self.assertEqual(BOTTOM1_PORTPAIRGROUPS[0]['name'], 'new_name')
- self.assertEqual(BOTTOM1_PORTPAIRGROUPS[0]['description'],
- 'new_ppg_description')
- self.assertEqual(BOTTOM1_PORTPAIRGROUPS[0]['port_pairs'],
- [b_pp1_id, b_pp2_id])
- t_ppg2_id, b_ppg2_id = self._prepare_port_pair_group_test(
- project_id, t_ctx, 'pod_1', 0,
- [], True, [])
- t_fc2_id, b_fc2_id = self._prepare_flow_classifier_test(
- project_id, t_ctx, 'pod_1', 0, src_port_id, True)
- self._prepare_chain_group_assoc_test(t_pc1_id, t_ppg2_id)
- self._prepare_chain_classifier_assoc_test(t_pc1_id, t_fc2_id)
- pc_body = {'port_chain': {
- 'name': 'new_name',
- 'description': 'new_pc_description',
- 'port_pair_groups': [t_ppg1_id, t_ppg2_id],
- 'flow_classifiers': [t_fc1_id, t_fc2_id]}}
-
- fake_sfc_plugin.update_port_chain(q_ctx, t_pc1_id, pc_body)
- self.assertEqual(TOP_PORTCHAINS[0]['name'], 'new_name')
- self.assertEqual(TOP_PORTCHAINS[0]['description'],
- 'new_pc_description')
- self.assertEqual(TOP_PORTCHAINS[0]['port_pair_groups'],
- [t_ppg1_id, t_ppg2_id])
- self.assertEqual(TOP_PORTCHAINS[0]['flow_classifiers'],
- [t_fc1_id, t_fc2_id])
- self.assertEqual(BOTTOM1_PORTCHAINS[0]['name'], 'new_name')
- self.assertEqual(BOTTOM1_PORTCHAINS[0]['description'],
- 'new_pc_description')
- self.assertEqual(BOTTOM1_PORTCHAINS[0]['port_pair_groups'],
- [b_ppg1_id, b_ppg2_id])
- self.assertEqual(BOTTOM1_PORTCHAINS[0]['flow_classifiers'],
- [b_fc1_id, b_fc2_id])
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- test_utils.get_resource_store().clean()
- cfg.CONF.unregister_opts(q_config.core_opts)
- xmanager.IN_TEST = False
diff --git a/tricircle/tests/unit/network/test_central_trunk_driver.py b/tricircle/tests/unit/network/test_central_trunk_driver.py
deleted file mode 100644
index 0484f29b..00000000
--- a/tricircle/tests/unit/network/test_central_trunk_driver.py
+++ /dev/null
@@ -1,682 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from mock import patch
-import six
-import unittest
-
-from six.moves import xrange
-
-import neutron.conf.common as q_config
-from neutron.db import db_base_plugin_v2
-from neutron_lib.api.definitions import portbindings
-from neutron_lib.plugins import directory
-from neutron_lib.plugins import utils
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from tricircle.common import client
-from tricircle.common import constants
-from tricircle.common import context
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.network import central_plugin
-import tricircle.network.central_trunk_driver as trunk_driver
-from tricircle.network import helper
-import tricircle.tests.unit.utils as test_utils
-from tricircle.xjob import xmanager
-
-
-_resource_store = test_utils.get_resource_store()
-TOP_TRUNKS = _resource_store.TOP_TRUNKS
-TOP_SUBPORTS = _resource_store.TOP_SUBPORTS
-TOP_PORTS = _resource_store.TOP_PORTS
-BOTTOM1_TRUNKS = _resource_store.BOTTOM1_TRUNKS
-BOTTOM2_TRUNKS = _resource_store.BOTTOM2_TRUNKS
-BOTTOM1_SUBPORTS = _resource_store.BOTTOM1_SUBPORTS
-BOTTOM2_SUBPORTS = _resource_store.BOTTOM2_SUBPORTS
-BOTTOM1_PORTS = _resource_store.BOTTOM1_PORTS
-BOTTOM2_PORTS = _resource_store.BOTTOM2_PORTS
-TEST_TENANT_ID = test_utils.TEST_TENANT_ID
-
-
-class FakeBaseXManager(xmanager.XManager):
- def __init__(self):
- self.clients = {constants.TOP: client.Client()}
-
- def _get_client(self, region_name=None):
- return FakeClient(region_name)
-
-
-class FakeXManager(FakeBaseXManager):
- def __init__(self, fake_plugin):
- super(FakeXManager, self).__init__()
- self.xjob_handler = FakeBaseRPCAPI(fake_plugin)
- self.helper = helper.NetworkHelper()
-
-
-class FakeBaseRPCAPI(object):
- def __init__(self, fake_plugin):
- self.xmanager = FakeBaseXManager()
-
- def sync_trunk(self, ctxt, project_id, trunk_id, pod_id):
- combine_id = '%s#%s' % (pod_id, trunk_id)
- self.xmanager.sync_trunk(
- ctxt, payload={constants.JT_TRUNK_SYNC: combine_id})
-
- def configure_security_group_rules(self, ctxt, project_id):
- pass
-
-
-class FakeRPCAPI(FakeBaseRPCAPI):
- def __init__(self, fake_plugin):
- self.xmanager = FakeXManager(fake_plugin)
-
-
-class FakeNeutronClient(test_utils.FakeNeutronClient):
- _resource = 'trunk'
- trunks_path = ''
-
-
-class FakeClient(test_utils.FakeClient):
- def __init__(self, region_name=None):
- super(FakeClient, self).__init__(region_name)
- self.client = FakeNeutronClient(self.region_name)
-
- def get_native_client(self, resource, ctx):
- return self.client
-
- def get_trunks(self, ctx, trunk_id):
- return self.get_resource(constants.RT_TRUNK, ctx, trunk_id)
-
- def update_trunks(self, context, trunk_id, trunk):
- self.update_resources(constants.RT_TRUNK, context, trunk_id, trunk)
-
- def delete_trunks(self, context, trunk_id):
- self.delete_resources(constants.RT_TRUNK, context, trunk_id)
-
- def action_trunks(self, ctx, action, resource_id, body):
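-        # emulate the trunk add_subports/remove_subports actions against the
-        # in-memory bottom trunk store of this region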
- if self.region_name == 'pod_1':
- btm_trunks = BOTTOM1_TRUNKS
- else:
- btm_trunks = BOTTOM2_TRUNKS
-
- for trunk in btm_trunks:
- if trunk['id'] == resource_id:
- subports = body['sub_ports']
- if action == 'add_subports':
- for subport in subports:
- subport['trunk_id'] = resource_id
- trunk['sub_ports'].extend(subports)
- return
- elif action == 'remove_subports':
- for subport in subports:
- for b_subport in trunk['sub_ports']:
- if subport['port_id'] == b_subport['port_id']:
- trunk['sub_ports'].remove(b_subport)
- return
-
- def list_trunks(self, ctx, filters=None):
- filter_dict = {}
- filters = filters or []
- for query_filter in filters:
- key = query_filter['key']
- # when querying trunks, "fields" is passed in the query string to
- # ask the server to only return necessary fields, which can reduce
-            # the data being transferred. In the tests we just return all the
-            # fields since there is no need to optimize
- if key != 'fields':
- value = query_filter['value']
- filter_dict[key] = value
- return self.client.get('', filter_dict)['trunks']
-
- def get_ports(self, ctx, port_id):
- pass
-
- def list_ports(self, ctx, filters=None):
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- _filters = {}
- for f in filters:
- _filters[f['key']] = [f['value']]
- return fake_plugin.get_trunk_subports(q_ctx, _filters)
-
- def create_ports(self, ctx, body):
- if 'ports' in body:
- ret = []
- for port in body['ports']:
- p = self.create_resources('port', ctx, {'port': port})
- p['id'] = p['device_id']
- ret.append(p)
- return ret
- return self.create_resources('port', ctx, body)
-
-
-class FakeNeutronContext(test_utils.FakeNeutronContext):
- def session_class(self):
- return FakeSession
-
-
-class FakeSession(test_utils.FakeSession):
- def add_hook(self, model_obj, model_dict):
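-        # when a subport row is added, also append it to the owning top
-        # trunk's sub_ports list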
- if model_obj.__tablename__ == 'subports':
- for top_trunk in TOP_TRUNKS:
- if top_trunk['id'] == model_dict['trunk_id']:
- top_trunk['sub_ports'].append(model_dict)
-
- def delete_top_subport(self, port_id):
- for res_list in self.resource_store.store_map.values():
- for res in res_list:
- sub_ports = res.get('sub_ports')
- if sub_ports:
- for sub_port in sub_ports:
- if sub_port['port_id'] == port_id:
- sub_ports.remove(sub_port)
-
- def delete_hook(self, model_obj):
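-        # when a subport row is deleted, drop it from its parent trunk's
-        # sub_ports as well; return the key used to locate the row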
- if model_obj.get('segmentation_type'):
- self.delete_top_subport(model_obj['port_id'])
- return 'port_id'
-
-
-class FakePlugin(trunk_driver.TricircleTrunkDriver):
- def __init__(self):
- self._segmentation_types = {'vlan': utils.is_valid_vlan_tag}
- self.xjob_handler = FakeRPCAPI(self)
- self.helper = helper.NetworkHelper(self)
-
- def _get_client(self, region_name):
- return FakeClient(region_name)
-
-
-def fake_get_context_from_neutron_context(q_context):
- ctx = context.get_db_context()
- return ctx
-
-
-def fake_get_min_search_step(self):
- return 2
-
-
-class FakeCorePlugin(central_plugin.TricirclePlugin):
- def __init__(self):
- self.type_manager = test_utils.FakeTypeManager()
-
- def get_port(self, context, port_id):
- return {portbindings.HOST_ID: None,
- 'device_id': None}
-
- def get_ports(self, ctx, filters):
- top_client = FakeClient()
- _filters = []
- for key, values in six.iteritems(filters):
- for v in values:
- _filters.append({'key': key, 'comparator': 'eq', 'value': v})
- return top_client.list_resources('port', ctx, _filters)
-
- def update_port(self, context, id, port):
- port_body = port['port']
- for _port in TOP_PORTS:
- if _port['id'] == id:
- for key, value in six.iteritems(port_body):
- _port[key] = value
-
-
-class PluginTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- self.context = context.Context()
- cfg.CONF.set_override('tenant_network_types', ['local', 'vlan'],
- group='tricircle')
- cfg.CONF.set_override('bridge_network_type', 'vlan',
- group='tricircle')
- setattr(cfg.CONF, 'setproctitle', 'central-trunk-driver')
- xmanager.IN_TEST = True
-
- def fake_get_plugin(alias='core'):
- if alias == 'trunk':
- return FakePlugin()
- return FakeCorePlugin()
- directory.get_plugin = fake_get_plugin
-
- def _basic_pod_setup(self):
- pod1 = {'pod_id': 'pod_id_1',
- 'region_name': 'pod_1',
- 'az_name': 'az_name_1'}
- pod2 = {'pod_id': 'pod_id_2',
- 'region_name': 'pod_2',
- 'az_name': 'az_name_2'}
- pod3 = {'pod_id': 'pod_id_0',
- 'region_name': 'top_pod',
- 'az_name': ''}
- for pod in (pod1, pod2, pod3):
- db_api.create_pod(self.context, pod)
-
- def _prepare_port_test(self, tenant_id, ctx, pod_name, index,
- device_owner='compute:None', create_bottom=True):
- t_port_id = uuidutils.generate_uuid()
- t_subnet_id = uuidutils.generate_uuid()
- t_net_id = uuidutils.generate_uuid()
-
- t_port = {
- 'id': t_port_id,
- 'name': 'top_port_%d' % index,
- 'description': 'old_top_description',
- 'extra_dhcp_opts': [],
- 'device_owner': device_owner,
- 'security_groups': [],
- 'device_id': '68f46ee4-d66a-4c39-bb34-ac2e5eb85470',
- 'admin_state_up': True,
- 'network_id': t_net_id,
- 'tenant_id': tenant_id,
- 'mac_address': 'fa:16:3e:cd:76:4%s' % index,
- 'project_id': 'tenant_id',
- 'binding:host_id': 'zhiyuan-5',
- 'status': 'ACTIVE',
- 'fixed_ips': [{'subnet_id': t_subnet_id}]
- }
- TOP_PORTS.append(test_utils.DotDict(t_port))
-
- if create_bottom:
- b_port = {
- 'id': t_port_id,
- 'name': t_port_id,
- 'description': 'old_bottom_description',
- 'security_groups': [],
- 'device_id': '68f46ee4-d66a-4c39-bb34-ac2e5eb85470',
- 'admin_state_up': True,
- 'network_id': t_net_id,
- 'tenant_id': tenant_id,
- 'device_owner': 'compute:None',
- 'extra_dhcp_opts': [],
- 'mac_address': 'fa:16:3e:cd:76:40',
- 'project_id': 'tenant_id',
- 'binding:host_id': 'zhiyuan-5',
- 'status': 'ACTIVE',
- 'fixed_ips': [{'subnet_id': t_subnet_id}]
- }
- if pod_name == 'pod_1':
- BOTTOM1_PORTS.append(test_utils.DotDict(b_port))
- else:
- BOTTOM2_PORTS.append(test_utils.DotDict(b_port))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_port_id,
- 'bottom_id': t_port_id,
- 'pod_id': pod_id,
- 'project_id': tenant_id,
- 'resource_type': constants.RT_PORT})
-
- return t_port_id
-
- def _prepare_trunk_test(self, project_id, ctx, pod_name, index,
- is_create_bottom, t_uuid=None, b_uuid=None):
- t_trunk_id = t_uuid or uuidutils.generate_uuid()
- b_trunk_id = b_uuid or uuidutils.generate_uuid()
- t_parent_port_id = uuidutils.generate_uuid()
- t_sub_port_id = self._prepare_port_test(
- project_id, ctx, pod_name, index, create_bottom=is_create_bottom)
-
- t_subport = {
- 'segmentation_type': 'vlan',
- 'port_id': t_sub_port_id,
- 'segmentation_id': 164,
- 'trunk_id': t_trunk_id}
-
- t_trunk = {
- 'id': t_trunk_id,
- 'name': 'top_trunk_%d' % index,
- 'status': 'DOWN',
- 'description': 'created',
- 'admin_state_up': True,
- 'port_id': t_parent_port_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'sub_ports': [t_subport]
- }
- TOP_TRUNKS.append(test_utils.DotDict(t_trunk))
- TOP_SUBPORTS.append(test_utils.DotDict(t_subport))
-
- b_trunk = None
- if is_create_bottom:
- b_subport = {
- 'segmentation_type': 'vlan',
- 'port_id': t_sub_port_id,
- 'segmentation_id': 164,
- 'trunk_id': b_trunk_id}
-
- b_trunk = {
- 'id': b_trunk_id,
- 'name': 'top_trunk_%d' % index,
- 'status': 'UP',
- 'description': 'created',
- 'admin_state_up': True,
- 'port_id': t_parent_port_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'sub_ports': [b_subport]
- }
-
- if pod_name == 'pod_1':
- BOTTOM1_SUBPORTS.append(test_utils.DotDict(t_subport))
- BOTTOM1_TRUNKS.append(test_utils.DotDict(b_trunk))
- else:
- BOTTOM2_SUBPORTS.append(test_utils.DotDict(t_subport))
- BOTTOM2_TRUNKS.append(test_utils.DotDict(b_trunk))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_trunk_id,
- 'bottom_id': b_trunk_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_TRUNK})
-
- return t_trunk, b_trunk
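# A minimal sketch (an assumed helper, not defined in the original file) of
# how the ResourceRouting rows created above map a top trunk to its bottom
# copy; test_delete_trunk below runs essentially the same query to verify
# that the mapping is removed together with the trunk.
def _lookup_bottom_trunk_id(t_ctx, top_trunk_id):
    routes = core.query_resource(
        t_ctx, models.ResourceRouting,
        [{'key': 'top_id', 'comparator': 'eq', 'value': top_trunk_id},
         {'key': 'resource_type', 'comparator': 'eq',
          'value': constants.RT_TRUNK}], [])
    return routes[0]['bottom_id'] if routes else None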
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_get_trunk(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
- res = fake_plugin.get_trunk(q_ctx, t_trunk['id'])
- t_trunk['status'] = b_trunk['status']
- t_trunk['sub_ports'][0].pop('trunk_id')
- six.assertCountEqual(self, t_trunk, res)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_get_trunks(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk1, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
- t_trunk2, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 2, True)
- t_trunk3, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_2', 3, True)
- t_trunk4, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_2', 4, True)
- t_trunk5, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 5, False)
- t_trunk6, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 6, False)
- res = fake_plugin.get_trunks(q_ctx)
- self.assertEqual(len(res), 6)
-
- res = fake_plugin.get_trunks(
- q_ctx, filters={'id': [t_trunk1['id']]}, limit=3)
-
- t_trunk1['status'] = 'UP'
- res[0]['sub_ports'][0]['trunk_id'] = t_trunk1['id']
- six.assertCountEqual(self, [t_trunk1], res)
-
- res = fake_plugin.get_trunks(q_ctx, filters={'id': [t_trunk5['id']]})
- t_trunk5['sub_ports'][0].pop('trunk_id')
- six.assertCountEqual(self, [t_trunk5], res)
-
- trunks = fake_plugin.get_trunks(q_ctx,
- filters={'status': ['UP'],
- 'description': ['created']})
- self.assertEqual(len(trunks), 4)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- @patch.object(FakePlugin, '_get_min_search_step',
- new=fake_get_min_search_step)
- def test_get_trunks_pagination(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk1, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_1', 1, True,
- '101779d0-e30e-495a-ba71-6265a1669701',
- '1b1779d0-e30e-495a-ba71-6265a1669701')
- t_trunk2, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_1', 2, True,
- '201779d0-e30e-495a-ba71-6265a1669701',
- '2b1779d0-e30e-495a-ba71-6265a1669701')
- t_trunk3, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 3, True,
- '301779d0-e30e-495a-ba71-6265a1669701',
- '3b1779d0-e30e-495a-ba71-6265a1669701')
- t_trunk4, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 4, True,
- '401779d0-e30e-495a-ba71-6265a1669701',
- '4b1779d0-e30e-495a-ba71-6265a1669701')
- t_trunk5, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 5, False,
- '501779d0-e30e-495a-ba71-6265a1669701')
- t_trunk6, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 6, False,
- '601779d0-e30e-495a-ba71-6265a1669701')
- t_trunk7, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 7, False,
- '701779d0-e30e-495a-ba71-6265a1669701')
-
- # limit no marker
- res = fake_plugin.get_trunks(q_ctx, limit=3)
- res_trunk_ids = [trunk['id'] for trunk in res]
- except_trunk_ids = [t_trunk1['id'], t_trunk2['id'], t_trunk3['id']]
- self.assertEqual(res_trunk_ids, except_trunk_ids)
-
- # limit and top pod's marker
- res = fake_plugin.get_trunks(q_ctx, limit=3, marker=t_trunk5['id'])
- res_trunk_ids = [trunk['id'] for trunk in res]
- except_trunk_ids = [t_trunk6['id'], t_trunk7['id']]
- self.assertEqual(res_trunk_ids, except_trunk_ids)
-
- # limit and bottom pod's marker
- res = fake_plugin.get_trunks(q_ctx, limit=6, marker=t_trunk1['id'])
- res_trunk_ids = [trunk['id'] for trunk in res]
- except_trunk_ids = [t_trunk2['id'], t_trunk3['id'], t_trunk4['id'],
- t_trunk5['id'], t_trunk6['id'], t_trunk7['id']]
- self.assertEqual(res_trunk_ids, except_trunk_ids)
-
- # limit and bottom pod's marker and filters
- res = fake_plugin.get_trunks(q_ctx, limit=6, marker=t_trunk1['id'],
- filters={'status': ['UP']})
- res_trunk_ids = [trunk['id'] for trunk in res]
- except_trunk_ids = [t_trunk2['id'], t_trunk3['id'], t_trunk4['id']]
- self.assertEqual(res_trunk_ids, except_trunk_ids)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_update_trunk(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
- update_body = {'trunk': {
- 'name': 'new_name',
- 'description': 'updated',
- 'admin_state_up': False}
- }
- updated_top_trunk = fake_plugin.update_trunk(q_ctx, t_trunk['id'],
- update_body)
- self.assertEqual(updated_top_trunk['name'], 'new_name')
- self.assertEqual(updated_top_trunk['description'], 'updated')
- self.assertFalse(updated_top_trunk['admin_state_up'])
-
- updated_btm_trunk = fake_plugin.get_trunk(q_ctx, t_trunk['id'])
- self.assertEqual(updated_btm_trunk['name'], 'new_name')
- self.assertEqual(updated_btm_trunk['description'], 'updated')
- self.assertFalse(updated_btm_trunk['admin_state_up'])
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_delete_trunk(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
-
- fake_plugin.delete_trunk(q_ctx, t_trunk['id'])
- self.assertEqual(len(TOP_TRUNKS), 0)
- self.assertEqual(len(BOTTOM1_TRUNKS), 0)
- route_filters = [{'key': 'top_id',
- 'comparator': 'eq',
- 'value': t_trunk['id']}]
- routes = core.query_resource(t_ctx, models.ResourceRouting,
- route_filters, [])
- self.assertEqual(len(routes), 0)
-
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports',
- new=FakeCorePlugin.get_ports)
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'update_port',
- new=FakeCorePlugin.update_port)
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_action_subports(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
-
- add_subport_id1 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 1, create_bottom=False)
- add_subport_id2 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 2, create_bottom=False)
- add_subport_id3 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 3, create_bottom=False)
- add_subport_id4 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 4, create_bottom=False)
- add_subport_id5 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 5, create_bottom=False)
- add_subport_id6 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 6, create_bottom=True)
- add_subport_id7 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 7, create_bottom=True)
- add_subport_id8 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 8, create_bottom=False)
- add_subport_id9 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 9, create_bottom=False)
-
- # Avoid warning: assigned to but never used
- ids = [add_subport_id1, add_subport_id2, add_subport_id3,
- add_subport_id4, add_subport_id5, add_subport_id6,
- add_subport_id7, add_subport_id8, add_subport_id9]
- ids.sort()
-
- remove_subports = {'segmentation_type': 'vlan',
- 'port_id': uuidutils.generate_uuid(),
- 'segmentation_id': 165}
- b_trunk['sub_ports'].append(remove_subports)
-
- add_subports = []
- for _id in xrange(1, 10):
- port_id = eval("add_subport_id%d" % _id)
- subport = {
- 'segmentation_type': 'vlan',
- 'port_id': port_id,
- 'segmentation_id': _id}
- add_subports.append(subport)
-
- fake_plugin.add_subports(q_ctx, t_trunk['id'],
- {'sub_ports': add_subports})
-
- top_subports = TOP_TRUNKS[0]['sub_ports']
- btm_subports = BOTTOM1_TRUNKS[0]['sub_ports']
-
- except_btm_subports = []
- for subport in b_trunk['sub_ports']:
- if subport['segmentation_id'] == 164:
- except_btm_subports.extend([subport])
- for subport in add_subports:
- subport['trunk_id'] = b_trunk['id']
- except_btm_subports.extend(add_subports)
- six.assertCountEqual(self, btm_subports, except_btm_subports)
-
- except_top_subports = []
- for subport in t_trunk['sub_ports']:
- if subport['segmentation_id'] == 164:
- except_top_subports.extend([subport])
- for subport in add_subports:
- subport['trunk_id'] = t_trunk['id']
- except_top_subports.extend(add_subports)
- six.assertCountEqual(self, top_subports, except_top_subports)
-
- self.assertEqual(len(BOTTOM1_PORTS), 10)
- map_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': constants.RT_PORT},
- {'key': 'project_id',
- 'comparator': 'eq',
- 'value': project_id}]
-
- port_mappings = db_api.list_resource_routings(t_ctx, map_filters)
- self.assertEqual(len(port_mappings), 10)
-
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'update_port',
- new=FakeCorePlugin.update_port)
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_remove_subports(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
- subport_id = t_trunk['sub_ports'][0]['port_id']
-
- remove_subport = {'sub_ports': [{'port_id': subport_id}]}
- fake_plugin.remove_subports(q_ctx, t_trunk['id'], remove_subport)
-
- top_subports = TOP_TRUNKS[0]['sub_ports']
- btm_subports = BOTTOM1_TRUNKS[0]['sub_ports']
- self.assertEqual(len(top_subports), 0)
- self.assertEqual(len(btm_subports), 0)
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- test_utils.get_resource_store().clean()
- cfg.CONF.unregister_opts(q_config.core_opts)
- xmanager.IN_TEST = False
diff --git a/tricircle/tests/unit/network/test_central_trunk_plugin.py b/tricircle/tests/unit/network/test_central_trunk_plugin.py
deleted file mode 100644
index 6d638b45..00000000
--- a/tricircle/tests/unit/network/test_central_trunk_plugin.py
+++ /dev/null
@@ -1,682 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from mock import patch
-import six
-import unittest
-
-from six.moves import xrange
-
-import neutron.conf.common as q_config
-from neutron.db import db_base_plugin_v2
-from neutron_lib.api.definitions import portbindings
-from neutron_lib.plugins import directory
-from neutron_lib.plugins import utils
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from tricircle.common import client
-from tricircle.common import constants
-from tricircle.common import context
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.network import central_plugin
-import tricircle.network.central_trunk_plugin as trunk_plugin
-from tricircle.network import helper
-import tricircle.tests.unit.utils as test_utils
-from tricircle.xjob import xmanager
-
-
-_resource_store = test_utils.get_resource_store()
-TOP_TRUNKS = _resource_store.TOP_TRUNKS
-TOP_SUBPORTS = _resource_store.TOP_SUBPORTS
-TOP_PORTS = _resource_store.TOP_PORTS
-BOTTOM1_TRUNKS = _resource_store.BOTTOM1_TRUNKS
-BOTTOM2_TRUNKS = _resource_store.BOTTOM2_TRUNKS
-BOTTOM1_SUBPORTS = _resource_store.BOTTOM1_SUBPORTS
-BOTTOM2_SUBPORTS = _resource_store.BOTTOM2_SUBPORTS
-BOTTOM1_PORTS = _resource_store.BOTTOM1_PORTS
-BOTTOM2_PORTS = _resource_store.BOTTOM2_PORTS
-TEST_TENANT_ID = test_utils.TEST_TENANT_ID
-
-
-class FakeBaseXManager(xmanager.XManager):
- def __init__(self):
- self.clients = {constants.TOP: client.Client()}
-
- def _get_client(self, region_name=None):
- return FakeClient(region_name)
-
-
-class FakeXManager(FakeBaseXManager):
- def __init__(self, fake_plugin):
- super(FakeXManager, self).__init__()
- self.xjob_handler = FakeBaseRPCAPI(fake_plugin)
- self.helper = helper.NetworkHelper()
-
-
-class FakeBaseRPCAPI(object):
- def __init__(self, fake_plugin):
- self.xmanager = FakeBaseXManager()
-
- def sync_trunk(self, ctxt, project_id, trunk_id, pod_id):
- combine_id = '%s#%s' % (pod_id, trunk_id)
- self.xmanager.sync_trunk(
- ctxt, payload={constants.JT_TRUNK_SYNC: combine_id})
-
- def configure_security_group_rules(self, ctxt, project_id):
- pass
-
-
-class FakeRPCAPI(FakeBaseRPCAPI):
- def __init__(self, fake_plugin):
- self.xmanager = FakeXManager(fake_plugin)
-
-
-class FakeNeutronClient(test_utils.FakeNeutronClient):
- _resource = 'trunk'
- trunks_path = ''
-
-
-class FakeClient(test_utils.FakeClient):
- def __init__(self, region_name=None):
- super(FakeClient, self).__init__(region_name)
- self.client = FakeNeutronClient(self.region_name)
-
- def get_native_client(self, resource, ctx):
- return self.client
-
- def get_trunks(self, ctx, trunk_id):
- return self.get_resource(constants.RT_TRUNK, ctx, trunk_id)
-
- def update_trunks(self, context, trunk_id, trunk):
- self.update_resources(constants.RT_TRUNK, context, trunk_id, trunk)
-
- def delete_trunks(self, context, trunk_id):
- self.delete_resources(constants.RT_TRUNK, context, trunk_id)
-
- def action_trunks(self, ctx, action, resource_id, body):
- if self.region_name == 'pod_1':
- btm_trunks = BOTTOM1_TRUNKS
- else:
- btm_trunks = BOTTOM2_TRUNKS
-
- for trunk in btm_trunks:
- if trunk['id'] == resource_id:
- subports = body['sub_ports']
- if action == 'add_subports':
- for subport in subports:
- subport['trunk_id'] = resource_id
- trunk['sub_ports'].extend(subports)
- return
- elif action == 'remove_subports':
- for subport in subports:
- for b_subport in trunk['sub_ports']:
- if subport['port_id'] == b_subport['port_id']:
- trunk['sub_ports'].remove(b_subport)
- return
-
- def list_trunks(self, ctx, filters=None):
- filter_dict = {}
- filters = filters or []
- for query_filter in filters:
- key = query_filter['key']
- # when querying trunks, "fields" is passed in the query string to ask
- # the server to return only the necessary fields, which reduces the
- # amount of data transferred. In these tests we simply return all
- # fields, since there is no need to optimize.
- if key != 'fields':
- value = query_filter['value']
- filter_dict[key] = value
- return self.client.get('', filter_dict)['trunks']
-
- def get_ports(self, ctx, port_id):
- pass
-
- def list_ports(self, ctx, filters=None):
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- _filters = {}
- for f in filters:
- _filters[f['key']] = [f['value']]
- return fake_plugin.get_trunk_subports(q_ctx, _filters)
-
- def create_ports(self, ctx, body):
- if 'ports' in body:
- ret = []
- for port in body['ports']:
- p = self.create_resources('port', ctx, {'port': port})
- p['id'] = p['device_id']
- ret.append(p)
- return ret
- return self.create_resources('port', ctx, body)
-
-
-class FakeNeutronContext(test_utils.FakeNeutronContext):
- def session_class(self):
- return FakeSession
-
-
-class FakeSession(test_utils.FakeSession):
- def add_hook(self, model_obj, model_dict):
- if model_obj.__tablename__ == 'subports':
- for top_trunk in TOP_TRUNKS:
- if top_trunk['id'] == model_dict['trunk_id']:
- top_trunk['sub_ports'].append(model_dict)
-
- def delete_top_subport(self, port_id):
- for res_list in self.resource_store.store_map.values():
- for res in res_list:
- sub_ports = res.get('sub_ports')
- if sub_ports:
- for sub_port in sub_ports:
- if sub_port['port_id'] == port_id:
- sub_ports.remove(sub_port)
-
- def delete_hook(self, model_obj):
- if model_obj.get('segmentation_type'):
- self.delete_top_subport(model_obj['port_id'])
- return 'port_id'
-
-
-class FakePlugin(trunk_plugin.TricircleTrunkPlugin):
- def __init__(self):
- self._segmentation_types = {'vlan': utils.is_valid_vlan_tag}
- self.xjob_handler = FakeRPCAPI(self)
- self.helper = helper.NetworkHelper(self)
-
- def _get_client(self, region_name):
- return FakeClient(region_name)
-
-
-def fake_get_context_from_neutron_context(q_context):
- ctx = context.get_db_context()
- return ctx
-
-
-def fake_get_min_search_step(self):
- return 2
-
-
-class FakeCorePlugin(central_plugin.TricirclePlugin):
- def __init__(self):
- self.type_manager = test_utils.FakeTypeManager()
-
- def get_port(self, context, port_id):
- return {portbindings.HOST_ID: None,
- 'device_id': None}
-
- def get_ports(self, ctx, filters):
- top_client = FakeClient()
- _filters = []
- for key, values in six.iteritems(filters):
- for v in values:
- _filters.append({'key': key, 'comparator': 'eq', 'value': v})
- return top_client.list_resources('port', ctx, _filters)
-
- def update_port(self, context, id, port):
- port_body = port['port']
- for _port in TOP_PORTS:
- if _port['id'] == id:
- for key, value in six.iteritems(port_body):
- _port[key] = value
-
-
-class PluginTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- cfg.CONF.register_opts(q_config.core_opts)
- self.context = context.Context()
- cfg.CONF.set_override('tenant_network_types', ['local', 'vlan'],
- group='tricircle')
- cfg.CONF.set_override('bridge_network_type', 'vlan',
- group='tricircle')
- xmanager.IN_TEST = True
-
- def fake_get_plugin(alias='core'):
- if alias == 'trunk':
- return FakePlugin()
- return FakeCorePlugin()
- directory.get_plugin = fake_get_plugin
-
- def _basic_pod_setup(self):
- pod1 = {'pod_id': 'pod_id_1',
- 'region_name': 'pod_1',
- 'az_name': 'az_name_1'}
- pod2 = {'pod_id': 'pod_id_2',
- 'region_name': 'pod_2',
- 'az_name': 'az_name_2'}
- pod3 = {'pod_id': 'pod_id_0',
- 'region_name': 'top_pod',
- 'az_name': ''}
- for pod in (pod1, pod2, pod3):
- db_api.create_pod(self.context, pod)
-
- def _prepare_port_test(self, tenant_id, ctx, pod_name, index,
- device_owner='compute:None', create_bottom=True):
- t_port_id = uuidutils.generate_uuid()
- t_subnet_id = uuidutils.generate_uuid()
- t_net_id = uuidutils.generate_uuid()
-
- t_port = {
- 'id': t_port_id,
- 'name': 'top_port_%d' % index,
- 'description': 'old_top_description',
- 'extra_dhcp_opts': [],
- 'device_owner': device_owner,
- 'security_groups': [],
- 'device_id': '68f46ee4-d66a-4c39-bb34-ac2e5eb85470',
- 'admin_state_up': True,
- 'network_id': t_net_id,
- 'tenant_id': tenant_id,
- 'mac_address': 'fa:16:3e:cd:76:4%s' % index,
- 'project_id': 'tenant_id',
- 'binding:host_id': 'zhiyuan-5',
- 'status': 'ACTIVE',
- 'fixed_ips': [{'subnet_id': t_subnet_id}]
- }
- TOP_PORTS.append(test_utils.DotDict(t_port))
-
- if create_bottom:
- b_port = {
- 'id': t_port_id,
- 'name': t_port_id,
- 'description': 'old_bottom_description',
- 'security_groups': [],
- 'device_id': '68f46ee4-d66a-4c39-bb34-ac2e5eb85470',
- 'admin_state_up': True,
- 'network_id': t_net_id,
- 'tenant_id': tenant_id,
- 'device_owner': 'compute:None',
- 'extra_dhcp_opts': [],
- 'mac_address': 'fa:16:3e:cd:76:40',
- 'project_id': 'tenant_id',
- 'binding:host_id': 'zhiyuan-5',
- 'status': 'ACTIVE',
- 'fixed_ips': [{'subnet_id': t_subnet_id}]
- }
- if pod_name == 'pod_1':
- BOTTOM1_PORTS.append(test_utils.DotDict(b_port))
- else:
- BOTTOM2_PORTS.append(test_utils.DotDict(b_port))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_port_id,
- 'bottom_id': t_port_id,
- 'pod_id': pod_id,
- 'project_id': tenant_id,
- 'resource_type': constants.RT_PORT})
-
- return t_port_id
-
- def _prepare_trunk_test(self, project_id, ctx, pod_name, index,
- is_create_bottom, t_uuid=None, b_uuid=None):
- t_trunk_id = t_uuid or uuidutils.generate_uuid()
- b_trunk_id = b_uuid or uuidutils.generate_uuid()
- t_parent_port_id = uuidutils.generate_uuid()
- t_sub_port_id = self._prepare_port_test(
- project_id, ctx, pod_name, index, create_bottom=is_create_bottom)
-
- t_subport = {
- 'segmentation_type': 'vlan',
- 'port_id': t_sub_port_id,
- 'segmentation_id': 164,
- 'trunk_id': t_trunk_id}
-
- t_trunk = {
- 'id': t_trunk_id,
- 'name': 'top_trunk_%d' % index,
- 'status': 'DOWN',
- 'description': 'created',
- 'admin_state_up': True,
- 'port_id': t_parent_port_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'sub_ports': [t_subport]
- }
- TOP_TRUNKS.append(test_utils.DotDict(t_trunk))
- TOP_SUBPORTS.append(test_utils.DotDict(t_subport))
-
- b_trunk = None
- if is_create_bottom:
- b_subport = {
- 'segmentation_type': 'vlan',
- 'port_id': t_sub_port_id,
- 'segmentation_id': 164,
- 'trunk_id': b_trunk_id}
-
- b_trunk = {
- 'id': b_trunk_id,
- 'name': 'top_trunk_%d' % index,
- 'status': 'UP',
- 'description': 'created',
- 'admin_state_up': True,
- 'port_id': t_parent_port_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'sub_ports': [b_subport]
- }
-
- if pod_name == 'pod_1':
- BOTTOM1_SUBPORTS.append(test_utils.DotDict(t_subport))
- BOTTOM1_TRUNKS.append(test_utils.DotDict(b_trunk))
- else:
- BOTTOM2_SUBPORTS.append(test_utils.DotDict(t_subport))
- BOTTOM2_TRUNKS.append(test_utils.DotDict(b_trunk))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_trunk_id,
- 'bottom_id': b_trunk_id,
- 'pod_id': pod_id,
- 'project_id': project_id,
- 'resource_type': constants.RT_TRUNK})
-
- return t_trunk, b_trunk
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_get_trunk(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
- res = fake_plugin.get_trunk(q_ctx, t_trunk['id'])
- t_trunk['status'] = b_trunk['status']
- t_trunk['sub_ports'][0].pop('trunk_id')
- six.assertCountEqual(self, t_trunk, res)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_get_trunks(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk1, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
- t_trunk2, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 2, True)
- t_trunk3, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_2', 3, True)
- t_trunk4, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_2', 4, True)
- t_trunk5, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 5, False)
- t_trunk6, _ = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 6, False)
- res = fake_plugin.get_trunks(q_ctx)
- self.assertEqual(len(res), 6)
-
- res = fake_plugin.get_trunks(
- q_ctx, filters={'id': [t_trunk1['id']]}, limit=3)
-
- t_trunk1['status'] = 'UP'
- res[0]['sub_ports'][0]['trunk_id'] = t_trunk1['id']
- six.assertCountEqual(self, [t_trunk1], res)
-
- res = fake_plugin.get_trunks(q_ctx, filters={'id': [t_trunk5['id']]})
- t_trunk5['sub_ports'][0].pop('trunk_id')
- six.assertCountEqual(self, [t_trunk5], res)
-
- trunks = fake_plugin.get_trunks(q_ctx,
- filters={'status': ['UP'],
- 'description': ['created']})
- self.assertEqual(len(trunks), 4)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- @patch.object(FakePlugin, '_get_min_search_step',
- new=fake_get_min_search_step)
- def test_get_trunks_pagination(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk1, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_1', 1, True,
- '101779d0-e30e-495a-ba71-6265a1669701',
- '1b1779d0-e30e-495a-ba71-6265a1669701')
- t_trunk2, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_1', 2, True,
- '201779d0-e30e-495a-ba71-6265a1669701',
- '2b1779d0-e30e-495a-ba71-6265a1669701')
- t_trunk3, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 3, True,
- '301779d0-e30e-495a-ba71-6265a1669701',
- '3b1779d0-e30e-495a-ba71-6265a1669701')
- t_trunk4, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 4, True,
- '401779d0-e30e-495a-ba71-6265a1669701',
- '4b1779d0-e30e-495a-ba71-6265a1669701')
- t_trunk5, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 5, False,
- '501779d0-e30e-495a-ba71-6265a1669701')
- t_trunk6, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 6, False,
- '601779d0-e30e-495a-ba71-6265a1669701')
- t_trunk7, _ = self._prepare_trunk_test(
- project_id, t_ctx, 'pod_2', 7, False,
- '701779d0-e30e-495a-ba71-6265a1669701')
-
- # limit no marker
- res = fake_plugin.get_trunks(q_ctx, limit=3)
- res_trunk_ids = [trunk['id'] for trunk in res]
- except_trunk_ids = [t_trunk1['id'], t_trunk2['id'], t_trunk3['id']]
- self.assertEqual(res_trunk_ids, except_trunk_ids)
-
- # limit and top pod's marker
- res = fake_plugin.get_trunks(q_ctx, limit=3, marker=t_trunk5['id'])
- res_trunk_ids = [trunk['id'] for trunk in res]
- except_trunk_ids = [t_trunk6['id'], t_trunk7['id']]
- self.assertEqual(res_trunk_ids, except_trunk_ids)
-
- # limit and bottom pod's marker
- res = fake_plugin.get_trunks(q_ctx, limit=6, marker=t_trunk1['id'])
- res_trunk_ids = [trunk['id'] for trunk in res]
- except_trunk_ids = [t_trunk2['id'], t_trunk3['id'], t_trunk4['id'],
- t_trunk5['id'], t_trunk6['id'], t_trunk7['id']]
- self.assertEqual(res_trunk_ids, except_trunk_ids)
-
- # limit and bottom pod's marker and filters
- res = fake_plugin.get_trunks(q_ctx, limit=6, marker=t_trunk1['id'],
- filters={'status': ['UP']})
- res_trunk_ids = [trunk['id'] for trunk in res]
- except_trunk_ids = [t_trunk2['id'], t_trunk3['id'], t_trunk4['id']]
- self.assertEqual(res_trunk_ids, except_trunk_ids)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_update_trunk(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
- update_body = {'trunk': {
- 'name': 'new_name',
- 'description': 'updated',
- 'admin_state_up': False}
- }
- updated_top_trunk = fake_plugin.update_trunk(q_ctx, t_trunk['id'],
- update_body)
- self.assertEqual(updated_top_trunk['name'], 'new_name')
- self.assertEqual(updated_top_trunk['description'], 'updated')
- self.assertFalse(updated_top_trunk['admin_state_up'])
-
- updated_btm_trunk = fake_plugin.get_trunk(q_ctx, t_trunk['id'])
- self.assertEqual(updated_btm_trunk['name'], 'new_name')
- self.assertEqual(updated_btm_trunk['description'], 'updated')
- self.assertFalse(updated_btm_trunk['admin_state_up'])
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_delete_trunk(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
-
- fake_plugin.delete_trunk(q_ctx, t_trunk['id'])
- self.assertEqual(len(TOP_TRUNKS), 0)
- self.assertEqual(len(BOTTOM1_TRUNKS), 0)
- route_filters = [{'key': 'top_id',
- 'comparator': 'eq',
- 'value': t_trunk['id']}]
- routes = core.query_resource(t_ctx, models.ResourceRouting,
- route_filters, [])
- self.assertEqual(len(routes), 0)
-
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports',
- new=FakeCorePlugin.get_ports)
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'update_port',
- new=FakeCorePlugin.update_port)
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_action_subports(self):
- project_id = TEST_TENANT_ID
- t_ctx = context.get_db_context()
- q_ctx = FakeNeutronContext()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
-
- add_subport_id1 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 1, create_bottom=False)
- add_subport_id2 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 2, create_bottom=False)
- add_subport_id3 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 3, create_bottom=False)
- add_subport_id4 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 4, create_bottom=False)
- add_subport_id5 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 5, create_bottom=False)
- add_subport_id6 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 6, create_bottom=True)
- add_subport_id7 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 7, create_bottom=True)
- add_subport_id8 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 8, create_bottom=False)
- add_subport_id9 = self._prepare_port_test(project_id, t_ctx, 'pod_1',
- 9, create_bottom=False)
-
- # Avoid warning: assigned to but never used
- ids = [add_subport_id1, add_subport_id2, add_subport_id3,
- add_subport_id4, add_subport_id5, add_subport_id6,
- add_subport_id7, add_subport_id8, add_subport_id9]
- ids.sort()
-
- remove_subports = {'segmentation_type': 'vlan',
- 'port_id': uuidutils.generate_uuid(),
- 'segmentation_id': 165}
- b_trunk['sub_ports'].append(remove_subports)
-
- add_subports = []
- for _id in xrange(1, 10):
- port_id = eval("add_subport_id%d" % _id)
- subport = {
- 'segmentation_type': 'vlan',
- 'port_id': port_id,
- 'segmentation_id': _id}
- add_subports.append(subport)
-
- fake_plugin.add_subports(q_ctx, t_trunk['id'],
- {'sub_ports': add_subports})
-
- top_subports = TOP_TRUNKS[0]['sub_ports']
- btm_subports = BOTTOM1_TRUNKS[0]['sub_ports']
-
- except_btm_subports = []
- for subport in b_trunk['sub_ports']:
- if subport['segmentation_id'] == 164:
- except_btm_subports.extend([subport])
- for subport in add_subports:
- subport['trunk_id'] = b_trunk['id']
- except_btm_subports.extend(add_subports)
- six.assertCountEqual(self, btm_subports, except_btm_subports)
-
- except_top_subports = []
- for subport in t_trunk['sub_ports']:
- if subport['segmentation_id'] == 164:
- except_top_subports.extend([subport])
- for subport in add_subports:
- subport['trunk_id'] = t_trunk['id']
- except_top_subports.extend(add_subports)
- six.assertCountEqual(self, top_subports, except_top_subports)
-
- self.assertEqual(len(BOTTOM1_PORTS), 10)
- map_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': constants.RT_PORT},
- {'key': 'project_id',
- 'comparator': 'eq',
- 'value': project_id}]
-
- port_mappings = db_api.list_resource_routings(t_ctx, map_filters)
- self.assertEqual(len(port_mappings), 10)
-
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'update_port',
- new=FakeCorePlugin.update_port)
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- def test_remove_subports(self):
- project_id = TEST_TENANT_ID
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- self._basic_pod_setup()
- fake_plugin = FakePlugin()
-
- t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
- 'pod_1', 1, True)
- subport_id = t_trunk['sub_ports'][0]['port_id']
-
- remove_subport = {'sub_ports': [{'port_id': subport_id}]}
- fake_plugin.remove_subports(q_ctx, t_trunk['id'], remove_subport)
-
- top_subports = TOP_TRUNKS[0]['sub_ports']
- btm_subports = BOTTOM1_TRUNKS[0]['sub_ports']
- self.assertEqual(len(top_subports), 0)
- self.assertEqual(len(btm_subports), 0)
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- test_utils.get_resource_store().clean()
- cfg.CONF.unregister_opts(q_config.core_opts)
- xmanager.IN_TEST = False
diff --git a/tricircle/tests/unit/network/test_helper.py b/tricircle/tests/unit/network/test_helper.py
deleted file mode 100644
index f51caaf7..00000000
--- a/tricircle/tests/unit/network/test_helper.py
+++ /dev/null
@@ -1,505 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from mock import patch
-import six
-import unittest
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-import neutron.conf.common as q_config
-from neutron_lib.api.definitions import portbindings
-import neutron_lib.constants as q_constants
-import neutron_lib.exceptions as q_exceptions
-import neutronclient.common.exceptions as q_cli_exceptions
-
-from tricircle.common import context
-from tricircle.common import exceptions as t_exceptions
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.network import helper
-import tricircle.tests.unit.utils as test_utils
-
-_resource_store = test_utils.get_resource_store()
-TOP_NETS = _resource_store.TOP_NETWORKS
-TOP_SUBNETS = _resource_store.TOP_SUBNETS
-BOTTOM1_NETS = _resource_store.BOTTOM1_NETWORKS
-BOTTOM1_PORTS = _resource_store.BOTTOM1_PORTS
-BOTTOM1_ROUTERS = _resource_store.BOTTOM1_ROUTERS
-
-
-def get_resource_list(_type, is_top):
- pod = 'top' if is_top else 'pod_1'
- return _resource_store.pod_store_map[pod][_type]
-
-
-def get_resource(_type, is_top, resource_id):
- for resource in get_resource_list(_type, is_top):
- if resource['id'] == resource_id:
- return resource
- raise q_exceptions.NotFound()
-
-
-def list_resource(_type, is_top, filters=None):
- resource_list = get_resource_list(_type, is_top)
- if not filters:
- return [resource for resource in get_resource_list(
- _type, is_top)]
- ret = []
- for resource in resource_list:
- pick = True
- for key, value in six.iteritems(filters):
- if resource.get(key) not in value:
- pick = False
- break
- if pick:
- ret.append(resource)
- return ret
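# Usage sketch for the in-memory helpers above (hypothetical data, for
# illustration only; assumes pod_store_map exposes the same lists as the
# module-level aliases). Filters follow Neutron's dict-of-lists convention.
def _resource_helpers_example():
    BOTTOM1_NETS.append(test_utils.DotDict({'id': 'net-1', 'name': 'demo'}))
    assert list_resource('network', False, {'name': ['demo']})
    assert get_resource('network', False, 'net-1')['id'] == 'net-1'
    # looking up an unknown id raises q_exceptions.NotFound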
-
-
-class FakeClient(test_utils.FakeClient):
- def __init__(self, region_name=None):
- super(FakeClient, self).__init__(region_name)
-
- def create_ports(self, context, body):
- for port in body['ports']:
- index = int(port['name'].split('-')[-1])
- if index in (1, 3, 6, 7, 8, 14, 19):
- raise q_cli_exceptions.MacAddressInUseClient(
- message='fa:16:3e:d4:01:%02x' % index)
- port['id'] = port['name'].split('_')[-1]
- return body['ports']
-
- def list_networks(self, ctx, filters=None):
- networks = self.list_resources('network', ctx, filters)
- return networks
-
- def delete_networks(self, ctx, net_id):
- self.delete_resources('network', ctx, net_id)
-
- def list_subnets(self, ctx, filters=None):
- return self.list_resources('subnet', ctx, filters)
-
- def get_subnets(self, ctx, subnet_id):
- return self.get_resource('subnet', ctx, subnet_id)
-
- def delete_subnets(self, ctx, subnet_id):
- self.delete_resources('subnet', ctx, subnet_id)
-
- def list_routers(self, ctx, filters=None):
- return self.list_resources('router', ctx, filters)
-
- def delete_routers(self, ctx, router_id):
- self.delete_resources('router', ctx, router_id)
-
- def action_routers(self, ctx, action, *args, **kwargs):
- router_id, body = args
- if action == 'add_gateway':
- port = {
- 'admin_state_up': True,
- 'id': uuidutils.generate_uuid(),
- 'name': '',
- 'network_id': body['network_id'],
- 'fixed_ips': '10.0.1.1',
- 'mac_address': '',
- 'device_id': router_id,
- 'device_owner': 'network:router_gateway',
- 'binding:vif_type': 'ovs',
- 'binding:host_id': 'host_1'
- }
- BOTTOM1_PORTS.append(test_utils.DotDict(port))
- elif action == 'remove_gateway':
- self.delete_routers(ctx, router_id)
-
-
-class HelperTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- cfg.CONF.register_opts(q_config.core_opts)
- self.helper = helper.NetworkHelper()
- self.context = context.Context()
-
- def _prepare_pods(self):
- pod1 = {'pod_id': 'pod_id_1',
- 'region_name': 'pod_1',
- 'az_name': 'az_name_1'}
- pod2 = {'pod_id': 'pod_id_2',
- 'region_name': 'pod_2',
- 'az_name': 'az_name_2'}
- pod3 = {'pod_id': 'pod_id_0',
- 'region_name': 'top_pod',
- 'az_name': ''}
- for pod in (pod1, pod2, pod3):
- db_api.create_pod(self.context, pod)
-
- def _prepare_top_network(self, project_id,
- network_type='vlan', az_hints=None):
- t_net_id = uuidutils.generate_uuid()
- t_subnet_id = uuidutils.generate_uuid()
- t_net = {
- 'id': t_net_id,
- 'name': t_net_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'description': 'description',
- 'admin_state_up': False,
- 'shared': False,
- 'provider:network_type': network_type,
- 'availability_zone_hints': az_hints
- }
- t_subnet = {
- 'id': t_subnet_id,
- 'network_id': t_net_id,
- 'name': t_subnet_id,
- 'ip_version': 4,
- 'cidr': '10.0.1.0/24',
- 'allocation_pools': [],
- 'enable_dhcp': True,
- 'gateway_ip': '10.0.1.1',
- 'ipv6_address_mode': q_constants.IPV6_SLAAC,
- 'ipv6_ra_mode': q_constants.IPV6_SLAAC,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'description': 'description',
- 'host_routes': [],
- 'dns_nameservers': [],
- 'segment_id': 'b85fd910-e483-4ef1-bdf5-b0f747d0b0d5'
- }
- TOP_NETS.append(test_utils.DotDict(t_net))
- TOP_SUBNETS.append(test_utils.DotDict(t_subnet))
- return t_net, t_subnet
-
- def _prepare_bottom_network(self, project_id, b_uuid=None,
- network_type='vlan', az_hints=None):
- b_net_id = b_uuid or uuidutils.generate_uuid()
- b_net = {
- 'id': b_net_id,
- 'name': b_net_id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'description': 'description',
- 'admin_state_up': False,
- 'shared': False,
- 'provider:network_type': network_type,
- 'availability_zone_hints': az_hints
- }
- BOTTOM1_NETS.append(test_utils.DotDict(b_net))
- return b_net
-
- def _prepare_router(self, project_id, router_az_hints=None):
- b_router_id = uuidutils.generate_uuid()
- b_router = {
- 'id': b_router_id,
- 'name': b_router_id,
- 'distributed': False,
- 'tenant_id': project_id,
- 'attached_ports': test_utils.DotList(),
- 'extra_attributes': {
- 'availability_zone_hints': router_az_hints
- }
- }
- BOTTOM1_ROUTERS.append(test_utils.DotDict(b_router))
- return b_router_id
-
- def test_is_local_network(self):
- net = {
- 'provider:network_type': 'vlan',
- 'availability_zone_hints': []
- }
- self.assertFalse(self.helper.is_local_network(self.context, net))
-
- net = {
- 'provider:network_type': 'vlan',
- 'availability_zone_hints': ['pod_1', 'pod_1']
- }
- self.assertFalse(self.helper.is_local_network(self.context, net))
-
- net = {
- 'provider:network_type': 'vlan',
- 'availability_zone_hints': ['pod_1']
- }
- self._prepare_pods()
- self.assertTrue(self.helper.is_local_network(self.context, net))
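# A rough reading of the three cases above (inferred from the assertions,
# not taken from the helper's real implementation): a vlan network counts
# as local only when it carries exactly one availability-zone hint and that
# hint names a known pod region.
def _looks_local(az_hints, known_pod_regions):
    return len(az_hints or []) == 1 and az_hints[0] in known_pod_regions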
-
- def test_fill_binding_info(self):
- port_body = {
- portbindings.PROFILE: 'Open vSwitch agent'
- }
- self.helper.fill_binding_info(port_body)
- self.assertEqual(port_body, {
- portbindings.PROFILE: 'Open vSwitch agent',
- portbindings.VIF_DETAILS: {'port_filter': True,
- 'ovs_hybrid_plug': True},
- portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
- portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL
- })
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_prepare_top_element(self, mock_context):
- mock_context.return_value = self.context
- self._prepare_pods()
- t_port_id = uuidutils.generate_uuid()
- port_body = {
- 'port': {
- 'id': t_port_id,
- 'name': t_port_id,
- 'fixed_ips': [{'ip_address': '10.0.1.1'}],
- 'mac_address': 'fa:16:3e:d4:01:01',
- 'device_id': None
- }
- }
- self.helper.prepare_top_element(
- self.context, None, test_utils.TEST_TENANT_ID,
- {'pod_id': 'pod_id_0', 'region_name': 'top_pod'},
- {'id': t_port_id}, 'port', port_body)
- t_ports = list_resource('port', True)
- self.assertEqual(t_ports[0]['id'], t_port_id)
-
- def test_get_create_subnet_body(self):
- t_net_id = uuidutils.generate_uuid()
- t_subnet_id = uuidutils.generate_uuid()
- b_net_id = uuidutils.generate_uuid()
- project_id = uuidutils.generate_uuid()
-
- t_subnet = {
- 'network_id': t_net_id,
- 'id': t_subnet_id,
- 'ip_version': 4,
- 'cidr': '10.0.1.0/24',
- 'gateway_ip': '10.0.1.1',
- 'allocation_pools': [{'start': '10.0.1.10', 'end': '10.0.1.254'}],
- 'enable_dhcp': True,
- 'tenant_id': project_id
- }
- body = self.helper.get_create_subnet_body(project_id, t_subnet,
- b_net_id, '10.0.1.10')
- six.assertCountEqual(self,
- [{'start': '10.0.1.1', 'end': '10.0.1.1'},
- {'start': '10.0.1.11', 'end': '10.0.1.254'}],
- body['subnet']['allocation_pools'])
- self.assertEqual('10.0.1.10', body['subnet']['gateway_ip'])
-
- body = self.helper.get_create_subnet_body(project_id, t_subnet,
- b_net_id, '10.0.1.254')
- six.assertCountEqual(self,
- [{'start': '10.0.1.1', 'end': '10.0.1.1'},
- {'start': '10.0.1.10', 'end': '10.0.1.253'}],
- body['subnet']['allocation_pools'])
- self.assertEqual('10.0.1.254', body['subnet']['gateway_ip'])
-
- body = self.helper.get_create_subnet_body(project_id, t_subnet,
- b_net_id, '10.0.1.8')
- six.assertCountEqual(self,
- [{'start': '10.0.1.1', 'end': '10.0.1.1'},
- {'start': '10.0.1.10', 'end': '10.0.1.254'}],
- body['subnet']['allocation_pools'])
- self.assertEqual('10.0.1.8', body['subnet']['gateway_ip'])
-
- t_subnet['allocation_pools'] = [
- {'start': '10.0.1.2', 'end': '10.0.1.10'},
- {'start': '10.0.1.20', 'end': '10.0.1.254'}]
- body = self.helper.get_create_subnet_body(project_id, t_subnet,
- b_net_id, '10.0.1.5')
- six.assertCountEqual(self,
- [{'start': '10.0.1.1', 'end': '10.0.1.4'},
- {'start': '10.0.1.6', 'end': '10.0.1.10'},
- {'start': '10.0.1.20', 'end': '10.0.1.254'}],
- body['subnet']['allocation_pools'])
- self.assertEqual('10.0.1.5', body['subnet']['gateway_ip'])
-
- t_subnet['gateway_ip'] = '10.0.1.11'
- t_subnet['allocation_pools'] = [
- {'start': '10.0.1.2', 'end': '10.0.1.10'},
- {'start': '10.0.1.12', 'end': '10.0.1.254'}]
- body = self.helper.get_create_subnet_body(project_id, t_subnet,
- b_net_id, '10.0.1.5')
- six.assertCountEqual(self,
- [{'start': '10.0.1.2', 'end': '10.0.1.4'},
- {'start': '10.0.1.6', 'end': '10.0.1.254'}],
- body['subnet']['allocation_pools'])
- self.assertEqual('10.0.1.5', body['subnet']['gateway_ip'])
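# A rough sketch of the pool arithmetic the assertions above expect
# (illustrative only; it is not necessarily how the real helper is written):
# the old gateway address becomes allocatable again, while the new gateway
# address is carved out of the allocation pools.
def _split_pools(pools, old_gateway_ip, new_gateway_ip):
    import netaddr  # assumed available, as in typical Neutron environments
    ip_set = netaddr.IPSet()
    for pool in pools:
        ip_set.add(netaddr.IPRange(pool['start'], pool['end']))
    ip_set.add(old_gateway_ip)      # the old gateway is freed for allocation
    ip_set.remove(new_gateway_ip)   # the new gateway must not be allocated
    return [{'start': str(r[0]), 'end': str(r[-1])}
            for r in ip_set.iter_ipranges()]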
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- def test_prepare_shadow_ports(self):
- port_bodys = [{
- 'id': 'port-id-%d' % i,
- 'fixed_ips': [{'ip_address': '10.0.1.%d' % i}],
- 'mac_address': 'fa:16:3e:d4:01:%02x' % i,
- 'binding:host_id': 'host1',
- 'device_id': None
- } for i in range(1, 20)]
- agents = [{'type': 'Open vSwitch agent',
- 'tunnel_ip': '192.168.1.101'} for _ in range(1, 20)]
- # we only want to exercise the logic here, so we pass None for the
- # context and a deliberately minimal dict for target_pod
- ret_port_ids = self.helper.prepare_shadow_ports(
- None, 'project_id', {'region_name': 'pod1'}, 'net-id-1',
- port_bodys, agents, 5)
- req_port_ids = [port['id'] for port in port_bodys]
- six.assertCountEqual(self, ret_port_ids, req_port_ids)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- def test_prepare_shadow_port(self):
- self._prepare_pods()
- port_body = {
- 'id': uuidutils.generate_uuid(),
- 'fixed_ips': [{'ip_address': '10.0.1.1'}],
- 'mac_address': 'fa:16:3e:d4:01:01',
- 'binding:host_id': 'host1',
- 'device_id': None
- }
- agent = {'type': 'Open vSwitch agent', 'tunnel_ip': '192.168.1.101'}
- self.helper.prepare_shadow_port(
- self.context, 'project_id',
- {'pod_id': 'pod_id_1', 'region_name': 'pod_1'},
- 'net-id-1', port_body, agent)
- sw_ports = list_resource('port', False)
- self.assertEqual(len(sw_ports), 1)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_prepare_bottom_router(self, mock_context):
- self._prepare_pods()
- mock_context.return_value = self.context
- net = {
- 'availability_zone_hints': ['az_name_1'],
- 'tenant_id': test_utils.TEST_TENANT_ID
- }
- self.helper.prepare_bottom_router(self.context, net, 'fake_router_1')
- b_routers = list_resource('router', False)
- self.assertEqual(len(b_routers), 1)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_remove_bottom_router_by_name(self, mock_context):
- router_id = self._prepare_router(test_utils.TEST_TENANT_ID,
- router_az_hints='az_name_1')
- mock_context.return_value = self.context
- b_router = get_resource('router', False, router_id)
- self.assertIsNotNone(b_router['id'])
- self.helper.remove_bottom_router_by_name(
- self.context, 'pod_1', router_id)
- self.assertRaises(q_exceptions.NotFound, get_resource,
- 'router', False, router_id)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_prepare_bottom_router_gateway(self, mock_context):
- self._prepare_pods()
- mock_context.return_value = self.context
- self.assertRaises(t_exceptions.NotFound,
- self.helper.prepare_bottom_router_gateway,
- self.context, 'pod_1', 'fake_router')
-
- router_id = self._prepare_router(test_utils.TEST_TENANT_ID,
- router_az_hints='az_name_1')
- self.assertRaises(t_exceptions.NotFound,
- self.helper.prepare_bottom_router_gateway,
- self.context, 'pod_1', router_id)
- b_net_id = uuidutils.generate_uuid()
- b_net = {
- 'id': b_net_id,
- 'name': router_id,
- 'tenant_id': test_utils.TEST_TENANT_ID,
- 'project_id': test_utils.TEST_TENANT_ID,
- 'description': 'description',
- 'admin_state_up': False,
- 'shared': False,
- 'provider:network_type': 'vlan',
- 'availability_zone_hints': None
- }
- BOTTOM1_NETS.append(test_utils.DotDict(b_net))
-
- self.helper.prepare_bottom_router_gateway(
- self.context, 'pod_1', router_id)
- b_gw_ports = list_resource('port', False)
- self.assertEqual(b_gw_ports[0]['device_id'], router_id)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_remove_bottom_router_gateway(self, mock_context):
- self._prepare_pods()
- mock_context.return_value = self.context
- self.assertRaises(t_exceptions.NotFound,
- self.helper.remove_bottom_router_gateway,
- self.context, 'pod_1', 'fake_router')
-
- router_id = self._prepare_router(test_utils.TEST_TENANT_ID,
- router_az_hints='az_name_1')
- b_routers = list_resource('router', False)
- self.assertEqual(b_routers[0]['id'], router_id)
- self.helper.remove_bottom_router_gateway(
- self.context, 'pod_1', router_id)
- self.assertRaises(q_exceptions.NotFound, get_resource,
- 'router', False, router_id)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_remove_bottom_external_network_by_name(self, mock_context):
- mock_context.return_value = self.context
- b_net = self._prepare_bottom_network(test_utils.TEST_TENANT_ID,
- az_hints='az_name_1')
- b_net_req = get_resource('network', False, b_net['id'])
- self.assertEqual(b_net_req['id'], b_net['id'])
- self.helper.remove_bottom_external_network_by_name(
- self.context, 'pod_1', b_net['id'])
- self.assertRaises(q_exceptions.NotFound, get_resource,
- 'network', False, b_net['id'])
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_prepare_bottom_external_subnet_by_bottom_name(self, mock_context):
- self._prepare_pods()
- mock_context.return_value = self.context
- t_net, t_subnet = self._prepare_top_network(test_utils.TEST_TENANT_ID)
- self.assertRaises(
- t_exceptions.InvalidInput,
- self.helper.prepare_bottom_external_subnet_by_bottom_name,
- self.context, t_subnet, 'pod_1',
- 'fake_bottom_network_name', t_subnet['id'])
-
- b_net = self._prepare_bottom_network(
- test_utils.TEST_TENANT_ID, b_uuid=t_net['id'])
- self.helper.prepare_bottom_external_subnet_by_bottom_name(
- self.context, t_subnet, 'pod_1',
- b_net['name'], t_subnet['id'])
- b_subnets = list_resource('subnet', False)
- self.assertEqual(len(b_subnets), 1)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_remove_bottom_external_subnet_by_name(self, mock_context):
- self._prepare_pods()
- mock_context.return_value = self.context
- t_net, t_subnet = self._prepare_top_network(test_utils.TEST_TENANT_ID)
- b_net = self._prepare_bottom_network(
- test_utils.TEST_TENANT_ID, b_uuid=t_net['id'])
- self.helper.prepare_bottom_external_subnet_by_bottom_name(
- self.context, t_subnet, 'pod_1',
- b_net['name'], t_subnet['id'])
-
- b_subnets = list_resource('subnet', False)
- self.helper.remove_bottom_external_subnet_by_name(
- self.context, 'pod_1', b_subnets[0]['name'])
- self.assertRaises(q_exceptions.NotFound, get_resource,
- 'subnet', False, t_subnet['id'])
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- test_utils.get_resource_store().clean()
- cfg.CONF.unregister_opts(q_config.core_opts)
diff --git a/tricircle/tests/unit/network/test_local_plugin.py b/tricircle/tests/unit/network/test_local_plugin.py
deleted file mode 100644
index 812acaa5..00000000
--- a/tricircle/tests/unit/network/test_local_plugin.py
+++ /dev/null
@@ -1,1191 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import mock
-from mock import patch
-import six
-import unittest
-
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-import neutron.conf.common as q_config
-import neutron.extensions.securitygroup as ext_sg
-from neutron.services.trunk import exceptions as t_exc
-from neutron_lib.api.definitions import portbindings
-import neutron_lib.constants as q_constants
-import neutron_lib.exceptions as q_exceptions
-from neutron_lib.plugins import directory
-
-from tricircle.common import client
-from tricircle.common import constants
-import tricircle.common.context as t_context
-from tricircle.network import helper
-import tricircle.network.local_plugin as plugin
-import tricircle.tests.unit.utils as test_utils
-
-
-_resource_store = test_utils.get_resource_store()
-TOP_NETS = _resource_store.TOP_NETWORKS
-TOP_SUBNETS = _resource_store.TOP_SUBNETS
-TOP_PORTS = _resource_store.TOP_PORTS
-TOP_SGS = _resource_store.TOP_SECURITYGROUPS
-TOP_TRUNKS = _resource_store.TOP_TRUNKS
-BOTTOM_NETS = _resource_store.BOTTOM1_NETWORKS
-BOTTOM_SUBNETS = _resource_store.BOTTOM1_SUBNETS
-BOTTOM_PORTS = _resource_store.BOTTOM1_PORTS
-BOTTOM_SGS = _resource_store.BOTTOM1_SECURITYGROUPS
-BOTTOM_AGENTS = _resource_store.BOTTOM1_AGENTS
-
-
-def get_resource_list(_type, is_top):
- pod = 'top' if is_top else 'pod_1'
- return _resource_store.pod_store_map[pod][_type]
-
-
-def create_resource(_type, is_top, body):
- get_resource_list(_type, is_top).append(body)
-
-
-def update_resource(_type, is_top, resource_id, body):
- for resource in get_resource_list(_type, is_top):
- if resource['id'] == resource_id:
- resource.update(body)
- return copy.deepcopy(resource)
- raise q_exceptions.NotFound()
-
-
-def get_resource(_type, is_top, resource_id):
- for resource in get_resource_list(_type, is_top):
- if resource['id'] == resource_id:
- return copy.deepcopy(resource)
- raise q_exceptions.NotFound()
-
-
-def list_resource(_type, is_top, filters=None):
-    resource_list = get_resource_list(_type, is_top)
-    if not filters:
-        return [copy.deepcopy(resource) for resource in resource_list]
- ret = []
- for resource in resource_list:
- pick = True
- for key, value in six.iteritems(filters):
- if resource.get(key) not in value:
- pick = False
- break
- if pick:
- ret.append(copy.deepcopy(resource))
- return ret
-
-
-def delete_resource(_type, is_top, resource_id):
- for resource in get_resource_list(_type, is_top):
- if resource['id'] == resource_id:
- return get_resource_list(_type, is_top).remove(resource)
- raise q_exceptions.NotFound()
-
-
-class FakeTypeManager(object):
-
- def __init__(self):
- self.drivers = {}
-
-
-class FakeCorePlugin(object):
- supported_extension_aliases = ['agent']
-
- def __init__(self):
- self.type_manager = FakeTypeManager()
-
- def create_network(self, context, network):
- create_resource('network', False, network['network'])
- return network['network']
-
- def get_network(self, context, _id, fields=None):
- return get_resource('network', False, _id)
-
- def get_networks(self, context, filters=None, fields=None, sorts=None,
- limit=None, marker=None, page_reverse=False):
- return list_resource('network', False, filters)
-
- def create_subnet(self, context, subnet):
- create_resource('subnet', False, subnet['subnet'])
- return subnet['subnet']
-
- def update_subnet(self, context, _id, subnet):
- return update_resource('subnet', False, _id, subnet['subnet'])
-
- def delete_subnet(self, context, _id):
- return delete_resource('subnet', False, _id)
-
- def get_subnet(self, context, _id, fields=None):
- return get_resource('subnet', False, _id)
-
- def get_subnets(self, context, filters=None, fields=None, sorts=None,
- limit=None, marker=None, page_reverse=False):
- return list_resource('subnet', False, filters)
-
- def create_port(self, context, port):
- create_resource('port', False, port['port'])
- return port['port']
-
- def create_port_bulk(self, context, ports):
- ret_ports = []
- for port in ports['ports']:
- create_resource('port', False, port['port'])
- ret_ports.append(port['port'])
- return ret_ports
-
- def update_port(self, context, _id, port):
- return update_resource('port', False, _id, port['port'])
-
- def get_port(self, context, _id, fields=None):
- return get_resource('port', False, _id)
-
- def get_ports(self, context, filters=None, fields=None, sorts=None,
- limit=None, marker=None, page_reverse=False):
- return list_resource('port', False, filters)
-
- def delete_port(self, context, _id, l3_port_check=False):
- delete_resource('port', False, _id)
-
- def create_security_group(self, context, security_group, default_sg=False):
- create_resource('security_group', False,
- security_group['security_group'])
- return security_group['security_group']
-
- def get_security_group(self, context, _id, fields=None, tenant_id=None):
- return get_resource('security_group', False, _id)
-
- def get_security_groups(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False, default_sg=False):
- return list_resource('security_group', False, filters)
-
- def get_agents(self, context, filters=None, fields=None):
- return list_resource('agent', False, filters)
-
- def create_or_update_agent(self, context, agent_state):
- pass
-
-
-class FakeContext(object):
- def __init__(self):
- self.session = test_utils.FakeSession()
- self.auth_token = 'token'
- self.project_id = ''
- self.request_id = 'req-' + uuidutils.generate_uuid()
-
-
-def fake_get_trunk_plugin(alias):
-    return FakeTrunkPlugin()
-
-
-class FakeTrunkPlugin(object):
-
- def get_trunk(self, context, trunk_id, fields=None):
- raise t_exc.TrunkNotFound(trunk_id=trunk_id)
-
- def get_trunks(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None, page_reverse=False):
- return []
-
- def create_trunk(self, context, trunk):
- pass
-
-
-class FakeClient(object):
- def list_networks(self, **kwargs):
- return {'networks': list_resource('network', True, kwargs)}
-
- def list_subnets(self, **kwargs):
- return {'subnets': list_resource('subnet', True, kwargs)}
-
- def create_port(self, port):
- if 'id' not in port['port']:
- port['port']['id'] = uuidutils.generate_uuid()
- if 'fixed_ips' not in port['port']:
- for subnet in TOP_SUBNETS:
- if subnet['network_id'] == port['port']['network_id']:
- ip = {'subnet_id': subnet['id'],
- 'ip_address': subnet['cidr'][:-4] + '3'}
- port['port']['fixed_ips'] = [ip]
- create_resource('port', True, port['port'])
- return port
-
- def show_port(self, port_id):
- return {'port': get_resource('port', True, port_id)}
-
- def list_ports(self, **kwargs):
- def find_ip_address(port, ip_address):
- for ip in port.get('fixed_ips', []):
- if ip['ip_address'] == ip_address:
- return True
- return False
-
- ports = []
- for port in TOP_PORTS:
- pick = True
- for key, value in six.iteritems(kwargs):
- if key == 'fixed_ips':
- if not find_ip_address(port, value.split('=')[1]):
- pick = False
- break
- elif port.get(key) != value:
- pick = False
- break
- if pick:
- ports.append(copy.deepcopy(port))
- return {'ports': ports}
-
- def list_security_groups(self, **kwargs):
- return {'security_groups': list_resource('security_group',
- True, kwargs)}
-
-
-class FakeNeutronHandle(object):
- def _get_client(self, context):
- return FakeClient()
-
- def handle_get(self, context, _type, _id):
- return get_resource(_type, True, _id)
-
- def handle_create(self, context, _type, body):
- if _type == 'port':
- return FakeClient().create_port(body)['port']
- create_resource(_type, True, body[_type])
- return body[_type]
-
- def handle_update(self, context, _type, _id, body):
- pass
-
- def handle_list(self, cxt, resource, filters):
- if resource == 'trunk':
- for trunk in TOP_TRUNKS:
- if trunk['port_id'] == filters[0]['value']:
- return [trunk]
- return []
-
- def handle_delete(self, context, _type, _id):
- delete_resource(_type, True, _id)
-
-
-class FakePlugin(plugin.TricirclePlugin):
- def __init__(self):
- self.core_plugin = FakeCorePlugin()
- self.neutron_handle = FakeNeutronHandle()
- self.on_trunk_create = {}
- self.on_subnet_delete = {}
-
-
-class PluginTest(unittest.TestCase):
- def setUp(self):
- cfg.CONF.register_opts(q_config.core_opts)
- self.tenant_id = uuidutils.generate_uuid()
- self.plugin = FakePlugin()
- self.context = FakeContext()
-
- def _prepare_resource(self, az_hints=None, enable_dhcp=True):
- network_id = uuidutils.generate_uuid()
- subnet_id = uuidutils.generate_uuid()
- port_id = uuidutils.generate_uuid()
- sg_id = uuidutils.generate_uuid()
- t_net = {'id': network_id,
- 'tenant_id': self.tenant_id,
- 'name': 'net1',
- 'provider:network_type': constants.NT_VLAN,
- 'subnets': [subnet_id],
- 'availability_zone_hints': az_hints}
- t_subnet = {'id': subnet_id,
- 'tenant_id': self.tenant_id,
- 'name': 'subnet1',
- 'network_id': network_id,
- 'cidr': '10.0.1.0/24',
- 'gateway_ip': '10.0.1.1',
- 'ip_version': 4,
- 'allocation_pools': [{'start': '10.0.1.2',
- 'end': '10.0.1.254'}],
- 'enable_dhcp': enable_dhcp}
- t_port = {'id': port_id,
- 'tenant_id': self.tenant_id,
- 'admin_state_up': True,
- 'name': constants.dhcp_port_name % subnet_id,
- 'network_id': network_id,
- 'mac_address': 'fa:16:3e:96:41:02',
- 'device_owner': 'network:dhcp',
- 'device_id': 'reserved_dhcp_port',
- 'fixed_ips': [{'subnet_id': subnet_id,
- 'ip_address': '10.0.1.2'}],
- 'binding:profile': {}}
- t_sg = {
- 'id': sg_id,
- 'tenant_id': self.tenant_id,
- 'name': 'default',
- 'security_group_rules': [{
- 'remote_group_id': sg_id,
- 'direction': 'ingress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'port_range_max': -1,
- 'port_range_min': -1,
- 'security_group_id': sg_id}]
- }
- TOP_NETS.append(t_net)
- TOP_SUBNETS.append(t_subnet)
- TOP_PORTS.append(t_port)
- TOP_SGS.append(t_sg)
- return t_net, t_subnet, t_port, t_sg
-
- def _get_bottom_resources_with_net(self, net, subnet, port):
- b_net = get_resource('network', False, net['id'])
- b_subnet = get_resource('subnet', False, subnet['id'])
- b_port = get_resource('port', False, port['id'])
- b_net.pop('project_id')
- return b_net, b_subnet, b_port
-
- def _get_bottom_resources_without_net(self, subnet, port):
- b_net = get_resource('network', False, subnet['network_id'])
- b_subnet = get_resource('subnet', False, subnet['id'])
- b_port = get_resource('port', False, port['id'])
- return b_net, b_subnet, b_port
-
- def _validate(self, b_net, b_subnet, b_port, t_net, t_subnet, t_port):
-
- t_net.pop('provider:network_type')
- t_net.pop('availability_zone_hints')
- b_net_type = b_net.pop('provider:network_type')
- b_subnet.pop('project_id')
- pool = t_subnet.pop('allocation_pools')[0]
- b_pools = b_subnet.pop('allocation_pools')
- t_gateway_ip = t_subnet.pop('gateway_ip')
- b_gateway_ip = b_subnet.pop('gateway_ip')
-
- def ip_to_digit(ip):
- return int(ip[ip.rindex('.') + 1:])
-
- if t_gateway_ip:
- pool_range = list(range(ip_to_digit(t_gateway_ip),
- ip_to_digit(pool['end']) + 1))
- # we include the top gateway ip in the bottom ip allocation pool
- b_pool_range1 = list(range(ip_to_digit(b_pools[0]['start']),
- ip_to_digit(b_pools[0]['end']) + 1))
- b_pool_range2 = list(range(ip_to_digit(b_pools[1]['start']),
- ip_to_digit(b_pools[1]['end']) + 1))
- b_pool_range = b_pool_range1 + [
- ip_to_digit(b_gateway_ip)] + b_pool_range2
- else:
- self.assertIsNone(t_gateway_ip)
- self.assertIsNone(b_gateway_ip)
- pool_range = list(range(ip_to_digit(pool['start']),
- ip_to_digit(pool['end'])))
- b_pool_range = list(range(ip_to_digit(b_pools[0]['start']),
- ip_to_digit(b_pools[0]['end'])))
- t_port.pop('name')
- b_port.pop('name')
- self.assertDictEqual(t_net, b_net)
- self.assertDictEqual(t_subnet, b_subnet)
- self.assertSetEqual(set(pool_range), set(b_pool_range))
- self.assertEqual('vlan', b_net_type)
- self.assertDictEqual(t_port, b_port)
-
-    def _prepare_vm_port(self, t_net, t_subnet, index, t_sgs=None):
-        t_sgs = t_sgs or []
-        port_id = uuidutils.generate_uuid()
- cidr = t_subnet['cidr']
- ip_address = '%s.%d' % (cidr[:cidr.rindex('.')], index + 3)
- mac_address = 'fa:16:3e:96:41:0%d' % (index + 3)
- t_port = {'id': port_id,
- 'tenant_id': self.tenant_id,
- 'admin_state_up': True,
- 'network_id': t_net['id'],
- 'mac_address': mac_address,
- 'fixed_ips': [{'subnet_id': t_subnet['id'],
- 'ip_address': ip_address}],
- 'binding:profile': {},
- 'security_groups': t_sgs}
- TOP_PORTS.append(t_port)
- return t_port
-
- def test__in_subnet_delete(self):
- self.context.request_id = None
- self.assertEqual(False, self.plugin._in_subnet_delete(self.context))
-
- def test__adapt_network_body(self):
- network = {'provider:network_type': constants.NT_LOCAL}
- self.plugin._adapt_network_body(network)
- self.assertEqual({}, network)
-
- def test__adapt_port_body_for_call(self):
- port = {}
- self.plugin._adapt_port_body_for_call(port)
- self.assertIsNotNone(port['mac_address'])
- self.assertIsNotNone(port['fixed_ips'])
-
- def test__construct_params(self):
- filters = {'filter': 'aaa'}
- sorts = [['name', True]]
- limit = 10
- marker = 'bbb'
- params = {'filter': 'aaa', 'sort_key': ['name'],
- 'limit': limit, 'marker': marker}
-
- params.update({'sort_dir': ['desc']})
- self.assertEqual(params,
- self.plugin._construct_params(
- filters, sorts, limit, marker, True))
-
- params.update({'sort_dir': ['asc']})
- self.assertEqual(params,
- self.plugin._construct_params(
- filters, sorts, limit, marker, False))
-
- def test__get_neutron_region(self):
- cfg.CONF.set_override('local_region_name', None, 'tricircle')
- cfg.CONF.set_override('region_name', 'Pod1', 'nova')
- self.assertEqual('Pod1', self.plugin._get_neutron_region())
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test__ensure_subnet(self):
- t_net, t_subnet, t_port, _ = self._prepare_resource()
- b_net = copy.deepcopy(t_net)
-
- subnet_ids = self.plugin._ensure_subnet(
- self.context, b_net, is_top=False)
- self.assertEqual(t_net['subnets'], subnet_ids)
-
- b_net['subnets'] = []
- subnet_ids = self.plugin._ensure_subnet(
- self.context, b_net, is_top=False)
- self.assertEqual(t_net['subnets'], subnet_ids)
-
- t_net['subnets'] = []
- self.assertEqual([], self.plugin._ensure_subnet(self.context, t_net))
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_get_subnet_no_bottom_network(self):
- t_net, t_subnet, t_port, _ = self._prepare_resource()
- self.plugin.get_subnet(self.context, t_subnet['id'])
- b_net, b_subnet, b_port = self._get_bottom_resources_without_net(
- t_subnet, t_port)
- self._validate(b_net, b_subnet, b_port, t_net, t_subnet, t_port)
-
- @patch.object(t_context, 'get_context_from_neutron_context')
- def test_get_subnet(self, mock_context):
- mock_context.return_value = self.context
- t_net, t_subnet, t_port, _ = self._prepare_resource()
- self.plugin.get_network(self.context, t_net['id'])
- self.plugin.get_subnet(self.context, t_subnet['id'])
- b_net, b_subnet, b_port = self._get_bottom_resources_with_net(
- t_net, t_subnet, t_port)
- self._validate(b_net, b_subnet, b_port, t_net, t_subnet, t_port)
-
- def test_create_subnet(self):
- _, t_subnet, _, _ = self._prepare_resource()
- subnet = {'subnet': t_subnet}
- self.plugin.create_subnet(self.context, subnet)
- self.assertDictEqual(t_subnet,
- get_resource('subnet', False, t_subnet['id']))
-
- delete_resource('subnet', False, t_subnet['id'])
- t_subnet['name'] = t_subnet['id']
- self.plugin.create_subnet(self.context, subnet)
- self.assertDictEqual(t_subnet,
- get_resource('subnet', False, t_subnet['id']))
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test__create_bottom_network(self):
- self.plugin.neutron_handle.handle_get = mock.Mock(return_value=None)
- self.assertRaises(q_exceptions.NetworkNotFound,
- self.plugin._create_bottom_network,
- self.context, 'fake_net_id')
-
- t_net, _, _, _ = self._prepare_resource()
- self.plugin.neutron_handle.handle_get = mock.Mock(return_value=t_net)
- _, b_net = self.plugin._create_bottom_network(
- self.context, t_net['id'])
- self.assertDictEqual(b_net,
- get_resource('network', False, t_net['id']))
-
- def test_create_network(self):
- t_net, t_subnet, t_port, _ = self._prepare_resource()
- network = {'network': t_net}
- self.plugin.create_network(self.context, network)
- b_net = get_resource('network', False, t_net['id'])
- self.assertDictEqual(t_net, b_net)
-
- t_net['id'] = uuidutils.generate_uuid()
- t_net['name'] = None
- self.plugin.create_network(self.context, network)
- b_net = get_resource('network', False, t_net['id'])
- self.assertDictEqual(t_net, b_net)
-
- t_net['id'] = None
- t_net['name'] = uuidutils.generate_uuid()
- self.plugin.create_network(self.context, network)
- b_net = get_resource('network', False, t_net['id'])
- t_net['id'] = t_net['name']
- self.assertDictEqual(t_net, b_net)
-
- @patch.object(t_context, 'get_context_from_neutron_context')
- def test_get_network(self, mock_context):
- t_net, t_subnet, t_port, _ = self._prepare_resource()
-
- self.plugin._start_subnet_delete(self.context)
- self.assertRaises(q_exceptions.NotFound,
- self.plugin.get_network, self.context, t_net['id'])
- self.plugin._end_subnet_delete(self.context)
-
- self.plugin.get_network(self.context, t_net['id'])
- b_net, b_subnet, b_port = self._get_bottom_resources_with_net(
- t_net, t_subnet, t_port)
- self._validate(b_net, b_subnet, b_port, t_net, t_subnet, t_port)
-
- mock_context.return_value = self.context
- mock_context.return_value.auth_token = None
- self.assertRaises(q_exceptions.NetworkNotFound,
- self.plugin.get_network,
- self.context, uuidutils.generate_uuid())
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_get_network_no_gateway(self):
- t_net, t_subnet, t_port, _ = self._prepare_resource()
- update_resource('subnet', True, t_subnet['id'], {'gateway_ip': None})
- self.plugin.get_network(self.context, t_net['id'])
- b_net, b_subnet, b_port = self._get_bottom_resources_with_net(
- t_net, t_subnet, t_port)
- self._validate(b_net, b_subnet, b_port, t_net, t_subnet, t_port)
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- @patch.object(client.Client, 'get_admin_token', new=mock.Mock)
- def test_get_networks(self):
- az_hints = ['Pod1', 'Pod2']
- t_net1, t_subnet1, t_port1, _ = self._prepare_resource()
- t_net2, t_subnet2, t_port2, _ = self._prepare_resource(az_hints)
- cfg.CONF.set_override('region_name', 'Pod1', 'nova')
- self.plugin.get_networks(self.context,
- {'id': [t_net1['id'], t_net2['id'],
- 'fake_net_id']})
- b_net1, b_subnet1, b_port1 = self._get_bottom_resources_with_net(
- t_net1, t_subnet1, t_port1)
- b_net2, b_subnet2, b_port2 = self._get_bottom_resources_with_net(
- t_net2, t_subnet2, t_port2)
- self._validate(b_net1, b_subnet1, b_port1, t_net1, t_subnet1, t_port1)
- self._validate(b_net2, b_subnet2, b_port2, t_net2, t_subnet2, t_port2)
-
-        expected_networks = [{
-            'id': net['id'],
-            'name': net['name'],
-            'project_id': net['tenant_id'],
-            'provider:network_type': constants.NT_VLAN,
-            'subnets': net['subnets'],
-            'tenant_id': net['tenant_id']
-        } for net in [t_net1, t_net2]]
-        self.assertListEqual(
-            expected_networks, self.plugin.get_networks(self.context))
-        self.assertListEqual(
-            expected_networks,
-            self.plugin.get_networks(
-                self.context, {'id': [t_net1['id'], t_net2['id']]}))
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- @patch.object(client.Client, 'get_admin_token', new=mock.Mock)
-    def test_get_invalid_networks(self):
- az_hints = ['Pod2', 'Pod3']
- t_net1, t_subnet1, t_port1, _ = self._prepare_resource(az_hints)
- cfg.CONF.set_override('region_name', 'Pod1', 'nova')
- net_filter = {
- 'id': [t_net1.get('id')]
- }
- nets = self.plugin.get_networks(self.context, net_filter)
- six.assertCountEqual(self, nets, [])
-
- @patch.object(t_context, 'get_context_from_neutron_context')
- @patch.object(FakeNeutronHandle, 'handle_get')
- def test_get_subnet_notfound(self, mock_handle_get, mock_context):
- t_net, t_subnet, t_port, _ = self._prepare_resource(
- az_hints='fake_region')
- self.assertRaises(q_exceptions.SubnetNotFound,
- self.plugin.get_subnet,
- self.context, t_port['id'])
-
- mock_handle_get.return_value = None
- self.assertRaises(q_exceptions.SubnetNotFound,
- self.plugin.get_subnet,
- self.context, uuidutils.generate_uuid())
-
- mock_context.return_value = self.context
- mock_context.return_value.auth_token = None
- self.assertRaises(q_exceptions.SubnetNotFound,
- self.plugin.get_subnet,
- self.context, uuidutils.generate_uuid())
-
- @patch.object(t_context, 'get_context_from_neutron_context')
- @patch.object(client.Client, 'get_admin_token', new=mock.Mock)
- def test_get_subnets(self, mock_context):
- az_hints = ['Pod1', 'Pod2']
- mock_context.return_value = self.context
- t_net1, t_subnet1, t_port1, _ = self._prepare_resource()
- t_net2, t_subnet2, t_port2, _ = self._prepare_resource(az_hints)
- cfg.CONF.set_override('region_name', 'Pod1', 'nova')
- self.plugin.get_subnets(self.context,
- {'id': [t_subnet1['id'], t_subnet2['id'],
- 'fake_net_id']})
- b_net1, b_subnet1, b_port1 = self._get_bottom_resources_without_net(
- t_subnet1, t_port1)
- b_net2, b_subnet2, b_port2 = self._get_bottom_resources_without_net(
- t_subnet2, t_port2)
- self._validate(b_net1, b_subnet1, b_port1, t_net1, t_subnet1, t_port1)
- self._validate(b_net2, b_subnet2, b_port2, t_net2, t_subnet2, t_port2)
-
- delete_resource('subnet', False, t_subnet1['id'])
- t_net1, t_subnet1, t_port1, _ = self._prepare_resource()
- b_subnets = self.plugin.get_subnets(self.context)
- self.assertEqual(len(b_subnets), 1)
-
- b_subnets = self.plugin.get_subnets(self.context, {
- 'id': [t_subnet1['id'], t_subnet2['id']]})
- self.assertEqual(len(b_subnets), 2)
-
- mock_context.return_value.auth_token = None
- b_subnets = self.plugin.get_subnets(self.context, {
- 'id': [t_subnet1['id'], t_subnet2['id'], 'fake_net_id']})
- self.assertEqual(len(b_subnets), 2)
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- @patch.object(client.Client, 'get_admin_token', new=mock.Mock)
-    def test_get_invalid_subnets(self):
- az_hints = ['Pod2', 'Pod3']
- t_net1, t_subnet1, t_port1, _ = self._prepare_resource(az_hints)
- cfg.CONF.set_override('region_name', 'Pod1', 'nova')
- net_filter = {
- 'id': [t_subnet1.get('id')]
- }
- subnets = self.plugin.get_subnets(self.context, net_filter)
- six.assertCountEqual(self, subnets, [])
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_create_port(self):
- t_net, t_subnet, t_port, _ = self._prepare_resource()
- port = {
- 'port': {'network_id': t_net['id'],
- 'fixed_ips': q_constants.ATTR_NOT_SPECIFIED,
- 'security_groups': []}
- }
- t_port = self.plugin.create_port(self.context, port)
- b_port = get_resource('port', False, t_port['id'])
- self.assertDictEqual(t_port, b_port)
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_create_port_route_snat(self):
- t_net, t_subnet, t_port, _ = self._prepare_resource()
- port = {'name': 'route_snat',
- 'fixed_ips': q_constants.ATTR_NOT_SPECIFIED,
- 'network_id': t_net['id'],
- 'device_owner': q_constants.DEVICE_OWNER_ROUTER_SNAT}
-
- t_port = self.plugin.create_port(self.context, {'port': port})
- b_port = get_resource('port', False, t_port['id'])
- self.assertDictEqual(t_port, b_port)
-
- port = {'id': uuidutils.generate_uuid(),
- 'name': 'route_snat',
- 'fixed_ips': [{'subnet_id': t_subnet['id'],
- 'ip_address': '10.0.1.3'}],
- 'network_id': t_net['id'],
- 'device_owner': q_constants.DEVICE_OWNER_ROUTER_SNAT}
-
- t_snat_port = {'id': uuidutils.generate_uuid(),
- 'tenant_id': self.tenant_id,
- 'admin_state_up': True,
- 'name': constants.snat_port_name % t_subnet['id'],
- 'network_id': t_net['id'],
- 'mac_address': 'fa:16:3e:96:41:03',
- 'device_owner': q_constants.DEVICE_OWNER_ROUTER_SNAT,
- 'device_id': 'reserved_snat_port',
- 'fixed_ips': [{'subnet_id': t_subnet['id'],
- 'ip_address': '10.0.1.3'}],
- 'binding:profile': {}}
- TOP_PORTS.append(t_snat_port)
-
- t_port = self.plugin.create_port(self.context, {'port': port})
- b_port = get_resource('port', False, t_port['id'])
- self.assertDictEqual(t_port, b_port)
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_create_port_lbaas(self):
- t_net, t_subnet, t_port, _ = self._prepare_resource()
- port = {'name': 'loadbalancer-lb-1',
- 'network_id': t_net['id'],
- 'mac_address': q_constants.ATTR_NOT_SPECIFIED,
- 'admin_state_up': False,
- 'device_id': 'lb_1',
- 'device_owner': q_constants.DEVICE_OWNER_LOADBALANCERV2,
- 'fixed_ips': [{'subnet_id': t_subnet['id']}]}
-
- t_port = self.plugin.create_port(self.context, {'port': port})
- b_port = get_resource('port', False, t_port['id'])
- self.assertDictEqual(t_port, b_port)
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_create_port_ip_specified(self):
- t_net, t_subnet, t_port, t_sg = self._prepare_resource()
-
- port_body = {
- 'port': {'network_id': t_net['id'],
- 'fixed_ips': [{'subnet_id': t_subnet['id']}]}
- }
- self.assertRaises(q_exceptions.InvalidIpForNetwork,
- self.plugin.create_port, self.context, port_body)
-
- port_body = {
- 'port': {'network_id': t_net['id'],
- 'fixed_ips': [{'ip_address': '10.0.1.4'}]}
- }
- self.assertRaises(q_exceptions.InvalidIpForNetwork,
- self.plugin.create_port, self.context, port_body)
-
- t_vm_port = self._prepare_vm_port(t_net, t_subnet, 1, [t_sg['id']])
- b_port = self.plugin.create_port(self.context, port_body)
- self.assertDictEqual(t_vm_port, b_port)
-
- @patch.object(FakeCorePlugin, 'create_or_update_agent')
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_create_port_with_tunnel_ip(self, mock_agent):
- t_net, t_subnet, t_port, t_sg = self._prepare_resource()
-
- # core plugin supports "agent" extension and body contains tunnel ip
- port_body = {
- 'port': {'network_id': t_net['id'],
- 'fixed_ips': q_constants.ATTR_NOT_SPECIFIED,
- 'security_groups': [],
- portbindings.HOST_ID: 'host1',
- portbindings.PROFILE: {
- constants.PROFILE_TUNNEL_IP: '192.168.1.101',
- constants.PROFILE_AGENT_TYPE: 'Open vSwitch agent'}}
- }
- self.plugin.create_port(self.context, port_body)
- agent_state = copy.copy(helper.OVS_AGENT_DATA_TEMPLATE)
- agent_state['agent_type'] = 'Open vSwitch agent'
- agent_state['host'] = 'host1'
- agent_state['configurations']['tunneling_ip'] = '192.168.1.101'
- mock_agent.assert_called_once_with(self.context, agent_state)
-
- # core plugin supports "agent" extension but body doesn't contain
- # tunnel ip
- port_body = {
- 'port': {'network_id': t_net['id'],
- 'fixed_ips': q_constants.ATTR_NOT_SPECIFIED,
- 'security_groups': []}
- }
- self.plugin.create_port(self.context, port_body)
-
- # core plugin doesn't support "agent" extension but body contains
- # tunnel ip
- FakeCorePlugin.supported_extension_aliases = []
- port_body = {
- 'port': {'network_id': t_net['id'],
- 'fixed_ips': q_constants.ATTR_NOT_SPECIFIED,
- 'security_groups': [],
- portbindings.HOST_ID: 'host1',
- portbindings.PROFILE: {
- constants.PROFILE_TUNNEL_IP: '192.168.1.101',
- constants.PROFILE_AGENT_TYPE: 'Open vSwitch agent'}}
- }
- self.plugin.create_port(self.context, port_body)
- FakeCorePlugin.supported_extension_aliases = ['agent']
-
- # create_or_update_agent is called only when core plugin supports
- # "agent" extension and body contains tunnel ip
- mock_agent.assert_has_calls([mock.call(self.context, agent_state)])
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_create_port_bulk(self):
- t_net, t_subnet, t_port, t_sg = self._prepare_resource()
- t_ports = []
- for i in (1, 2):
- t_vm_port = self._prepare_vm_port(t_net, t_subnet, i, [t_sg['id']])
- t_ports.append(t_vm_port)
- self.plugin.get_ports(self.context,
- {'id': [t_ports[0]['id'], t_ports[1]['id'],
- 'fake_port_id']})
- b_ports = []
- b_port1 = get_resource('port', False, t_ports[0]['id'])
- b_port1['device_owner'] = constants.DEVICE_OWNER_SHADOW
- b_port1['name'] = 'shadow_' + b_port1['id']
- b_ports.append({'port': b_port1})
- b_port2 = get_resource('port', False, t_ports[1]['id'])
- b_port2['device_owner'] = constants.DEVICE_OWNER_SUBPORT
- b_port2['device_id'] = b_port2['id']
- b_ports.append({'port': b_port2})
-
- t_vm_port = self._prepare_vm_port(t_net, t_subnet, 3, [t_sg['id']])
- t_vm_port['device_owner'] = None
- b_ports.append({'port': t_vm_port})
-
- ret_b_ports = self.plugin.create_port_bulk(
- self.context, {'ports': b_ports})
- self.assertEqual(len(ret_b_ports), 2)
- self.assertListEqual(b_ports, [{'port': b_port2}, {'port': t_vm_port}])
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- @patch.object(FakePlugin, '_ensure_trunk', new=mock.Mock)
- def test_get_port(self):
- t_net, t_subnet, t_port, _ = self._prepare_resource()
-
- t_vm_port = self._prepare_vm_port(t_net, t_subnet, 1)
- t_port = self.plugin.get_port(self.context, t_vm_port['id'])
- b_port = get_resource('port', False, t_port['id'])
- self.assertDictEqual(t_port, b_port)
-
- @patch.object(FakePlugin, '_ensure_trunk', new=mock.Mock)
- @patch.object(t_context, 'get_context_from_neutron_context')
- @patch.object(FakeNeutronHandle, 'handle_get')
- def test_get_port_notfound(self, mock_handle_get, mock_context):
- mock_context.return_value = self.context
- mock_context.return_value.auth_token = None
- self.assertRaises(q_exceptions.PortNotFound,
- self.plugin.get_port, self.context, 'fake_port_id')
-
- mock_context.return_value.auth_token = 'fake_auth_token'
- mock_handle_get.return_value = None
- self.assertRaises(q_exceptions.PortNotFound,
- self.plugin.get_port,
- self.context, uuidutils.generate_uuid())
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- @patch.object(plugin.TricirclePlugin, '_handle_security_group',
- new=mock.Mock)
- @patch.object(directory, 'get_plugin', new=fake_get_trunk_plugin)
- @patch.object(FakeTrunkPlugin, 'create_trunk')
- def test_get_port_trunk(self, mock_create_trunk):
- _, _, parent_port, _ = self._prepare_resource()
- _, _, subport, _ = self._prepare_resource()
- t_trunk_id = uuidutils.generate_uuid()
- parent_port['trunk_details'] = {'trunk_id': t_trunk_id,
- 'sub_ports': [
- {"segmentation_type": "vlan",
- "port_id": subport['id'],
- "segmentation_id": 100}]}
- t_trunk = {
- 'id': t_trunk_id,
- 'name': 'top_trunk_1',
- 'status': 'DOWN',
- 'description': 'created',
- 'admin_state_up': True,
- 'port_id': parent_port['id'],
- 'sub_ports': []
- }
- TOP_TRUNKS.append(t_trunk)
-
- self.plugin.get_port(self.context, parent_port['id'])
- mock_create_trunk.assert_called_once_with(self.context,
- {'trunk': t_trunk})
-
- @patch.object(t_context, 'get_context_from_neutron_context')
- @patch.object(FakeCorePlugin, 'get_ports')
- def test_get_ports(self, mock_get_ports, mock_context):
- mock_context.return_value = self.context
- t_net, t_subnet, t_port, t_sg = self._prepare_resource()
- t_ports = []
- for i in (1, 2):
- t_vm_port = self._prepare_vm_port(t_net, t_subnet, i, [t_sg['id']])
- t_ports.append(t_vm_port)
- self.plugin.get_ports(self.context,
- {'id': [t_ports[0]['id'], t_ports[1]['id'],
- 'fake_port_id']})
- for i in (0, 1):
- b_port = get_resource('port', False, t_ports[i]['id'])
- b_port.pop('project_id')
- self.assertDictEqual(t_ports[i], b_port)
-
- self.plugin.get_ports(self.context)
- mock_get_ports.assert_called_with(self.context,
- None, None, None, None, None, False)
-
- mock_get_ports.return_value = t_ports
- b_ports = self.plugin.get_ports(
- self.context, {'id': [t_ports[0]['id'], t_ports[1]['id']]})
- self.assertEqual(len(b_ports), 2)
-
- mock_context.return_value.auth_token = None
- b_ports = self.plugin.get_ports(
- self.context, {'id': [t_ports[0]['id'], t_ports[1]['id'],
- 'fake_port_id']})
- self.assertEqual(len(b_ports), 2)
-
- @patch.object(t_context, 'get_context_from_neutron_context')
- @patch.object(FakeNeutronHandle, 'handle_update')
- def test_update_port(self, mock_update, mock_context):
- t_net, t_subnet, _, _ = self._prepare_resource()
- b_net = self.plugin.get_network(self.context, t_net['id'])
- cfg.CONF.set_override('region_name', 'Pod1', 'nova')
- mock_context.return_value = self.context
- port_id = 'fake_port_id'
- host_id = 'fake_host'
- fake_port = {
- 'id': port_id,
- 'network_id': b_net['id'],
- 'binding:vif_type': 'fake_vif_type',
- 'binding:host_id': host_id,
- portbindings.VIF_DETAILS: {},
- portbindings.VNIC_TYPE: 'normal'
- }
- fake_agent = {
- 'agent_type': 'Open vSwitch agent',
- 'host': host_id,
- 'configurations': {
- 'tunneling_ip': '192.168.1.101'}}
- create_resource('port', False, fake_port)
- create_resource('agent', False, fake_agent)
- update_body = {'port': {'device_owner': 'compute:None',
- 'binding:host_id': host_id}}
-
- self.plugin.update_port(self.context, port_id, update_body)
- # network is not vxlan type
- mock_update.assert_called_with(
- self.context, 'port', port_id,
- {'port': {'binding:profile': {'region': 'Pod1',
- 'device': 'compute:None',
- 'binding:vif_type': 'fake_vif_type',
- 'binding:host_id': host_id,
- portbindings.VIF_DETAILS: {},
- portbindings.VNIC_TYPE: 'normal'}}})
-
- # update network type from vlan to vxlan
- update_resource('network', False, b_net['id'],
- {'provider:network_type': 'vxlan'})
-
- self.plugin.update_port(self.context, port_id, update_body)
- # port vif type is not recognized
- mock_update.assert_called_with(
- self.context, 'port', port_id,
- {'port': {'binding:profile': {'region': 'Pod1',
- 'device': 'compute:None',
- 'binding:vif_type': 'fake_vif_type',
- 'binding:host_id': host_id,
- portbindings.VIF_DETAILS: {},
- portbindings.VNIC_TYPE: 'normal'}}})
-
-        # update port vif type from fake_vif_type to ovs
- update_resource('port', False, port_id,
- {'binding:vif_type': 'ovs'})
-
- self.plugin.update_port(self.context, port_id,
- {'port': {'device_owner': 'compute:None',
- 'binding:host_id': 'fake_another_host'}})
-        # agent on the specified host is not found
- mock_update.assert_called_with(
- self.context, 'port', port_id,
- {'port': {'binding:profile': {'region': 'Pod1',
- 'device': 'compute:None',
- 'binding:vif_type': 'ovs',
- 'binding:host_id':
- 'fake_another_host',
- portbindings.VIF_DETAILS: {},
- portbindings.VNIC_TYPE: 'normal'}}})
-
- self.plugin.update_port(self.context, port_id, update_body)
- # default p2p mode, update with agent host tunnel ip
- mock_update.assert_called_with(
- self.context, 'port', port_id,
- {'port': {'binding:profile': {'region': 'Pod1',
- 'tunnel_ip': '192.168.1.101',
- 'type': 'Open vSwitch agent',
- 'host': host_id,
- 'device': 'compute:None',
- 'binding:vif_type': 'ovs',
- 'binding:host_id': host_id,
- portbindings.VIF_DETAILS: {},
- portbindings.VNIC_TYPE: 'normal'}}})
-
- cfg.CONF.set_override('cross_pod_vxlan_mode', 'l2gw', 'client')
- cfg.CONF.set_override('l2gw_tunnel_ip', '192.168.1.105', 'tricircle')
- update_body = {'port': {'device_owner': 'compute:None',
- 'binding:host_id': host_id}}
- self.plugin.update_port(self.context, port_id, update_body)
- # l2gw mode, update with configured l2 gateway tunnel ip
- mock_update.assert_called_with(
- self.context, 'port', port_id,
- {'port': {'binding:profile': {'region': 'Pod1',
- 'tunnel_ip': '192.168.1.105',
- 'type': 'Open vSwitch agent',
- 'host': 'fake_host',
- 'device': 'compute:None',
- 'binding:vif_type': 'ovs',
- 'binding:host_id': host_id,
- portbindings.VIF_DETAILS: {},
- portbindings.VNIC_TYPE: 'normal'}}})
-
- cfg.CONF.set_override('l2gw_tunnel_ip', None, 'tricircle')
- cfg.CONF.set_override('cross_pod_vxlan_mode', 'l2gw', 'client')
- self.plugin.update_port(self.context, port_id, update_body)
- # l2gw mode, but l2 gateway tunnel ip is not configured
- mock_update.assert_called_with(
- self.context, 'port', port_id,
- {'port': {'binding:profile': {'region': 'Pod1',
- 'device': 'compute:None',
- 'binding:vif_type': 'ovs',
- 'binding:host_id': host_id,
- portbindings.VIF_DETAILS: {},
- portbindings.VNIC_TYPE: 'normal'}}})
-
- cfg.CONF.set_override('cross_pod_vxlan_mode', 'noop', 'client')
- self.plugin.update_port(self.context, port_id, update_body)
- # noop mode
- mock_update.assert_called_with(
- self.context, 'port', port_id,
- {'port': {'binding:profile': {'region': 'Pod1',
- 'device': 'compute:None',
- 'binding:vif_type': 'ovs',
- 'binding:host_id': host_id,
- portbindings.VIF_DETAILS: {},
- portbindings.VNIC_TYPE: 'normal'}}})
-
- FakeCorePlugin.supported_extension_aliases = []
- self.plugin.update_port(self.context, port_id, update_body)
- # core plugin doesn't support "agent" extension
- mock_update.assert_called_with(
- self.context, 'port', port_id,
- {'port': {'binding:profile': {'region': 'Pod1',
- 'device': 'compute:None',
- 'binding:vif_type': 'ovs',
- 'binding:host_id': host_id,
- portbindings.VIF_DETAILS: {},
- portbindings.VNIC_TYPE: 'normal'}}})
- FakeCorePlugin.supported_extension_aliases = ['agent']
-
- self.plugin.update_port(self.context, port_id,
- {'port': {portbindings.PROFILE: {
- constants.PROFILE_FORCE_UP: True}}})
- b_port = get_resource('port', False, port_id)
-        # port status is updated to active
- self.assertEqual(q_constants.PORT_STATUS_ACTIVE, b_port['status'])
-
- @patch.object(t_context, 'get_context_from_neutron_context')
- def test_delete_port(self, mock_context):
- mock_context.return_value = self.context
- t_net, _, t_port, _ = self._prepare_resource()
-
- port = {
- 'port': {'network_id': t_net['id'],
- 'fixed_ips': q_constants.ATTR_NOT_SPECIFIED,
- 'device_owner': q_constants.DEVICE_OWNER_ROUTER_SNAT,
- 'name': 'test-port',
- 'security_groups': []}
- }
- b_port = self.plugin.create_port(self.context, port)
- b_port_valid = get_resource('port', False, b_port['id'])
- self.assertEqual(b_port_valid['id'], b_port['id'])
-
- self.plugin.delete_port(self.context, b_port['id'])
- self.assertRaises(q_exceptions.NotFound,
- get_resource, 'port', False, b_port['id'])
-
- port = {
- 'port': {'network_id': t_net['id'],
- 'fixed_ips': q_constants.ATTR_NOT_SPECIFIED,
- 'device_owner': q_constants.DEVICE_OWNER_COMPUTE_PREFIX,
- 'name': 'test-port',
- 'security_groups': []}
- }
- b_port = self.plugin.create_port(self.context, port)
- b_port_valid = get_resource('port', False, b_port['id'])
- self.assertEqual(b_port_valid['id'], b_port['id'])
- t_port = get_resource('port', True, b_port['id'])
- self.assertEqual(b_port['id'], t_port['id'])
-
- self.plugin.delete_port(self.context, b_port['id'])
- self.assertRaises(q_exceptions.NotFound,
- get_resource, 'port', False, b_port['id'])
- self.assertRaises(q_exceptions.NotFound,
- get_resource, 'port', True, t_port['id'])
-
- @patch.object(t_context, 'get_context_from_neutron_context')
- def test_update_subnet(self, mock_context):
- _, t_subnet, t_port, _ = self._prepare_resource(enable_dhcp=False)
- mock_context.return_value = self.context
- subnet = {
- 'subnet': {'enable_dhcp': 'True'}
- }
- subnet_id = t_subnet['id']
- port_id = t_port['id']
- self.plugin.get_subnet(self.context, subnet_id)
- self.plugin.update_subnet(self.context, subnet_id, subnet)
- b_port = get_resource('port', False, port_id)
- self.assertEqual(b_port['device_owner'], 'network:dhcp')
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_delete_subnet(self):
- t_net, t_subnet, t_port, _ = self._prepare_resource(enable_dhcp=False)
- self.plugin.get_network(self.context, t_net['id'])
- self.plugin.delete_subnet(self.context, t_subnet['id'])
- self.assertRaises(q_exceptions.NotFound,
- get_resource, 'subnet', False, t_subnet['id'])
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test__handle_security_group(self):
- t_ctx = t_context.get_db_context()
-
- port = {'security_groups': q_constants.ATTR_NOT_SPECIFIED}
- self.plugin._handle_security_group(t_ctx, self.context, port)
- b_sgs = list_resource('security_group', False)
- self.assertListEqual(b_sgs, [])
-
- port = {'security_groups': []}
- self.plugin._handle_security_group(t_ctx, self.context, port)
- b_sgs = list_resource('security_group', False)
- self.assertEqual(b_sgs, [])
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- @patch.object(FakeNeutronHandle, 'handle_get')
- def test_get_security_group(self, mock_handle_get):
- sg_id = uuidutils.generate_uuid()
- mock_handle_get.return_value = None
- self.assertRaises(ext_sg.SecurityGroupNotFound,
- self.plugin.get_security_group,
- self.context, sg_id)
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- @patch.object(FakeCorePlugin, 'get_security_groups')
- def test_get_security_groups_mock(self, mock_get_sgs):
- _, _, _, t_sg1 = self._prepare_resource()
- _, _, _, t_sg2 = self._prepare_resource()
- self.plugin.get_security_groups(self.context)
- mock_get_sgs.assert_called_with(self.context,
- None, None, None, None, None,
- False, False)
-
- @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
- def test_get_security_groups(self):
- _, _, _, t_sg1 = self._prepare_resource()
- _, _, _, t_sg2 = self._prepare_resource()
- self.plugin.get_security_groups(self.context,
- {'id': [t_sg1['id'], t_sg2['id'],
- 'fake_sg_id']})
- b_sg = get_resource('security_group', False, t_sg1['id'])
- self.assertEqual(b_sg['id'], t_sg1['id'])
- b_sg = get_resource('security_group', False, t_sg2['id'])
- self.assertEqual(b_sg['id'], t_sg2['id'])
-
- b_sgs = self.plugin.get_security_groups(self.context,
- {'id': [t_sg1['id'],
- t_sg2['id']]})
- self.assertEqual(len(b_sgs), 2)
-
- def tearDown(self):
- cfg.CONF.unregister_opts(q_config.core_opts)
- test_utils.get_resource_store().clean()
diff --git a/tricircle/tests/unit/network/test_qos.py b/tricircle/tests/unit/network/test_qos.py
deleted file mode 100644
index b91e2b34..00000000
--- a/tricircle/tests/unit/network/test_qos.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.objects.qos import rule
-from oslo_utils import uuidutils
-
-from tricircle.common import constants
-from tricircle.db import api as db_api
-
-
-class TricircleQosTestMixin(object):
- def _test_create_policy(self, plugin, q_ctx, t_ctx):
-        project_id = 'test_project_id'
- t_policy = {
- 'policy': {
- 'name': 'test_qos',
- 'description': 'This policy limits the ports to 10Mbit max.',
- 'project_id': project_id
- }
- }
-
- res = plugin.create_policy(q_ctx, t_policy)
- res1 = plugin.get_policy(q_ctx, res['id'])
-
- self.assertEqual('test_qos', res['name'])
- self.assertEqual(res1['id'], res['id'])
- self.assertEqual(res1['name'], res['name'])
-        self.assertEqual(res1['description'], res['description'])
-
- def _test_update_policy(self, plugin, q_ctx, t_ctx,
- pod_id, bottom_policy):
-        project_id = 'test_project_id'
- t_policy = {
- 'policy': {
- 'name': 'test_qos',
- 'description': 'This policy limits the ports to 10Mbit max.',
- 'project_id': project_id
- }
- }
-
- res = plugin.create_policy(q_ctx, t_policy)
-
- updated_qos = {
- 'policy': {
- 'name': 'test_updated_qos'
- }
- }
-
- updated_res = plugin.update_policy(q_ctx, res['id'], updated_qos)
- self.assertEqual(res['id'], updated_res['id'])
- self.assertEqual('test_updated_qos', updated_res['name'])
-
- b_policy_id = uuidutils.generate_uuid()
- b_policy = {
- 'id': b_policy_id, 'name': b_policy_id, 'description': '',
- 'tenant_id': project_id
- }
- bottom_policy.append(b_policy)
- db_api.create_resource_mapping(t_ctx, res['id'], b_policy_id,
- pod_id, project_id, constants.RT_QOS)
-
- updated_qos = {
- 'policy': {
- 'name': 'test_policy'
- }
- }
-
- updated_res = plugin.update_policy(q_ctx, res['id'], updated_qos)
- self.assertEqual('test_policy', updated_res['name'])
- self.assertEqual('test_policy', bottom_policy[0]['name'])
-
- def _test_delete_policy(self, plugin, q_ctx,
- t_ctx, pod_id, bottom_policy):
-        project_id = 'test_project_id'
- t_policy = {
- 'policy': {
- 'name': 'test_qos',
- 'description': 'This policy limits the ports to 10Mbit max.',
- 'project_id': project_id
- }
- }
-
- res = plugin.create_policy(q_ctx, t_policy)
- b_policy_id = uuidutils.generate_uuid()
- b_policy = {
- 'id': b_policy_id, 'name': b_policy_id, 'description': '',
- 'tenant_id': project_id
- }
- bottom_policy.append(b_policy)
- db_api.create_resource_mapping(t_ctx, res['id'], b_policy_id,
- pod_id, project_id, constants.RT_QOS)
-
- self.assertEqual(1, len(bottom_policy))
- plugin.delete_policy(q_ctx, res['id'])
- self.assertEqual(0, len(bottom_policy))
-
- def _test_create_policy_rule(self, plugin, q_ctx,
- t_ctx, pod_id, bottom_policy):
-        project_id = 'test_project_id'
- t_policy = {
- 'policy': {
- 'name': 'test_qos',
- 'description': 'This policy limits the ports to 10Mbit max.',
- 'project_id': project_id
- }
- }
-
- res = plugin.create_policy(q_ctx, t_policy)
-
- rule_data = {
- "bandwidth_limit_rule": {
- "max_kbps": "10000"
- }
- }
-
- t_rule = plugin.create_policy_rule(
- q_ctx, rule.QosBandwidthLimitRule, res['id'], rule_data)
- res1 = plugin.get_policy(q_ctx, res['id'])
-
- self.assertEqual(1, len(res1['rules']))
- self.assertEqual(t_rule['id'], res1['rules'][0]['id'])
-
- b_policy_id = uuidutils.generate_uuid()
- b_policy = {'id': b_policy_id, 'name': b_policy_id, 'description': '',
- 'tenant_id': project_id, 'rules': []}
- bottom_policy.append(b_policy)
- db_api.create_resource_mapping(t_ctx, res['id'], b_policy_id,
- pod_id, project_id, constants.RT_QOS)
-
- def _test_delete_policy_rule(self, plugin, q_ctx,
- t_ctx, pod_id, bottom_policy):
-        project_id = 'test_project_id'
- t_policy = {
- 'policy': {
- 'name': 'test_qos',
- 'description': 'This policy limits the ports to 10Mbit max.',
- 'project_id': project_id
- }
- }
-
- res = plugin.create_policy(q_ctx, t_policy)
-
- b_policy_id = uuidutils.generate_uuid()
- b_policy = {
- 'id': b_policy_id, 'name': b_policy_id, 'description': '',
- 'tenant_id': project_id, 'rules': []
- }
- bottom_policy.append(b_policy)
- db_api.create_resource_mapping(t_ctx, res['id'], b_policy_id,
- pod_id, project_id, constants.RT_QOS)
-
- rule_data = {
- "bandwidth_limit_rule": {
- "max_kbps": "10000"
- }
- }
-
- res1 = plugin.create_policy_rule(
- q_ctx, rule.QosBandwidthLimitRule, res['id'], rule_data)
-
- self.assertEqual(1, len(bottom_policy[0]['rules']))
- b_rule = bottom_policy[0]['rules'][0]
- self.assertEqual(b_policy_id, b_rule['qos_policy_id'])
-
- plugin.delete_policy_rule(
- q_ctx, rule.QosBandwidthLimitRule, res1['id'], res['id'])
- self.assertEqual(0, len(bottom_policy[0]['rules']))
-
-    def _create_policy_in_top(self, plugin, q_ctx, t_ctx,
-                               pod_id, bottom_policy):
-        project_id = 'test_project_id'
- t_policy = {
- 'policy': {
- 'name': 'test_qos',
- 'description': 'This policy limits the ports to 10Mbit max.',
- 'project_id': project_id,
- }
- }
-
- return plugin.create_policy(q_ctx, t_policy)
-
- def _test_update_network_with_qos_policy(self, plugin, client, q_ctx,
- t_ctx, pod_id, t_net_id,
- bottom_policy):
-        res = self._create_policy_in_top(plugin, q_ctx, t_ctx,
-                                         pod_id, bottom_policy)
-
- update_body = {
- 'network': {
- 'qos_policy_id': res['id']}
- }
- top_net = plugin.update_network(q_ctx, t_net_id, update_body)
- self.assertEqual(top_net['qos_policy_id'], res['id'])
-
- route_res = \
- db_api.get_bottom_mappings_by_top_id(t_ctx, res['id'],
- constants.RT_QOS)
- bottom_net = client.get_networks(q_ctx, t_net_id)
- self.assertEqual(bottom_net['qos_policy_id'], route_res[0][1])
-
- def _test_update_port_with_qos_policy(self, plugin, client, q_ctx,
- t_ctx, pod_id, t_port_id,
- b_port_id, bottom_policy):
-        res = self._create_policy_in_top(plugin, q_ctx, t_ctx,
-                                         pod_id, bottom_policy)
-
- update_body = {
- 'port': {
- 'qos_policy_id': res['id']}
- }
- top_port = plugin.update_port(q_ctx, t_port_id, update_body)
- self.assertEqual(top_port['qos_policy_id'], res['id'])
-
- route_res = \
- db_api.get_bottom_mappings_by_top_id(t_ctx, res['id'],
- constants.RT_QOS)
- bottom_port = client.get_ports(q_ctx, b_port_id)
- self.assertEqual(bottom_port['qos_policy_id'], route_res[0][1])
diff --git a/tricircle/tests/unit/network/test_security_groups.py b/tricircle/tests/unit/network/test_security_groups.py
deleted file mode 100644
index dc043e29..00000000
--- a/tricircle/tests/unit/network/test_security_groups.py
+++ /dev/null
@@ -1,357 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.extensions import securitygroup as ext_sg
-from oslo_utils import uuidutils
-
-from tricircle.common import constants
-import tricircle.common.constants as t_constants
-import tricircle.common.exceptions as t_exceptions
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.network import exceptions
-
-
-class TricircleSecurityGroupTestMixin(object):
-
- @staticmethod
- def _build_test_rule(_id, sg_id, project_id, ip_prefix, remote_group=None):
- return {'security_group_id': sg_id,
- 'id': _id,
- 'tenant_id': project_id,
- 'project_id': project_id,
- 'remote_group_id': remote_group,
- 'direction': 'ingress',
- 'remote_ip_prefix': ip_prefix,
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'}
-
- @staticmethod
- def _compare_rule(rule1, rule2):
- for key in ('direction', 'remote_ip_prefix', 'protocol', 'ethertype',
- 'port_range_max', 'port_range_min'):
- if rule1[key] != rule2[key] and str(rule1[key]) != str(rule2[key]):
- return False
- return True
-
- def _test_create_security_group_rule(self, plugin, q_ctx, t_ctx, pod_id,
- top_sgs, bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- top_sgs.append(t_sg)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- rule = {
- 'security_group_rule': self._build_test_rule(
- t_rule_id, t_sg_id, project_id, '10.0.0.0/24')}
- plugin.create_security_group_rule(q_ctx, rule)
-
- self.assertEqual(1, len(bottom1_sgs[0]['security_group_rules']))
- b_rule = bottom1_sgs[0]['security_group_rules'][0]
- self.assertEqual(b_sg_id, b_rule['security_group_id'])
-
- def _test_delete_security_group_rule(self, plugin, q_ctx, t_ctx, pod_id,
- top_sgs, top_rules, bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule1_id = uuidutils.generate_uuid()
- t_rule2_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule1 = self._build_test_rule(
- t_rule1_id, t_sg_id, project_id, '10.0.1.0/24')
- t_rule2 = self._build_test_rule(
- t_rule2_id, t_sg_id, project_id, '10.0.2.0/24')
- b_rule1 = self._build_test_rule(
- t_rule1_id, b_sg_id, project_id, '10.0.1.0/24')
- b_rule2 = self._build_test_rule(
- t_rule2_id, b_sg_id, project_id, '10.0.2.0/24')
- t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule1, t_rule2]}
- b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [b_rule1, b_rule2]}
- top_sgs.append(t_sg)
- top_rules.append(t_rule1)
- top_rules.append(t_rule2)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- plugin.delete_security_group_rule(q_ctx, t_rule1_id)
-
- self.assertEqual(1, len(bottom1_sgs[0]['security_group_rules']))
- b_rule = bottom1_sgs[0]['security_group_rules'][0]
- self.assertEqual(b_sg_id, b_rule['security_group_id'])
- t_rule2.pop('security_group_id', None)
- b_rule.pop('security_group_id', None)
- self.assertEqual(t_rule2, b_rule)
-
- def _test_handle_remote_group_invalid_input(self, plugin, q_ctx, t_ctx,
- pod_id, top_sgs, top_rules,
- bottom1_sgs):
- t_sg1_id = uuidutils.generate_uuid()
- t_sg2_id = uuidutils.generate_uuid()
- t_rule1_id = uuidutils.generate_uuid()
- t_rule2_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule1 = self._build_test_rule(
- t_rule1_id, t_sg1_id, project_id, None, t_sg1_id)
- t_rule2 = self._build_test_rule(
- t_rule2_id, t_sg1_id, project_id, None, t_sg2_id)
- t_sg = {'id': t_sg1_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- b_sg = {'id': b_sg_id, 'name': t_sg1_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- top_sgs.append(t_sg)
- top_rules.append(t_rule1)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg1_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- self.assertRaises(exceptions.RemoteGroupNotSupported,
- plugin.create_security_group_rule, q_ctx,
- {'security_group_rule': t_rule2})
- self.assertRaises(exceptions.RemoteGroupNotSupported,
- plugin.delete_security_group_rule, q_ctx, t_rule1_id)
-
- def _test_create_security_group_rule_exception(
- self, plugin, q_ctx, t_ctx, pod_id, top_sgs, bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- top_sgs.append(t_sg)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- rule = {
- 'security_group_rule': self._build_test_rule(
- t_rule_id, t_sg_id, project_id, '10.0.0.0/24')}
- self.assertRaises(exceptions.BottomPodOperationFailure,
- plugin.create_security_group_rule, q_ctx, rule)
-
- def _test_delete_security_group_rule_exception(self, plugin, q_ctx, t_ctx,
- pod_id, top_sgs, top_rules,
- bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule = self._build_test_rule(
- t_rule_id, t_sg_id, project_id, '10.0.1.0/24')
- b_rule = self._build_test_rule(
- t_rule_id, b_sg_id, project_id, '10.0.1.0/24')
- t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule]}
- b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [b_rule]}
- top_sgs.append(t_sg)
- top_rules.append(t_rule)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- self.assertRaises(exceptions.BottomPodOperationFailure,
- plugin.delete_security_group_rule, q_ctx, t_rule_id)
-
- def _test_update_default_sg(self, plugin, q_ctx, t_ctx,
- pod_id, top_sgs, top_rules,
- bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule1_id = uuidutils.generate_uuid()
- t_rule2_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule1 = self._build_test_rule(
- t_rule1_id, t_sg_id, project_id, '10.0.0.0/24')
- t_sg = {'id': t_sg_id, 'name': 'default', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule1]}
- b_sg = {'id': b_sg_id, 'name': 'default', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- top_sgs.append(t_sg)
- top_rules.append(t_rule1)
- bottom1_sgs.append(b_sg)
- route1 = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route1)
-
- t_rule2 = {
- 'security_group_rule': self._build_test_rule(
- t_rule2_id, t_sg_id, project_id, '10.0.1.0/24')}
- plugin.create_security_group_rule(q_ctx, t_rule2)
- self.assertEqual(len(top_sgs[0]['security_group_rules']),
- len(bottom1_sgs[0]['security_group_rules']))
-
- for i in range(len(bottom1_sgs[0]['security_group_rules'])):
- self.assertTrue(self._compare_rule(
- bottom1_sgs[0]['security_group_rules'][i],
- top_sgs[0]['security_group_rules'][i]))
-
- plugin.delete_security_group_rule(q_ctx, t_rule1_id)
- self.assertEqual(len(bottom1_sgs[0]['security_group_rules']),
- len(top_sgs[0]['security_group_rules']))
-
- for i in range(len(bottom1_sgs[0]['security_group_rules'])):
- self.assertTrue(self._compare_rule(
- bottom1_sgs[0]['security_group_rules'][i],
- top_sgs[0]['security_group_rules'][i]))
-
- def _test_get_security_group(self, plugin, q_ctx, t_ctx,
- pod_id, top_sgs, bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule1_id = uuidutils.generate_uuid()
- t_rule2_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule1 = self._build_test_rule(
- t_rule1_id, t_sg_id, project_id, '10.0.0.0/24')
- t_rule2 = self._build_test_rule(
- t_rule2_id, t_sg_id, project_id, '192.168.56.0/24')
- t_sg = {'id': t_sg_id, 'name': 'top_sg', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule1, t_rule2]}
- b_sg = {'id': b_sg_id, 'name': 'bottom_sg', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule1, t_rule2]}
- top_sgs.append(t_sg)
- bottom1_sgs.append(b_sg)
-
- route1 = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route1)
-
-        # test get_sg in the normal case
- res = plugin.get_security_group(q_ctx, t_sg_id)
- self.assertTrue(res['id'] == t_sg_id and res['name'] == 'top_sg')
-
-        # test get_sg when the top_sg is being deleted
- dict_para = {'resource_id': t_sg_id,
- 'resource_type': t_constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.DeletingResources,
- dict_para)
-
- q_ctx.USER_AGENT = t_constants.LOCAL
- self.assertRaises(t_exceptions.ResourceNotFound,
- plugin.get_security_group,
- q_ctx, t_sg_id)
-
- # test get_sg when the request is from user_agent
- q_ctx.USER_AGENT = t_constants.USER_AGENT
- self.assertRaises(t_exceptions.ResourceIsInDeleting,
- plugin.get_security_group,
- q_ctx, t_sg_id)
-
- def _test_delete_security_group(self, plugin, q_ctx, t_ctx,
- pod_id, top_sgs, bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule1_id = uuidutils.generate_uuid()
- t_rule2_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule1 = self._build_test_rule(
- t_rule1_id, t_sg_id, project_id, '10.0.0.0/24')
- t_rule2 = self._build_test_rule(
- t_rule2_id, t_sg_id, project_id, '192.168.56.0/24')
- t_sg = {'id': t_sg_id, 'name': 'top_sg', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule1, t_rule2]}
- b_sg = {'id': b_sg_id, 'name': 'bottom_sg', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule1, t_rule2]}
- top_sgs.append(t_sg)
- bottom1_sgs.append(b_sg)
-
- route1 = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route1)
-
-        # test delete_sg when the sg does not exist
- rand_id = uuidutils.generate_uuid()
- self.assertRaises(ext_sg.SecurityGroupNotFound,
- plugin.delete_security_group, q_ctx, rand_id)
-        # test delete_sg when the sg is being deleted from local Neutron
- dict_para = {'resource_id': t_sg_id,
- 'resource_type': t_constants.RT_SG}
- q_ctx.USER_AGENT = t_constants.LOCAL
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.DeletingResources,
- dict_para)
- self.assertRaises(t_exceptions.ResourceNotFound,
- plugin.delete_security_group, q_ctx, t_sg_id)
diff --git a/tricircle/tests/unit/network/test_segment_plugin.py b/tricircle/tests/unit/network/test_segment_plugin.py
deleted file mode 100644
index 29780589..00000000
--- a/tricircle/tests/unit/network/test_segment_plugin.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2018 Huazhong University of Science and Technology.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from mock import patch
-import unittest
-
-from neutron_lib.api.definitions import provider_net
-from neutron_lib.plugins import constants as plugin_constants
-from neutron_lib.plugins import directory
-
-import neutron.conf.common as q_config
-from neutron.extensions import segment as extension
-from neutron.plugins.ml2 import managers as n_managers
-from neutron.services.segments import exceptions as sg_excp
-from oslo_config import cfg
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-
-from tricircle.common import context
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-import tricircle.network.central_plugin as plugin
-from tricircle.network import helper
-from tricircle.network.segment_plugin import TricircleSegmentPlugin
-from tricircle.tests.unit.network.test_central_plugin import FakeClient as CFC
-from tricircle.tests.unit.network.test_central_plugin import FakePlugin as CFP
-
-import tricircle.tests.unit.utils as test_utils
-
-_resource_store = test_utils.get_resource_store()
-TOP_NETS = _resource_store.TOP_NETWORKS
-TOP_SUBNETS = _resource_store.TOP_SUBNETS
-TOP_PORTS = _resource_store.TOP_PORTS
-TOP_ROUTERS = _resource_store.TOP_ROUTERS
-TOP_SEGMENTS = _resource_store.TOP_NETWORKSEGMENTS
-BOTTOM1_NETS = _resource_store.BOTTOM1_NETWORKS
-BOTTOM1_SUBNETS = _resource_store.BOTTOM1_SUBNETS
-BOTTOM1_PORTS = _resource_store.BOTTOM1_PORTS
-TEST_TENANT_ID = test_utils.TEST_TENANT_ID
-FakeNeutronContext = test_utils.FakeNeutronContext
-
-
-class FakeClient(CFC):
- def __init__(self, region_name=None):
- super(FakeClient, self).__init__(region_name)
-
- def delete_segments(self, ctx, segment_id):
- self.delete_resources('segment', ctx, segment_id)
-
-
-class FakeExtensionManager(n_managers.ExtensionManager):
- def __init__(self):
- super(FakeExtensionManager, self).__init__()
-
-
-class FakeHelper(helper.NetworkHelper):
- def _get_client(self, region_name=None):
- return FakeClient(region_name)
-
-
-class FakeTrunkPlugin(object):
-
- def get_trunk_subports(self, context, filters):
- return None
-
-
-class FakePlugin(TricircleSegmentPlugin):
- def start_rpc_state_reports_listener(self):
- pass
-
- def __init__(self):
-
- self.type_manager = test_utils.FakeTypeManager()
- self.extension_manager = FakeExtensionManager()
- self.extension_manager.initialize()
- self.helper = FakeHelper(self)
- self.central_plugin = CFP()
-
- def _get_client(self, region_name):
- return FakeClient(region_name)
-
- @staticmethod
- def get_network_availability_zones(network):
- zones = network.get('availability_zone_hints') \
- if network.get('availability_zone_hints') else []
- return list(zones)
-
- def _make_network_dict(self, network, fields=None,
- process_extensions=True, context=None):
- network = _transform_az(network)
- if 'project_id' in network:
- network['tenant_id'] = network['project_id']
- return network
-
-
-def fake_get_client(region_name):
- return FakeClient(region_name)
-
-
-def fake_get_context_from_neutron_context(q_context):
- return context.get_db_context()
-
-
-def _transform_az(network):
- az_hints_key = 'availability_zone_hints'
- if az_hints_key in network:
- ret = test_utils.DotDict(network)
- az_str = network[az_hints_key]
- ret[az_hints_key] = jsonutils.loads(az_str) if az_str else []
- return ret
- return network
-
-
-def fake_delete_network(self, context, network_id):
- fake_client = FakeClient()
- fake_client.delete_networks(context, network_id)
-
-
-class PluginTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
-
- cfg.CONF.register_opts(q_config.core_opts)
- cfg.CONF.register_opts(plugin.tricircle_opts)
- cfg.CONF.set_override('enable_l3_route_network', True,
- group='tricircle')
- plugin_path = \
- 'tricircle.tests.unit.network.test_segment_plugin.FakePlugin'
- cfg.CONF.set_override('core_plugin', plugin_path)
- cfg.CONF.set_override('enable_api_gateway', True)
- self.context = context.Context()
-
- phynet = 'bridge'
- phynet2 = 'bridge2'
- vlan_min, vlan_max = 2000, 3000
- cfg.CONF.set_override('type_drivers', ['local', 'vlan'],
- group='tricircle')
- cfg.CONF.set_override('tenant_network_types', ['local', 'vlan'],
- group='tricircle')
- cfg.CONF.set_override('network_vlan_ranges',
- ['%s:%d:%d' % (phynet, vlan_min, vlan_max),
- '%s:%d:%d' % (phynet2, vlan_min, vlan_max)],
- group='tricircle')
- cfg.CONF.set_override('bridge_network_type', 'vlan',
- group='tricircle')
-
- def fake_get_plugin(alias=plugin_constants.CORE):
- return None
- directory.get_plugin = fake_get_plugin
-
- global segments_plugin
- segments_plugin = FakePlugin()
-
- def _basic_pod_route_setup(self):
- pod1 = {'pod_id': 'pod_id_1',
- 'region_name': 'pod_1',
- 'az_name': 'az_name_1'}
- pod2 = {'pod_id': 'pod_id_2',
- 'region_name': 'pod_2',
- 'az_name': 'az_name_2'}
- pod3 = {'pod_id': 'pod_id_0',
- 'region_name': 'top_pod',
- 'az_name': ''}
- for pod in (pod1, pod2, pod3):
- db_api.create_pod(self.context, pod)
- route1 = {
- 'top_id': 'top_id_1',
- 'pod_id': 'pod_id_1',
- 'bottom_id': 'bottom_id_1',
- 'resource_type': 'port'}
- route2 = {
- 'top_id': 'top_id_2',
- 'pod_id': 'pod_id_2',
- 'bottom_id': 'bottom_id_2',
- 'resource_type': 'port'}
- with self.context.session.begin():
- core.create_resource(self.context, models.ResourceRouting, route1)
- core.create_resource(self.context, models.ResourceRouting, route2)
-
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(TricircleSegmentPlugin, '_get_client',
- new=fake_get_client)
- @patch.object(plugin.TricirclePlugin, '_get_client',
- new=fake_get_client)
- def test_create_segment(self, mock_context):
- self._basic_pod_route_setup()
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- tricircle_context = context.get_db_context()
- mock_context.return_value = tricircle_context
-
- # create a routed network
- top_net_id = uuidutils.generate_uuid()
- network = {'network': {
- 'id': top_net_id, 'name': 'multisegment1',
- 'tenant_id': TEST_TENANT_ID,
- 'admin_state_up': True, 'shared': False,
- 'availability_zone_hints': [],
- provider_net.PHYSICAL_NETWORK: 'bridge',
- provider_net.NETWORK_TYPE: 'vlan',
- provider_net.SEGMENTATION_ID: '2016'}}
- fake_plugin.central_plugin.create_network(neutron_context, network)
- net_filter = {'name': ['multisegment1']}
- top_net = fake_plugin.central_plugin.get_networks(
- neutron_context, net_filter)
- self.assertEqual(top_net[0]['id'], top_net_id)
-
- res = fake_plugin.get_segments(neutron_context)
- self.assertEqual(len(res), 1)
-
-        # create a segment with a normal name
- segment2_id = uuidutils.generate_uuid()
- segment2_name = 'test-segment2'
- segment2 = {'segment': {
- 'id': segment2_id,
- 'name': segment2_name,
- 'network_id': top_net_id,
- extension.PHYSICAL_NETWORK: 'bridge2',
- extension.NETWORK_TYPE: 'flat',
- extension.SEGMENTATION_ID: '2016',
- 'tenant_id': TEST_TENANT_ID,
- 'description': None
- }}
- fake_plugin.create_segment(neutron_context, segment2)
- res = fake_plugin.get_segment(neutron_context, segment2_id)
- self.assertEqual(res['name'], segment2_name)
- net_filter = {'name': [segment2_name]}
- b_net = fake_plugin.central_plugin.get_networks(
- neutron_context, net_filter)
- self.assertFalse(b_net)
-
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(TricircleSegmentPlugin, '_get_client',
- new=fake_get_client)
- @patch.object(plugin.TricirclePlugin, '_get_client',
- new=fake_get_client)
- @patch.object(plugin.TricirclePlugin, 'delete_network',
- new=fake_delete_network)
- def test_delete_segment(self, mock_context):
- self._basic_pod_route_setup()
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- tricircle_context = context.get_db_context()
- mock_context.return_value = tricircle_context
-
- # create a routed network
- top_net_id = uuidutils.generate_uuid()
- network = {'network': {
- 'id': top_net_id, 'name': 'multisegment1',
- 'tenant_id': TEST_TENANT_ID,
- 'admin_state_up': True, 'shared': False,
- 'availability_zone_hints': [],
- provider_net.PHYSICAL_NETWORK: 'bridge',
- provider_net.NETWORK_TYPE: 'vlan',
- provider_net.SEGMENTATION_ID: '2016'}}
- fake_plugin.central_plugin.create_network(neutron_context, network)
-
- # create a normal segment
- segment2_id = uuidutils.generate_uuid()
- segment2_name = 'test-segment3'
- segment2 = {'segment': {
- 'id': segment2_id,
- 'name': segment2_name,
- 'network_id': top_net_id,
- extension.PHYSICAL_NETWORK: 'bridge2',
- extension.NETWORK_TYPE: 'flat',
- extension.SEGMENTATION_ID: '2016',
- 'tenant_id': TEST_TENANT_ID,
- 'description': None
- }}
- fake_plugin.create_segment(neutron_context, segment2)
-
- res = fake_plugin.get_segment(neutron_context, segment2_id)
- self.assertEqual(res['name'], segment2_name)
- net_filter = {'name': [segment2_name]}
- b_net = fake_plugin.central_plugin.get_networks(
- neutron_context, net_filter)
- self.assertFalse(b_net)
-
- # delete a normal segment
- fake_plugin.delete_segment(neutron_context, segment2_id)
- self.assertRaises(sg_excp.SegmentNotFound,
- fake_plugin.get_segment,
- neutron_context, segment2_id)
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- test_utils.get_resource_store().clean()
- cfg.CONF.unregister_opts(q_config.core_opts)
- cfg.CONF.unregister_opts(plugin.tricircle_opts)
diff --git a/tricircle/tests/unit/utils.py b/tricircle/tests/unit/utils.py
deleted file mode 100644
index 8a1a5bc9..00000000
--- a/tricircle/tests/unit/utils.py
+++ /dev/null
@@ -1,677 +0,0 @@
-# Copyright 2017 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from oslo_utils import uuidutils
-import six
-from sqlalchemy.orm import attributes
-from sqlalchemy.orm import exc
-from sqlalchemy.sql import elements
-import sqlalchemy.sql.expression as sql_expression
-from sqlalchemy.sql import selectable
-
-import neutron_lib.context as q_context
-import neutron_lib.objects.exceptions as q_obj_exceptions
-
-from tricircle.common import constants
-from tricircle.network.drivers import type_flat
-from tricircle.network.drivers import type_local
-from tricircle.network.drivers import type_vlan
-from tricircle.network.drivers import type_vxlan
-from tricircle.network import managers
-
-
-class ResourceStore(object):
- _resource_list = [('networks', constants.RT_NETWORK),
- ('subnets', constants.RT_SUBNET),
- ('ports', constants.RT_PORT),
- ('routers', constants.RT_ROUTER),
- ('routerports', None),
- ('ipallocations', None),
- ('subnetpools', None),
- ('subnetpoolprefixes', None),
- ('ml2_vlan_allocations', None),
- ('ml2_vxlan_allocations', None),
- ('ml2_flat_allocations', None),
- ('networksegments', None),
- ('externalnetworks', None),
- ('floatingips', constants.RT_FIP),
- ('securitygroups', constants.RT_SG),
- ('securitygrouprules', None),
- ('networkrbacs', None),
- ('subnetroutes', None),
- ('dnsnameservers', None),
- ('trunks', 'trunk'),
- ('subports', None),
- ('agents', 'agent'),
- ('sfc_port_pairs', constants.RT_PORT_PAIR),
- ('sfc_port_pair_groups', constants.RT_PORT_PAIR_GROUP),
- ('sfc_port_chains', constants.RT_PORT_CHAIN),
- ('sfc_flow_classifiers', constants.RT_FLOW_CLASSIFIER),
- ('sfc_chain_group_associations', None),
- ('sfc_chain_classifier_associations', None),
- ('qos_policies', constants.RT_QOS),
- ('qos_bandwidth_limit_rules',
- 'qos_bandwidth_limit_rules'),
- ('segments', None)]
-
- def __init__(self):
- self.store_list = []
- self.store_map = {}
- self.pod_store_map = {'top': {}, 'pod_1': {}, 'pod_2': {}}
- for prefix, pod in [('TOP', 'top'), ('BOTTOM1', 'pod_1'),
- ('BOTTOM2', 'pod_2')]:
- for table, resource in self._resource_list:
- store_name = '%s_%s' % (prefix, table.upper())
- setattr(self, store_name, [])
- store = getattr(self, store_name)
- self.store_list.append(store)
- if prefix == 'TOP':
- self.store_map[table] = store
- if resource:
- self.pod_store_map[pod][resource] = store
-
- def clean(self):
- for store in self.store_list:
- del store[:]
-
-
-TEST_TENANT_ID = 'test_tenant_id'
-_RESOURCE_STORE = None
-
-
-def get_resource_store():
- global _RESOURCE_STORE
- if not _RESOURCE_STORE:
- _RESOURCE_STORE = ResourceStore()
- return _RESOURCE_STORE
-
-
-class DotDict(dict):
- def __init__(self, normal_dict=None):
- if normal_dict:
- for key, value in six.iteritems(normal_dict):
- self[key] = value
-
- def __getattr__(self, item):
- dummy_value_map = {
- 'rbac_entries': [],
- 'segment_host_mapping': []
- }
- if item in dummy_value_map:
- return dummy_value_map[item]
- return self.get(item)
-
- def __setattr__(self, name, value):
- self[name] = value
-
- def to_dict(self):
- return self
-
- def __copy__(self):
- return DotDict(self)
-
- def bump_revision(self):
- pass
-
- def save(self, session=None):
- pass
-
- def update_fields(self, obj_data):
- for k, v in obj_data.items():
- if k in self:
- setattr(self, k, v)
-
-
-class DotList(list):
- def all(self):
- return self
-
-
-class FakeQuery(object):
- pk_map = {'ports': 'id'}
-
- def __init__(self, records, table, field=None):
- self.records = records
- self.table = table
- self.index = 0
- self.field = field
-
- def _handle_pagination_by_id(self, record_id):
- for i, record in enumerate(self.records):
- if record['id'] == record_id:
- if i + 1 < len(self.records):
- return FakeQuery(self.records[i + 1:], self.table)
- else:
- return FakeQuery([], self.table)
- return FakeQuery([], self.table, self.field)
-
- def _handle_filter(self, keys, values):
- filtered_list = []
- for record in self.records:
- selected = True
- for i, key in enumerate(keys):
- if key not in record or record[key] != values[i]:
- selected = False
- break
- if selected:
- filtered_list.append(record)
- return FakeQuery(filtered_list, self.table, self.field)
-
- def filter(self, *criteria):
- _filter = []
- keys = []
- values = []
- for e in criteria:
- if isinstance(e, sql_expression.BooleanClauseList):
- e = e.clauses[0]
- if not hasattr(e, 'right') and isinstance(e, elements.False_):
-                # the filter is a single False value; set the key to
-                # 'INVALID_FIELD' so that no records will be returned
- keys.append('INVALID_FIELD')
- values.append(False)
- elif hasattr(e, 'right') and not isinstance(e.right,
- elements.Null):
- _filter.append(e)
- elif isinstance(e, selectable.Exists):
- # handle external network filter
- expression = e.element.element._whereclause
- if hasattr(expression, 'right') and (
- expression.right.name == 'network_id'):
- keys.append('router:external')
- values.append(True)
- if not _filter:
- if not keys:
- return FakeQuery(self.records, self.table, self.field)
- else:
- return self._handle_filter(keys, values)
- if hasattr(_filter[0].right, 'value'):
- keys.extend([f.left.name for f in _filter])
- values.extend([f.right.value for f in _filter])
- else:
- keys.extend([f.expression.left.name for f in _filter])
- values.extend(
- [f.expression.right.element.clauses[0].value for f in _filter])
- if _filter[0].expression.operator.__name__ == 'lt':
- return self._handle_pagination_by_id(values[0])
- else:
- return self._handle_filter(keys, values)
-
- def filter_by(self, **kwargs):
- filtered_list = []
- for record in self.records:
- selected = True
- for key, value in six.iteritems(kwargs):
- if key not in record or record[key] != value:
- selected = False
- break
- if selected:
- filtered_list.append(record)
- return FakeQuery(filtered_list, self.table, self.field)
-
- def get(self, pk):
- pk_field = self.pk_map[self.table]
- for record in self.records:
- if record.get(pk_field) == pk:
- return record
-
- def delete(self, synchronize_session=False):
- pass
-
- def outerjoin(self, *props, **kwargs):
- return FakeQuery(self.records, self.table, self.field)
-
- def join(self, *props, **kwargs):
- return FakeQuery(self.records, self.table, self.field)
-
- def order_by(self, func):
- self.records.sort(key=lambda x: x['id'])
- return FakeQuery(self.records, self.table, self.field)
-
- def enable_eagerloads(self, value):
- return FakeQuery(self.records, self.table, self.field)
-
- def limit(self, limit):
- return FakeQuery(self.records[:limit], self.table, self.field)
-
- def next(self):
- if self.index >= len(self.records):
- raise StopIteration
- self.index += 1
- record = self.records[self.index - 1]
- # populate integer indices
- i = 0
- for key, value in list(record.items()):
- if key == self.field:
- record[i] = value
- i += 1
- return record
-
- __next__ = next
-
- def one(self):
- if len(self.records) == 0:
- raise exc.NoResultFound()
- return self.records[0]
-
- def first(self):
- if len(self.records) == 0:
- return None
- else:
- return self.records[0]
-
- def update(self, values):
- for record in self.records:
- for key, value in six.iteritems(values):
- record[key] = value
- return len(self.records)
-
- def all(self):
- return self.records
-
- def count(self):
- return len(self.records)
-
- def __iter__(self):
- return self
-
-
-def delete_model(res_list, model_obj, key=None):
- if not res_list:
- return
- if not key:
- key = 'id'
- if key not in res_list[0] or \
- key not in model_obj:
- return
- index = -1
- for i, res in enumerate(res_list):
- if res[key] == model_obj[key]:
- index = i
- break
- if index != -1:
- del res_list[index]
- return
-
-
-def link_models(model_obj, model_dict, foreign_table, foreign_key, table, key,
- link_prop):
- if model_obj.__tablename__ == foreign_table:
- for instance in get_resource_store().store_map[table]:
- if instance[key] == model_dict[foreign_key]:
- if link_prop not in instance:
- instance[link_prop] = []
- instance[link_prop].append(model_dict)
-
-
-def unlink_models(res_list, model_dict, foreign_key, key, link_prop,
- link_ele_foreign_key, link_ele_key):
- if foreign_key not in model_dict:
- return
- for instance in res_list:
- if instance[key] == model_dict[foreign_key]:
- if link_prop not in instance:
- return
- index = -1
- for i, res in enumerate(instance[link_prop]):
- if res[link_ele_foreign_key] == model_dict[link_ele_key]:
- index = i
- break
- if index != -1:
- del instance[link_prop][index]
- return
-
-
-class FakeSession(object):
- class WithWrapper(object):
- def __enter__(self):
- pass
-
- def __exit__(self, type, value, traceback):
- pass
-
- def __init__(self):
- self.info = {}
- self.resource_store = get_resource_store()
-
- def __getattr__(self, field):
- def dummy_method(*args, **kwargs):
- pass
-
- return dummy_method
-
- def __contains__(self, item):
- return False
-
- @property
- def is_active(self):
- return True
-
- def begin(self, subtransactions=False, nested=True):
- return FakeSession.WithWrapper()
-
- def begin_nested(self):
- return FakeSession.WithWrapper()
-
- def query(self, model):
- field = None
- if isinstance(model, attributes.InstrumentedAttribute):
- field = model.key
- model = model.class_
- if model.__tablename__ not in self.resource_store.store_map:
- return FakeQuery([], model.__tablename__, field)
- return FakeQuery(self.resource_store.store_map[model.__tablename__],
- model.__tablename__, field)
-
- def _extend_standard_attr(self, model_dict):
- if 'standard_attr' in model_dict:
- for field in ('resource_type', 'description', 'revision_number',
- 'created_at', 'updated_at'):
- model_dict[field] = getattr(model_dict['standard_attr'], field)
-
- def add_hook(self, model_obj, model_dict):
- # hook for operations before adding the model_obj to the resource store
- pass
-
- def delete_hook(self, model_obj):
- # hook for operations before deleting the model_obj from the resource
- # store. the default key to find the target object is "id", return
- # non-None value if you would like specify other key
- return None
-
- def add(self, model_obj):
- if model_obj.__tablename__ not in self.resource_store.store_map:
- return
- model_dict = DotDict(model_obj._as_dict())
- if 'project_id' in model_dict:
- model_dict['tenant_id'] = model_dict['project_id']
-
- if model_obj.__tablename__ == 'networks':
- model_dict['subnets'] = []
- if model_obj.__tablename__ == 'ports':
- model_dict['dhcp_opts'] = []
- model_dict['security_groups'] = []
- model_dict['fixed_ips'] = []
-
- link_models(model_obj, model_dict,
- 'subnetpoolprefixes', 'subnetpool_id',
- 'subnetpools', 'id', 'prefixes')
- link_models(model_obj, model_dict,
- 'ipallocations', 'port_id',
- 'ports', 'id', 'fixed_ips')
- link_models(model_obj, model_dict,
- 'subnets', 'network_id', 'networks', 'id', 'subnets')
- link_models(model_obj, model_dict,
- 'securitygrouprules', 'security_group_id',
- 'securitygroups', 'id', 'security_group_rules')
-
- if model_obj.__tablename__ == 'routerports':
- for port in self.resource_store.TOP_PORTS:
- if port['id'] == model_dict['port_id']:
- model_dict['port'] = port
- port.update(model_dict)
- break
- if model_obj.__tablename__ == 'externalnetworks':
- for net in self.resource_store.TOP_NETWORKS:
- if net['id'] == model_dict['network_id']:
- net['external'] = True
- net['router:external'] = True
- break
- if model_obj.__tablename__ == 'networkrbacs':
- if (model_dict['action'] == 'access_as_shared' and
- model_dict['target_tenant'] == '*'):
-
- for net in self.resource_store.TOP_NETWORKS:
- if net['id'] == model_dict['object_id']:
- net['shared'] = True
- break
-
- link_models(model_obj, model_dict,
- 'routerports', 'router_id',
- 'routers', 'id', 'attached_ports')
-
- if model_obj.__tablename__ == 'subnetroutes':
- for subnet in self.resource_store.TOP_SUBNETS:
- if subnet['id'] != model_dict['subnet_id']:
- continue
- host_route = {'nexthop': model_dict['nexthop'],
- 'destination': model_dict['destination']}
- subnet['host_routes'].append(host_route)
- break
-
- if model_obj.__tablename__ == 'dnsnameservers':
- for subnet in self.resource_store.TOP_SUBNETS:
- if subnet['id'] != model_dict['subnet_id']:
- continue
- dnsnameservers = model_dict['address']
- subnet['dns_nameservers'].append(dnsnameservers)
- break
-
- if model_obj.__tablename__ == 'ml2_flat_allocations':
- for alloc in self.resource_store.TOP_ML2_FLAT_ALLOCATIONS:
- if alloc['physical_network'] == model_dict['physical_network']:
- raise q_obj_exceptions.NeutronDbObjectDuplicateEntry(
- model_obj.__class__,
- DotDict({'columns': '', 'value': ''}))
-
- self._extend_standard_attr(model_dict)
-
- self.add_hook(model_obj, model_dict)
- self.resource_store.store_map[
- model_obj.__tablename__].append(model_dict)
-
- def _cascade_delete(self, model_dict, foreign_key, table, key):
- if key not in model_dict:
- return
- index = -1
- for i, instance in enumerate(self.resource_store.store_map[table]):
- if instance[foreign_key] == model_dict[key]:
- index = i
- break
- if index != -1:
- del self.resource_store.store_map[table][index]
-
- def delete(self, model_obj):
- unlink_models(self.resource_store.store_map['routers'], model_obj,
- 'router_id', 'id', 'attached_ports', 'port_id', 'id')
- unlink_models(self.resource_store.store_map['securitygroups'],
- model_obj, 'security_group_id', 'id',
- 'security_group_rules', 'id', 'id')
- self._cascade_delete(model_obj, 'port_id', 'ipallocations', 'id')
- key = self.delete_hook(model_obj)
- for res_list in self.resource_store.store_map.values():
- delete_model(res_list, model_obj, key)
-
-
-class FakeNeutronContext(q_context.Context):
- def __init__(self):
- self._session = None
- self.is_admin = True
- self.is_advsvc = False
- self.tenant_id = TEST_TENANT_ID
-
- def session_class(self):
- return FakeSession
-
- @property
- def session(self):
- if not self._session:
- self._session = self.session_class()()
- return self._session
-
- def elevated(self):
- return self
-
-
-def filter_resource(resource_list, params):
- if not params:
- return resource_list
-
- params_copy = copy.deepcopy(params)
- limit = params_copy.pop('limit', None)
- marker = params_copy.pop('marker', None)
-
- if params_copy:
- return_list = []
- for resource in resource_list:
- is_selected = True
- for key, value in six.iteritems(params_copy):
- if (key not in resource
- or not resource[key]
- or resource[key] not in value):
- is_selected = False
- break
- if is_selected:
- return_list.append(resource)
- else:
- return_list = resource_list
-
- if marker:
- sorted_list = sorted(return_list, key=lambda x: x['id'])
- for i, resource in enumerate(sorted_list):
- if resource['id'] == marker:
- return_list = sorted_list[i + 1:]
-
- if limit:
- sorted_list = sorted(return_list, key=lambda x: x['id'])
- if limit > len(sorted_list):
- last_index = len(sorted_list)
- else:
- last_index = limit
- return_list = sorted_list[0: last_index]
- return return_list
-
-
-class FakeNeutronClient(object):
- # override this
- _resource = None
-
- def __init__(self, region_name):
- self.region_name = region_name
- self._res_map = get_resource_store().pod_store_map
-
- def get(self, path, params=None):
- if self.region_name in ['pod_1', 'pod_2', 'top']:
- res_list = self._res_map[self.region_name][self._resource]
- filtered_res_list = filter_resource(res_list, params)
- return_list = []
- for res in filtered_res_list:
- if self.region_name != 'top':
- res = copy.copy(res)
- return_list.append(res)
- return {self._resource + 's': return_list}
- else:
- raise Exception()
-
-
-class FakeClient(object):
- def __init__(self, region_name=None):
- if not region_name:
- self.region_name = 'top'
- else:
- self.region_name = region_name
- self._res_map = get_resource_store().pod_store_map
-
- def create_resources(self, _type, ctx, body):
- res_list = self._res_map[self.region_name][_type]
- if _type == 'qos_policy':
- _type = 'policy'
- res = dict(body[_type])
- if 'id' not in res:
- res['id'] = uuidutils.generate_uuid()
- if _type == 'policy' and 'rules' not in res:
- res['rules'] = []
- res_list.append(res)
- return res
-
- def list_resources(self, _type, ctx, filters=None):
- res_list = self._res_map[self.region_name][_type]
- ret_list = []
- for res in res_list:
- is_selected = True
- for _filter in filters:
- if _filter['key'] not in res:
- is_selected = False
- break
- if _filter['value'] != res[_filter['key']]:
- is_selected = False
- break
- if is_selected:
- ret_list.append(res)
- return ret_list
-
- def get_resource(self, _type, ctx, _id):
- res = self.list_resources(
- _type, ctx, [{'key': 'id', 'comparator': 'eq', 'value': _id}])
- if res:
- return res[0]
- return None
-
- def delete_resources(self, _type, ctx, _id):
- if _type == 'policy':
- _type = 'qos_policy'
- index = -1
- res_list = self._res_map[self.region_name][_type]
- for i, res in enumerate(res_list):
- if res['id'] == _id:
- index = i
- if index != -1:
- del res_list[index]
-
- def update_resources(self, _type, ctx, _id, body):
- if _type == 'policy':
- res_list = self._res_map[self.region_name]['qos_policy']
- else:
- res_list = self._res_map[self.region_name][_type]
- updated = False
- for res in res_list:
- if res['id'] == _id:
- updated = True
- res.update(body[_type])
- return updated
-
-
-class FakeTypeManager(managers.TricircleTypeManager):
- def _register_types(self):
- local_driver = type_local.LocalTypeDriver()
- self.drivers[constants.NT_LOCAL] = FakeExtension(local_driver)
- vlan_driver = type_vlan.VLANTypeDriver()
- self.drivers[constants.NT_VLAN] = FakeExtension(vlan_driver)
- vxlan_driver = type_vxlan.VxLANTypeDriver()
- self.drivers[constants.NT_VxLAN] = FakeExtension(vxlan_driver)
- local_driver = type_flat.FlatTypeDriver()
- self.drivers[constants.NT_FLAT] = FakeExtension(local_driver)
-
- def extend_network_dict_provider(self, cxt, net):
- target_net = None
- for t_net in get_resource_store().TOP_NETWORKS:
- if t_net['id'] == net['id']:
- target_net = t_net
- if not target_net:
- return
- for segment in get_resource_store().TOP_NETWORKSEGMENTS:
- if target_net['id'] == segment['network_id']:
- target_net['provider:network_type'] = segment['network_type']
- target_net[
- 'provider:physical_network'] = segment['physical_network']
- target_net[
- 'provider:segmentation_id'] = segment['segmentation_id']
- break
-
-
-class FakeExtension(object):
- def __init__(self, ext_obj):
- self.obj = ext_obj
diff --git a/tricircle/tests/unit/xjob/__init__.py b/tricircle/tests/unit/xjob/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/tests/unit/xjob/test_xmanager.py b/tricircle/tests/unit/xjob/test_xmanager.py
deleted file mode 100644
index d669f106..00000000
--- a/tricircle/tests/unit/xjob/test_xmanager.py
+++ /dev/null
@@ -1,1189 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-import datetime
-import mock
-from mock import patch
-import six
-from six.moves import xrange
-import unittest
-
-import neutron_lib.constants as q_constants
-import neutronclient.common.exceptions as q_cli_exceptions
-from oslo_config import cfg
-from oslo_utils import uuidutils
-
-from tricircle.common import constants
-from tricircle.common import context
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-import tricircle.network.exceptions as t_network_exc
-from tricircle.network import helper
-from tricircle.xjob import xmanager
-from tricircle.xjob import xservice
-
-
-TOP_NETWORK = []
-BOTTOM1_NETWORK = []
-BOTTOM2_NETWORK = []
-TOP_SUBNET = []
-BOTTOM1_SUBNET = []
-BOTTOM2_SUBNET = []
-BOTTOM1_PORT = []
-BOTTOM2_PORT = []
-TOP_ROUTER = []
-BOTTOM1_ROUTER = []
-BOTTOM2_ROUTER = []
-TOP_SG = []
-BOTTOM1_SG = []
-BOTTOM2_SG = []
-TOP_FIP = []
-BOTTOM1_FIP = []
-BOTTOM2_FIP = []
-RES_LIST = [TOP_NETWORK, BOTTOM1_NETWORK, BOTTOM2_NETWORK, TOP_SUBNET,
- BOTTOM1_SUBNET, BOTTOM2_SUBNET, BOTTOM1_PORT, BOTTOM2_PORT,
- TOP_ROUTER, BOTTOM1_ROUTER, BOTTOM2_ROUTER, TOP_SG, BOTTOM1_SG,
- BOTTOM2_SG, TOP_FIP, BOTTOM1_FIP, BOTTOM2_FIP]
-RES_MAP = {'top': {'network': TOP_NETWORK,
- 'subnet': TOP_SUBNET,
- 'router': TOP_ROUTER,
- 'security_group': TOP_SG,
- 'floatingip': TOP_FIP},
- 'pod_1': {'network': BOTTOM1_NETWORK,
- 'subnet': BOTTOM1_SUBNET,
- 'port': BOTTOM1_PORT,
- 'router': BOTTOM1_ROUTER,
- 'security_group': BOTTOM1_SG,
- 'floatingip': BOTTOM1_FIP},
- 'pod_2': {'network': BOTTOM2_NETWORK,
- 'subnet': BOTTOM2_SUBNET,
- 'port': BOTTOM2_PORT,
- 'router': BOTTOM2_ROUTER,
- 'security_group': BOTTOM2_SG,
- 'floatingip': BOTTOM2_FIP}}
-
-
-def fake_get_client(self, region_name=None):
- return FakeClient(region_name)
-
-
-def fake_create_floatingips(self, ctx, filters=None):
- raise q_cli_exceptions.IpAddressInUseClient(
- message='fake_create_floatingips')
-
-
-class FakeBaseXManager(xmanager.XManager):
- def __init__(self):
- self.clients = {'top': FakeClient(),
- 'pod_1': FakeClient('pod_1'),
- 'pod_2': FakeClient('pod_2')}
- self.helper = helper.NetworkHelper()
- self.job_handles = {
- constants.JT_CONFIGURE_ROUTE: self.configure_route,
- constants.JT_ROUTER_SETUP: self.setup_bottom_router,
- constants.JT_SHADOW_PORT_SETUP: self.setup_shadow_ports,
- constants.JT_PORT_DELETE: self.delete_server_port}
-
- def _get_client(self, region_name=None):
- return FakeClient(region_name)
-
- def setup_bottom_router(self, ctx, payload):
- super(FakeBaseXManager, self).setup_bottom_router(ctx, payload=payload)
-
-
-class FakeXManager(FakeBaseXManager):
- def __init__(self):
- super(FakeXManager, self).__init__()
- self.xjob_handler = FakeXJobAPI()
-
-
-class FakeXJobAPI(object):
- def __init__(self):
- self.xmanager = FakeBaseXManager()
-
- def configure_route(self, ctxt, project_id, router_id):
- pass
-
- def setup_bottom_router(self, ctxt, project_id, net_id, router_id, pod_id):
- combine_id = '%s#%s#%s' % (pod_id, router_id, net_id)
- self.xmanager.setup_bottom_router(
- ctxt, payload={constants.JT_ROUTER_SETUP: combine_id})
-
- def setup_shadow_ports(self, ctxt, pod_id, net_id):
- combine_id = '%s#%s' % (pod_id, net_id)
- self.xmanager.setup_shadow_ports(
- ctxt, payload={constants.JT_SHADOW_PORT_SETUP: combine_id})
-
-
-class FakeClient(object):
- def __init__(self, region_name=None):
- if region_name:
- self.region_name = region_name
- else:
- self.region_name = 'top'
-
- def list_resources(self, resource, ctx, filters=None):
- res_list = []
- filters = filters or []
- for res in RES_MAP[self.region_name][resource]:
- is_selected = True
- for _filter in filters:
- if _filter['key'] == 'fields':
- # in test, we don't need to filter fields
- continue
- if _filter['key'] not in res:
- is_selected = False
- break
- if res[_filter['key']] not in _filter['value']:
- is_selected = False
- break
- if is_selected:
- res_list.append(copy.copy(res))
- return res_list
-
- def create_resources(self, resource, ctx, body):
- res = body[resource]
- if 'id' not in res:
- res['id'] = uuidutils.generate_uuid()
- RES_MAP[self.region_name][resource].append(res)
- return res
-
- def update_resources(self, resource, ctx, _id, body):
- for res in RES_MAP[self.region_name][resource]:
- if res['id'] == _id:
- res.update(body[resource])
-
- def create_ports(self, ctx, body):
- if 'ports' in body:
- ret = []
- for port in body['ports']:
- ret.append(self.create_resources('port', ctx, {'port': port}))
- return ret
- return self.create_resources('port', ctx, body)
-
- def list_ports(self, ctx, filters=None):
- return self.list_resources('port', ctx, filters)
-
- def get_ports(self, ctx, port_id):
- return self.list_resources(
- 'port', ctx,
- [{'key': 'id', 'comparator': 'eq', 'value': port_id}])[0]
-
- def update_ports(self, ctx, _id, body):
- self.update_resources('port', ctx, _id, body)
-
- def list_subnets(self, ctx, filters=None):
- return self.list_resources('subnet', ctx, filters)
-
- def get_subnets(self, ctx, subnet_id):
- return self.list_resources(
- 'subnet', ctx,
- [{'key': 'id', 'comparator': 'eq', 'value': subnet_id}])[0]
-
- def update_subnets(self, ctx, subnet_id, body):
- pass
-
- def list_networks(self, ctx, filters=None):
- return self.list_resources('network', ctx, filters)
-
- def get_networks(self, ctx, net_id):
- return self.list_resources(
- 'network', ctx,
- [{'key': 'id', 'comparator': 'eq', 'value': net_id}])[0]
-
- def get_routers(self, ctx, router_id):
- return self.list_resources(
- 'router', ctx,
- [{'key': 'id', 'comparator': 'eq', 'value': router_id}])[0]
-
- def update_routers(self, ctx, *args, **kwargs):
- pass
-
- def list_security_groups(self, ctx, filters=None):
- return self.list_resources('security_group', ctx, filters)
-
- def get_security_groups(self, ctx, sg_id):
- return self.list_resources(
- 'security_group', ctx,
- [{'key': 'id', 'comparator': 'eq', 'value': sg_id}])[0]
-
- def delete_security_group_rules(self, ctx, sg_id):
- pass
-
- def create_security_group_rules(self, ctx, *args, **kwargs):
- pass
-
- def list_floatingips(self, ctx, filters=None):
- return self.list_resources('floatingip', ctx, filters)
-
- def create_floatingips(self, ctx, body):
- fip = self.create_resources('floatingip', ctx, body)
- for key in ['fixed_port_id']:
- if key not in fip:
- fip[key] = None
- return fip
-
- def update_floatingips(self, ctx, _id, body):
- self.update_resources('floatingip', ctx, _id, body)
-
-
-class XManagerTest(unittest.TestCase):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- # enforce foreign key constraint for sqlite
- core.get_engine().execute('pragma foreign_keys=on')
- for opt in xservice.common_opts:
- if opt.name in ('worker_handle_timeout', 'job_run_expire',
- 'worker_sleep_time', 'redo_time_span'):
- cfg.CONF.register_opt(opt)
- self.context = context.Context()
- self.xmanager = FakeXManager()
-
- def _prepare_dnat_test(self):
- for subnet in BOTTOM2_SUBNET:
- if 'ext' in subnet['id']:
- ext_subnet = subnet
- ext_cidr = ext_subnet['cidr']
- ext_cidr_prefix = ext_cidr[:ext_cidr.rindex('.')]
- vm_ports = []
- # get one vm port from each bottom pod
- for ports in [BOTTOM1_PORT, BOTTOM2_PORT]:
- for port in ports:
- if port['device_owner'] == 'compute:None':
- vm_ports.append(port)
- break
- for i, vm_port in enumerate(vm_ports):
- vm_ip = vm_port['fixed_ips'][0]['ip_address']
- fip = {'floating_network_id': ext_subnet['network_id'],
- 'floating_ip_address': '%s.%d' % (ext_cidr_prefix, i + 1),
- 'port_id': vm_port['id'],
- 'fixed_ip_address': vm_ip}
- TOP_FIP.append(fip)
- BOTTOM2_FIP.append(fip)
-
- def _prepare_snat_test(self, top_router_id):
- ext_network = {'id': 'ext_network_id',
- 'router:external': True}
- ext_subnet = {
- 'id': 'ext_subnet_id',
- 'network_id': ext_network['id'],
- 'cidr': '162.3.124.0/24',
- 'gateway_ip': '162.3.124.1'
- }
- for router in TOP_ROUTER:
- if router['id'] == top_router_id:
- router['external_gateway_info'] = {
- 'network_id': ext_network['id']}
- router = {'id': 'ns_router_id'}
- for subnet in BOTTOM2_SUBNET:
- if 'bridge' in subnet['id']:
- bridge_subnet = subnet
- bridge_port = {
- 'network_id': bridge_subnet['network_id'],
- 'device_id': router['id'],
- 'device_owner': 'network:router_interface',
- 'fixed_ips': [{'subnet_id': bridge_subnet['id'],
- 'ip_address': bridge_subnet['gateway_ip']}]
- }
- BOTTOM2_NETWORK.append(ext_network)
- BOTTOM2_SUBNET.append(ext_subnet)
- BOTTOM2_PORT.append(bridge_port)
- BOTTOM2_ROUTER.append(router)
- route = {'top_id': top_router_id, 'bottom_id': router['id'],
- 'pod_id': 'pod_id_2', 'resource_type': constants.RT_NS_ROUTER}
- with self.context.session.begin():
- core.create_resource(self.context, models.ResourceRouting, route)
- return bridge_subnet['gateway_ip'], router['id']
-
- def _prepare_east_west_network_test(self, top_router_id):
- bridge_infos = []
-
- router = {'id': top_router_id}
- TOP_ROUTER.append(router)
- for i in xrange(1, 3):
- pod_dict = {'pod_id': 'pod_id_%d' % i,
- 'region_name': 'pod_%d' % i,
- 'az_name': 'az_name_%d' % i}
- db_api.create_pod(self.context, pod_dict)
-
- network = {'id': 'network_%d_id' % i}
- bridge_network = {'id': 'bridge_network_%d_id' % i}
- router = {'id': 'router_%d_id' % i}
- subnet = {
- 'id': 'subnet_%d_id' % i,
- 'network_id': network['id'],
- 'cidr': '10.0.%d.0/24' % i,
- 'gateway_ip': '10.0.%d.1' % i,
- }
- bridge_subnet = {
- 'id': 'bridge_subnet_%d_id' % i,
- 'network_id': bridge_network['id'],
- 'cidr': '100.0.1.0/24',
- 'gateway_ip': '100.0.1.1',
- }
- port = {
- 'network_id': network['id'],
- 'device_id': router['id'],
- 'device_owner': 'network:router_interface',
- 'fixed_ips': [{'subnet_id': subnet['id'],
- 'ip_address': subnet['gateway_ip']}]
- }
- vm_port = {
- 'id': 'vm_port_%d_id' % i,
- 'network_id': network['id'],
- 'device_id': 'vm%d_id' % i,
- 'device_owner': 'compute:None',
- 'fixed_ips': [{'subnet_id': subnet['id'],
- 'ip_address': '10.0.%d.3' % i}]
- }
- bridge_cidr = bridge_subnet['cidr']
- bridge_port_ip = '%s.%d' % (bridge_cidr[:bridge_cidr.rindex('.')],
- 2 + i)
- bridge_port = {
- 'network_id': bridge_network['id'],
- 'device_id': router['id'],
- 'device_owner': 'network:router_gateway',
- 'fixed_ips': [{'subnet_id': bridge_subnet['id'],
- 'ip_address': bridge_port_ip}]
- }
- region_name = 'pod_%d' % i
- RES_MAP[region_name]['network'].append(network)
- RES_MAP[region_name]['network'].append(bridge_network)
- RES_MAP[region_name]['subnet'].append(subnet)
- RES_MAP[region_name]['subnet'].append(bridge_subnet)
- RES_MAP[region_name]['port'].append(port)
- RES_MAP[region_name]['port'].append(vm_port)
- RES_MAP[region_name]['port'].append(bridge_port)
- RES_MAP[region_name]['router'].append(router)
-
- route = {'top_id': top_router_id, 'bottom_id': router['id'],
- 'pod_id': pod_dict['pod_id'], 'resource_type': 'router'}
- with self.context.session.begin():
- core.create_resource(self.context, models.ResourceRouting,
- route)
-
- bridge_info = {
- 'router_id': router['id'],
- 'bridge_ip': bridge_port['fixed_ips'][0]['ip_address'],
- 'vm_ips': ['10.0.%d.3' % i]}
- bridge_infos.append(bridge_info)
-
- BOTTOM1_NETWORK.append({'id': 'network_3_id'})
- BOTTOM1_SUBNET.append({'id': 'subnet_3_id',
- 'network_id': 'network_3_id',
- 'cidr': '10.0.3.0/24',
- 'gateway_ip': '10.0.3.1'})
- BOTTOM1_PORT.append({'network_id': 'network_3_id',
- 'device_id': 'router_1_id',
- 'device_owner': 'network:router_interface',
- 'fixed_ips': [{'subnet_id': 'subnet_3_id',
- 'ip_address': '10.0.3.1'}]})
- BOTTOM1_PORT.append({'network_id': 'network_3_id',
- 'device_id': 'vm3_id',
- 'device_owner': 'compute:None',
- 'fixed_ips': [{'subnet_id': 'subnet_3_id',
- 'ip_address': '10.0.3.3'}]})
- bridge_infos[0]['vm_ips'].append('10.0.3.3')
- return bridge_infos
-
- def _check_extra_routes_calls(self, except_list, actual_list):
- except_map = {}
- for except_call in except_list:
- ctx, router_id, routes_body = except_call[1]
- except_map[router_id] = (ctx, routes_body['router']['routes'])
- for actual_call in actual_list:
- ctx, router_id, routes_body = actual_call[0]
- expect_ctx, expect_routes = except_map[router_id]
- self.assertEqual(expect_ctx, ctx)
- six.assertCountEqual(self, expect_routes,
- routes_body['router']['routes'])
-
- def _prepare_top_router(self, project_id):
- for i in (0, 1, 2):
- pod_dict = {'pod_id': 'pod_id_%d' % i,
- 'region_name': 'pod_%d' % i,
- 'az_name': 'az_name_%d' % i}
- db_api.create_pod(self.context, pod_dict)
- t_net = {'id': 'network_1_id'}
- t_subnet = {'id': 'subnet_1_id',
- 'network_id': t_net['id'],
- 'cidr': '10.0.1.0/24',
- 'gateway_ip': '10.0.1.1'}
- bridge_network = {'id': 'bridge_network_1_id',
- 'name': constants.bridge_net_name % project_id}
- bridge_subnet = {
- 'id': 'bridge_subnet_1_id',
- 'name': constants.bridge_subnet_name % project_id,
- 'network_id': bridge_network['id'],
- 'cidr': '100.0.1.0/24',
- 'gateway_ip': '100.0.1.1',
- }
- RES_MAP['top']['network'].append(t_net)
- RES_MAP['top']['subnet'].append(t_subnet)
- RES_MAP['top']['network'].append(bridge_network)
- RES_MAP['top']['subnet'].append(bridge_subnet)
-
- top_router_id = 'router_id'
- t_router = {'id': top_router_id, 'tenant_id': project_id,
- 'extra_attributes': {
- 'availability_zone_hints': ['pod_1']}}
- TOP_ROUTER.append(t_router)
- return t_net, t_subnet, t_router
-
- @patch.object(helper.NetworkHelper, '_get_client', new=fake_get_client)
- def test_redo_failed_or_new_job(self):
- project_id = uuidutils.generate_uuid()
- t_net, _, t_router = self._prepare_top_router(project_id)
-
- resource_id = 'pod_id_1#%s#%s' % (t_router['id'], t_net['id'])
- db_api.new_job(self.context, project_id, constants.JT_ROUTER_SETUP,
- resource_id)
- self.xmanager.redo_failed_or_new_job(self.context)
- self.assertEqual(len(RES_MAP['pod_1']['router']), 1)
-
- TOP_ROUTER.remove(t_router)
- router = {'id': t_router['id'], 'tenant_id': project_id,
- 'extra_attributes': {'availability_zone_hints': ['pod_2']}}
- TOP_ROUTER.append(router)
- jobs = db_api.list_jobs(self.context)
- for job in jobs:
- db_api.delete_job(self.context, job['id'])
- resource_id = 'pod_id_2#%s#%s' % (t_router['id'], t_net['id'])
- with self.context.session.begin():
- job_dict = {'id': uuidutils.generate_uuid(),
- 'type': constants.JT_ROUTER_SETUP,
- 'status': constants.JS_Fail,
- 'project_id': project_id,
- 'resource_id': resource_id,
- 'extra_id': uuidutils.generate_uuid()}
- core.create_resource(self.context, models.AsyncJob, job_dict)
- self.xmanager.redo_failed_or_new_job(self.context)
- self.assertEqual(len(RES_MAP['pod_2']['router']), 1)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=fake_get_client)
- def test_setup_bottom_router_not_special(self):
- project_id = uuidutils.generate_uuid()
- t_net, _, t_router = self._prepare_top_router(project_id)
- resource_id = 'pod_id_1#%s#%s' % (t_router['id'], t_net['id'])
- db_api.new_job(self.context, project_id, constants.JT_ROUTER_SETUP,
- resource_id)
- db_api.create_resource_mapping(
- self.context, t_net['id'], t_net['id'],
- 'pod_id_1', project_id, constants.RT_NETWORK)
- combine_id = '%s#%s#%s' % (constants.POD_NOT_SPECIFIED,
- t_router['id'], t_net['id'])
- db_api.new_job(self.context, project_id, constants.JT_ROUTER_SETUP,
- combine_id)
- self.xmanager.setup_bottom_router(self.context, payload={
- constants.JT_ROUTER_SETUP: combine_id
- })
- self.assertEqual(len(RES_MAP['pod_1']['router']), 1)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=fake_get_client)
- @patch.object(FakeClient, 'create_floatingips',
- new=fake_create_floatingips)
- def test__safe_create_bottom_floatingip(self):
- client = FakeClient('pod_1')
- pod = {'region_name': 'pod_1'}
- self.assertRaises(t_network_exc.BottomPodOperationFailure,
- self.xmanager._safe_create_bottom_floatingip,
- self.context, pod, client, None, None, None)
-
- fip_net_id = 'fip_net_id_1'
- fip_address = '10.0.1.55'
- port_id = 'port_id_1'
- RES_MAP['pod_1']['floatingip'].append(
- {'floating_network_id': fip_net_id,
- 'floating_ip_address': fip_address,
- 'port_id': port_id,
- 'id': uuidutils.generate_uuid()})
- self.xmanager._safe_create_bottom_floatingip(
- self.context, pod, client, fip_net_id, fip_address, port_id)
- self.assertEqual(RES_MAP['pod_1']['floatingip'][0]['port_id'],
- port_id)
- RES_MAP['pod_1']['floatingip'][0]['port_id'] = None
- self.xmanager._safe_create_bottom_floatingip(
- self.context, pod, client, fip_net_id, fip_address, 'fake_port_id')
- self.assertEqual(RES_MAP['pod_1']['floatingip'][0]['port_id'],
- 'fake_port_id')
-
- @patch.object(FakeClient, 'update_routers')
- def test_configure_extra_routes_with_floating_ips(self, mock_update):
- top_router_id = 'router_id'
- project_id = uuidutils.generate_uuid()
- bridge_infos = self._prepare_east_west_network_test(top_router_id)
- ns_bridge_ip, ns_router_id = self._prepare_snat_test(top_router_id)
- self._prepare_dnat_test()
- db_api.new_job(self.context, project_id, constants.JT_CONFIGURE_ROUTE,
- top_router_id)
- self.xmanager.configure_route(
- self.context,
- payload={constants.JT_CONFIGURE_ROUTE: top_router_id})
- calls = []
- ns_routes = []
- for i in range(2):
- routes = []
- for ip in bridge_infos[i]['vm_ips']:
- route = {'nexthop': bridge_infos[i]['bridge_ip'],
- 'destination': ip + '/32'}
- routes.append(route)
- ns_routes.append(route)
- routes.append({'nexthop': ns_bridge_ip,
- 'destination': '0.0.0.0/0'})
- call = mock.call(self.context, bridge_infos[1 - i]['router_id'],
- {'router': {'routes': routes}})
- calls.append(call)
- calls.append(mock.call(self.context, ns_router_id,
- {'router': {'routes': ns_routes}}))
- self._check_extra_routes_calls(calls, mock_update.call_args_list)
-
- @patch.object(FakeClient, 'update_routers')
- def test_configure_extra_routes_with_external_network(self, mock_update):
- top_router_id = 'router_id'
- project_id = uuidutils.generate_uuid()
- bridge_infos = self._prepare_east_west_network_test(top_router_id)
- ns_bridge_ip, ns_router_id = self._prepare_snat_test(top_router_id)
- db_api.new_job(self.context, project_id, constants.JT_CONFIGURE_ROUTE,
- top_router_id)
- self.xmanager.configure_route(
- self.context,
- payload={constants.JT_CONFIGURE_ROUTE: top_router_id})
- calls = []
- ns_routes = []
- for i in range(2):
- routes = []
- for ip in bridge_infos[i]['vm_ips']:
- route = {'nexthop': bridge_infos[i]['bridge_ip'],
- 'destination': ip + '/32'}
- routes.append(route)
- ns_routes.append(route)
- routes.append({'nexthop': ns_bridge_ip,
- 'destination': '0.0.0.0/0'})
- call = mock.call(self.context, bridge_infos[1 - i]['router_id'],
- {'router': {'routes': routes}})
- calls.append(call)
- calls.append(mock.call(self.context, ns_router_id,
- {'router': {'routes': ns_routes}}))
- self._check_extra_routes_calls(calls, mock_update.call_args_list)
-
- @patch.object(FakeClient, 'update_routers')
- def test_configure_route(self, mock_update):
- top_router_id = 'router_id'
- project_id = uuidutils.generate_uuid()
- bridge_infos = self._prepare_east_west_network_test(top_router_id)
- db_api.new_job(self.context, project_id, constants.JT_CONFIGURE_ROUTE,
- top_router_id)
- self.xmanager.configure_route(
- self.context,
- payload={constants.JT_CONFIGURE_ROUTE: top_router_id})
- calls = []
- for i in range(2):
- routes = []
- for ip in bridge_infos[i]['vm_ips']:
- routes.append({'nexthop': bridge_infos[i]['bridge_ip'],
- 'destination': ip + '/32'})
- call = mock.call(self.context, bridge_infos[1 - i]['router_id'],
- {'router': {'routes': routes}})
- calls.append(call)
- self._check_extra_routes_calls(calls, mock_update.call_args_list)
-
- @patch.object(FakeClient, 'update_subnets')
- @patch.object(FakeClient, 'update_routers')
- def test_configure_extra_routes_ew_gw(self, router_update, subnet_update):
- for i in (1, 2):
- pod_dict = {'pod_id': 'pod_id_%d' % i,
- 'region_name': 'pod_%d' % i,
- 'az_name': 'az_name_%d' % i}
- db_api.create_pod(self.context, pod_dict)
- for i in (1, 2, 3):
- router = {'id': 'top_router_%d_id' % i}
- TOP_ROUTER.append(router)
-
- # gateway in podX is attached to routerX
- gw_map = {'net1_pod1_gw': '10.0.1.1',
- 'net2_pod2_gw': '10.0.2.1',
- 'net3_pod1_gw': '10.0.3.3',
- 'net3_pod2_gw': '10.0.3.4'}
- # interfaces are all attached to router3
- inf_map = {'net1_pod1_inf': '10.0.1.3',
- 'net2_pod2_inf': '10.0.2.3',
- 'net3_pod1_inf': '10.0.3.5',
- 'net3_pod2_inf': '10.0.3.6'}
- get_gw_map = lambda n_idx, p_idx: gw_map[ # noqa: E731
- 'net%d_pod%d_gw' % (n_idx, p_idx)]
- get_inf_map = lambda n_idx, p_idx: inf_map[ # noqa: E731
- 'net%d_pod%d_inf' % (n_idx, p_idx)]
- bridge_infos = []
-
- for net_idx, router_idx, pod_idx in [(1, 1, 1), (3, 1, 1), (1, 3, 1),
- (3, 3, 1), (2, 2, 2), (3, 2, 2),
- (2, 3, 2), (3, 3, 2)]:
- region_name = 'pod_%d' % pod_idx
- pod_id = 'pod_id_%d' % pod_idx
- top_router_id = 'top_router_%d_id' % router_idx
-
- network = {'id': 'network_%d_id' % net_idx}
- router = {'id': 'router_%d_%d_id' % (pod_idx, router_idx)}
- subnet = {'id': 'subnet_%d_id' % net_idx,
- 'network_id': network['id'],
- 'cidr': '10.0.%d.0/24' % net_idx,
- 'gateway_ip': get_gw_map(net_idx, pod_idx)}
- port = {'network_id': network['id'],
- 'device_id': router['id'],
- 'device_owner': 'network:router_interface',
- 'fixed_ips': [{'subnet_id': subnet['id']}]}
- if router_idx == 3:
- port['fixed_ips'][0][
- 'ip_address'] = get_inf_map(net_idx, pod_idx)
- else:
- port['fixed_ips'][0][
- 'ip_address'] = get_gw_map(net_idx, pod_idx)
-
- if net_idx == pod_idx and router_idx == 3:
- vm_idx = net_idx * 2 + pod_idx + 10
- vm_ip = '10.0.%d.%d' % (net_idx, vm_idx)
- vm_port = {'id': 'vm_port_%d_id' % vm_idx,
- 'network_id': network['id'],
- 'device_id': 'vm%d_id' % vm_idx,
- 'device_owner': 'compute:None',
- 'fixed_ips': [{'subnet_id': subnet['id'],
- 'ip_address': vm_ip}]}
- bridge_network = {'id': 'bridge_network_%d_id' % net_idx}
- bridge_subnet = {'id': 'bridge_subnet_%d_id' % net_idx,
- 'network_id': bridge_network['id'],
- 'cidr': '100.0.1.0/24',
- 'gateway_ip': '100.0.1.1'}
- bridge_cidr = bridge_subnet['cidr']
- bridge_port_ip = '%s.%d' % (
- bridge_cidr[:bridge_cidr.rindex('.')], 2 + pod_idx)
- bridge_infos.append({'router_id': router['id'],
- 'bridge_ip': bridge_port_ip,
- 'vm_ip': vm_ip})
- bridge_port = {
- 'network_id': bridge_network['id'],
- 'device_id': router['id'],
- 'device_owner': 'network:router_gateway',
- 'fixed_ips': [{'subnet_id': bridge_subnet['id'],
- 'ip_address': bridge_port_ip}]
- }
- RES_MAP[region_name]['port'].append(vm_port)
- RES_MAP[region_name]['network'].append(bridge_network)
- RES_MAP[region_name]['subnet'].append(bridge_subnet)
- RES_MAP[region_name]['port'].append(bridge_port)
-
- RES_MAP[region_name]['network'].append(network)
- RES_MAP[region_name]['subnet'].append(subnet)
- RES_MAP[region_name]['port'].append(port)
- RES_MAP[region_name]['router'].append(router)
-
- db_api.create_resource_mapping(self.context, top_router_id,
- router['id'], pod_id, 'project_id',
- constants.RT_ROUTER)
-        # the above code creates the following topology
- # pod1: net1 is attached to R1, default gateway is set on R1
- # net1 is attached to R3
- # net3 is attached to R1, default gateway is set on R1
- # net3 is attached to R3
- # pod2: net2 is attached to R2, default gateway is set on R2
- # net2 is attached to R3
- # net3 is attached to R2, default gateway is set on R2
- # net3 is attached to R3
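-        # an illustrative sketch of the same topology, where R3 is the
-        # cross-pod router whose routes are configured below:
-        #   pod1:  R1 --- net1 --- R3      pod2:  R2 --- net2 --- R3
-        #          R1 --- net3 --- R3             R2 --- net3 --- R3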
-
- target_router_id = 'top_router_3_id'
- project_id = uuidutils.generate_uuid()
- db_api.new_job(self.context, project_id,
- constants.JT_CONFIGURE_ROUTE, target_router_id)
- self.xmanager.configure_route(
- self.context,
- payload={constants.JT_CONFIGURE_ROUTE: target_router_id})
-
- # for the following paths, packets will go to R3 via the interface
- # which is attached to R3
- # net1 in pod1 -> net2 in pod2
- # net2 in pod2 -> net1 in pod1
- # net3 in pod1 -> net2 in pod2
- # net3 in pod2 -> net1 in pod1
- expect_calls = [
- mock.call(self.context, 'subnet_1_id', {'subnet': {
- 'host_routes': [{'nexthop': get_inf_map(1, 1),
- 'destination': '10.0.2.0/24'}]}}),
- mock.call(self.context, 'subnet_2_id', {'subnet': {
- 'host_routes': [{'nexthop': get_inf_map(2, 2),
- 'destination': '10.0.1.0/24'}]}}),
- mock.call(self.context, 'subnet_3_id', {'subnet': {
- 'host_routes': [{'nexthop': get_inf_map(3, 1),
- 'destination': '10.0.2.0/24'}]}}),
- mock.call(self.context, 'subnet_3_id', {'subnet': {
- 'host_routes': [{'nexthop': get_inf_map(3, 2),
- 'destination': '10.0.1.0/24'}]}})]
- subnet_update.assert_has_calls(expect_calls, any_order=True)
- expect_calls = []
- for i in (0, 1):
- bridge_info = bridge_infos[i]
- expect_call = mock.call(
- self.context, bridge_infos[1 - i]['router_id'],
- {'router': {'routes': [
- {'nexthop': bridge_info['bridge_ip'],
- 'destination': bridge_info['vm_ip'] + '/32'}]}})
- expect_calls.append(expect_call)
- router_update.assert_has_calls(expect_calls, any_order=True)
-
- @patch.object(FakeClient, 'delete_security_group_rules')
- @patch.object(FakeClient, 'create_security_group_rules')
- def test_configure_security_group_rules(self, mock_create, mock_delete):
- project_id = uuidutils.generate_uuid()
- sg_id = uuidutils.generate_uuid()
- sg_rule_id_1 = uuidutils.generate_uuid()
- sg_rule_id_2 = uuidutils.generate_uuid()
- sg_rule_id_3 = uuidutils.generate_uuid()
-
- sg = {'id': sg_id,
- 'tenant_id': project_id,
- 'name': 'default',
- 'security_group_rules': [{
- 'id': sg_rule_id_1,
- 'remote_group_id': sg_id,
- 'direction': 'ingress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'port_range_max': -1,
- 'port_range_min': -1,
- 'security_group_id': sg_id},
- {'id': sg_rule_id_2,
- 'remote_group_id': None,
- 'direction': 'engress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'port_range_max': -1,
- 'port_range_min': -1,
- 'security_group_id': sg_id},
- {'id': sg_rule_id_3,
- 'remote_group_id': None,
- 'direction': 'ingress',
- 'remote_ip_prefix': '2001:db8::/64',
- 'protocol': None,
- 'ethertype': 'IPv6',
- 'port_range_max': -1,
- 'port_range_min': -1,
- 'security_group_id': sg_id}]}
- RES_MAP['top']['security_group'].append(sg)
-
- for i in xrange(1, 3):
- pod_dict = {'pod_id': 'pod_id_%d' % i,
- 'region_name': 'pod_%d' % i,
- 'az_name': 'az_name_%d' % i}
- db_api.create_pod(self.context, pod_dict)
-
- network = {'id': 'network_%d_id' % i,
- 'tenant_id': project_id}
- subnet = {'id': 'subnet_%d_id' % i,
- 'network_id': network['id'],
- 'cidr': '10.0.%d.0/24' % i,
- 'gateway_ip': '10.0.%d.1' % i,
- 'tenant_id': project_id,
- 'ip_version': q_constants.IP_VERSION_4}
- RES_MAP['top']['network'].append(network)
- RES_MAP['top']['subnet'].append(subnet)
-
- region_name = 'pod_%d' % i
- RES_MAP[region_name]['security_group'].append(sg)
- route = {'top_id': sg_id, 'bottom_id': sg_id,
- 'pod_id': pod_dict['pod_id'],
- 'resource_type': 'security_group'}
- with self.context.session.begin():
- core.create_resource(self.context, models.ResourceRouting,
- route)
-
- network_ipv6 = {'id': 'network_ipv6_1',
- 'tenant_id': project_id}
- subnet_ipv6 = {'id': 'subnet_ipv6_1',
- 'network_id': network_ipv6['id'],
- 'cidr': '2001:db8::/64',
- 'gateway_ip': '2001:db8::2',
- 'tenant_id': project_id,
- 'ip_version': q_constants.IP_VERSION_6}
- RES_MAP['top']['network'].append(network_ipv6)
- RES_MAP['top']['subnet'].append(subnet_ipv6)
- RES_MAP['pod_1']['security_group'].append(sg)
-
- db_api.new_job(self.context, project_id, constants.JT_SEG_RULE_SETUP,
- project_id)
- self.xmanager.configure_security_group_rules(
- self.context, payload={constants.JT_SEG_RULE_SETUP: project_id})
-
- calls = [mock.call(self.context, sg_rule_id_1)]
- mock_delete.assert_has_calls(calls)
- call_rules_id = [
- call_arg[0][1] for call_arg in mock_delete.call_args_list]
- # bottom security group already has sg_rule_id_2, so this rule will
- # not be deleted
- self.assertNotIn(sg_rule_id_2, call_rules_id)
-
- calls = [mock.call(self.context,
- {'security_group_rules': [
- {'remote_group_id': None,
- 'direction': 'ingress',
- 'remote_ip_prefix': '10.0.1.0/24',
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'port_range_max': -1,
- 'port_range_min': -1,
- 'security_group_id': sg_id},
- {'remote_group_id': None,
- 'direction': 'ingress',
- 'remote_ip_prefix': '10.0.2.0/24',
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'port_range_max': -1,
- 'port_range_min': -1,
- 'security_group_id': sg_id}]})]
- mock_create.assert_has_calls(calls)
-
- @patch.object(FakeClient, 'delete_security_group_rules')
- @patch.object(FakeClient, 'create_security_group_rules')
- def test_configure_security_group_rules_duplicated_cidr(self, mock_create,
- mock_delete):
- project_id = uuidutils.generate_uuid()
- sg_id = uuidutils.generate_uuid()
- sg_rule_id_1 = uuidutils.generate_uuid()
- sg_rule_id_2 = uuidutils.generate_uuid()
-
- sg = {'id': sg_id,
- 'tenant_id': project_id,
- 'name': 'default',
- 'security_group_rules': [{
- 'id': sg_rule_id_1,
- 'remote_group_id': sg_id,
- 'direction': 'ingress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'port_range_max': -1,
- 'port_range_min': -1,
- 'security_group_id': sg_id},
- {'id': sg_rule_id_2,
- 'remote_group_id': None,
- 'direction': 'engress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'port_range_max': -1,
- 'port_range_min': -1,
- 'security_group_id': sg_id}]}
- RES_MAP['top']['security_group'].append(sg)
-
- for i in xrange(1, 3):
- pod_dict = {'pod_id': 'pod_id_%d' % i,
- 'region_name': 'pod_%d' % i,
- 'az_name': 'az_name_%d' % i}
- db_api.create_pod(self.context, pod_dict)
-
- network = {'id': 'network_%d_id' % i,
- 'tenant_id': project_id}
- # we create two subnets with identical cidr but different
- # allocation pools
- subnet = {'id': 'subnet_%d_id' % i,
- 'network_id': network['id'],
- 'cidr': '10.0.1.0/24',
- 'gateway_ip': '10.0.1.%d' % i,
- 'tenant_id': project_id,
-                      'allocation_pools': {'start': '10.0.1.%d' % (10 * i),
- 'end': '10.0.1.%d' % (10 * i + 9)},
- 'ip_version': q_constants.IP_VERSION_4}
- RES_MAP['top']['network'].append(network)
- RES_MAP['top']['subnet'].append(subnet)
-
- region_name = 'pod_%d' % i
- RES_MAP[region_name]['security_group'].append(sg)
- route = {'top_id': sg_id, 'bottom_id': sg_id,
- 'pod_id': pod_dict['pod_id'],
- 'resource_type': 'security_group'}
- with self.context.session.begin():
- core.create_resource(self.context, models.ResourceRouting,
- route)
-
- db_api.new_job(self.context, project_id, constants.JT_SEG_RULE_SETUP,
- project_id)
- self.xmanager.configure_security_group_rules(
- self.context, payload={constants.JT_SEG_RULE_SETUP: project_id})
-
- calls = [mock.call(self.context, sg_rule_id_1)]
- mock_delete.assert_has_calls(calls)
- call_rules_id = [
- call_arg[0][1] for call_arg in mock_delete.call_args_list]
- # bottom security group already has sg_rule_id_2, so this rule will
- # not be deleted
- self.assertNotIn(sg_rule_id_2, call_rules_id)
-
- calls = [mock.call(self.context,
- {'security_group_rules': [
- {'remote_group_id': None,
- 'direction': 'ingress',
- 'remote_ip_prefix': '10.0.1.0/24',
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'port_range_max': -1,
- 'port_range_min': -1,
- 'security_group_id': sg_id}]})]
- mock_create.assert_has_calls(calls)
-
- @patch.object(helper.NetworkHelper, '_get_client', new=fake_get_client)
- @patch.object(FakeXJobAPI, 'setup_shadow_ports')
- def test_setup_shadow_ports(self, mock_setup):
- project_id = uuidutils.generate_uuid()
- net1_id = uuidutils.generate_uuid()
- subnet1_id = uuidutils.generate_uuid()
- port1_id = uuidutils.generate_uuid()
- port2_id = uuidutils.generate_uuid()
- for i in (1, 2):
- pod_id = 'pod_id_%d' % i
- pod_dict = {'pod_id': pod_id,
- 'region_name': 'pod_%d' % i,
- 'az_name': 'az_name_%d' % i}
- db_api.create_pod(self.context, pod_dict)
- db_api.create_resource_mapping(
- self.context, net1_id, net1_id, pod_id, project_id,
- constants.RT_NETWORK)
- TOP_NETWORK.append({'id': net1_id, 'tenant_id': project_id})
- BOTTOM1_PORT.append({'id': port1_id,
- 'network_id': net1_id,
- 'device_owner': 'compute:None',
- 'binding:vif_type': 'ovs',
- 'binding:host_id': 'host1',
- 'device_id': None,
- 'mac_address': 'fa:16:3e:d4:01:03',
- 'fixed_ips': [{'subnet_id': subnet1_id,
- 'ip_address': '10.0.1.3'}]})
- BOTTOM2_PORT.append({'id': port2_id,
- 'network_id': net1_id,
- 'device_owner': 'compute:None',
- 'binding:vif_type': 'ovs',
- 'binding:host_id': 'host2',
- 'device_id': None,
- 'mac_address': 'fa:16:3e:d4:01:03',
- 'fixed_ips': [{'subnet_id': subnet1_id,
- 'ip_address': '10.0.1.4'}]})
- db_api.ensure_agent_exists(
- self.context, 'pod_id_1', 'host1', q_constants.AGENT_TYPE_OVS,
- '192.168.1.101')
- db_api.ensure_agent_exists(
- self.context, 'pod_id_2', 'host2', q_constants.AGENT_TYPE_OVS,
- '192.168.1.102')
-
- resource_id = 'pod_id_1#' + net1_id
- db_api.new_job(self.context, project_id,
- constants.JT_SHADOW_PORT_SETUP, resource_id)
- self.xmanager.setup_shadow_ports(
- self.context,
- payload={constants.JT_SHADOW_PORT_SETUP: resource_id})
-
- # check shadow port in pod1 is created and updated
- client1 = FakeClient('pod_1')
- sd_ports = client1.list_ports(
- self.context, [{'key': 'device_owner',
- 'comparator': 'eq',
- 'value': constants.DEVICE_OWNER_SHADOW}])
- self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'],
- '10.0.1.4')
- self.assertIn(constants.PROFILE_FORCE_UP,
- sd_ports[0]['binding:profile'])
-
- # check job to setup shadow ports for pod2 is registered
- mock_setup.assert_called_once_with(self.context, project_id,
- 'pod_id_2', net1_id)
-
- # update shadow port to down and test again, this is possible when we
-        # succeed in creating the shadow port but fail to update it to active
- profile = sd_ports[0]['binding:profile']
- profile.pop(constants.PROFILE_FORCE_UP)
- client1.update_ports(self.context, sd_ports[0]['id'],
- {'port': {'status': q_constants.PORT_STATUS_DOWN,
- 'binding:profile': profile}})
-
- db_api.new_job(self.context, project_id,
- constants.JT_SHADOW_PORT_SETUP, resource_id)
- self.xmanager.setup_shadow_ports(
- self.context,
- payload={constants.JT_SHADOW_PORT_SETUP: resource_id})
-
-        # check shadow port is updated to active again
- sd_port = client1.get_ports(self.context, sd_ports[0]['id'])
- self.assertIn(constants.PROFILE_FORCE_UP, sd_port['binding:profile'])
-
- # manually trigger shadow ports setup in pod2
- resource_id = 'pod_id_2#' + net1_id
- db_api.new_job(self.context, project_id,
- constants.JT_SHADOW_PORT_SETUP, resource_id)
- self.xmanager.setup_shadow_ports(
- self.context,
- payload={constants.JT_SHADOW_PORT_SETUP: resource_id})
-
- client2 = FakeClient('pod_2')
- sd_ports = client2.list_ports(
- self.context, [{'key': 'device_owner',
- 'comparator': 'eq',
- 'value': constants.DEVICE_OWNER_SHADOW}])
- self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'],
- '10.0.1.3')
-
- def test_job_handle(self):
- job_type = 'fake_resource'
-
- @xmanager._job_handle(job_type)
- def fake_handle(self, ctx, payload):
- pass
-
- fake_id = 'fake_id'
- fake_project_id = uuidutils.generate_uuid()
- payload = {job_type: fake_id}
- db_api.new_job(self.context, fake_project_id, job_type, fake_id)
- fake_handle(None, self.context, payload=payload)
-
- logs = core.query_resource(self.context, models.AsyncJobLog, [], [])
-
- self.assertEqual(fake_id, logs[0]['resource_id'])
- self.assertEqual(job_type, logs[0]['type'])
-
- def test_job_handle_exception(self):
- job_type = 'fake_resource'
-
- @xmanager._job_handle(job_type)
- def fake_handle(self, ctx, payload):
- raise Exception()
-
- fake_id = 'fake_id'
- fake_project_id = uuidutils.generate_uuid()
- payload = {job_type: fake_id}
- db_api.new_job(self.context, fake_project_id, job_type, fake_id)
- fake_handle(None, self.context, payload=payload)
-
- jobs = core.query_resource(self.context, models.AsyncJob, [], [])
- expected_status = [constants.JS_New, constants.JS_Fail]
- job_status = [job['status'] for job in jobs]
- six.assertCountEqual(self, expected_status, job_status)
-
- self.assertEqual(fake_id, jobs[0]['resource_id'])
- self.assertEqual(fake_id, jobs[1]['resource_id'])
- self.assertEqual(job_type, jobs[0]['type'])
- self.assertEqual(job_type, jobs[1]['type'])
-
- def test_job_run_expire(self):
- job_type = 'fake_resource'
-
- @xmanager._job_handle(job_type)
- def fake_handle(self, ctx, payload):
- pass
-
- fake_id = uuidutils.generate_uuid()
- fake_project_id = uuidutils.generate_uuid()
- payload = {job_type: fake_id}
- db_api.new_job(self.context, fake_project_id, job_type, fake_id)
- expired_job = {
- 'id': uuidutils.generate_uuid(),
- 'type': job_type,
- 'timestamp': datetime.datetime.now() - datetime.timedelta(0, 200),
- 'status': constants.JS_Running,
- 'resource_id': fake_id,
- 'extra_id': constants.SP_EXTRA_ID
- }
- core.create_resource(self.context, models.AsyncJob, expired_job)
- fake_handle(None, self.context, payload=payload)
-
- logs = core.query_resource(self.context, models.AsyncJobLog, [], [])
-
- self.assertEqual(fake_id, logs[0]['resource_id'])
- self.assertEqual(job_type, logs[0]['type'])
-
- @patch.object(db_api, 'get_running_job')
- @patch.object(db_api, 'register_job')
- def test_worker_handle_timeout(self, mock_register, mock_get):
- job_type = 'fake_resource'
-
- @xmanager._job_handle(job_type)
- def fake_handle(self, ctx, payload):
- pass
-
- cfg.CONF.set_override('worker_handle_timeout', 1)
- mock_register.return_value = None
- mock_get.return_value = None
-
- fake_id = uuidutils.generate_uuid()
- fake_project_id = uuidutils.generate_uuid()
- payload = {job_type: fake_id}
- db_api.new_job(self.context, fake_project_id, job_type, fake_id)
- fake_handle(None, self.context, payload=payload)
-
- # nothing to assert, what we test is that fake_handle can exit when
-        # the worker handle timeout is reached
-
- @patch('oslo_utils.timeutils.utcnow')
- def test_get_failed_or_new_jobs(self, mock_now):
- mock_now.return_value = datetime.datetime(2000, 1, 2, 12, 0, 0)
- job_dict_list = [
- {'timestamp': datetime.datetime(2000, 1, 1, 12, 0, 0),
- 'resource_id': 'uuid1', 'type': 'res1', 'project_id': "uuid1",
- 'status': constants.JS_Fail}, # job_uuid1
- {'timestamp': datetime.datetime(2000, 1, 1, 12, 5, 0),
- 'resource_id': 'uuid1', 'type': 'res1', 'project_id': "uuid1",
- 'status': constants.JS_Fail}, # job_uuid3
- {'timestamp': datetime.datetime(2000, 1, 1, 12, 20, 0),
- 'resource_id': 'uuid2', 'type': 'res2', 'project_id': "uuid1",
- 'status': constants.JS_Fail}, # job_uuid5
- {'timestamp': datetime.datetime(2000, 1, 1, 12, 15, 0),
- 'resource_id': 'uuid2', 'type': 'res2', 'project_id': "uuid1",
- 'status': constants.JS_Fail}, # job_uuid7
- {'timestamp': datetime.datetime(2000, 1, 1, 12, 25, 0),
- 'resource_id': 'uuid3', 'type': 'res3', 'project_id': "uuid1",
- 'status': constants.JS_Success}, # job_uuid9
- {'timestamp': datetime.datetime(2000, 1, 1, 12, 30, 0),
- 'resource_id': 'uuid4', 'type': 'res4', 'project_id': "uuid1",
- 'status': constants.JS_New}, # job_uuid11
- {'timestamp': datetime.datetime(1999, 12, 31, 12, 0, 0),
- 'resource_id': 'uuid5', 'type': 'res5', 'project_id': "uuid1",
- 'status': constants.JS_Fail}, # job_uuid13
- {'timestamp': datetime.datetime(1999, 12, 31, 11, 59, 59),
- 'resource_id': 'uuid6', 'type': 'res6', 'project_id': "uuid1",
- 'status': constants.JS_Fail}] # job_uuid15
- for i, job_dict in enumerate(job_dict_list, 1):
- job_dict['id'] = 'job_uuid%d' % (2 * i - 1)
- job_dict['extra_id'] = 'extra_uuid%d' % (2 * i - 1)
- core.create_resource(self.context, models.AsyncJob, job_dict)
- job_dict['id'] = 'job_uuid%d' % (2 * i)
- job_dict['extra_id'] = 'extra_uuid%d' % (2 * i)
- job_dict['status'] = constants.JS_New
- core.create_resource(self.context, models.AsyncJob, job_dict)
-
- # for res3 + uuid3, the latest job's status is "Success", not returned
- # for res6 + uuid6, the latest job is out of the redo time span
- expected_failed_jobs = [
- {'resource_id': 'uuid1', 'type': 'res1', 'project_id': "uuid1"},
- {'resource_id': 'uuid2', 'type': 'res2', 'project_id': "uuid1"},
- {'resource_id': 'uuid5', 'type': 'res5', 'project_id': "uuid1"}]
- expected_new_jobs = [{'resource_id': 'uuid4', 'type': 'res4',
- 'project_id': "uuid1"}]
- (failed_jobs,
- new_jobs) = db_api.get_latest_failed_or_new_jobs(self.context)
- six.assertCountEqual(self, expected_failed_jobs, failed_jobs)
- six.assertCountEqual(self, expected_new_jobs, new_jobs)
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- for res in RES_LIST:
- del res[:]
diff --git a/tricircle/tests/unit/xjob/test_xservice.py b/tricircle/tests/unit/xjob/test_xservice.py
deleted file mode 100644
index 05091ded..00000000
--- a/tricircle/tests/unit/xjob/test_xservice.py
+++ /dev/null
@@ -1,91 +0,0 @@
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from mock import patch
-from oslo_config import cfg
-import oslo_messaging as messaging
-import unittest
-
-from tricircle.xjob import xmanager
-from tricircle.xjob import xservice
-
-CONF = cfg.CONF
-
-
-def fake_rpc_start(self, override_pool_size=None):
- return
-
-
-class FakeXManager(xmanager.XManager):
- """Fake xmanager for tests."""
- def __init__(self, host=None, service_name=None):
- super(FakeXManager, self).__init__(host=host,
- service_name=service_name)
-
- def test_method(self):
- return 'manager'
-
-
-class ExtendedXService(xservice.XService):
- def test_method(self):
- return 'service'
-
-
-class XServiceTest(unittest.TestCase):
- """Test cases for XServices."""
-
- def setUp(self):
- for opt in xservice.common_opts:
- if opt.name == 'enable_api_gateway':
- CONF.unregister_opt(opt)
- CONF.register_opts(xservice.common_opts)
-
- @patch.object(messaging.MessageHandlingServer, 'start',
- new=fake_rpc_start)
- def test_message_gets_to_manager(self):
- t_manager = FakeXManager()
- serv = xservice.XService('test', 'test', 'test', t_manager)
- serv.start()
- self.assertEqual('manager', serv.test_method())
-
- @patch.object(messaging.MessageHandlingServer, 'start',
- new=fake_rpc_start)
- def test_override_manager_method(self):
- t_manager = FakeXManager()
- serv = ExtendedXService('test', 'test', 'test', t_manager)
- serv.start()
- self.assertEqual('service', serv.test_method())
-
- @patch.object(messaging.MessageHandlingServer, 'start',
- new=fake_rpc_start)
- def test_service_create(self):
- t_manager = FakeXManager()
- CONF.set_override('host', 'tricircle-foo')
- serv = xservice.XService.create(manager=t_manager)
- serv.start()
- self.assertEqual('manager', serv.test_method())
- self.assertEqual('tricircle-foo', serv.host)
-
- @patch.object(messaging.MessageHandlingServer, 'start',
- new=fake_rpc_start)
- def test_service_create_extend(self):
- CONF.set_override('host', 'tricircle-bar')
- serv = xservice.create_service()
- self.assertEqual('tricircle-bar', serv.host)
-
- def tearDown(self):
- CONF.unregister_opts(xservice.common_opts)
diff --git a/tricircle/xjob/__init__.py b/tricircle/xjob/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tricircle/xjob/opts.py b/tricircle/xjob/opts.py
deleted file mode 100644
index bca2ec0d..00000000
--- a/tricircle/xjob/opts.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import tricircle.xjob.xservice
-
-
-def list_opts():
- return [
- ('DEFAULT', tricircle.xjob.xservice.common_opts),
- ('DEFAULT', tricircle.xjob.xservice.service_opts),
- ]
diff --git a/tricircle/xjob/xmanager.py b/tricircle/xjob/xmanager.py
deleted file mode 100644
index 86cc1e12..00000000
--- a/tricircle/xjob/xmanager.py
+++ /dev/null
@@ -1,1884 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import collections
-import datetime
-import eventlet
-import netaddr
-import random
-import six
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging as messaging
-from oslo_service import periodic_task
-
-import neutron_lib.constants as q_constants
-import neutron_lib.exceptions as q_exceptions
-import neutronclient.common.exceptions as q_cli_exceptions
-
-from tricircle.common import client
-from tricircle.common import constants
-import tricircle.common.context as t_context
-from tricircle.common import xrpcapi
-import tricircle.db.api as db_api
-import tricircle.network.exceptions as t_network_exc
-from tricircle.network import helper
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-IN_TEST = False
-AZ_HINTS = 'availability_zone_hints'
-
-
-def _job_handle(job_type):
- def handle_func(func):
- @six.wraps(func)
- def handle_args(*args, **kwargs):
- if IN_TEST:
-                # NOTE(zhiyuan) the job mechanism causes unpredictable results
-                # in unit tests, so we bypass it here. However, mocking a
-                # decorator that decorates member functions is problematic,
-                # so we use this flag instead, though it is not elegant.
- func(*args, **kwargs)
- return
- ctx = args[1]
- payload = kwargs['payload']
-
- resource_id = payload[job_type]
- start_time = datetime.datetime.now()
-
- while True:
- current_time = datetime.datetime.now()
- delta = current_time - start_time
- if delta.seconds >= CONF.worker_handle_timeout:
-                    # quit when this handler has been running for too long
- break
- job_new = db_api.get_latest_job(
- ctx, constants.JS_New, job_type, resource_id)
- if not job_new:
- break
- job_succ = db_api.get_latest_job(
- ctx, constants.JS_Success, job_type, resource_id)
- if job_succ and job_succ['timestamp'] >= job_new['timestamp']:
- break
- job = db_api.register_job(ctx, job_new['project_id'], job_type,
- resource_id)
- if not job:
-                    # failed to obtain the lock; let another worker handle it
- running_job = db_api.get_running_job(ctx, job_type,
- resource_id)
- if not running_job:
-                        # there are two reasons running_job can be None: the
-                        # running job has just finished, or all workers failed
-                        # to register the job due to a deadlock exception, so
-                        # we sleep and try again
- eventlet.sleep(CONF.worker_sleep_time)
- continue
- job_time = running_job['timestamp']
- current_time = datetime.datetime.now()
- delta = current_time - job_time
- if delta.seconds > CONF.job_run_expire:
-                        # the previous running job has expired; set its status
-                        # to Fail and try again to obtain the lock
- db_api.finish_job(ctx, running_job['id'], False,
- job_new['timestamp'])
- LOG.warning('Job %(job)s of type %(job_type)s for '
- 'resource %(resource)s expires, set '
- 'its state to Fail',
- {'job': running_job['id'],
- 'job_type': job_type,
- 'resource': resource_id})
- eventlet.sleep(CONF.worker_sleep_time)
- continue
- else:
-                        # the previous running job is still valid; just leave
-                        # the job to the worker that holds the lock
- break
- # successfully obtain the lock, start to execute handler
- try:
- func(*args, **kwargs)
- except Exception:
- db_api.finish_job(ctx, job['id'], False,
- job_new['timestamp'])
- LOG.error('Job %(job)s of type %(job_type)s for '
- 'resource %(resource)s fails',
- {'job': job['id'],
- 'job_type': job_type,
- 'resource': resource_id})
- break
- db_api.finish_job(ctx, job['id'], True, job_new['timestamp'])
- eventlet.sleep(CONF.worker_sleep_time)
- return handle_args
- return handle_func
-
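-# Illustrative usage of _job_handle (see the handlers on XManager below):
-# a handler is registered for a job type and receives a payload keyed by
-# that type, e.g.
-#
-#     @_job_handle(constants.JT_PORT_DELETE)
-#     def delete_server_port(self, ctx, payload):
-#         b_pod_id, b_port_id = payload[constants.JT_PORT_DELETE].split('#')
-#         ...
-#
-# The decorator serializes workers on the (job_type, resource_id) lock and
-# marks the job as Success or Fail in the async job table.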
-
-class PeriodicTasks(periodic_task.PeriodicTasks):
- def __init__(self):
- super(PeriodicTasks, self).__init__(CONF)
-
-
-class XManager(PeriodicTasks):
-
- target = messaging.Target(version='1.0')
-
- def __init__(self, host=None, service_name='xjob'):
-
- LOG.debug('XManager initialization...')
-
- if not host:
- host = CONF.host
- self.host = host
- self.service_name = service_name
- # self.notifier = rpc.get_notifier(self.service_name, self.host)
- self.additional_endpoints = []
- self.clients = {constants.TOP: client.Client()}
- self.job_handles = {
- constants.JT_CONFIGURE_ROUTE: self.configure_route,
- constants.JT_ROUTER_SETUP: self.setup_bottom_router,
- constants.JT_PORT_DELETE: self.delete_server_port,
- constants.JT_SFC_SYNC:
- self.sync_service_function_chain,
- constants.JT_RESOURCE_RECYCLE: self.recycle_resources,
- constants.JT_SEG_RULE_SETUP: self.configure_security_group_rules,
- constants.JT_NETWORK_UPDATE: self.update_network,
- constants.JT_SUBNET_UPDATE: self.update_subnet,
- constants.JT_SHADOW_PORT_SETUP: self.setup_shadow_ports,
- constants.JT_TRUNK_SYNC: self.sync_trunk,
- constants.JT_QOS_CREATE: self.create_qos_policy,
- constants.JT_QOS_UPDATE: self.update_qos_policy,
- constants.JT_QOS_DELETE: self.delete_qos_policy,
- constants.JT_SYNC_QOS_RULE: self.sync_qos_policy_rules
- }
- self.helper = helper.NetworkHelper()
- self.xjob_handler = xrpcapi.XJobAPI()
- super(XManager, self).__init__()
-
- def _get_client(self, region_name=None):
- if not region_name:
- return self.clients[constants.TOP]
- if region_name not in self.clients:
- self.clients[region_name] = client.Client(region_name)
- return self.clients[region_name]
-
- def periodic_tasks(self, context, raise_on_error=False):
- """Tasks to be run at a periodic interval."""
- return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
-
- def init_host(self):
- """init_host
-
- Hook to do additional manager initialization when one requests
- the service be started. This is called before any service record
- is created.
- Child classes should override this method.
- """
- LOG.debug('XManager init_host...')
-
- def cleanup_host(self):
- """cleanup_host
-
- Hook to do cleanup work when the service shuts down.
- Child classes should override this method.
- """
- LOG.debug('XManager cleanup_host...')
-
- def pre_start_hook(self):
- """pre_start_hook
-
- Hook to provide the manager the ability to do additional
- start-up work before any RPC queues/consumers are created. This is
- called after other initialization has succeeded and a service
- record is created.
- Child classes should override this method.
- """
- LOG.debug('XManager pre_start_hook...')
-
- def post_start_hook(self):
- """post_start_hook
-
- Hook to provide the manager the ability to do additional
- start-up work immediately after a service creates RPC consumers
- and starts 'running'.
- Child classes should override this method.
- """
- LOG.debug('XManager post_start_hook...')
-
- # rpc message endpoint handling
- def test_rpc(self, ctx, payload):
-
- LOG.info("xmanager receive payload: %s", payload)
-
- info_text = "xmanager receive payload: %s" % payload
-
- return info_text
-
- @staticmethod
- def _get_resource_by_name(cli, cxt, _type, name):
- return cli.list_resources(_type, cxt, filters=[{'key': 'name',
- 'comparator': 'eq',
- 'value': name}])[0]
-
- @staticmethod
- def _get_router_interfaces(cli, cxt, router_id, net_id):
- interfaces = cli.list_ports(
- cxt, filters=[{'key': 'network_id', 'comparator': 'eq',
- 'value': net_id},
- {'key': 'device_id', 'comparator': 'eq',
- 'value': router_id}])
- return [inf for inf in interfaces if inf['device_owner'] in (
- q_constants.DEVICE_OWNER_ROUTER_INTF,
- q_constants.DEVICE_OWNER_DVR_INTERFACE)]
-
- @periodic_task.periodic_task
- def redo_failed_or_new_job(self, ctx):
- failed_jobs, new_jobs = db_api.get_latest_failed_or_new_jobs(ctx)
- failed_jobs = [
- job for job in failed_jobs if job['type'] in self.job_handles]
- new_jobs = [
- job for job in new_jobs if job['type'] in self.job_handles]
- if not failed_jobs and not new_jobs:
- return
- if new_jobs:
- jobs = new_jobs
- is_new_job = True
- else:
- jobs = failed_jobs
- is_new_job = False
- # in one run we only pick one job to handle
- job_index = random.randint(0, len(jobs) - 1)
- job_type = jobs[job_index]['type']
- resource_id = jobs[job_index]['resource_id']
- project_id = jobs[job_index]['project_id']
- payload = {job_type: resource_id}
- LOG.debug('Redo %(status)s job for %(resource_id)s of type '
- '%(job_type)s',
- {'status': 'new' if is_new_job else 'failed',
- 'resource_id': resource_id, 'job_type': job_type})
- # this is an admin context, we set the correct project id
- ctx.tenant = project_id
- if not is_new_job:
- db_api.new_job(ctx, project_id, job_type, resource_id)
- self.job_handles[job_type](ctx, payload=payload)
-
- @staticmethod
- def _safe_create_bottom_floatingip(t_ctx, pod, client, fip_net_id,
- fip_address, port_id):
- try:
- client.create_floatingips(
- t_ctx, {'floatingip': {'floating_network_id': fip_net_id,
- 'floating_ip_address': fip_address,
- 'port_id': port_id}})
- except q_cli_exceptions.IpAddressInUseClient:
- fips = client.list_floatingips(t_ctx,
- [{'key': 'floating_ip_address',
- 'comparator': 'eq',
- 'value': fip_address}])
- if not fips:
-                # this is a rare case: we got an IpAddressInUseClient
-                # exception a second ago but now the floating ip is missing
- raise t_network_exc.BottomPodOperationFailure(
- resource='floating ip', region_name=pod['region_name'])
- associated_port_id = fips[0].get('port_id')
- if associated_port_id == port_id:
- # if the internal port associated with the existing fip is what
- # we expect, just ignore this exception
- pass
- elif not associated_port_id:
- # if the existing fip is not associated with any internal port,
- # update the fip to add association
- client.update_floatingips(t_ctx, fips[0]['id'],
- {'floatingip': {'port_id': port_id}})
- else:
- raise
-
- def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net,
- t_router, t_bridge_net, t_bridge_subnet,
- is_ext_net_pod):
- # NOTE(zhiyuan) after the bridge network combination, external network
- # is attached to a separate router, which is created in central plugin,
- # so is_ext_net_pod is not used in the current implementation, but we
- # choose to keep this parameter since it's an important attribute of a
- # pod and we may need to use it later.
- b_client = self._get_client(b_pod['region_name'])
-
- is_distributed = t_router.get('distributed', False)
- router_body = {'router': {'name': t_router['id'],
- 'distributed': is_distributed}}
- project_id = t_router['tenant_id']
- q_ctx = None # no need to pass neutron context when using client
- is_local_router = self.helper.is_local_router(ctx, t_router)
-
- if is_local_router:
- # for local router, it's safe for us to get the first element as
- # pod name
- pod_name = self.helper.get_router_az_hints(t_router)[0]
- if pod_name != b_pod['region_name']:
-                # we now allow attaching a cross-pod network to a local router,
- # so if the pod of the local router is different from the pod
- # of the bottom network, we do nothing.
- return
-
- # create bottom router in target bottom pod
- _, b_router_id = self.helper.prepare_bottom_element(
- ctx, project_id, b_pod, t_router, constants.RT_ROUTER, router_body)
-
- if not is_local_router:
- # create top bridge port
- t_bridge_port_id = self.helper.get_bridge_interface(
- ctx, q_ctx, project_id, t_pod, t_bridge_net['id'], b_router_id)
-
- # create bottom bridge port
- # if target bottom pod is hosting real external network, we create
- # another bottom router and attach the bridge network as internal
- # network, but this work is done by central plugin when user sets
- # router gateway.
- t_bridge_port = t_client.get_ports(ctx, t_bridge_port_id)
- (is_new, b_bridge_port_id, b_bridge_subnet_id,
- b_bridge_net_id) = self.helper.get_bottom_bridge_elements(
- ctx, project_id, b_pod, t_bridge_net,
- True, t_bridge_subnet, None)
-
- # we attach the bridge port as router gateway
- # add_gateway is update operation, which can run multiple times
- gateway_ip = t_bridge_port['fixed_ips'][0]['ip_address']
- b_client.action_routers(
- ctx, 'add_gateway', b_router_id,
- {'network_id': b_bridge_net_id,
- 'enable_snat': False,
- 'external_fixed_ips': [{'subnet_id': b_bridge_subnet_id,
- 'ip_address': gateway_ip}]})
-
- # attach internal port to bottom router
- t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'],
- t_net['id'])
- b_net_id = db_api.get_bottom_id_by_top_id_region_name(
- ctx, t_net['id'], b_pod['region_name'], constants.RT_NETWORK)
- if b_net_id:
- b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
- b_net_id)
- else:
- b_ports = []
- if not t_ports and b_ports:
- # remove redundant bottom interface
- b_port = b_ports[0]
- request_body = {'port_id': b_port['id']}
- b_client.action_routers(ctx, 'remove_interface', b_router_id,
- request_body)
- elif t_ports and not b_ports:
- # create new bottom interface
- t_port = t_ports[0]
-
- # only consider ipv4 address currently
- t_subnet_id = t_port['fixed_ips'][0]['subnet_id']
- t_port_ip = t_port['fixed_ips'][0]['ip_address']
- t_subnet = t_client.get_subnets(ctx, t_subnet_id)
- is_default_gw = t_port_ip == t_subnet['gateway_ip']
-
- if CONF.enable_api_gateway:
- (b_net_id,
- subnet_map) = self.helper.prepare_bottom_network_subnets(
- ctx, q_ctx, project_id, b_pod, t_net, [t_subnet])
- else:
- (b_net_id,
- subnet_map) = (t_net['id'], {t_subnet['id']: t_subnet['id']})
-
- if is_local_router:
- # if the attaching router is local router, we update the bottom
- # subnet gateway ip to the interface ip
- new_pools = self.helper.get_bottom_subnet_pools(t_subnet,
- t_port_ip)
- b_client.update_subnets(ctx, t_subnet_id,
- {'subnet': {
- 'gateway_ip': t_port_ip,
- 'allocation_pools': new_pools}})
- b_client.action_routers(
- ctx, 'add_interface', b_router_id,
- {'subnet_id': subnet_map[t_subnet_id]})
- else:
-            # the attaching router is not a local router
- if is_default_gw:
- # if top interface ip is equal to gateway ip of top subnet,
-                    # bottom subnet gateway is set to the ip of the reserved
- # gateway port, so we just attach the bottom subnet to the
- # bottom router and local neutron server will create the
- # interface for us, using the gateway ip.
- b_client.action_routers(
- ctx, 'add_interface', b_router_id,
- {'subnet_id': subnet_map[t_subnet_id]})
- else:
- # if top interface ip is different from gateway ip of top
- # subnet, meaning that this interface is explicitly created
-                    # by users, then the subnet may already be attached to a
-                    # local router and its gateway ip changed, so we need to
-                    # query the reserved gateway port to get its ip.
- gateway_port_name = constants.interface_port_name % (
- b_pod['region_name'], t_subnet['id'])
- gateway_port = t_client.list_ports(
- ctx, filters=[{'key': 'name',
- 'comparator': 'eq',
- 'value': gateway_port_name}])[0]
- b_port_body = self.helper.get_create_port_body(
- gateway_port['project_id'], gateway_port,
- {t_subnet_id: t_subnet_id}, b_net_id)
- b_port_body['port'][
- 'device_owner'] = q_constants.DEVICE_OWNER_ROUTER_INTF
- b_port = b_client.create_ports(ctx, b_port_body)
- b_client.action_routers(ctx, 'add_interface', b_router_id,
- {'port_id': b_port['id']})
-
- if not t_router['external_gateway_info']:
- return
-
- # handle floatingip
- t_ext_net_id = t_router['external_gateway_info']['network_id']
- t_fips = t_client.list_floatingips(ctx, [{'key': 'floating_network_id',
- 'comparator': 'eq',
- 'value': t_ext_net_id}])
- # skip unbound top floatingip
- t_ip_fip_map = dict([(fip['floating_ip_address'],
- fip) for fip in t_fips if fip['port_id']])
- mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_ext_net_id,
- constants.RT_NETWORK)
- # bottom external network should exist
- b_ext_pod, b_ext_net_id = mappings[0]
- b_ext_client = self._get_client(b_ext_pod['region_name'])
- b_fips = b_ext_client.list_floatingips(
- ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
- 'value': b_ext_net_id}])
- b_ip_fip_map = dict([(fip['floating_ip_address'],
- fip) for fip in b_fips])
- add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
- del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]
-
- for add_fip in add_fips:
- fip = t_ip_fip_map[add_fip]
- t_int_port_id = fip['port_id']
- b_int_port_id = db_api.get_bottom_id_by_top_id_region_name(
- ctx, t_int_port_id, b_pod['region_name'], constants.RT_PORT)
- if not b_int_port_id:
- LOG.warning('Port %(port_id)s associated with floating ip '
- '%(fip)s is not mapped to bottom pod',
- {'port_id': t_int_port_id, 'fip': add_fip})
- continue
- t_int_port = t_client.get_ports(ctx, t_int_port_id)
- if t_int_port['network_id'] != t_net['id']:
- # only handle floating ip association for the given top network
- continue
-
- if b_ext_pod['pod_id'] != b_pod['pod_id']:
- # if the internal port is not located in the external network
- # pod, we need to create a shadow port in that pod for floating
- # ip association purpose
- t_int_net_id = t_int_port['network_id']
- t_int_subnet_id = t_int_port['fixed_ips'][0]['subnet_id']
-
- b_int_port = b_client.get_ports(ctx, b_int_port_id)
- host = b_int_port['binding:host_id']
- agent_type = self.helper.get_agent_type_by_vif(
- b_int_port['binding:vif_type'])
- agent = db_api.get_agent_by_host_type(ctx, host, agent_type)
- max_bulk_size = CONF.client.max_shadow_port_bulk_size
- self.helper.prepare_shadow_ports(
- ctx, project_id, b_ext_pod, t_int_net_id,
- [b_int_port], [agent], max_bulk_size)
-
- # create routing entries for shadow network and subnet so we
- # can easily find them during central network and subnet
-                # deletion; create_resource_mapping catches the
-                # DBDuplicateEntry exception and ignores it, so it's safe to
-                # call this function multiple times
- db_api.create_resource_mapping(ctx, t_int_net_id, t_int_net_id,
- b_ext_pod['pod_id'], project_id,
- constants.RT_SD_NETWORK)
- db_api.create_resource_mapping(ctx, t_int_subnet_id,
- t_int_subnet_id,
- b_ext_pod['pod_id'], project_id,
- constants.RT_SD_SUBNET)
-
- self._safe_create_bottom_floatingip(
- ctx, b_pod, b_ext_client, b_ext_net_id, add_fip,
- b_int_port_id)
-
- for del_fip in del_fips:
- fip = b_ip_fip_map[del_fip]
- if b_ext_pod['pod_id'] != b_pod['pod_id'] and fip['port_id']:
- # shadow port is created in this case, but we leave shadow port
- # deletion work to plugin, so do nothing
- pass
- b_ext_client.delete_floatingips(ctx, fip['id'])
-
- @_job_handle(constants.JT_ROUTER_SETUP)
- def setup_bottom_router(self, ctx, payload):
- (b_pod_id,
- t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#')
-
- t_client = self._get_client()
- t_pod = db_api.get_top_pod(ctx)
- t_router = t_client.get_routers(ctx, t_router_id)
-
- if not t_router:
- # we just end this job if top router no longer exists
- return
-
- project_id = t_router['tenant_id']
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, t_net_id, constants.RT_NETWORK)
- b_pods = [mapping[0] for mapping in mappings]
- for b_pod in b_pods:
- # NOTE(zhiyuan) we create one job for each pod to avoid
- # conflict caused by different workers operating the same pod
- self.xjob_handler.setup_bottom_router(
- ctx, project_id, t_net_id, t_router_id, b_pod['pod_id'])
- return
-
- t_net = t_client.get_networks(ctx, t_net_id)
- if not t_net:
- # we just end this job if top network no longer exists
- return
- project_id = t_router['tenant_id']
-
- b_pod = db_api.get_pod(ctx, b_pod_id)
- is_local_router = self.helper.is_local_router(ctx, t_router)
- if is_local_router:
- t_bridge_net = None
- t_bridge_subnet = None
- else:
- t_bridge_net_name = constants.bridge_net_name % project_id
- t_bridge_subnet_name = constants.bridge_subnet_name % project_id
- t_bridge_net = self._get_resource_by_name(t_client, ctx, 'network',
- t_bridge_net_name)
- t_bridge_subnet = self._get_resource_by_name(
- t_client, ctx, 'subnet', t_bridge_subnet_name)
-
- ext_nets = t_client.list_networks(ctx,
- filters=[{'key': 'router:external',
- 'comparator': 'eq',
- 'value': True}])
- ext_net_region_names = set(
- [ext_net[AZ_HINTS][0] for ext_net in ext_nets])
-
- if not ext_net_region_names:
- is_ext_net_pod = False
- elif b_pod['region_name'] in ext_net_region_names:
- is_ext_net_pod = True
- else:
- is_ext_net_pod = False
-
- self._setup_router_one_pod(
- ctx, t_pod, b_pod, t_client, t_net, t_router, t_bridge_net,
- t_bridge_subnet, is_ext_net_pod)
- if not is_local_router:
- self.xjob_handler.configure_route(ctx, project_id,
- t_router_id)
-
- @_job_handle(constants.JT_CONFIGURE_ROUTE)
- def configure_route(self, ctx, payload):
- t_router_id = payload[constants.JT_CONFIGURE_ROUTE]
- t_client = self._get_client()
- t_router = t_client.get_routers(ctx, t_router_id)
- if not t_router:
- return
- if t_router.get('external_gateway_info'):
- t_ext_net_id = t_router['external_gateway_info']['network_id']
- else:
- t_ext_net_id = None
-
- non_vm_port_types = [q_constants.DEVICE_OWNER_ROUTER_INTF,
- q_constants.DEVICE_OWNER_DVR_INTERFACE,
- q_constants.DEVICE_OWNER_ROUTER_SNAT,
- q_constants.DEVICE_OWNER_ROUTER_GW,
- q_constants.DEVICE_OWNER_DHCP,
- constants.DEVICE_OWNER_SHADOW]
- ew_attached_port_types = [q_constants.DEVICE_OWNER_ROUTER_INTF,
- q_constants.DEVICE_OWNER_DVR_INTERFACE,
- q_constants.DEVICE_OWNER_ROUTER_GW]
- ns_attached_port_types = [q_constants.DEVICE_OWNER_ROUTER_INTF,
- q_constants.DEVICE_OWNER_DVR_INTERFACE]
-
- mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_router_id,
- constants.RT_ROUTER)
- if not mappings:
- b_pods, b_router_ids = [], []
- else:
- b_pods, b_router_ids = map(list, zip(*mappings))
- ns_mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, t_router_id, constants.RT_NS_ROUTER)
- b_ns_pdd, b_ns_router_id = None, None
- if ns_mappings:
- b_ns_pdd, b_ns_router_id = ns_mappings[0]
- b_pods.append(b_ns_pdd)
- b_router_ids.append(b_ns_router_id)
-
- router_ew_bridge_ip_map = {}
- router_ns_bridge_ip_map = {}
- router_ips_map = {}
-
-        pod_subnet_nexthop_map = {}  # {pod_name: {subnet_id: nexthop}}
- subnet_cidr_map = {} # {subnet_id: cidr}
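-        # illustrative contents of the maps populated in the loop below,
-        # assuming two pods bridged via 100.0.1.0/24 (values are made up):
-        #   router_ew_bridge_ip_map = {'b_router_1_id': '100.0.1.3'}
-        #   router_ips_map = {'b_router_1_id': {'10.0.1.0/24': ['10.0.1.13']}}
-        #   pod_subnet_nexthop_map = {'pod_1': {'subnet_3_id': '10.0.3.5'}}
-        #   subnet_cidr_map = {'subnet_3_id': '10.0.3.0/24'}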
-
- for i, b_pod in enumerate(b_pods):
- is_ns_router = b_router_ids[i] == b_ns_router_id
- bottom_client = self._get_client(b_pod['region_name'])
- if is_ns_router:
- device_owner_filter = ns_attached_port_types
- else:
- device_owner_filter = ew_attached_port_types
- b_interfaces = bottom_client.list_ports(
- ctx, filters=[{'key': 'device_id',
- 'comparator': 'eq',
- 'value': b_router_ids[i]},
- {'key': 'device_owner',
- 'comparator': 'eq',
- 'value': device_owner_filter}])
- router_ips_map[b_router_ids[i]] = {}
- pod_subnet_nexthop_map[b_pod['region_name']] = {}
-
- for b_interface in b_interfaces:
- ip = b_interface['fixed_ips'][0]['ip_address']
- bridge_cidr = CONF.client.bridge_cidr
- if netaddr.IPAddress(ip) in netaddr.IPNetwork(bridge_cidr):
- if is_ns_router:
- # this ip is the default gateway ip for north-south
- # networking
- router_ns_bridge_ip_map[b_router_ids[i]] = ip
- else:
- # this ip is the next hop for east-west networking
- router_ew_bridge_ip_map[b_router_ids[i]] = ip
- continue
- b_net_id = b_interface['network_id']
- b_subnet_id = b_interface['fixed_ips'][0]['subnet_id']
-
- b_subnet = bottom_client.get_subnets(ctx, b_subnet_id)
- if b_subnet['gateway_ip'] != ip:
-                    # the ip of the interface attached to the non-local router
-                    # differs from the gateway ip, meaning that the interface
-                    # is used for east-west traffic, so we save the necessary
-                    # information for the next step
- pod_subnet_nexthop_map[
- b_pod['region_name']][b_subnet_id] = ip
- subnet_cidr_map[b_subnet_id] = b_subnet['cidr']
-
- b_ports = bottom_client.list_ports(
- ctx, filters=[{'key': 'network_id',
- 'comparator': 'eq',
- 'value': b_net_id}])
- b_vm_ports = [b_port for b_port in b_ports if b_port.get(
- 'device_owner', '') not in non_vm_port_types]
- ips = [vm_port['fixed_ips'][0][
- 'ip_address'] for vm_port in b_vm_ports]
- router_ips_map[b_router_ids[i]][b_subnet['cidr']] = ips
-
- # handle extra routes for east-west traffic
- for i, b_router_id in enumerate(b_router_ids):
- if b_router_id == b_ns_router_id:
- continue
- bottom_client = self._get_client(b_pods[i]['region_name'])
- extra_routes = []
- if not router_ips_map[b_router_id]:
- bottom_client.update_routers(
- ctx, b_router_id, {'router': {'routes': extra_routes}})
- continue
- for router_id, cidr_ips_map in six.iteritems(router_ips_map):
- if router_id == b_router_id:
- continue
- for cidr, ips in six.iteritems(cidr_ips_map):
- if router_ips_map[b_router_id].get(cidr):
-                        # if the ip list is not empty, there are already vm
-                        # ports in the pod of b_router, so there is no need
-                        # to add extra routes
- continue
- for ip in ips:
- route = {'nexthop': router_ew_bridge_ip_map[router_id],
- 'destination': ip + '/32'}
- extra_routes.append(route)
-
- if router_ns_bridge_ip_map and t_ext_net_id:
- extra_routes.append(
-                    {'nexthop': list(router_ns_bridge_ip_map.values())[0],
- 'destination': constants.DEFAULT_DESTINATION})
- bottom_client.update_routers(
- ctx, b_router_id, {'router': {'routes': extra_routes}})
-
- # configure host routes for local network attached to local router
- for (pod_name,
- subnet_nexthop_map) in pod_subnet_nexthop_map.items():
- for subnet_id, nexthop in subnet_nexthop_map.items():
- host_routes = []
- for _subnet_id, cidr in subnet_cidr_map.items():
- if _subnet_id in subnet_nexthop_map:
- continue
- host_routes.append({'destination': cidr,
- 'nexthop': nexthop})
- bottom_client = self._get_client(pod_name)
- bottom_client.update_subnets(
- ctx, subnet_id, {'subnet': {'host_routes': host_routes}})
-
- if not b_ns_router_id:
-            # the router for north-south networking does not exist, skip
-            # extra route configuration for the north-south router
- return
- if not t_ext_net_id:
-            # the router is not attached to an external gateway but the router
-            # for north-south networking exists, so clear the extra routes
- bottom_client = self._get_client(b_ns_pdd['region_name'])
- bottom_client.update_routers(
- ctx, b_ns_router_id, {'router': {'routes': []}})
- return
-
- # handle extra routes for north-south router
- ip_bridge_ip_map = {}
- for router_id, cidr_ips_map in six.iteritems(router_ips_map):
- if router_id not in router_ew_bridge_ip_map:
- continue
- for cidr, ips in six.iteritems(cidr_ips_map):
- for ip in ips:
- nexthop = router_ew_bridge_ip_map[router_id]
- destination = ip + '/32'
- ip_bridge_ip_map[destination] = nexthop
-
- bottom_client = self._get_client(b_ns_pdd['region_name'])
- extra_routes = []
- for fixed_ip in ip_bridge_ip_map:
- extra_routes.append(
- {'nexthop': ip_bridge_ip_map[fixed_ip],
- 'destination': fixed_ip})
- bottom_client.update_routers(
- ctx, b_ns_router_id, {'router': {'routes': extra_routes}})
-
- @_job_handle(constants.JT_PORT_DELETE)
- def delete_server_port(self, ctx, payload):
- b_pod_id, b_port_id = payload[constants.JT_PORT_DELETE].split('#')
- b_pod = db_api.get_pod(ctx, b_pod_id)
- self._get_client(b_pod['region_name']).delete_ports(ctx, b_port_id)
-
- @staticmethod
- def _safe_create_security_group_rule(context, client, body):
- try:
- client.create_security_group_rules(context, body)
- except q_exceptions.Conflict:
- return
-
- @staticmethod
- def _safe_delete_security_group_rule(context, client, _id):
- try:
- client.delete_security_group_rules(context, _id)
- except q_exceptions.NotFound:
- return
-
- @staticmethod
- def _construct_bottom_rule(rule, sg_id, ip=None):
- ip = ip or rule['remote_ip_prefix']
- # if ip is passed, this is an extended rule for remote group
- return {'remote_group_id': None,
- 'direction': rule['direction'],
- 'remote_ip_prefix': ip,
- 'protocol': rule.get('protocol'),
- 'ethertype': rule['ethertype'],
- 'port_range_max': rule.get('port_range_max'),
- 'port_range_min': rule.get('port_range_min'),
- 'security_group_id': sg_id}
-
- @staticmethod
- def _compare_rule(rule1, rule2):
- for key in ('direction', 'remote_ip_prefix', 'protocol', 'ethertype',
- 'port_range_max', 'port_range_min'):
- if rule1[key] != rule2[key]:
- return False
- return True
-
- @_job_handle(constants.JT_SEG_RULE_SETUP)
- def configure_security_group_rules(self, ctx, payload):
- project_id = payload[constants.JT_SEG_RULE_SETUP]
- top_client = self._get_client()
- sg_filters = [{'key': 'tenant_id', 'comparator': 'eq',
- 'value': project_id}]
- top_sgs = top_client.list_security_groups(ctx, sg_filters)
- for top_sg in top_sgs:
- new_b_rules = []
- for t_rule in top_sg['security_group_rules']:
- if not t_rule['remote_group_id']:
- # leave sg_id empty here
- new_b_rules.append(
- self._construct_bottom_rule(t_rule, ''))
- continue
- if top_sg['name'] != 'default':
- # currently we only handle rules containing remote_group_id
- # for default security group
- continue
- if t_rule['ethertype'] != 'IPv4':
- continue
- subnets = top_client.list_subnets(
- ctx, [{'key': 'tenant_id', 'comparator': 'eq',
- 'value': project_id}])
- bridge_ip_net = netaddr.IPNetwork(CONF.client.bridge_cidr)
- subnet_cidr_set = set()
- for subnet in subnets:
- ip_net = netaddr.IPNetwork(subnet['cidr'])
- if ip_net in bridge_ip_net:
- continue
- # leave sg_id empty here.
-                    # Tricircle does not fully support IPv6 yet, so we
-                    # ignore IPv6 security group rules temporarily.
- if subnet['ip_version'] == q_constants.IP_VERSION_4:
- if subnet['cidr'] in subnet_cidr_set:
- continue
- subnet_cidr_set.add(subnet['cidr'])
- new_b_rules.append(
- self._construct_bottom_rule(t_rule, '',
- subnet['cidr']))
-
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, top_sg['id'], constants.RT_SG)
- for pod, b_sg_id in mappings:
- client = self._get_client(pod['region_name'])
- b_sg = client.get_security_groups(ctx, b_sg_id)
- add_rules = []
- del_rules = []
- match_index = set()
- for b_rule in b_sg['security_group_rules']:
- match = False
- for i, rule in enumerate(new_b_rules):
- if self._compare_rule(b_rule, rule):
- match = True
- match_index.add(i)
- break
- if not match:
- del_rules.append(b_rule)
- for i, rule in enumerate(new_b_rules):
- if i not in match_index:
- add_rules.append(rule)
-
- for del_rule in del_rules:
- self._safe_delete_security_group_rule(
- ctx, client, del_rule['id'])
- if add_rules:
- rule_body = {'security_group_rules': []}
- for add_rule in add_rules:
- add_rule['security_group_id'] = b_sg_id
- rule_body['security_group_rules'].append(add_rule)
- self._safe_create_security_group_rule(
- ctx, client, rule_body)
-
- @_job_handle(constants.JT_NETWORK_UPDATE)
- def update_network(self, ctx, payload):
- """update bottom network
-
-        if the bottom pod id is equal to POD_NOT_SPECIFIED, dispatch jobs for
-        every mapped bottom pod via RPC; otherwise update the network in the
-        specified pod.
-
- :param ctx: tricircle context
- :param payload: dict whose key is JT_NETWORK_UPDATE and value
- is "bottom_pod_id#top_network_id"
- :return: None
- """
- (b_pod_id, t_network_id) = payload[
- constants.JT_NETWORK_UPDATE].split('#')
-
- t_client = self._get_client()
- t_network = t_client.get_networks(ctx, t_network_id)
- if not t_network:
- return
-
- project_id = t_network['tenant_id']
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, t_network_id, constants.RT_NETWORK)
- b_pods = [mapping[0] for mapping in mappings]
- for b_pod in b_pods:
- self.xjob_handler.update_network(
- ctx, project_id, t_network_id,
- b_pod['pod_id'])
- return
-
- b_pod = db_api.get_pod(ctx, b_pod_id)
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
- b_network_id = db_api.get_bottom_id_by_top_id_region_name(
- ctx, t_network_id, b_region_name, constants.RT_NETWORK)
- # name is not allowed to be updated, because it is used by
- # lock_handle to retrieve bottom/local resources that have been
- # created but not registered in the resource routing table
- body = {
- 'network': {
- 'description': t_network['description'],
- 'admin_state_up': t_network['admin_state_up'],
- 'shared': t_network['shared']
- }
- }
-
- if not t_network.get('qos_policy_id', None):
- body['network']['qos_policy_id'] = None
-
- try:
- b_client.update_networks(ctx, b_network_id, body)
- except q_cli_exceptions.NotFound:
-            LOG.error('network: %(net_id)s not found, '
- 'pod name: %(name)s',
- {'net_id': b_network_id, 'name': b_region_name})
-
- @_job_handle(constants.JT_SUBNET_UPDATE)
- def update_subnet(self, ctx, payload):
- """update bottom subnet
-
-        if the bottom pod id is equal to POD_NOT_SPECIFIED, dispatch jobs for
-        every mapped bottom pod via RPC; otherwise update the subnet in the
-        specified pod.
-
- :param ctx: tricircle context
- :param payload: dict whose key is JT_SUBNET_UPDATE and value
- is "bottom_pod_id#top_subnet_id"
- :return: None
- """
- (b_pod_id, t_subnet_id) = payload[
- constants.JT_SUBNET_UPDATE].split('#')
-
- t_client = self._get_client()
- t_subnet = t_client.get_subnets(ctx, t_subnet_id)
- if not t_subnet:
- return
-
- project_id = t_subnet['tenant_id']
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, t_subnet_id, constants.RT_SUBNET)
- b_pods = [mapping[0] for mapping in mappings]
- for b_pod in b_pods:
- self.xjob_handler.update_subnet(ctx, project_id,
- t_subnet_id, b_pod['pod_id'])
- return
-
- b_pod = db_api.get_pod(ctx, b_pod_id)
- b_region_name = b_pod['region_name']
- b_subnet_id = db_api.get_bottom_id_by_top_id_region_name(
- ctx, t_subnet_id, b_region_name, constants.RT_SUBNET)
- b_client = self._get_client(region_name=b_region_name)
- b_subnet = b_client.get_subnets(ctx, b_subnet_id)
- b_gateway_ip = b_subnet['gateway_ip']
-
-        # we need to remove the bottom subnet gateway ip from the top subnet
-        # allocation pools
- b_allocation_pools = helper.NetworkHelper.get_bottom_subnet_pools(
- t_subnet, b_gateway_ip)
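-        # Illustrative example with made-up addresses: if the top subnet pool
-        # is 10.0.1.2-10.0.1.254 and the bottom gateway ip is 10.0.1.100, the
-        # helper is expected to return 10.0.1.2-10.0.1.99 and
-        # 10.0.1.101-10.0.1.254, so the bottom gateway address is never
-        # allocated from the top pod.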
-
-        # bottom gateway_ip doesn't need to be updated, because it is reserved
-        # by the top pod.
- # name is not allowed to be updated, because it is used by
- # lock_handle to retrieve bottom/local resources that have been
- # created but not registered in the resource routing table
- body = {
- 'subnet':
- {'description': t_subnet['description'],
- 'enable_dhcp': t_subnet['enable_dhcp'],
- 'allocation_pools': b_allocation_pools,
- 'host_routes': t_subnet['host_routes'],
- 'dns_nameservers': t_subnet['dns_nameservers']}
- }
- try:
- b_client.update_subnets(ctx, b_subnet_id, body)
- except q_cli_exceptions.NotFound:
- LOG.error('subnet: %(subnet_id)s not found, '
- 'pod name: %(name)s',
- {'subnet_id': b_subnet_id, 'name': b_region_name})
-
- @_job_handle(constants.JT_SHADOW_PORT_SETUP)
- def setup_shadow_ports(self, ctx, payload):
- """Setup shadow ports for the target pod and network
-
-        this job works as follows:
- (1) query all shadow ports from pods the target network is mapped to
- (2) query all real ports from pods the target network is mapped to
- (3) check the shadow ports and real ports in the target pod, create
- needed shadow ports
- (4) check the shadow ports and real ports in other pods, create a new
- job if the pod lacks some shadow ports
-
- :param ctx: tricircle context
- :param payload: {JT_SHADOW_PORT_SETUP: pod_id#network_id}
- :return: None
- """
- run_label = 'during shadow ports setup'
-
- (target_pod_id,
- t_net_id) = payload[constants.JT_SHADOW_PORT_SETUP].split('#')
- target_pod = db_api.get_pod(ctx, target_pod_id)
- t_client = self._get_client()
- t_net = t_client.get_networks(ctx, t_net_id)
- if not t_net:
- # we just end this job if top network no longer exists
- return
- project_id = t_net['tenant_id']
- mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_net_id,
- constants.RT_NETWORK)
- pod_ids = set([pod['pod_id'] for pod, _ in mappings])
- pod_port_ids_map = collections.defaultdict(set)
- pod_sw_port_ids_map = {}
- port_info_map = {}
- target_pod_nova_map = []
- if target_pod_id not in pod_ids:
- LOG.debug('Pod %s not found %s', target_pod_id, run_label)
- # network is not mapped to the specified pod, nothing to do
- return
- for b_pod, b_net_id in mappings:
- b_client = self._get_client(b_pod['region_name'])
- # port table has (network_id, device_owner) index
- b_sw_ports = b_client.list_ports(
- ctx, filters=[{'key': 'network_id', 'comparator': 'eq',
- 'value': b_net_id},
- {'key': 'device_owner', 'comparator': 'eq',
- 'value': constants.DEVICE_OWNER_SHADOW},
- {'key': 'fields', 'comparator': 'eq',
- 'value': ['id', 'status']}])
- b_sw_port_ids = set([port['id'] for port in b_sw_ports])
- if b_pod['pod_id'] == target_pod['pod_id']:
- b_down_sw_port_ids = set(
- [port['id'] for port in b_sw_ports if (
- port['status'] == q_constants.PORT_STATUS_DOWN)])
- pod_sw_port_ids_map[b_pod['pod_id']] = b_sw_port_ids
- # port table has (network_id, device_owner) index
- b_ports = b_client.list_ports(
- ctx, filters=[{'key': 'network_id', 'comparator': 'eq',
- 'value': b_net_id},
- {'key': 'fields', 'comparator': 'eq',
- 'value': ['id', 'binding:vif_type',
- 'binding:host_id', 'fixed_ips',
- 'device_owner', 'device_id',
- 'mac_address']}])
- LOG.debug('Shadow ports %s in pod %s %s',
- b_sw_ports, target_pod_id, run_label)
- LOG.debug('Ports %s in pod %s %s',
- b_ports, target_pod_id, run_label)
- for b_port in b_ports:
- if b_port['device_owner'] == constants.DEVICE_OWNER_NOVA:
- if b_pod['pod_id'] == target_pod_id:
- target_pod_nova_map.append(b_port['id'])
- if not self.helper.is_need_top_sync_port(
- b_port, cfg.CONF.client.bridge_cidr):
- continue
- if b_port['device_owner'] == constants.DEVICE_OWNER_SHADOW:
- continue
- b_port_id = b_port['id']
- pod_port_ids_map[b_pod['pod_id']].add(b_port_id)
- port_info_map[b_port_id] = b_port
-
- for target_nova_port in target_pod_nova_map:
- for pod_id in pod_port_ids_map:
- if pod_id != target_pod_id and \
- target_nova_port in pod_port_ids_map[pod_id]:
-                    # use a set literal here; subtracting a tuple from a set
-                    # raises TypeError
-                    pod_port_ids_map[pod_id] = \
-                        pod_port_ids_map[pod_id] - {target_nova_port}
-
- all_port_ids = set()
- for port_ids in six.itervalues(pod_port_ids_map):
- all_port_ids |= port_ids
- sync_port_ids = all_port_ids - (
- pod_port_ids_map[target_pod_id] | pod_sw_port_ids_map[
- target_pod_id])
- sync_pod_list = []
- for pod_id in pod_port_ids_map:
- if pod_id == target_pod_id:
- continue
- if pod_port_ids_map[target_pod_id] - (
- pod_port_ids_map[pod_id] | pod_sw_port_ids_map[pod_id]):
- sync_pod_list.append(pod_id)
-
- LOG.debug('Sync port ids %s %s', sync_port_ids, run_label)
- LOG.debug('Sync pod ids %s %s', sync_pod_list, run_label)
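-        # Illustrative example with made-up ids: if real ports {p1, p2, p3}
-        # exist across the mapped pods and the target pod already hosts real
-        # port p1 plus a shadow port for p2, sync_port_ids comes out as {p3};
-        # any other pod still missing a shadow for one of the target pod's
-        # real ports lands in sync_pod_list and gets its own follow-up job.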
-
- agent_info_map = {}
- port_bodys = []
- agents = []
- for port_id in sync_port_ids:
- port_body = port_info_map[port_id]
- host = port_body['binding:host_id']
- agent_type = self.helper.get_agent_type_by_vif(
- port_body['binding:vif_type'])
- if not agent_type:
- continue
- key = '%s#%s' % (host, agent_type)
- if key in agent_info_map:
- agent = agent_info_map[key]
- else:
- agent = db_api.get_agent_by_host_type(ctx, host, agent_type)
- if not agent:
- LOG.error('Agent of type %(agent_type)s in '
- 'host %(host)s not found during shadow '
- 'ports setup',
- {'agent_type': agent_type, 'host': host})
- continue
- agent_info_map[key] = agent
- port_bodys.append(port_body)
- agents.append(agent)
-
- max_bulk_size = CONF.client.max_shadow_port_bulk_size
- sw_port_ids = self.helper.prepare_shadow_ports(
- ctx, project_id, target_pod, t_net_id, port_bodys, agents,
- max_bulk_size)
- b_down_sw_port_ids = b_down_sw_port_ids | set(sw_port_ids)
- # value for key constants.PROFILE_FORCE_UP does not matter
- update_body = {
- 'port': {
- 'binding:profile': {constants.PROFILE_FORCE_UP: 'True'}
- }
- }
- for sw_port_id in b_down_sw_port_ids:
- self._get_client(target_pod['region_name']).update_ports(
- ctx, sw_port_id, update_body)
-
- for pod_id in sync_pod_list:
- self.xjob_handler.setup_shadow_ports(ctx, project_id,
- pod_id, t_net_id)
-
- def _get_bottom_need_created_subports(self, ctx, t_ctx, project_id,
- trunk_id, add_subport_ids):
- t_client = self._get_client()
- need_created_ports = []
- port_filters = [{'key': 'device_id',
- 'comparator': 'eq',
- 'value': trunk_id},
- {'key': 'device_owner',
- 'comparator': 'eq',
- 'value': constants.DEVICE_OWNER_SUBPORT}
- ]
- trunk_subports = t_client.list_ports(ctx, filters=port_filters)
- map_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': constants.RT_PORT},
- {'key': 'project_id',
- 'comparator': 'eq',
- 'value': project_id}]
-
- port_mappings = db_api.list_resource_routings(t_ctx, map_filters)
- mapping_port_ids = [port['top_id'] for port in port_mappings]
- pop_attrs = ['status', 'tags', 'updated_at',
- 'created_at', 'revision_number', 'id']
- for port in trunk_subports:
- if (port['id'] in add_subport_ids
- and port['id'] not in mapping_port_ids):
- port['device_id'] = port['id']
-                # pop attributes which are not allowed in POST
- for attr in pop_attrs:
- port.pop(attr, None)
- need_created_ports.append(port)
-
- return need_created_ports
-
- def _create_trunk_subport_mapping(self, t_ctx, project_id, pod, ports):
- entries = []
- for port in ports:
- port['id'] = port['device_id']
- entries.extend(self.helper.extract_resource_routing_entries(port))
- self.helper.ensure_resource_mapping(t_ctx, project_id, pod, entries)
-
- def _create_bottom_trunk_subports(self, ctx, target_pod,
- full_create_bodys, max_bulk_size):
- cursor = 0
- ret_port_ids = []
- b_client = self._get_client(target_pod['region_name'])
- while cursor < len(full_create_bodys):
- ret_port_ids.extend(self.helper.prepare_ports_with_retry(
- ctx, b_client,
- full_create_bodys[cursor: cursor + max_bulk_size]))
- cursor += max_bulk_size
- return ret_port_ids
-
- @_job_handle(constants.JT_TRUNK_SYNC)
- def sync_trunk(self, ctx, payload):
- b_pod_id, t_trunk_id = payload[constants.JT_TRUNK_SYNC].split('#')
- b_pod = db_api.get_pod(ctx, b_pod_id)
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
- b_trunk_id = db_api.get_bottom_id_by_top_id_region_name(
- ctx, t_trunk_id, b_region_name, constants.RT_TRUNK)
- if not b_trunk_id:
- return
- t_client = self._get_client()
- t_trunk = t_client.get_trunks(ctx, t_trunk_id)
- # delete trunk action
- if not t_trunk:
- b_client.delete_trunks(ctx, b_trunk_id)
- db_api.delete_mappings_by_top_id(ctx, t_trunk_id)
- return
-
- # update trunk action
- b_trunk = b_client.get_trunks(ctx, b_trunk_id)
-
- if not b_trunk:
- LOG.error('trunk: %(trunk_id)s not found, pod name: %(name)s',
- {'trunk_id': b_trunk_id, 'name': b_region_name})
- return
-
- body = {
- 'trunk':
- {'description': t_trunk['description'],
- 'admin_state_up': t_trunk['admin_state_up']}
- }
-
- t_subports = set(
- [(subport['port_id'],
- subport['segmentation_id']) for subport in t_trunk['sub_ports']])
- b_subports = set(
- [(subport['port_id'],
- subport['segmentation_id']) for subport in b_trunk['sub_ports']])
- add_subports = t_subports - b_subports
- del_subports = b_subports - t_subports
-
- add_subport_bodies = [
- {'port_id': subport[0],
- 'segmentation_type': 'vlan',
- 'segmentation_id': subport[1]} for subport in add_subports]
-
- del_subport_bodies = [
- {'port_id': subport[0]} for subport in del_subports]
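-        # Illustrative example with made-up ids: if the top trunk carries
-        # subports {(p1, 100), (p2, 101)} and the bottom trunk carries
-        # {(p1, 100), (p3, 102)}, then (p2, 101) is added to the bottom trunk
-        # and (p3, 102) is removed from it.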
-
- try:
- b_client.update_trunks(ctx, b_trunk_id, body)
-            # subports must be removed before new ones are added, otherwise
-            # we may try to add subports that already exist on the trunk
- if del_subport_bodies:
- b_client.action_trunks(ctx, 'remove_subports', b_trunk_id,
- {'sub_ports': del_subport_bodies})
-
- # create bottom ports bulk
- if add_subport_bodies:
- project_id = t_trunk['project_id']
- t_ctx = t_context.get_context_from_neutron_context(ctx)
- max_bulk_size = CONF.client.max_trunk_subports_bulk_size
- add_subport_ids = [
- subport['port_id'] for subport in add_subport_bodies]
- need_created_ports = self._get_bottom_need_created_subports(
- ctx, t_ctx, project_id, t_trunk_id, add_subport_ids)
- if need_created_ports:
- self._create_bottom_trunk_subports(
- ctx, b_pod, need_created_ports, max_bulk_size)
- self._create_trunk_subport_mapping(
- ctx, project_id, b_pod, need_created_ports)
- self.xjob_handler.configure_security_group_rules(
- t_ctx, project_id)
-
- b_client.action_trunks(ctx, 'add_subports', b_trunk_id,
- {'sub_ports': add_subport_bodies})
- except q_cli_exceptions.NotFound:
- LOG.error('trunk: %(trunk_id)s not found, pod name: %(name)s',
- {'trunk_id': b_trunk_id, 'name': b_region_name})
-
- def _delete_port_pair_by_ingress(self, ctx, b_client, ingress, project_id):
- filters = [{'key': 'ingress',
- 'comparator': 'eq',
- 'value': ingress},
- {'key': 'project_id',
- 'comparator': 'eq',
- 'value': project_id}
- ]
- pps = b_client.list_port_pairs(ctx, filters=filters)
- if not pps:
- return
- self._delete_bottom_resource_by_id(
- ctx, constants.RT_PORT_PAIR, pps[0]['id'],
- b_client, project_id)
-
- def _delete_flow_classifier_by_src_port(self, ctx, b_client,
- port_id, project_id):
- filters = [{'key': 'logical_source_port',
- 'comparator': 'eq',
- 'value': port_id},
- {'key': 'project_id',
- 'comparator': 'eq',
- 'value': project_id}
- ]
- fcs = b_client.list_flow_classifiers(ctx, filters=filters)
- if not fcs:
- return
- self._delete_bottom_resource_by_id(
- ctx, constants.RT_FLOW_CLASSIFIER, fcs[0]['id'],
- b_client, project_id)
-
- def _delete_portchain_by_fc_id(self, ctx, b_client, fc_id, project_id):
- filters = [{'key': 'project_id',
- 'comparator': 'eq',
- 'value': project_id}]
- pcs = b_client.list_port_chains(ctx, filters=filters)
- for pc in pcs:
- if fc_id in pc['flow_classifiers']:
- self._delete_bottom_resource_by_id(
- ctx, constants.RT_PORT_CHAIN, pc['id'],
- b_client, project_id)
- return
-
- def _clear_bottom_portpairgroup_portpairs(self, ctx, b_client,
- pp_ids, project_id):
- filters = [{'key': 'project_id',
- 'comparator': 'eq',
- 'value': project_id}]
- ppgs = b_client.list_port_pair_groups(ctx, filters=filters)
- for pp_id in pp_ids:
- for ppg in ppgs:
- if pp_id in ppg['port_pairs']:
- ppg_body = {'port_pair_group': {
- 'port_pairs': []
- }}
- b_client.update_port_pair_groups(ctx, ppg['id'], ppg_body)
- break
-
- def _delete_bottom_resource_by_id(self, ctx,
- res_type, res_id, b_client, project_id):
- try:
- b_client.delete_resources(res_type, ctx, res_id)
- except q_cli_exceptions.NotFound:
- LOG.debug(('%(res_type)s: %(id)s not found, '
- 'region name: %(name)s'),
- {'res_type': res_type,
- 'id': res_id,
- 'name': b_client.region_name})
- except q_cli_exceptions.Conflict as e:
- if constants.STR_IN_USE in e.message:
- LOG.debug(('%(res_type)s: %(id)s in use, '
- 'region name: %(name)s'),
- {'res_type': res_type,
- 'id': res_id,
- 'name': b_client.region_name})
- if res_type == constants.RT_FLOW_CLASSIFIER:
- self._delete_portchain_by_fc_id(
- ctx, b_client, res_id, project_id)
- self._delete_bottom_resource_by_id(
- ctx, constants.RT_FLOW_CLASSIFIER,
- res_id, b_client, project_id)
-                # we are deleting the port pair, so it should no longer be in
-                # use; remove it from its port pair group, if any.
- elif res_type == constants.RT_PORT_PAIR:
- self._clear_bottom_portpairgroup_portpairs(
- ctx, b_client, [res_id], project_id)
- self._delete_bottom_resource_by_id(
- ctx, constants.RT_PORT_PAIR,
- res_id, b_client, project_id)
-                # a conflict exception is not expected when deleting a port
-                # pair group, because port pair groups are only deleted during
-                # resource recycling and we guarantee that the port chain is
-                # deleted beforehand; deleting a port chain does not raise a
-                # conflict exception either
- else:
- raise
- else:
- raise
- db_api.delete_mappings_by_bottom_id(ctx, res_id)
-
- @_job_handle(constants.JT_RESOURCE_RECYCLE)
- def recycle_resources(self, ctx, payload):
- project_id = payload[constants.JT_RESOURCE_RECYCLE]
- filters = [{'key': 'project_id',
- 'comparator': 'eq',
- 'value': project_id}]
- resources = db_api.list_recycle_resources(ctx, filters)
- if not resources:
- return
- max_retries = 4
-        # recycle_resources is triggered at the end of
-        # sync_service_function_chain, so we need to handle the case where
-        # recycle_resources runs while sync_service_function_chain has not
-        # yet finished.
- filters = [{'key': 'type',
- 'comparator': 'eq',
- 'value': constants.JT_SFC_SYNC}]
- for i in range(max_retries):
- sync_sfc_job = db_api.list_jobs(ctx, filters)
- if sync_sfc_job:
- if i == max_retries - 1:
- return
- time.sleep(5)
-
- res_map = collections.defaultdict(list)
- for res in resources:
- res_map[res['resource_type']].append(res['resource_id'])
-
- resource_types = [constants.RT_PORT_CHAIN,
- constants.RT_FLOW_CLASSIFIER,
- constants.RT_PORT_PAIR_GROUP,
- constants.RT_PORT_PAIR]
-
- for res_type in resource_types:
- for res_id in res_map[res_type]:
- b_resources = db_api.get_bottom_mappings_by_top_id(
- ctx, res_id, res_type)
- for b_pod, b_res_id in b_resources:
- b_client = self._get_client(b_pod['region_name'])
- self._delete_bottom_resource_by_id(
- ctx, res_type, b_res_id, b_client, ctx.project_id)
- db_api.delete_recycle_resource(ctx, res_id)
-
- def _prepare_sfc_bottom_element(self, ctx, project_id, b_pod, ele,
- res_type, body, b_client, **kwargs):
- max_retries = 2
- for i in range(max_retries):
- try:
- _, b_res_id = self.helper.prepare_bottom_element(
- ctx, project_id, b_pod, ele, res_type, body)
- return b_res_id
- except q_cli_exceptions.BadRequest as e:
- if i == max_retries - 1:
- raise
- if (constants.STR_USED_BY not in e.message and
- constants.STR_CONFLICTS_WITH not in e.message):
- raise
- if res_type == constants.RT_PORT_PAIR:
- self._delete_port_pair_by_ingress(
- ctx, b_client, kwargs['ingress'], project_id)
- elif res_type == constants.RT_FLOW_CLASSIFIER:
- self._delete_flow_classifier_by_src_port(
- ctx, b_client, kwargs['logical_source_port'],
- project_id)
- else:
- raise
- except q_cli_exceptions.Conflict as e:
- if i == max_retries - 1:
- raise
- if constants.STR_IN_USE not in e.message:
- raise
- if res_type == constants.RT_PORT_PAIR_GROUP:
- self._clear_bottom_portpairgroup_portpairs(
- ctx, b_client, kwargs['port_pairs'], project_id)
- elif res_type == constants.RT_PORT_CHAIN:
- self._delete_portchain_by_fc_id(
- ctx, b_client, kwargs['fc_id'], project_id)
- else:
- raise
-
- @_job_handle(constants.JT_SFC_SYNC)
- def sync_service_function_chain(self, ctx, payload):
- (b_pod_id, t_port_chain_id, net_id) = payload[
- constants.JT_SFC_SYNC].split('#')
-
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- if net_id:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, net_id, constants.RT_NETWORK)
- elif t_port_chain_id:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, t_port_chain_id, constants.RT_PORT_CHAIN)
- b_pods = [mapping[0] for mapping in mappings]
- for b_pod in b_pods:
- self.xjob_handler.sync_service_function_chain(
- ctx, ctx.project_id, t_port_chain_id,
- net_id, b_pod['pod_id'])
- return
-
- # abbreviation, pp: port pair, ppg: port pair group,
- # pc: port chain, fc: flow classifier
- t_client = self._get_client()
- t_pc = t_client.get_port_chains(ctx, t_port_chain_id)
- b_pod = db_api.get_pod(ctx, b_pod_id)
- region_name = b_pod['region_name']
- b_client = self._get_client(region_name)
- # delete action
- if not t_pc:
- self.xjob_handler.recycle_resources(ctx, ctx.project_id)
- return
-
- t_pps = {}
- t_ppgs = []
- for ppg_id in t_pc['port_pair_groups']:
- ppg = t_client.get_port_pair_groups(ctx, ppg_id)
- if not ppg:
- LOG.error('port pair group: %(ppg_id)s not found, '
- 'pod name: %(name)s', {'ppg_id': ppg_id,
- 'name': region_name})
- raise
- t_ppgs.append(ppg)
-
- for ppg in t_ppgs:
- filters = [{'key': 'portpairgroup_id',
- 'comparator': 'eq',
- 'value': ppg['id']}]
- pp = t_client.list_port_pairs(ctx, filters=filters)
- if pp:
- t_pps[ppg['id']] = pp
- b_pp_ids = {}
- for key, value in six.iteritems(t_pps):
- b_pp_ids[key] = []
- for pp in value:
- pp_id = pp.pop('id')
- b_pp_id = self._prepare_sfc_bottom_element(
- ctx, pp['project_id'], b_pod, {'id': pp_id},
- constants.RT_PORT_PAIR, {'port_pair': pp}, b_client,
- ingress=pp['ingress'])
- pp_body = {'port_pair': {
- 'name': pp['name'],
- 'description': pp['description']}}
- try:
- b_client.update_port_pairs(ctx, b_pp_id, pp_body)
- except q_cli_exceptions.NotFound:
-                LOG.error(('port pair: %(pp_id)s not found, '
- 'region name: %(name)s'),
- {'pp_id': pp_id, 'name': region_name})
- raise
- b_pp_ids[key].append(b_pp_id)
-
- b_ppg_ids = []
- for ppg in t_ppgs:
- ppg['port_pairs'] = b_pp_ids.get(ppg['id'], [])
- ppg_id = ppg.pop('id')
- ppg.pop('group_id')
- b_ppg_id = self._prepare_sfc_bottom_element(
- ctx, ppg['project_id'], b_pod, {'id': ppg_id},
- constants.RT_PORT_PAIR_GROUP, {'port_pair_group': ppg},
- b_client, port_pairs=ppg['port_pairs'])
- ppg_body = {'port_pair_group': {
- 'name': ppg['name'],
- 'description': ppg['description'],
- 'port_pairs': ppg['port_pairs']
- }}
- try:
- b_client.update_port_pair_groups(ctx, b_ppg_id, ppg_body)
- except q_cli_exceptions.NotFound:
-                LOG.error(('port pair group: %(t_ppg_id)s not found, '
- 'region name: %(name)s'),
- {'t_ppg_id': ppg_id, 'name': region_name})
- raise
- b_ppg_ids.append(b_ppg_id)
-
- b_fc_ids = []
- for fc_id in t_pc['flow_classifiers']:
- fc = t_client.get_flow_classifiers(ctx, fc_id)
- if fc:
- fc_id = fc.pop('id')
- b_fc_id = self._prepare_sfc_bottom_element(
- ctx, ppg['project_id'], b_pod, {'id': fc_id},
- constants.RT_FLOW_CLASSIFIER, {'flow_classifier': fc},
- b_client, logical_source_port=fc['logical_source_port'])
- fc_body = {'flow_classifier': {
- 'name': fc['name'],
- 'description': fc['description']
- }}
- try:
- b_client.update_flow_classifiers(ctx, b_fc_id, fc_body)
- except q_cli_exceptions.NotFound:
-                    LOG.error(('flow classifier: %(fc_id)s not found, '
- 'region name: %(name)s'),
- {'fc_id': fc_id, 'name': region_name})
- raise
- b_fc_ids.append(b_fc_id)
-
- t_pc.pop('id')
- t_pc['port_pair_groups'] = b_ppg_ids
- t_pc['flow_classifiers'] = b_fc_ids
- b_pc_id = self._prepare_sfc_bottom_element(
- ctx, t_pc['project_id'], b_pod, {'id': t_port_chain_id},
- constants.RT_PORT_CHAIN, {'port_chain': t_pc}, b_client,
- fc_id=b_fc_ids[0] if b_fc_ids else None)
- pc_body = {'port_chain': {
- 'name': t_pc['name'],
- 'description': t_pc['description'],
- 'port_pair_groups': t_pc['port_pair_groups'],
- 'flow_classifiers': t_pc['flow_classifiers']
- }}
-
- try:
- b_client.update_port_chains(ctx, b_pc_id, pc_body)
- except q_cli_exceptions.NotFound:
-            LOG.error(('port chain: %(pc_id)s not found, '
- 'region name: %(name)s'),
- {'pc_id': t_port_chain_id, 'name': region_name})
- raise
-
- self.xjob_handler.recycle_resources(ctx, t_pc['project_id'])
-
- @_job_handle(constants.JT_QOS_CREATE)
- def create_qos_policy(self, ctx, payload):
- (b_pod_id, t_policy_id, res_type, res_id) = payload[
- constants.JT_QOS_CREATE].split('#')
-
- t_client = self._get_client()
- t_policy = t_client.get_qos_policies(ctx, t_policy_id)
-
- if not t_policy:
- # we just end this job if top policy no longer exists
- return
-
- project_id = t_policy['tenant_id']
-
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, res_id, res_type)
- for b_pod, _ in mappings:
- self.xjob_handler.create_qos_policy(ctx, project_id,
- t_policy_id,
- b_pod['pod_id'],
- res_type,
- res_id)
- return
-
- b_pod = db_api.get_pod(ctx, b_pod_id)
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
-
- body = {
- 'policy': {
- 'description': t_policy.get('description', ''),
- 'tenant_id': t_policy.get('tenant_id', ''),
- 'project_id': t_policy.get('project_id', ''),
- 'shared': t_policy.get('shared', False),
- 'name': t_policy.get('name', '')
- }
- }
-
- try:
- _, b_policy_id = self.helper.prepare_bottom_element(
- ctx, project_id, b_pod, t_policy, constants.RT_QOS, body)
- if res_id:
- if res_type == constants.RT_NETWORK:
- body = {
- "network": {
- "qos_policy_id": b_policy_id
- }
- }
- b_client.update_networks(ctx, res_id, body)
- if res_type == constants.RT_PORT:
- body = {
- "port": {
- "qos_policy_id": b_policy_id
- }
- }
- b_client.update_ports(ctx, res_id, body)
- if t_policy['rules']:
- self.xjob_handler.sync_qos_policy_rules(
- ctx, project_id, t_policy_id)
- except q_cli_exceptions.NotFound:
-            LOG.error('qos policy: %(policy_id)s not found, '
- 'pod name: %(name)s',
- {'policy_id': t_policy_id, 'name': b_region_name})
-
- @_job_handle(constants.JT_QOS_UPDATE)
- def update_qos_policy(self, ctx, payload):
- (b_pod_id, t_policy_id) = payload[
- constants.JT_QOS_UPDATE].split('#')
-
- t_client = self._get_client()
- t_policy = t_client.get_qos_policies(ctx, t_policy_id)
- if not t_policy:
- return
- project_id = t_policy['tenant_id']
-
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, t_policy_id, constants.RT_QOS)
- for b_pod, _ in mappings:
- self.xjob_handler.update_qos_policy(ctx, project_id,
- t_policy_id,
- b_pod['pod_id'])
- return
-
- b_pod = db_api.get_pod(ctx, b_pod_id)
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
- b_policy_id = db_api.get_bottom_id_by_top_id_region_name(
- ctx, t_policy_id, b_region_name, constants.RT_QOS)
-
- if not b_policy_id:
- return
-
- body = {
- 'policy': {
- 'description': t_policy.get('description', ''),
-                'shared': t_policy.get('shared', False),
- 'name': t_policy.get('name', '')
- }
- }
-
- try:
- b_client.update_qos_policies(ctx, b_policy_id, body)
- except q_cli_exceptions.NotFound:
-            LOG.error('qos policy: %(policy_id)s not found, '
- 'pod name: %(name)s',
- {'policy_id': t_policy_id, 'name': b_region_name})
-
- @_job_handle(constants.JT_QOS_DELETE)
- def delete_qos_policy(self, ctx, payload):
- (b_pod_id, t_policy_id) = payload[
- constants.JT_QOS_DELETE].split('#')
-
- project_id = ctx.tenant_id
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, t_policy_id, constants.RT_QOS)
- for b_pod, _ in mappings:
- self.xjob_handler.delete_qos_policy(ctx, project_id,
- t_policy_id,
- b_pod['pod_id'])
- return
-
- b_pod = db_api.get_pod(ctx, b_pod_id)
- b_region_name = b_pod['region_name']
- b_client = self._get_client(region_name=b_region_name)
- b_policy_id = db_api.get_bottom_id_by_top_id_region_name(
- ctx, t_policy_id, b_region_name, constants.RT_QOS)
-
- try:
- b_client.delete_qos_policies(ctx, b_policy_id)
- db_api.delete_mappings_by_bottom_id(ctx, b_policy_id)
- except q_cli_exceptions.NotFound:
-            LOG.error('qos policy: %(policy_id)s not found, '
- 'pod name: %(name)s',
- {'policy_id': t_policy_id, 'name': b_region_name})
-
- @staticmethod
- def _safe_create_policy_rule(
- t_context, client, rule_type, policy_id, body):
- try:
- return getattr(client, 'create_%s_rules' % rule_type)(
- t_context, policy_id, body)
- except q_exceptions.Conflict:
- return
-
- @staticmethod
- def _safe_get_policy_rule(t_context, client, rule_type, rule_id,
- policy_id):
- combine_id = '%s#%s' % (rule_id, policy_id)
- return getattr(client, 'get_%s_rules' % rule_type)(
- t_context, combine_id)
-
- @staticmethod
- def _safe_delete_policy_rule(t_context, client, rule_type, rule_id,
- policy_id):
- combine_id = '%s#%s' % (rule_id, policy_id)
- getattr(client, 'delete_%s_rules' % rule_type)(
- t_context, combine_id)
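-    # Note on the combine_id format used above: the rule id and its policy id
-    # are packed into one string, e.g. 'rule-uuid#policy-uuid' (made-up
-    # uuids), presumably so the client helpers can address a QoS rule, which
-    # Neutron identifies by both ids, through a single identifier.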
-
- @staticmethod
- def _construct_bottom_bandwidth_limit_rule(t_rule):
- return {
- "max_kbps": t_rule["max_kbps"],
- "max_burst_kbps": t_rule["max_burst_kbps"]
- }
-
- @staticmethod
- def _construct_bottom_dscp_marking_rule(t_rule):
- return {
- "dscp_mark": t_rule["dscp_mark"]
- }
-
- @staticmethod
- def _construct_bottom_minimum_bandwidth_rule(t_rule):
- return {
- "min_kbps": t_rule["min_kbps"],
- "direction": t_rule["direction"]
- }
-
- def _construct_bottom_policy_rule(self, rule_type, rule_data):
- return getattr(self, '_construct_bottom_%s_rule' % rule_type)(
- rule_data)
-
- @staticmethod
- def _compare_bandwidth_limit_rule(rule1, rule2):
- for key in ('max_kbps', 'max_burst_kbps'):
- if rule1[key] != rule2[key]:
- return False
- return True
-
- @staticmethod
- def _compare_dscp_marking_rule(rule1, rule2):
- for key in ('dscp_mark',):
- if rule1[key] != rule2[key]:
- return False
- return True
-
- @staticmethod
- def _compare_minimum_bandwidth_rule(rule1, rule2):
- for key in ('min_kbps', 'direction'):
- if rule1[key] != rule2[key]:
- return False
- return True
-
- def _compare_policy_rule(self, rule_type, rule1, rule2):
- return getattr(self, '_compare_%s_rule' % rule_type)(rule1, rule2)
-
- @_job_handle(constants.JT_SYNC_QOS_RULE)
- def sync_qos_policy_rules(self, ctx, payload):
- policy_id = payload[constants.JT_SYNC_QOS_RULE]
- top_client = self._get_client()
-
- bandwidth_limit_rules = \
- top_client.list_bandwidth_limit_rules(ctx, filters=[{
- 'key': 'policy_id', 'comparator': 'eq', 'value': policy_id}])
- dscp_marking_rules = \
- top_client.list_dscp_marking_rules(ctx, filters=[{
- 'key': 'policy_id', 'comparator': 'eq', 'value': policy_id}])
- minimum_bandwidth_rules = \
- top_client.list_minimum_bandwidth_rules(ctx, filters=[{
- 'key': 'policy_id', 'comparator': 'eq', 'value': policy_id}])
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, policy_id, constants.RT_QOS)
-
- self._sync_policy_rules(
- ctx, mappings, 'bandwidth_limit_rule', bandwidth_limit_rules)
-
- self._sync_policy_rules(
- ctx, mappings, 'dscp_marking_rule', dscp_marking_rules)
-
- self._sync_policy_rules(
- ctx, mappings, 'minimum_bandwidth_rule', minimum_bandwidth_rules)
-
- def _sync_policy_rules(self, ctx, mappings, rule_type, rules):
- if rule_type == 'bandwidth_limit_rule':
- rule_types = 'bandwidth_limit_rules'
- prefix = 'bandwidth_limit'
- elif rule_type == 'dscp_marking_rule':
- rule_types = 'dscp_marking_rules'
- prefix = 'dscp_marking'
- else:
- rule_types = 'minimum_bandwidth_rules'
- prefix = 'minimum_bandwidth'
-
- new_b_rules = []
- for t_rule in rules:
- new_b_rules.append(
- getattr(self, '_construct_bottom_%s' % rule_type)(t_rule))
-
- for pod, b_policy_id in mappings:
- client = self._get_client(pod['region_name'])
- b_rules = getattr(client, 'list_%s' % rule_types)(
- ctx, filters=[{'key': 'policy_id',
- 'comparator': 'eq',
- 'value': b_policy_id}])
- add_rules = []
- del_rules = []
- match_index = set()
- for b_rule in b_rules:
- match = False
- for i, rule in enumerate(new_b_rules):
- if getattr(self, '_compare_%s' % rule_type)(b_rule, rule):
- match = True
- match_index.add(i)
- break
- if not match:
- del_rules.append(b_rule)
- for i, rule in enumerate(new_b_rules):
- if i not in match_index:
- add_rules.append(rule)
-
- for del_rule in del_rules:
- self._safe_delete_policy_rule(
- ctx, client, prefix, del_rule['id'],
- b_policy_id)
- if add_rules:
- for new_rule in add_rules:
- rule_body = {rule_type: new_rule}
- self._safe_create_policy_rule(
- ctx, client, prefix,
- b_policy_id, rule_body)
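-    # Illustrative example with made-up values: if the top policy carries a
-    # bandwidth_limit rule with max_kbps=1000/max_burst_kbps=100 while the
-    # bottom policy only has max_kbps=2000/max_burst_kbps=200, the bottom
-    # rule ends up in del_rules and the reconstructed top rule in add_rules,
-    # so the bottom pod converges to the top definition.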
diff --git a/tricircle/xjob/xservice.py b/tricircle/xjob/xservice.py
deleted file mode 100644
index 35d76b2f..00000000
--- a/tricircle/xjob/xservice.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import random
-import sys
-
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging as messaging
-from oslo_service import service as srv
-
-from tricircle.common.i18n import _
-
-from tricircle.common import baserpc
-from tricircle.common import context
-from tricircle.common import rpc
-from tricircle.common import version
-
-
-from tricircle.common import serializer as t_serializer
-
-from tricircle.common import topics
-from tricircle.xjob import xmanager as t_xmanager
-
-
-_TIMER_INTERVAL = 30
-_TIMER_INTERVAL_MAX = 60
-
-common_opts = [
- cfg.StrOpt('host', default='tricircle.xhost',
- help=_("The host name for RPC server")),
- cfg.IntOpt('workers', default=1,
- help=_("Number of workers")),
- cfg.IntOpt('worker_handle_timeout', default=1800,
- help=_("Timeout for worker's one turn of processing, in"
- " seconds")),
- cfg.IntOpt('job_run_expire', default=180,
- help=_("Running job is considered expires after this time, in"
- " seconds")),
- cfg.FloatOpt('worker_sleep_time', default=0.1,
- help=_("Seconds a worker sleeps after one run in a loop")),
- cfg.IntOpt('redo_time_span', default=172800,
- help=_("Time span in seconds, we calculate the latest job "
- "timestamp by subtracting this time span from the "
- "current timestamp, jobs created between these two "
- "timestamps will be redone")),
- cfg.BoolOpt('enable_api_gateway',
- default=False,
- help=_('Whether the Nova API gateway is enabled'))
-]
-
-service_opts = [
- cfg.IntOpt('report_interval',
- default=10,
- help='Seconds between nodes reporting state to datastore'),
- cfg.BoolOpt('periodic_enable',
- default=True,
- help='Enable periodic tasks'),
- cfg.IntOpt('periodic_fuzzy_delay',
- default=60,
- help='Range of seconds to randomly delay when starting the'
- ' periodic task scheduler to reduce stampeding.'
- ' (Disable by setting to 0)'),
- ]
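-
-# A minimal sketch of how these options could appear in tricircle.conf,
-# assuming both option lists end up registered under [DEFAULT]:
-#
-#     [DEFAULT]
-#     host = tricircle.xhost
-#     workers = 1
-#     report_interval = 10
-#     periodic_enable = True
-#     periodic_fuzzy_delay = 60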
-
-CONF = cfg.CONF
-CONF.register_opts(service_opts)
-
-LOG = logging.getLogger(__name__)
-
-
-class XService(srv.Service):
-
- """class Service
-
- Service object for binaries running on hosts.
- A service takes a manager and enables rpc by listening to queues based
- on topic. It also periodically runs tasks on the manager and reports
- its state to the database services table.
- """
-
- def __init__(self, host, binary, topic, manager, report_interval=None,
- periodic_enable=None, periodic_fuzzy_delay=None,
- periodic_interval_max=None, serializer=None,
- *args, **kwargs):
- super(XService, self).__init__()
- self.host = host
- self.binary = binary
- self.topic = topic
- self.manager = manager
- self.rpc_server = None
- self.report_interval = report_interval
- self.periodic_enable = periodic_enable
- self.periodic_fuzzy_delay = periodic_fuzzy_delay
- self.interval_max = periodic_interval_max
- self.serializer = serializer
- self.saved_args, self.saved_kwargs = args, kwargs
-
- def start(self):
- ver_str = version.version_info
- LOG.info('Starting %(topic)s node (version %(version)s)',
- {'topic': self.topic, 'version': ver_str})
-
- self.manager.init_host()
- self.manager.pre_start_hook()
-
- LOG.debug(_("Creating RPC server for service %s"), self.topic)
-
- target = messaging.Target(topic=self.topic, server=self.host)
-
- endpoints = [
- self.manager,
- baserpc.BaseServerRPCAPI(self.manager.service_name)
- ]
- endpoints.extend(self.manager.additional_endpoints)
-
- self.rpc_server = rpc.get_server(target, endpoints, self.serializer)
-
- self.rpc_server.start()
-
- self.manager.post_start_hook()
-
- if self.periodic_enable:
- if self.periodic_fuzzy_delay:
- initial_delay = random.randint(0, self.periodic_fuzzy_delay)
- else:
- initial_delay = None
-
- self.tg.add_dynamic_timer(self.periodic_tasks,
- initial_delay=initial_delay,
- periodic_interval_max=self.interval_max)
-
- def __getattr__(self, key):
- manager = self.__dict__.get('manager', None)
- return getattr(manager, key)
-
- @classmethod
- def create(cls, host=None, binary=None, topic=None, manager=None,
- report_interval=None, periodic_enable=None,
- periodic_fuzzy_delay=None, periodic_interval_max=None,
- serializer=None,):
-
- """Instantiates class and passes back application object.
-
- :param host: defaults to CONF.host
- :param binary: defaults to basename of executable
- :param topic: defaults to bin_name - 'nova-' part
- :param manager: defaults to CONF._manager
- :param report_interval: defaults to CONF.report_interval
- :param periodic_enable: defaults to CONF.periodic_enable
- :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
- :param periodic_interval_max: if set, the max time to wait between runs
- """
-
- if not host:
- host = CONF.host
- if not binary:
- binary = os.path.basename(sys.argv[0])
- if not topic:
- topic = binary.rpartition('tricircle-')[2]
- if not manager:
- manager_cls = ('%s_manager' %
- binary.rpartition('tricircle-')[2])
- manager = CONF.get(manager_cls, None)
- if report_interval is None:
- report_interval = CONF.report_interval
- if periodic_enable is None:
- periodic_enable = CONF.periodic_enable
- if periodic_fuzzy_delay is None:
- periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
-
- service_obj = cls(host, binary, topic, manager,
- report_interval=report_interval,
- periodic_enable=periodic_enable,
- periodic_fuzzy_delay=periodic_fuzzy_delay,
- periodic_interval_max=periodic_interval_max,
- serializer=serializer)
-
- return service_obj
-
- def kill(self):
- self.stop()
-
- def stop(self):
- try:
- self.rpc_server.stop()
- except Exception:
- pass
-
- try:
- self.manager.cleanup_host()
- except Exception:
-            LOG.exception('Service error occurred during cleanup_host')
-
- super(XService, self).stop()
-
- def periodic_tasks(self, raise_on_error=False):
- """Tasks to be run at a periodic interval."""
- ctxt = context.get_admin_context()
- return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
-
-
-def create_service():
-
- LOG.debug(_('create xjob server'))
-
- xmanager = t_xmanager.XManager()
- xservice = XService(
- host=CONF.host,
- binary="xjob",
- topic=topics.TOPIC_XJOB,
- manager=xmanager,
- periodic_enable=True,
- report_interval=_TIMER_INTERVAL,
- periodic_interval_max=_TIMER_INTERVAL_MAX,
- serializer=t_serializer.TricircleSerializer()
- )
-
- xservice.start()
-
- return xservice
-
-
-_launcher = None
-
-
-def serve(xservice, workers=1):
- global _launcher
- if _launcher:
- raise RuntimeError(_('serve() can only be called once'))
-
- _launcher = srv.ProcessLauncher(CONF, restart_method='mutate')
- _launcher.launch_service(xservice, workers=workers)
-
-
-def wait():
- _launcher.wait()
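-
-
-# A minimal launch sketch, assuming the tricircle-xjob console script wires
-# these helpers together roughly as follows (the entry point itself is not
-# part of this module):
-#
-#     xservice = create_service()
-#     serve(xservice, workers=CONF.workers)
-#     wait()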