From 449a1f66c925057dea523867de176edff33eb515 Mon Sep 17 00:00:00 2001
From: Tony Breeds
Date: Tue, 12 Sep 2017 15:43:12 -0600
Subject: [PATCH] Retire Packaging Deb project repos

This commit is part of a series to retire the Packaging Deb project.
Step 2 is to remove all content from the project repos, replacing it
with a README noting where to find ongoing work and how to recover the
repo if needed at some future point (as in
https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project).

Change-Id: I161dc093726c3c8cce5a7057282743ca4908eb57
---
 .coveragerc | 7 -
 .gitignore | 32 -
 .gitreview | 4 -
 .mailmap | 11 -
 .pylintrc | 112 ---
 .testr.conf | 8 -
 CONTRIBUTING.rst | 13 -
 HACKING.rst | 33 -
 LICENSE | 176 -----
 README | 14 +
 README.rst | 30 -
 TESTING.rst | 195 -----
 babel.cfg | 2 -
 devstack/README.rst | 186 -----
 devstack/devstackgaterc | 113 ---
 devstack/entry_points | 369 ---------
 devstack/files/debs/networking-odl | 1 -
 devstack/files/rpms/networking-odl | 1 -
 devstack/functions | 159 ----
 devstack/local.conf.example | 109 ---
 ...etvirt-impl-config_netvirt-impl-config.xml | 5 -
 devstack/odl-releases/boron-0.5.0 | 1 -
 devstack/odl-releases/boron-0.5.1-SR1 | 1 -
 devstack/odl-releases/boron-0.5.2-SR2 | 0
 devstack/odl-releases/boron-0.5.3-SR3 | 0
 devstack/odl-releases/boron-0.5.4-SR4 | 0
 devstack/odl-releases/boron-snapshot-0.5 | 0
 devstack/odl-releases/boron-snapshot-0.5.5 | 0
 devstack/odl-releases/carbon-0.6.0 | 0
 devstack/odl-releases/carbon-0.6.1-SR1 | 0
 devstack/odl-releases/carbon-snapshot-0.6 | 0
 devstack/odl-releases/carbon-snapshot-0.6.2 | 0
 devstack/odl-releases/common | 77 --
 devstack/odl-releases/latest-snapshot | 0
 devstack/odl-releases/nitrogen-snapshot-0.7 | 0
 devstack/odl-releases/nitrogen-snapshot-0.7.0 | 0
 devstack/override-defaults | 38 -
 devstack/plugin.sh | 156 ----
 devstack/post_test_hook.sh | 62 --
 devstack/pre_test_hook.sh | 129 ----
 devstack/settings | 114 ---
 devstack/settings.odl | 134 ----
 devstack/setup_java.sh | 207 -----
 devstack/upgrade/resources.sh | 0
 devstack/upgrade/settings | 34 -
 devstack/upgrade/upgrade.sh | 23 -
 doc/source/admin/index.rst | 9 -
 doc/source/admin/reference_architecture.rst | 116 ---
 doc/source/conf.py | 82 --
 doc/source/configuration/index.rst | 11 -
 doc/source/contributor/contributing.rst | 4 -
 .../contributor/drivers_architecture.rst | 89 ---
 doc/source/contributor/hostconfig.rst | 148 ----
 doc/source/contributor/index.rst | 39 -
 doc/source/contributor/maintenance.rst | 44 --
 doc/source/contributor/quickstart.rst | 219 ------
 doc/source/contributor/specs/index.rst | 33 -
 .../contributor/specs/newton/qos-driver.rst | 104 ---
 .../contributor/specs/newton/sfc-driver.rst | 139 ----
 .../specs/ocata/journal-recovery.rst | 152 ----
 .../specs/pike/dep-validations-on-create.rst | 129 ----
 .../specs/pike/neutron-port-dhcp.rst | 210 -----
 doc/source/contributor/testing.rst | 1 -
 doc/source/contributor/usage.rst | 7 -
 doc/source/index.rst | 38 -
 doc/source/install/devstack.rst | 1 -
 doc/source/install/index.rst | 8 -
 doc/source/install/installation.rst | 220 ------
 etc/neutron/plugins/ml2/ml2_conf_odl.ini | 61 --
 etc/policy.json | 143 ----
 networking_odl/__init__.py | 24 -
 networking_odl/_i18n.py | 40 -
 networking_odl/bgpvpn/__init__.py | 0
 networking_odl/bgpvpn/odl_v2.py | 116 ---
 networking_odl/ceilometer/__init__.py | 0
 networking_odl/ceilometer/network/__init__.py | 0
 .../ceilometer/network/statistics/__init__.py | 0
 .../statistics/opendaylight_v2/__init__.py | 0
 .../statistics/opendaylight_v2/client.py | 137 ----
.../statistics/opendaylight_v2/driver.py | 296 ------- networking_odl/cmd/__init__.py | 0 networking_odl/cmd/set_ovs_hostconfigs.py | 473 ------------ networking_odl/cmd/test_setup_hostconfigs.sh | 3 - networking_odl/common/__init__.py | 0 networking_odl/common/callback.py | 98 --- networking_odl/common/client.py | 162 ---- networking_odl/common/config.py | 73 -- networking_odl/common/constants.py | 85 -- networking_odl/common/filters.py | 178 ----- networking_odl/common/lightweight_testing.py | 178 ----- networking_odl/common/odl_features.py | 112 --- networking_odl/common/postcommit.py | 71 -- networking_odl/common/utils.py | 51 -- networking_odl/common/websocket_client.py | 331 -------- networking_odl/db/__init__.py | 0 networking_odl/db/db.py | 227 ------ networking_odl/db/head.py | 24 - networking_odl/db/migration/__init__.py | 0 .../db/migration/alembic_migrations/README | 1 - .../migration/alembic_migrations/__init__.py | 0 .../db/migration/alembic_migrations/env.py | 99 --- .../alembic_migrations/script.py.mako | 36 - .../alembic_migrations/versions/CONTRACT_HEAD | 1 - .../alembic_migrations/versions/EXPAND_HEAD | 1 - .../b89a299e19f9_initial_branchpoint.py | 28 - .../contract/383acb0d38a0_initial_contract.py | 36 - .../expand/247501328046_initial_expand.py | 32 - ..._opendaylight_neutron_mechanism_driver_.py | 54 -- ...a0c536252a5_update_opendayligut_journal.py | 45 -- ...0427d776_add_sequence_number_to_journal.py | 49 -- ...dbf02afde_add_journal_maintenance_table.py | 52 -- ...d3a_drop_opendaylight_maintenance_table.py | 32 - ...f56ff2fb_add_journal_dependencies_table.py | 42 - ...added_version_id_for_optimistic_locking.py | 36 - ...create_opendaylight_preiodic_task_table.py | 50 -- networking_odl/db/models.py | 76 -- networking_odl/fwaas/__init__.py | 0 networking_odl/fwaas/driver.py | 69 -- networking_odl/hacking/__init__.py | 0 networking_odl/hacking/checks.py | 60 -- networking_odl/journal/__init__.py | 0 networking_odl/journal/cleanup.py | 45 -- .../journal/dependency_validations.py | 224 ------ networking_odl/journal/full_sync.py | 126 --- networking_odl/journal/journal.py | 234 ------ networking_odl/journal/periodic_task.py | 104 --- networking_odl/journal/recovery.py | 100 --- networking_odl/l2gateway/__init__.py | 0 networking_odl/l2gateway/driver.py | 87 --- networking_odl/l2gateway/driver_v2.py | 95 --- networking_odl/l3/__init__.py | 0 networking_odl/l3/l3_odl.py | 149 ---- networking_odl/l3/l3_odl_v2.py | 182 ----- networking_odl/lbaas/__init__.py | 0 networking_odl/lbaas/driver_v2.py | 183 ----- networking_odl/lbaas/lbaasv2_driver_v2.py | 142 ---- networking_odl/ml2/README.odl | 41 - networking_odl/ml2/__init__.py | 0 networking_odl/ml2/legacy_port_binding.py | 84 -- networking_odl/ml2/mech_driver.py | 512 ------------ networking_odl/ml2/mech_driver_v2.py | 239 ------ networking_odl/ml2/port_binding.py | 121 --- networking_odl/ml2/port_status_update.py | 135 ---- networking_odl/ml2/pseudo_agentdb_binding.py | 378 --------- networking_odl/qos/__init__.py | 0 networking_odl/qos/qos_driver_v2.py | 101 --- networking_odl/qos/qos_utils.py | 39 - networking_odl/sfc/__init__.py | 0 networking_odl/sfc/flowclassifier/__init__.py | 0 .../flowclassifier/sfc_flowclassifier_v1.py | 65 -- .../flowclassifier/sfc_flowclassifier_v2.py | 89 --- networking_odl/sfc/sfc_driver_v1.py | 101 --- networking_odl/sfc/sfc_driver_v2.py | 142 ---- networking_odl/tests/__init__.py | 0 networking_odl/tests/base.py | 93 --- networking_odl/tests/contrib/gate_hook.sh | 24 - 
.../tests/contrib/post_test_hook.sh | 58 -- networking_odl/tests/fullstack/__init__.py | 0 networking_odl/tests/fullstack/base.py | 52 -- .../tests/fullstack/requirements.txt | 6 - .../tests/fullstack/test_mech_driver.py | 84 -- networking_odl/tests/functional/__init__.py | 0 networking_odl/tests/functional/base.py | 114 --- .../tests/functional/db/__init__.py | 0 .../tests/functional/db/test_migrations.py | 152 ---- .../tests/functional/requirements.txt | 9 - .../tests/functional/test_bgpvpn.py | 173 ----- .../tests/functional/test_l2gateway.py | 179 ----- networking_odl/tests/functional/test_l3.py | 91 --- .../tests/functional/test_ml2_drivers.py | 134 ---- networking_odl/tests/functional/test_qos.py | 88 --- .../tests/functional/test_trunk_drivers.py | 115 --- networking_odl/tests/match.py | 52 -- networking_odl/tests/unit/__init__.py | 19 - networking_odl/tests/unit/base_v2.py | 57 -- networking_odl/tests/unit/bgpvpn/__init__.py | 0 .../tests/unit/bgpvpn/test_odl_v2.py | 166 ---- .../tests/unit/ceilometer/__init__.py | 0 .../tests/unit/ceilometer/network/__init__.py | 0 .../ceilometer/network/statistics/__init__.py | 0 .../statistics/opendaylight_v2/__init__.py | 0 .../statistics/opendaylight_v2/test_client.py | 133 ---- .../statistics/opendaylight_v2/test_driver.py | 719 ----------------- networking_odl/tests/unit/cmd/__init__.py | 0 .../unit/cmd/test_set_ovs_hostconfigs.py | 264 ------- networking_odl/tests/unit/common/__init__.py | 0 .../tests/unit/common/test_callback.py | 155 ---- .../tests/unit/common/test_client.py | 55 -- .../tests/unit/common/test_filters.py | 84 -- .../unit/common/test_lightweight_testing.py | 178 ----- .../tests/unit/common/test_odl_features.py | 75 -- .../tests/unit/common/test_postcommit.py | 72 -- .../tests/unit/common/test_utils.py | 37 - .../unit/common/test_websocket_client.py | 252 ------ networking_odl/tests/unit/db/__init__.py | 0 networking_odl/tests/unit/db/test_db.py | 321 -------- networking_odl/tests/unit/db/test_sqlite.py | 60 -- networking_odl/tests/unit/fwaas/__init__.py | 0 .../tests/unit/fwaas/test_fwaas_odl.py | 33 - networking_odl/tests/unit/journal/__init__.py | 0 .../journal/test_dependency_validations.py | 405 ---------- .../tests/unit/journal/test_full_sync.py | 241 ------ .../tests/unit/journal/test_journal.py | 54 -- .../tests/unit/journal/test_periodic_task.py | 139 ---- .../tests/unit/journal/test_recovery.py | 170 ---- .../tests/unit/l2gateway/__init__.py | 0 .../tests/unit/l2gateway/test_driver.py | 128 --- .../tests/unit/l2gateway/test_driver_v2.py | 147 ---- networking_odl/tests/unit/l3/__init__.py | 0 networking_odl/tests/unit/l3/test_l3_odl.py | 316 -------- .../tests/unit/l3/test_l3_odl_v2.py | 419 ---------- networking_odl/tests/unit/lbaas/__init__.py | 0 .../tests/unit/lbaas/test_lbaasv2_odl_v1.py | 36 - .../tests/unit/lbaas/test_lbaasv2_odl_v2.py | 177 ----- networking_odl/tests/unit/ml2/__init__.py | 0 .../tests/unit/ml2/config-ovs-external_ids.sh | 37 - networking_odl/tests/unit/ml2/odl_teststub.js | 62 -- networking_odl/tests/unit/ml2/test_driver.py | 99 --- .../unit/ml2/test_legacy_port_binding.py | 90 --- .../tests/unit/ml2/test_mechanism_odl.py | 659 ---------------- .../tests/unit/ml2/test_mechanism_odl_v2.py | 727 ------------------ .../tests/unit/ml2/test_port_binding.py | 44 -- .../tests/unit/ml2/test_port_status_update.py | 96 --- .../unit/ml2/test_pseudo_agentdb_binding.py | 512 ------------ networking_odl/tests/unit/qos/__init__.py | 0 .../tests/unit/qos/test_qos_driver_v2.py | 74 -- 
networking_odl/tests/unit/sfc/__init__.py | 0 networking_odl/tests/unit/sfc/constants.py | 68 -- .../tests/unit/sfc/flowclassifier/__init__.py | 0 .../test_sfc_flowclassifier_v1.py | 57 -- .../test_sfc_flowclassifier_v2.py | 79 -- .../tests/unit/sfc/test_sfc_driver_v1.py | 112 --- .../tests/unit/sfc/test_sfc_driver_v2.py | 159 ---- networking_odl/tests/unit/test_base_db.py | 32 - networking_odl/tests/unit/trunk/__init__.py | 0 .../tests/unit/trunk/test_trunk_driver_v1.py | 124 --- .../tests/unit/trunk/test_trunk_driver_v2.py | 130 ---- networking_odl/trunk/__init__.py | 0 networking_odl/trunk/constants.py | 27 - networking_odl/trunk/trunk_driver_v1.py | 109 --- networking_odl/trunk/trunk_driver_v2.py | 131 ---- rally-jobs/README.rst | 31 - rally-jobs/extra/README.rst | 6 - rally-jobs/odl.yaml | 296 ------- rally-jobs/plugins/README.rst | 9 - rally-jobs/plugins/__init__.py | 0 releasenotes/notes/.placeholder | 0 .../add-beryllium-sr4-7eced33ec292bcc8.yaml | 7 - .../add-host-config-8fb45d7f9732a795.yaml | 11 - .../bgpvpn-driver-v2-36c0772d510587f4.yaml | 10 - ...recate-qos-driver-v1-96bce9842413700b.yaml | 4 - ...ck-default-driver-v2-6ae6ce789b4a6cc9.yaml | 7 - ...flat-network-support-7c032aabc21902b1.yaml | 6 - .../notes/full-sync-f6b7ec1bd9ea0e52.yaml | 15 - .../functional-test-b0855d6f1d85da30.yaml | 9 - .../journal-recovery-88e583ad2db22bcc.yaml | 10 - .../l2gw-driver-v2-b32aacf882ed446c.yaml | 10 - .../lbaas-driver-v2-46bf34992f4785d1.yaml | 13 - .../maintenance-thread-e54c3b4bd7c03546.yaml | 20 - ...cs-from-opendaylight-057a6b3c30626527.yaml | 7 - .../new-netvirt-default-0eccc77d3cb54484.yaml | 13 - .../nuke-lbaasv1-driver-fce366522350fe21.yaml | 9 - ...-feature-negotiation-ece3201a6e9f8f74.yaml | 21 - ...port-binding-default-b5f24ad350b47eb0.yaml | 16 - ...o-agent-port-binding-0a3d1d193b99293e.yaml | 16 - .../notes/qos-driver-v1-711698186ca693c4.yaml | 9 - .../notes/qos-driver-v2-4c869a6f0b8e3a4d.yaml | 8 - ...ove-network-topology-67daff08f3d6ff14.yaml | 11 - ...remove_qos_driver_v1-2bfbf1f979082b07.yaml | 8 - .../notes/sfc-driver-v1-d11fd5fd17114f2c.yaml | 10 - .../notes/sfc-driver-v2-9378b0db810b6fcb.yaml | 16 - .../notes/trunk-drivers-3592691bdd08929e.yaml | 9 - .../notes/version-bump-16230eadac71cbb0.yaml | 7 - .../vlan-transparency-63c153d310eacc5d.yaml | 21 - .../websocket-client-7c8117671aeea181.yaml | 8 - releasenotes/source/_static/.placeholder | 0 releasenotes/source/_templates/.placeholder | 0 releasenotes/source/conf.py | 271 ------- releasenotes/source/index.rst | 16 - .../locale/fr/LC_MESSAGES/releasenotes.po | 26 - releasenotes/source/newton.rst | 6 - releasenotes/source/ocata.rst | 6 - releasenotes/source/unreleased.rst | 5 - requirements.txt | 10 - setup.cfg | 107 --- setup.py | 29 - test-requirements.txt | 24 - tools/check_bash.sh | 31 - tools/check_i18n.py | 153 ---- tools/check_i18n_test_case.txt | 67 -- tools/clean.sh | 5 - tools/coding-checks.sh | 59 -- tools/configure_for_func_testing.sh | 284 ------- tools/i18n_cfg.py | 109 --- tools/install_venv.py | 72 -- tools/ostestr_compat_shim.sh | 8 - tools/tox_install.sh | 23 - tools/tox_install_project.sh | 76 -- tools/with_venv.sh | 19 - tox.ini | 155 ---- vagrant/README.rst | 29 - vagrant/functional/Vagrantfile | 23 - vagrant/functional/config-override.sh | 38 - vagrant/functional/reproduce.sh | 121 --- vagrant/functional/setup-minimum.sh | 11 - vagrant/integration/multinode/README.rst | 25 - vagrant/integration/multinode/Vagrantfile | 24 - vagrant/integration/multinode/compute.conf | 57 -- 
vagrant/integration/multinode/control.conf | 92 --- .../integration/multinode/setup_compute.sh | 27 - .../integration/multinode/setup_control.sh | 26 - vagrant/setup_proxy.sh | 50 -- 312 files changed, 14 insertions(+), 24246 deletions(-) delete mode 100644 .coveragerc delete mode 100644 .gitignore delete mode 100644 .gitreview delete mode 100644 .mailmap delete mode 100644 .pylintrc delete mode 100644 .testr.conf delete mode 100644 CONTRIBUTING.rst delete mode 100644 HACKING.rst delete mode 100644 LICENSE create mode 100644 README delete mode 100644 README.rst delete mode 100644 TESTING.rst delete mode 100644 babel.cfg delete mode 100644 devstack/README.rst delete mode 100644 devstack/devstackgaterc delete mode 100644 devstack/entry_points delete mode 100644 devstack/files/debs/networking-odl delete mode 100644 devstack/files/rpms/networking-odl delete mode 100644 devstack/functions delete mode 100644 devstack/local.conf.example delete mode 100644 devstack/odl-etc/opendaylight/datastore/initial/config/netvirt-impl-config_netvirt-impl-config.xml delete mode 100644 devstack/odl-releases/boron-0.5.0 delete mode 100644 devstack/odl-releases/boron-0.5.1-SR1 delete mode 100644 devstack/odl-releases/boron-0.5.2-SR2 delete mode 100644 devstack/odl-releases/boron-0.5.3-SR3 delete mode 100644 devstack/odl-releases/boron-0.5.4-SR4 delete mode 100644 devstack/odl-releases/boron-snapshot-0.5 delete mode 100644 devstack/odl-releases/boron-snapshot-0.5.5 delete mode 100644 devstack/odl-releases/carbon-0.6.0 delete mode 100644 devstack/odl-releases/carbon-0.6.1-SR1 delete mode 100644 devstack/odl-releases/carbon-snapshot-0.6 delete mode 100644 devstack/odl-releases/carbon-snapshot-0.6.2 delete mode 100644 devstack/odl-releases/common delete mode 100644 devstack/odl-releases/latest-snapshot delete mode 100644 devstack/odl-releases/nitrogen-snapshot-0.7 delete mode 100644 devstack/odl-releases/nitrogen-snapshot-0.7.0 delete mode 100644 devstack/override-defaults delete mode 100644 devstack/plugin.sh delete mode 100644 devstack/post_test_hook.sh delete mode 100644 devstack/pre_test_hook.sh delete mode 100644 devstack/settings delete mode 100644 devstack/settings.odl delete mode 100644 devstack/setup_java.sh delete mode 100755 devstack/upgrade/resources.sh delete mode 100644 devstack/upgrade/settings delete mode 100755 devstack/upgrade/upgrade.sh delete mode 100644 doc/source/admin/index.rst delete mode 100644 doc/source/admin/reference_architecture.rst delete mode 100644 doc/source/conf.py delete mode 100644 doc/source/configuration/index.rst delete mode 100644 doc/source/contributor/contributing.rst delete mode 100644 doc/source/contributor/drivers_architecture.rst delete mode 100644 doc/source/contributor/hostconfig.rst delete mode 100644 doc/source/contributor/index.rst delete mode 100644 doc/source/contributor/maintenance.rst delete mode 100644 doc/source/contributor/quickstart.rst delete mode 100644 doc/source/contributor/specs/index.rst delete mode 100644 doc/source/contributor/specs/newton/qos-driver.rst delete mode 100644 doc/source/contributor/specs/newton/sfc-driver.rst delete mode 100644 doc/source/contributor/specs/ocata/journal-recovery.rst delete mode 100644 doc/source/contributor/specs/pike/dep-validations-on-create.rst delete mode 100644 doc/source/contributor/specs/pike/neutron-port-dhcp.rst delete mode 100644 doc/source/contributor/testing.rst delete mode 100644 doc/source/contributor/usage.rst delete mode 100644 doc/source/index.rst delete mode 100644 doc/source/install/devstack.rst 
delete mode 100644 doc/source/install/index.rst delete mode 100644 doc/source/install/installation.rst delete mode 100644 etc/neutron/plugins/ml2/ml2_conf_odl.ini delete mode 100644 etc/policy.json delete mode 100644 networking_odl/__init__.py delete mode 100644 networking_odl/_i18n.py delete mode 100644 networking_odl/bgpvpn/__init__.py delete mode 100644 networking_odl/bgpvpn/odl_v2.py delete mode 100644 networking_odl/ceilometer/__init__.py delete mode 100644 networking_odl/ceilometer/network/__init__.py delete mode 100644 networking_odl/ceilometer/network/statistics/__init__.py delete mode 100644 networking_odl/ceilometer/network/statistics/opendaylight_v2/__init__.py delete mode 100644 networking_odl/ceilometer/network/statistics/opendaylight_v2/client.py delete mode 100644 networking_odl/ceilometer/network/statistics/opendaylight_v2/driver.py delete mode 100644 networking_odl/cmd/__init__.py delete mode 100755 networking_odl/cmd/set_ovs_hostconfigs.py delete mode 100755 networking_odl/cmd/test_setup_hostconfigs.sh delete mode 100644 networking_odl/common/__init__.py delete mode 100644 networking_odl/common/callback.py delete mode 100644 networking_odl/common/client.py delete mode 100644 networking_odl/common/config.py delete mode 100644 networking_odl/common/constants.py delete mode 100644 networking_odl/common/filters.py delete mode 100644 networking_odl/common/lightweight_testing.py delete mode 100644 networking_odl/common/odl_features.py delete mode 100644 networking_odl/common/postcommit.py delete mode 100644 networking_odl/common/utils.py delete mode 100644 networking_odl/common/websocket_client.py delete mode 100644 networking_odl/db/__init__.py delete mode 100644 networking_odl/db/db.py delete mode 100644 networking_odl/db/head.py delete mode 100644 networking_odl/db/migration/__init__.py delete mode 100644 networking_odl/db/migration/alembic_migrations/README delete mode 100644 networking_odl/db/migration/alembic_migrations/__init__.py delete mode 100644 networking_odl/db/migration/alembic_migrations/env.py delete mode 100644 networking_odl/db/migration/alembic_migrations/script.py.mako delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/newton/contract/fa0c536252a5_update_opendayligut_journal.py delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/newton/expand/3d560427d776_add_sequence_number_to_journal.py delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/pike/contract/eccd865b7d3a_drop_opendaylight_maintenance_table.py delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/pike/expand/0472f56ff2fb_add_journal_dependencies_table.py delete mode 100644 
networking_odl/db/migration/alembic_migrations/versions/pike/expand/43af357fd638_added_version_id_for_optimistic_locking.py delete mode 100644 networking_odl/db/migration/alembic_migrations/versions/pike/expand/6f7dfb241354_create_opendaylight_preiodic_task_table.py delete mode 100644 networking_odl/db/models.py delete mode 100644 networking_odl/fwaas/__init__.py delete mode 100644 networking_odl/fwaas/driver.py delete mode 100644 networking_odl/hacking/__init__.py delete mode 100644 networking_odl/hacking/checks.py delete mode 100644 networking_odl/journal/__init__.py delete mode 100644 networking_odl/journal/cleanup.py delete mode 100644 networking_odl/journal/dependency_validations.py delete mode 100644 networking_odl/journal/full_sync.py delete mode 100644 networking_odl/journal/journal.py delete mode 100644 networking_odl/journal/periodic_task.py delete mode 100644 networking_odl/journal/recovery.py delete mode 100644 networking_odl/l2gateway/__init__.py delete mode 100644 networking_odl/l2gateway/driver.py delete mode 100644 networking_odl/l2gateway/driver_v2.py delete mode 100644 networking_odl/l3/__init__.py delete mode 100644 networking_odl/l3/l3_odl.py delete mode 100644 networking_odl/l3/l3_odl_v2.py delete mode 100644 networking_odl/lbaas/__init__.py delete mode 100644 networking_odl/lbaas/driver_v2.py delete mode 100644 networking_odl/lbaas/lbaasv2_driver_v2.py delete mode 100644 networking_odl/ml2/README.odl delete mode 100644 networking_odl/ml2/__init__.py delete mode 100644 networking_odl/ml2/legacy_port_binding.py delete mode 100644 networking_odl/ml2/mech_driver.py delete mode 100644 networking_odl/ml2/mech_driver_v2.py delete mode 100644 networking_odl/ml2/port_binding.py delete mode 100644 networking_odl/ml2/port_status_update.py delete mode 100644 networking_odl/ml2/pseudo_agentdb_binding.py delete mode 100644 networking_odl/qos/__init__.py delete mode 100644 networking_odl/qos/qos_driver_v2.py delete mode 100644 networking_odl/qos/qos_utils.py delete mode 100644 networking_odl/sfc/__init__.py delete mode 100644 networking_odl/sfc/flowclassifier/__init__.py delete mode 100644 networking_odl/sfc/flowclassifier/sfc_flowclassifier_v1.py delete mode 100644 networking_odl/sfc/flowclassifier/sfc_flowclassifier_v2.py delete mode 100644 networking_odl/sfc/sfc_driver_v1.py delete mode 100644 networking_odl/sfc/sfc_driver_v2.py delete mode 100644 networking_odl/tests/__init__.py delete mode 100644 networking_odl/tests/base.py delete mode 100644 networking_odl/tests/contrib/gate_hook.sh delete mode 100644 networking_odl/tests/contrib/post_test_hook.sh delete mode 100644 networking_odl/tests/fullstack/__init__.py delete mode 100644 networking_odl/tests/fullstack/base.py delete mode 100644 networking_odl/tests/fullstack/requirements.txt delete mode 100644 networking_odl/tests/fullstack/test_mech_driver.py delete mode 100644 networking_odl/tests/functional/__init__.py delete mode 100644 networking_odl/tests/functional/base.py delete mode 100644 networking_odl/tests/functional/db/__init__.py delete mode 100644 networking_odl/tests/functional/db/test_migrations.py delete mode 100644 networking_odl/tests/functional/requirements.txt delete mode 100644 networking_odl/tests/functional/test_bgpvpn.py delete mode 100644 networking_odl/tests/functional/test_l2gateway.py delete mode 100644 networking_odl/tests/functional/test_l3.py delete mode 100644 networking_odl/tests/functional/test_ml2_drivers.py delete mode 100644 networking_odl/tests/functional/test_qos.py delete mode 100644 
networking_odl/tests/functional/test_trunk_drivers.py delete mode 100644 networking_odl/tests/match.py delete mode 100644 networking_odl/tests/unit/__init__.py delete mode 100644 networking_odl/tests/unit/base_v2.py delete mode 100644 networking_odl/tests/unit/bgpvpn/__init__.py delete mode 100644 networking_odl/tests/unit/bgpvpn/test_odl_v2.py delete mode 100644 networking_odl/tests/unit/ceilometer/__init__.py delete mode 100644 networking_odl/tests/unit/ceilometer/network/__init__.py delete mode 100644 networking_odl/tests/unit/ceilometer/network/statistics/__init__.py delete mode 100644 networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/__init__.py delete mode 100644 networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_client.py delete mode 100644 networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.py delete mode 100644 networking_odl/tests/unit/cmd/__init__.py delete mode 100644 networking_odl/tests/unit/cmd/test_set_ovs_hostconfigs.py delete mode 100644 networking_odl/tests/unit/common/__init__.py delete mode 100644 networking_odl/tests/unit/common/test_callback.py delete mode 100644 networking_odl/tests/unit/common/test_client.py delete mode 100644 networking_odl/tests/unit/common/test_filters.py delete mode 100644 networking_odl/tests/unit/common/test_lightweight_testing.py delete mode 100644 networking_odl/tests/unit/common/test_odl_features.py delete mode 100644 networking_odl/tests/unit/common/test_postcommit.py delete mode 100644 networking_odl/tests/unit/common/test_utils.py delete mode 100644 networking_odl/tests/unit/common/test_websocket_client.py delete mode 100644 networking_odl/tests/unit/db/__init__.py delete mode 100644 networking_odl/tests/unit/db/test_db.py delete mode 100644 networking_odl/tests/unit/db/test_sqlite.py delete mode 100644 networking_odl/tests/unit/fwaas/__init__.py delete mode 100644 networking_odl/tests/unit/fwaas/test_fwaas_odl.py delete mode 100644 networking_odl/tests/unit/journal/__init__.py delete mode 100644 networking_odl/tests/unit/journal/test_dependency_validations.py delete mode 100644 networking_odl/tests/unit/journal/test_full_sync.py delete mode 100644 networking_odl/tests/unit/journal/test_journal.py delete mode 100644 networking_odl/tests/unit/journal/test_periodic_task.py delete mode 100644 networking_odl/tests/unit/journal/test_recovery.py delete mode 100644 networking_odl/tests/unit/l2gateway/__init__.py delete mode 100644 networking_odl/tests/unit/l2gateway/test_driver.py delete mode 100644 networking_odl/tests/unit/l2gateway/test_driver_v2.py delete mode 100644 networking_odl/tests/unit/l3/__init__.py delete mode 100644 networking_odl/tests/unit/l3/test_l3_odl.py delete mode 100644 networking_odl/tests/unit/l3/test_l3_odl_v2.py delete mode 100644 networking_odl/tests/unit/lbaas/__init__.py delete mode 100644 networking_odl/tests/unit/lbaas/test_lbaasv2_odl_v1.py delete mode 100644 networking_odl/tests/unit/lbaas/test_lbaasv2_odl_v2.py delete mode 100644 networking_odl/tests/unit/ml2/__init__.py delete mode 100755 networking_odl/tests/unit/ml2/config-ovs-external_ids.sh delete mode 100644 networking_odl/tests/unit/ml2/odl_teststub.js delete mode 100644 networking_odl/tests/unit/ml2/test_driver.py delete mode 100644 networking_odl/tests/unit/ml2/test_legacy_port_binding.py delete mode 100644 networking_odl/tests/unit/ml2/test_mechanism_odl.py delete mode 100644 networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py delete mode 100644 
networking_odl/tests/unit/ml2/test_port_binding.py delete mode 100644 networking_odl/tests/unit/ml2/test_port_status_update.py delete mode 100644 networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py delete mode 100644 networking_odl/tests/unit/qos/__init__.py delete mode 100644 networking_odl/tests/unit/qos/test_qos_driver_v2.py delete mode 100644 networking_odl/tests/unit/sfc/__init__.py delete mode 100644 networking_odl/tests/unit/sfc/constants.py delete mode 100644 networking_odl/tests/unit/sfc/flowclassifier/__init__.py delete mode 100644 networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v1.py delete mode 100644 networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v2.py delete mode 100644 networking_odl/tests/unit/sfc/test_sfc_driver_v1.py delete mode 100644 networking_odl/tests/unit/sfc/test_sfc_driver_v2.py delete mode 100644 networking_odl/tests/unit/test_base_db.py delete mode 100644 networking_odl/tests/unit/trunk/__init__.py delete mode 100644 networking_odl/tests/unit/trunk/test_trunk_driver_v1.py delete mode 100644 networking_odl/tests/unit/trunk/test_trunk_driver_v2.py delete mode 100644 networking_odl/trunk/__init__.py delete mode 100644 networking_odl/trunk/constants.py delete mode 100644 networking_odl/trunk/trunk_driver_v1.py delete mode 100644 networking_odl/trunk/trunk_driver_v2.py delete mode 100644 rally-jobs/README.rst delete mode 100644 rally-jobs/extra/README.rst delete mode 100644 rally-jobs/odl.yaml delete mode 100644 rally-jobs/plugins/README.rst delete mode 100644 rally-jobs/plugins/__init__.py delete mode 100644 releasenotes/notes/.placeholder delete mode 100644 releasenotes/notes/add-beryllium-sr4-7eced33ec292bcc8.yaml delete mode 100644 releasenotes/notes/add-host-config-8fb45d7f9732a795.yaml delete mode 100644 releasenotes/notes/bgpvpn-driver-v2-36c0772d510587f4.yaml delete mode 100644 releasenotes/notes/deprecate-qos-driver-v1-96bce9842413700b.yaml delete mode 100644 releasenotes/notes/devstack-default-driver-v2-6ae6ce789b4a6cc9.yaml delete mode 100644 releasenotes/notes/flat-network-support-7c032aabc21902b1.yaml delete mode 100644 releasenotes/notes/full-sync-f6b7ec1bd9ea0e52.yaml delete mode 100644 releasenotes/notes/functional-test-b0855d6f1d85da30.yaml delete mode 100644 releasenotes/notes/journal-recovery-88e583ad2db22bcc.yaml delete mode 100644 releasenotes/notes/l2gw-driver-v2-b32aacf882ed446c.yaml delete mode 100644 releasenotes/notes/lbaas-driver-v2-46bf34992f4785d1.yaml delete mode 100644 releasenotes/notes/maintenance-thread-e54c3b4bd7c03546.yaml delete mode 100644 releasenotes/notes/network-statistics-from-opendaylight-057a6b3c30626527.yaml delete mode 100644 releasenotes/notes/new-netvirt-default-0eccc77d3cb54484.yaml delete mode 100644 releasenotes/notes/nuke-lbaasv1-driver-fce366522350fe21.yaml delete mode 100644 releasenotes/notes/odl-feature-negotiation-ece3201a6e9f8f74.yaml delete mode 100644 releasenotes/notes/port-binding-default-b5f24ad350b47eb0.yaml delete mode 100644 releasenotes/notes/pseudo-agent-port-binding-0a3d1d193b99293e.yaml delete mode 100644 releasenotes/notes/qos-driver-v1-711698186ca693c4.yaml delete mode 100644 releasenotes/notes/qos-driver-v2-4c869a6f0b8e3a4d.yaml delete mode 100644 releasenotes/notes/remove-network-topology-67daff08f3d6ff14.yaml delete mode 100644 releasenotes/notes/remove_qos_driver_v1-2bfbf1f979082b07.yaml delete mode 100644 releasenotes/notes/sfc-driver-v1-d11fd5fd17114f2c.yaml delete mode 100644 releasenotes/notes/sfc-driver-v2-9378b0db810b6fcb.yaml delete 
mode 100644 releasenotes/notes/trunk-drivers-3592691bdd08929e.yaml delete mode 100644 releasenotes/notes/version-bump-16230eadac71cbb0.yaml delete mode 100644 releasenotes/notes/vlan-transparency-63c153d310eacc5d.yaml delete mode 100644 releasenotes/notes/websocket-client-7c8117671aeea181.yaml delete mode 100644 releasenotes/source/_static/.placeholder delete mode 100644 releasenotes/source/_templates/.placeholder delete mode 100644 releasenotes/source/conf.py delete mode 100644 releasenotes/source/index.rst delete mode 100644 releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po delete mode 100644 releasenotes/source/newton.rst delete mode 100644 releasenotes/source/ocata.rst delete mode 100644 releasenotes/source/unreleased.rst delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 test-requirements.txt delete mode 100644 tools/check_bash.sh delete mode 100644 tools/check_i18n.py delete mode 100644 tools/check_i18n_test_case.txt delete mode 100755 tools/clean.sh delete mode 100755 tools/coding-checks.sh delete mode 100755 tools/configure_for_func_testing.sh delete mode 100644 tools/i18n_cfg.py delete mode 100644 tools/install_venv.py delete mode 100755 tools/ostestr_compat_shim.sh delete mode 100755 tools/tox_install.sh delete mode 100755 tools/tox_install_project.sh delete mode 100755 tools/with_venv.sh delete mode 100644 tox.ini delete mode 100644 vagrant/README.rst delete mode 100644 vagrant/functional/Vagrantfile delete mode 100755 vagrant/functional/config-override.sh delete mode 100755 vagrant/functional/reproduce.sh delete mode 100755 vagrant/functional/setup-minimum.sh delete mode 100644 vagrant/integration/multinode/README.rst delete mode 100644 vagrant/integration/multinode/Vagrantfile delete mode 100644 vagrant/integration/multinode/compute.conf delete mode 100644 vagrant/integration/multinode/control.conf delete mode 100755 vagrant/integration/multinode/setup_compute.sh delete mode 100755 vagrant/integration/multinode/setup_control.sh delete mode 100755 vagrant/setup_proxy.sh diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index d72357479..000000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = networking_odl -omit = networking_odl/tests/* - -[report] -ignore_errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index e43d13a8d..000000000 --- a/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -AUTHORS -build/* -build-stamp -ChangeLog -cover/ -covhtml/ -dist/ -doc/build -*.DS_Store -*.pyc -etc/neutron/plugins/ml2/ml2_conf_odl.ini.sample -networking_odl.egg-info/ -networking_odl/vcsversion.py -networking_odl/versioninfo -pbr*.egg/ -run_tests.err.log -run_tests.log -# Files create dy releasenotes build -releasenotes/build -setuptools*.egg/ -subunit.log -*.mo -*.sw? 
-*~ -.vagrant -/.* -!/.coveragerc -!/.gitignore -!/.gitreview -!/.mailmap -!/.pylintrc -!/.testr.conf diff --git a/.gitreview b/.gitreview deleted file mode 100644 index ad57c24d9..000000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/networking-odl.git diff --git a/.mailmap b/.mailmap deleted file mode 100644 index f3e7e5e1a..000000000 --- a/.mailmap +++ /dev/null @@ -1,11 +0,0 @@ -# Format is: -# -# -lawrancejing -Jiajun Liu -Zhongyue Luo -Kun Huang -Zhenguo Niu -Isaku Yamahata -Isaku Yamahata -Morgan Fainberg diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 883a12645..000000000 --- a/.pylintrc +++ /dev/null @@ -1,112 +0,0 @@ -# The format of this file isn't really documented; just use --generate-rcfile -[MASTER] -# Add to the black list. It should be a base name, not a -# path. You may set this option multiple times. -# -ignore=.git,tests - -[MESSAGES CONTROL] -# NOTE(gus): This is a long list. A number of these are important and -# should be re-enabled once the offending code is fixed (or marked -# with a local disable) -disable= -# "F" Fatal errors that prevent further processing - import-error, -# "I" Informational noise - locally-disabled, -# "E" Error for important programming issues (likely bugs) - access-member-before-definition, - no-member, - no-method-argument, - no-self-argument, -# "W" Warnings for stylistic problems or minor programming issues - abstract-method, - arguments-differ, - attribute-defined-outside-init, - bad-builtin, - bad-indentation, - broad-except, - cyclic-import, - dangerous-default-value, - deprecated-lambda, - expression-not-assigned, - fixme, - global-statement, - no-init, - non-parent-init-called, - protected-access, - redefined-builtin, - redefined-outer-name, - signature-differs, - star-args, - super-init-not-called, - unpacking-non-sequence, - unused-argument, - unused-import, - unused-variable, -# "C" Coding convention violations - bad-continuation, - invalid-name, - missing-docstring, - superfluous-parens, -# "R" Refactor recommendations - abstract-class-little-used, - abstract-class-not-used, - duplicate-code, - interface-not-implemented, - no-self-use, - too-few-public-methods, - too-many-ancestors, - too-many-arguments, - too-many-branches, - too-many-instance-attributes, - too-many-lines, - too-many-locals, - too-many-public-methods, - too-many-return-statements, - too-many-statements - -[BASIC] -# Variable names can be 1 to 31 characters long, with lowercase and underscores -variable-rgx=[a-z_][a-z0-9_]{0,30}$ - -# Argument names can be 2 to 31 characters long, with lowercase and underscores -argument-rgx=[a-z_][a-z0-9_]{1,30}$ - -# Method names should be at least 3 characters long -# and be lowecased with underscores -method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ - -# Module names matching neutron-* are ok (files in bin/) -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ - -# Don't require docstrings on tests. -no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ - -[FORMAT] -# Maximum number of characters on a single line. -max-line-length=79 - -[VARIABLES] -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -# _ is used by our localization -additional-builtins=_ - -[CLASSES] -# List of interface methods to ignore, separated by a comma. 
-ignore-iface-methods= - -[IMPORTS] -# Deprecated modules which should not be used, separated by a comma -deprecated-modules= -# should use oslo_serialization.jsonutils - json - -[TYPECHECK] -# List of module names for which member attributes should not be checked -ignored-modules=six.moves,_MovedItems - -[REPORTS] -# Tells whether to display a full report or only the messages -reports=no diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 0adfe9c83..000000000 --- a/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - OS_LOG_CAPTURE=1 \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./networking_odl/tests/unit} $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index b88ee37a4..000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,13 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps documented at: -http://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: -http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: -https://bugs.launchpad.net/networking-odl diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 608fd2b5c..000000000 --- a/HACKING.rst +++ /dev/null @@ -1,33 +0,0 @@ -Neutron Style Commandments -======================= - -- Step 1: Read the OpenStack Style Commandments - https://docs.openstack.org/hacking/latest/ -- Step 2: Read on - -Neutron Specific Commandments --------------------------- - -- [N319] Validate that debug level logs are not translated -- [N320] Validate that LOG messages, except debug ones, have translations -- [N321] Validate that jsonutils module is used instead of json -- [N322] We do not use @authors tags in source files. We have git to track - authorship. -- [N323] Detect common errors with assert_called_once_with - -Creating Unit Tests -------------------- -For every new feature, unit tests should be created that both test and -(implicitly) document the usage of said feature. If submitting a patch for a -bug that had no unit test, a new passing unit test should be added. If a -submitted bug fix does have a unit test, be sure to add a new one that fails -without the patch and passes with the patch. - -All unittest classes must ultimately inherit from testtools.TestCase. In the -Neutron test suite, this should be done by inheriting from -neutron.tests.base.BaseTestCase. - -All setUp and tearDown methods must upcall using the super() method. -tearDown methods should be avoided and addCleanup calls should be preferred. -Never manually create tempfiles. Always use the tempfile fixtures from -the fixture library to ensure that they are cleaned up. diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a09..000000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README b/README new file mode 100644 index 000000000..8fcd2b2f8 --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. + +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/README.rst b/README.rst deleted file mode 100644 index 2a364abe8..000000000 --- a/README.rst +++ /dev/null @@ -1,30 +0,0 @@ -========================== -Welcome to networking-odl! -========================== - -.. Team and repository tags - -.. image:: http://governance.openstack.org/badges/networking-odl.svg - :target: http://governance.openstack.org/reference/tags/index.html - -.. Change things from this point on - -Summary -------- - -OpenStack networking-odl is a library of drivers and plugins that integrates -OpenStack Neutron API with OpenDaylight Backend. 
For example it has ML2 -driver and L3 plugin to enable communication of OpenStack Neutron L2 -and L3 resources API to OpenDayLight Backend. - -To report and discover bugs in networking-odl the following -link can be used: -https://bugs.launchpad.net/networking-odl - -Any new code submission or proposal must follow the development -guidelines detailed in HACKING.rst and for further details this -link can be checked: -https://docs.openstack.org/networking-odl/latest/ - -The OpenDaylight homepage: -https://www.opendaylight.org/ diff --git a/TESTING.rst b/TESTING.rst deleted file mode 100644 index e11998044..000000000 --- a/TESTING.rst +++ /dev/null @@ -1,195 +0,0 @@ -Testing Networking-odl + neutron -================================ - -Overview --------- - -The unit tests (networking_odl/tests/unit/) are meant to cover as much code as -possible and should be executed without the service running. They are -designed to test the various pieces of the neutron tree to make sure -any new changes don't break existing functionality. - -# TODO (Manjeet): Update functional testing doc. - -Development process -------------------- - -It is expected that any new changes that are proposed for merge -come with tests for that feature or code area. Ideally any bugs -fixes that are submitted also have tests to prove that they stay -fixed! In addition, before proposing for merge, all of the -current tests should be passing. - -Virtual environments -~~~~~~~~~~~~~~~~~~~~ - -Testing OpenStack projects, including Neutron, is made easier with `DevStack `_. - -Create a machine (such as a VM or Vagrant box) running a distribution supported -by DevStack and install DevStack there. For example, there is a Vagrant script -for DevStack at https://github.com/bcwaldon/vagrant_devstack. - -.. note:: - - If you prefer not to use DevStack, you can still check out source code on your local - machine and develop from there. - - -Running unit tests ------------------- - -There are two mechanisms for running tests: tox, and nose. Before submitting -a patch for review you should always ensure all test pass; a tox run is -triggered by the jenkins gate executed on gerrit for each patch pushed for -review. - -With these mechanisms you can either run the tests in the standard -environment or create a virtual environment to run them in. - -By default after running all of the tests, any pep8 errors -found in the tree will be reported. - - -With `nose` -~~~~~~~~~~~ - -You can use `nose`_ to run individual tests, as well as use for debugging -portions of your code:: - - source .venv/bin/activate - pip install nose - nosetests - -There are disadvantages to running Nose - the tests are run sequentially, so -race condition bugs will not be triggered, and the full test suite will -take significantly longer than tox & testr. The upside is that testr has -some rough edges when it comes to diagnosing errors and failures, and there is -no easy way to set a breakpoint in the Neutron code, and enter an -interactive debugging session while using testr. - -.. _nose: https://nose.readthedocs.org/en/latest/index.html - -With `tox` -~~~~~~~~~~ - -Networking-odl, like other OpenStack projects, uses `tox`_ for managing the virtual -environments for running test cases. It uses `Testr`_ for managing the running -of the test cases. - -Tox handles the creation of a series of `virtualenvs`_ that target specific -versions of Python (2.6, 2.7, 3.3, etc). 
- -Testr handles the parallel execution of series of test cases as well as -the tracking of long-running tests and other things. - -Running unit tests is as easy as executing this in the root directory of the -Neutron source code:: - - tox - -Running tests for syntax and style check for written code:: - - tox -e pep8 - -For more information on the standard Tox-based test infrastructure used by -OpenStack and how to do some common test/debugging procedures with Testr, -see this wiki page: -https://wiki.openstack.org/wiki/Testr - -.. _Testr: https://wiki.openstack.org/wiki/Testr -.. _tox: http://tox.readthedocs.org/en/latest/ -.. _virtualenvs: https://pypi.python.org/pypi/virtualenv - -Tests written can also be debugged by adding pdb break points. Normally if you add -a break point and just run the tests with normal flags they will end up in failing. -There is debug flag you can use to run after adding pdb break points in the tests. - -Set break points in your test code and run:: - - tox -e debug networking_odl.tests.unit.db.test_db.DbTestCase.test_validate_updates_same_object_uuid - -The package oslotest was used to enable debugging in the tests. For more -information see the link: -https://docs.openstack.org/oslotest/latest/user/features.html - - -Running individual tests -~~~~~~~~~~~~~~~~~~~~~~~~ - -For running individual test modules or cases, you just need to pass -the dot-separated path to the module you want as an argument to it. - -For executing a specific test case, specify the name of the test case -class separating it from the module path with a colon. - -For example, the following would run only the Testodll3 tests from -networking_odl/tests/unit/l3/test_odl_l3.py :: - - $ tox -e py27 networking_odl.tests.unit.l3.test_l3_odl.Testodll3 - -Adding more tests -~~~~~~~~~~~~~~~~~ - -There might not be full coverage yet. New patches for adding tests -which are not there are always welcome. - -To get a grasp of the areas where tests are needed, you can check -current coverage by running:: - - $ tox -e cover - -Debugging ---------- - -It's possible to debug tests in a tox environment:: - - $ tox -e venv -- python -m testtools.run [test module path] - -Tox-created virtual environments (venv's) can also be activated -after a tox run and reused for debugging:: - - $ tox -e venv - $ . .tox/venv/bin/activate - $ python -m testtools.run [test module path] - -Tox packages and installs the neutron source tree in a given venv -on every invocation, but if modifications need to be made between -invocation (e.g. adding more pdb statements), it is recommended -that the source tree be installed in the venv in editable mode:: - - # run this only after activating the venv - $ pip install --editable . - -Editable mode ensures that changes made to the source tree are -automatically reflected in the venv, and that such changes are not -overwritten during the next tox run. - -Running functional tests ------------------------- -Neutron defines different classes of test cases. One of them is functional -test. It requires pre-configured environment. But it's lighter than -running devstack or openstack deployment. -For definitions of functional tests, please refer to: -https://docs.openstack.org/neutron/latest/contributor/index.html - -The script is provided to setup the environment. 
-At first make sure the latest version of pip command:: - - # ensure you have the latest version of pip command - # for example on ubuntu - $ sudo apt-get install python-pip - $ sudo pip --upgrade pip - -And then run functional test as follows:: - - # assuming devstack is setup with networking-odl - $ cd networking-odl - $ ./tools/configure_for_func_testing.sh /path/to/devstack - $ tox -e dsvm-functional - - -For setting up devstack, please refer to neutron documentation: - -* https://wiki.openstack.org/wiki/NeutronDevstack -* https://docs.openstack.org/neutron/latest/contributor/index.html -* https://docs.openstack.org/neutron/latest/contributor/testing/testing.html diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb76..000000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index 5b8b588f0..000000000 --- a/devstack/README.rst +++ /dev/null @@ -1,186 +0,0 @@ -====================== - Enabling in Devstack -====================== - -1. Download DevStack - -2. Copy the sample local.conf over:: - - cp devstack/local.conf.example local.conf - -3. Optionally, to manually configure this: - - Add this repo as an external repository:: - - > cat local.conf - [[local|localrc]] - enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl - -4. Optionally, to enable support for OpenDaylight L3 router functionality, - add the below:: - - > cat local.conf - [[local|localrc]] - ODL_L3=True - - .. note:: - - This is only relevant when using old netvirt (ovsdb based, default). - -5. If you need to route the traffic out of the box (e.g. br-ex), set - ODL_PROVIDER_MAPPINGS to map the physical provider network to device - mapping, as shown below:: - - > cat local.conf - [[local|localrc]] - ODL_L3=True - ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-br-ex:eth2} # for old netvirt (ovsdb based) - ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-physnet1:eth2} # for new netvirt (vpnservice based) - -6. Optionally, to enable support for OpenDaylight with LBaaS V2, add this:: - - > cat local.conf - [[local|localrc]] - enable_plugin neutron-lbaas http://git.openstack.org/openstack/neutron-lbaas - enable_service q-lbaasv2 - NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:opendaylight:networking_odl.lbaas.driver_v2.OpenDaylightLbaasDriverV2:default" - -7. run ``stack.sh`` - -8. Note: In a multi-node devstack environment, for each compute node you will - want to add this to the local.conf file:: - - > cat local.conf - [[local|localrc]] - enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl - ODL_MODE=compute - -9. Note: In a node using a release of Open vSwitch provided from another source - than your Linux distribution you have to enable in your local.conf skipping - of OVS installation step by setting *SKIP_OVS_INSTALL=True*. For example - when stacking together with `networking-ovs-dpdk - `_ Neutron plug-in to - avoid conflicts between openvswitch and ovs-dpdk you have to add this to - the local.conf file:: - - > cat local.conf - [[local|localrc]] - enable_plugin networking-ovs-dpdk http://git.openstack.org/openstack/networking-ovs-dpdk - enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl - SKIP_OVS_INSTALL=True - -10. 
Note: Optionally, to use the new netvirt implementation - (netvirt-vpnservice-openstack), add the following to the local.conf file - (only allinone topology is currently supported by devstack, since tunnel - endpoints are not automatically configured). For tunnel configurations - after loading devstack, please refer to this guide - https://wiki.opendaylight.org/view/Netvirt:_L2Gateway_HowTo#Configuring_Tunnels:: - - > cat local.conf - [[local|localrc]] - ODL_NETVIRT_KARAF_FEATURE=odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-vpnservice-openstack - ODL_BOOT_WAIT_URL=restconf/operational/network-topology:network-topology/ # Workaround since netvirt:1 no longer exists in DS! - -11. Note: To enable Quality Of Service (QoS) with OpenDaylight Backend, - add the following lines in neutron.conf:: - - > in /etc/neutron/neutron.conf - service_plugins = qos, odl-router - - enable qos extension driver in ml2 conf:: - - > in /etc/neutron/plugins/ml2/ml2_conf.ini - extensions_drivers = qos, port_security - - restart neutron service q-svc - - -12. Note: legacy netvirt specific options - - - OVS conntrack support - - :variable: ODL_LEGACY_NETVIRT_CONNTRACK By default it's False for - compatibility and version requirements. - - - version requirement - - :ODL version: Boron release or later. - (ODL legacy netvirt support is from Beryllium. But - networking-odl devstack supports Boron+) - - :OVS version: 2.5 or later - - enable OVS conntrack support:: - - > cat local.conf - [[local|localrc]] - ODL_LEGACY_NETVIRT_CONNTRACK=True - -13. Note: To enable Vlan Aware VMs (Trunk) with OpenDaylight Backend, - make the following entries in local.conf:: - - > cat local.conf - [[local|localrc]] - Q_SERVICE_PLUGIN_CLASSES=trunk - -14. Enabling L2Gateway Backend for OpenDaylight - -- The package networking-l2gw must be installed as a pre-requisite. - - So include in your localrc (or local.conf) the following:: - - enable_plugin networking-l2gw http://git.openstack.org/openstack/networking-l2gw - enable_service l2gw_plugin - NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver_v2.OpenDaylightL2gwDriver:default - -- Now stack up Devstack and after stacking completes, we are all set to use - l2gateway-as-a-service with OpenDaylight. - -15. Note: To enable Service Function Chaining support driven by networking-sfc, - the following steps have to be taken: - - - local.conf should contain the following lines:: - - # enable our plugin: - enable_plugin networking-odl https://github.com/openstack/networking-odl.git - - # enable the networking-sfc plugin: - enable_plugin networking-sfc https://github.com/openstack/networking-sfc.git - - # enable the odl-netvirt-sfc Karaf feature in OpenDaylight - ODL_NETVIRT_KARAF_FEATURE+=,odl-netvirt-sfc - - # enable the networking-sfc OpenDaylight driver pair - [[post-config|$NEUTRON_CONF]] - [sfc] - drivers = odl_v2 - [flowclassifier] - drivers = odl_v2 - - - A special commit of Open vSwitch should be compiled and installed - (containing compatible NSH OpenFlow support). This isn't - done automatically by networking-odl or DevStack, so the user has to - manually install. 
Please follow the instructions in: - https://wiki.opendaylight.org/view/Service_Function_Chaining:Main#Building_Open_vSwitch_with_VxLAN-GPE_and_NSH_support - - - Carbon is the recommended and latest version of OpenDaylight to use, - you can specify it by adding the following to local.conf:: - - ODL_RELEASE=carbon-snapshot-0.6 - - - To clarify, OpenDaylight doesn't have to be running/installed before - stacking with networking-odl (and it shouldn't). The networking-odl - DevStack plugin will download and start OpenDaylight automatically. - However, it will not fetch the correct Open vSwitch version, so the - instructions above and the usage of ``SKIP_OVS_INSTALL`` are important. - -16. To enable BGPVPN driver to use with OpenDaylight controller - Include the following lines in your localrc (or local.conf):: - - enable_plugin networking-bgpvpn https://git.openstack.org/openstack/networking-bgpvpn.git - - [[post-config|$NETWORKING_BGPVPN_CONF]] - [service_providers] - service_provider=BGPVPN:OpenDaylight.networking_odl.bgpvpn.odl_v2.OpenDaylightBgpvpnDriver:default - - and then stack up your devstack. diff --git a/devstack/devstackgaterc b/devstack/devstackgaterc deleted file mode 100644 index 877774b65..000000000 --- a/devstack/devstackgaterc +++ /dev/null @@ -1,113 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# This script is executed in the OpenStack CI job that runs DevStack + tempest. -# You can find the CI job configuration here: -# -# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/networking-odl.yaml -# - -# TODO(yamahata): tempest test is run serially at the moment and -# we're occasionally hitting timeout of 120 mins. For now as work around, -# lengthen timeout a bit. -# In near future(Ocata cycle) after migrating to new ODL netvirt(conserves), -# parallel execution should be enabled and remove this work around. 
-if [[ -z "${RALLY_SCENARIO}" && -z "${GRENADE_PLUGINRC}" ]] ; then - export BUILD_TIMEOUT=180 - export DEVSTACK_GATE_TIMEOUT=$(expr $BUILD_TIMEOUT - $DEVSTACK_GATE_TIMEOUT_BUFFER) -fi - -export OVERRIDE_ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,cinder,dstat,g-api,g-reg,key,mysql,n-api,n-cond,n-cpu,n-crt,n-obj,n-sch,q-dhcp,q-meta,q-svc,quantum,rabbit,placement-api,n-api-meta -if [ -z "${RALLY_SCENARIO}" ] ; then - # Only include tempest if this is not a rally job, As running tempest in Rally is likely to cause failure - export OVERRIDE_ENABLED_SERVICES=${OVERRIDE_ENABLED_SERVICES},tempest -fi - -# NOTE(manjeets) To prevent create of public network twice -if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]] ; then - - # NOTE(manjeets) Temporarily disabling LM test due to bug 1643678 - # https://bugs.launchpad.net/networking-odl/+bug/1643678 - export DEVSTACK_LOCAL_CONFIG+=$'\n'"LIVE_MIGRATION_AVAILABLE=False" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=False" - # DEVSTACK_GATE_NEUTRON_DVR in devstack-gate set Q_DVR_MODE as dvr_snat - export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_DVR_MODE=legacy" - export DEVSTACK_SUBNODE_CONFIG+=$'\n'"Q_DVR_MODE=legacy" - - export DEVSTACK_SUBNODE_CONFIG+=$'\n'"disable_all_services" - export DEVSTACK_SUBNODE_CONFIG+=$'\n'"ENABLED_SERVICES=n-cpu,dstat,c-vol,c-bak,mysql,placement-client" - export DEVSTACK_SUBNODE_CONFIG+=$'\n'"RABBIT_HOST=\$SERVICE_HOST" - export DEVSTACK_SUBNODE_CONFIG+=$'\n'"ODL_MODE=compute" - export DEVSTACK_SUBNODE_CONFIG+=$'\n'"enable_plugin networking-odl git://git.openstack.org/openstack/networking-odl" - export DEVSTACK_SUBNODE_CONFIG+=$'\n'"LIBVIRT_TYPE=qemu" -fi - -# Begin list of exclusions. -r="^(?!.*" - -# exclude the slow tag (part of the default for 'full') -r="$r(?:.*\[.*\bslow\b.*\])" - -# exclude things that just aren't enabled: -r="$r|(?:tempest\.api\.network\.admin\.test_quotas\.QuotasTest\.test_lbaas_quotas.*)" -r="$r|(?:tempest\.api\.network\.test_load_balancer.*)" -r="$r|(?:tempest\.scenario\.test_load_balancer.*)" -r="$r|(?:tempest\.api\.network\.admin\.test_load_balancer.*)" -r="$r|(?:tempest\.api\.network\.admin\.test_lbaas.*)" -r="$r|(?:tempest\.api\.network\.test_fwaas_extensions.*)" -r="$r|(?:tempest\.api\.network\.test_vpnaas_extensions.*)" -r="$r|(?:tempest\.api\.network\.test_metering_extensions.*)" -r="$r|(?:tempest\.thirdparty\.boto\.test_s3.*)" - -# exclude stuff we're less likely to break because i'm impatient -r="$r|(?:tempest\.api\.identity.*)" -r="$r|(?:tempest\.api\.image.*)" -r="$r|(?:tempest\.api\.volume.*)" - -# unsupported features -# ODL legacy netvirt doesn't support ipv6 -r="$r|(?:tempest\.scenario\.test_network_v6\.TestGettingAddress.*)" - -# Current list of failing tests that need to be triaged, have bugs filed, and -# fixed as appropriate. 
-# (none) - -# TODO(yamahata): fix bugs and remove those tests from here -# BUG: https://bugs.launchpad.net/networking-odl/+bug/1642158 -# legacy netvirt ignores admin-state-up state for network/port -r="$r|(?:tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_update_instance_port_admin_state.*)" -r="$r|(?:tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_update_router_admin_state.*)" - -# BUG: https://bugs.launchpad.net/networking-odl/+bug/1643033 -# stateful security group: conntracking needs to be enabled -r="$r|(?:tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_hotplug_nic.*)" -r="$r|(?:tempest\.scenario\.test_security_groups_basic_ops\.TestSecurityGroupsBasicOps\.test_cross_tenant_traffic.*)" -r="$r|(?:tempest\.scenario\.test_security_groups_basic_ops\.TestSecurityGroupsBasicOps\.test_port_security_disable_security_group.*)" - -# BUG: https://bugs.launchpad.net/networking-odl/+bug/1656129 -# exluding some tests temporarily -if [ -n $DEVSTACK_GATE_GRENADE ]; then - # Disable some tempest tests temporarily on - # grenade job - r="$r|(?:tempest\.scenario\.test_encrypted_cinder_volumes\.TestEncryptedCinderVolumes\.test_encrypted_cinder_volumes_cryptsetup.*)" - r="$r|(?:tempest\.scenario\.test_encrypted_cinder_volumes\.TestEncryptedCinderVolumes\.test_encrypted_cinder_volumes_luks.*)" - r="$r|(?:tempest\.scenario\.test_minimum_basic\.TestMinimumBasicScenario\.test_minimum_basic_scenario.*)" -fi - -# End list of exclusions. -r="$r)" - -# only run tempest.api/scenario/thirdparty tests (part of the default for 'full') -r="$r(tempest\.(api|scenario|thirdparty)).*$" - -export DEVSTACK_GATE_TEMPEST_REGEX="$r" diff --git a/devstack/entry_points b/devstack/entry_points deleted file mode 100644 index ed9a2263a..000000000 --- a/devstack/entry_points +++ /dev/null @@ -1,369 +0,0 @@ -#!/bin/bash - - -# cleanup_opendaylight() - Remove residual data files, anything left over -# from previous runs that a clean run would need to clean up -function cleanup_opendaylight { - # Wipe out the data, journal and snapshots directories ... 
grumble grumble grumble - rm -rf $ODL_DIR/$ODL_NAME/{data,journal,snapshots} - - # Remove existing logfiles - if [[ -n "$LOGDIR" ]]; then - rm -f "$LOGDIR/$ODL_KARAF_LOG_BASE*" - fi - if [[ -n "$SCREEN_LOGDIR" ]]; then - rm -f "$SCREEN_LOGDIR/$ODL_KARAF_LOG_BASE*" - fi - rm -f "$DEST/logs/$ODL_KARAF_LOG_BASE*" - - move_interface_addresses "outof_bridge" - - unbind_opendaylight_controller -} - - -# configure_opendaylight() - Set config files, create data dirs, etc -function configure_opendaylight { - echo "Configuring OpenDaylight" - - # The logging config file in ODL - local ODL_LOGGING_CONFIG=${ODL_DIR}/${ODL_NAME}/etc/org.ops4j.pax.logging.cfg - - # Add netvirt feature in Karaf, if it's not already there - local ODLFEATUREMATCH=$(cat $ODL_DIR/$ODL_NAME/etc/org.apache.karaf.features.cfg | \ - grep featuresBoot= | grep $ODL_NETVIRT_KARAF_FEATURE) - if [ "$ODLFEATUREMATCH" == "" ]; then - sed -i "/^featuresBoot=/ s/$/,$ODL_NETVIRT_KARAF_FEATURE/" \ - $ODL_DIR/$ODL_NAME/etc/org.apache.karaf.features.cfg - fi - - # Move Jetty to $ODL_PORT - local _ODLPORT=$(cat $ODL_DIR/$ODL_NAME/etc/jetty.xml | grep $ODL_PORT) - if [ "$_ODLPORT" == "" ]; then - sed -i "/\> $ODL_DIR/$ODL_NAME/etc/custom.properties - fi - - # Configure L3 GW MAC if it's not there - local L3GW_MAC=$(cat $ODL_DIR/$ODL_NAME/etc/custom.properties | \ - grep ^ovsdb.l3gateway.mac) - if [[ -z "$L3GW_MAC" && -n "$ODL_L3GW_MAC" ]]; then - echo "ovsdb.l3gateway.mac=$ODL_L3GW_MAC" >> $ODL_DIR/$ODL_NAME/etc/custom.properties - fi - fi - - # Remove existing logfiles - local ODL_LOGDIR=$DEST/logs - if [[ -n "$LOGDIR" ]]; then - ODL_LOGDIR=$LOGDIR - fi - - rm -f "$ODL_LOGDIR/$ODL_KARAF_LOG_BASE*" - # Log karaf output to a file - _LF=$ODL_LOGDIR/$ODL_KARAF_LOG_NAME - LF=$(echo $_LF | sed 's/\//\\\//g') - # Soft link for easy consumption - sudo mkdir -p "$ODL_LOGDIR" - ln -sf $_LF "$ODL_LOGDIR/screen-karaf.log" - if [[ -n $SCREEN_LOGDIR ]]; then - ln -sf $_LF "$SCREEN_LOGDIR/screen-karaf.log" - fi - - # Change the karaf logfile - # disable log rotation by setting max fiel size large enough - sed -i -e "/^log4j\.appender\.out\.file/ s/.*/log4j\.appender\.out\.file\=$LF/" \ - -e "/^log4j\.appender\.out\.maxFileSize/ s/.*/log4j\.appender\.out\.maxFileSize\=1024GB/" \ - $ODL_DIR/$ODL_NAME/etc/org.ops4j.pax.logging.cfg - - # Configure DEBUG logs for network virtualization in odl, if the user wants it - if [ "${ODL_NETVIRT_DEBUG_LOGS}" == "True" ]; then - local OVSDB_DEBUG_LOGS=$(cat $ODL_LOGGING_CONFIG | grep ^log4j.logger.org.opendaylight.ovsdb) - if [ "${OVSDB_DEBUG_LOGS}" == "" ]; then - echo 'log4j.logger.org.opendaylight.ovsdb = TRACE, out' >> $ODL_LOGGING_CONFIG - echo 'log4j.logger.org.opendaylight.ovsdb.lib = INFO, out' >> $ODL_LOGGING_CONFIG - echo 'log4j.logger.org.opendaylight.ovsdb.openstack.netvirt.impl.NeutronL3Adapter = DEBUG, out' >> $ODL_LOGGING_CONFIG - echo 'log4j.logger.org.opendaylight.ovsdb.openstack.netvirt.impl.TenantNetworkManagerImpl = DEBUG, out' >> $ODL_LOGGING_CONFIG - echo 'log4j.logger.org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.services.arp.GatewayMacResolverService = DEBUG, out' >> $ODL_LOGGING_CONFIG - echo 'log4j.logger.org.opendaylight.ovsdb.plugin.md.OvsdbInventoryManager = INFO, out' >> $ODL_LOGGING_CONFIG - fi - local ODL_NEUTRON_DEBUG_LOGS=$(cat $ODL_LOGGING_CONFIG | \ - grep ^log4j.logger.org.opendaylight.neutron) - if [ "${ODL_NEUTRON_DEBUG_LOGS}" == "" ]; then - echo 'log4j.logger.org.opendaylight.neutron = TRACE, out' >> $ODL_LOGGING_CONFIG - fi - fi -} - -# 
configure_neutron_opendaylight() - Set Neutron config files according to ODL settings -function configure_neutron_odl { - echo "Configuring ML2 for OpenDaylight" - - # https://bugs.launchpad.net/neutron/+bug/1614766 - # Allow ovsdb_interface native by avoiding port conflict. - if [[ -n "$ODL_OVSDB_ALTPORT" ]]; then - iniset $NEUTRON_CONF OVS ovsdb_connection tcp:127.0.0.1:$ODL_OVSDB_ALTPORT - iniset $NEUTRON_DHCP_CONF OVS ovsdb_connection tcp:127.0.0.1:$ODL_OVSDB_ALTPORT - fi - - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl url=$ODL_ENDPOINT - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl username=$ODL_USERNAME - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl password=$ODL_PASSWORD - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl port_binding_controller=$ODL_PORT_BINDING_CONTROLLER - if [[ -n "$ODL_TIMEOUT" ]]; then - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl timeout=$ODL_TIMEOUT - fi - # When it's not set, the default value is set by networking-odl - if [[ -n "$ODL_HOSTCONF_URI" ]]; then - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl odl_hostconf_uri=$ODL_HOSTCONF_URI - fi - - # NOTE(mgkwill): ODL layer-3 and DHCP services currently lack support - # for metadata. Enabling both native services also requires enabling - # config drive to provide instances with metadata. If conventional DHCP agent - # is used instead, configure it to provide instances with metadata. - if is_service_enabled q-dhcp; then - # Conventional DHCP agent must provide all metadata when ODL - # layer-3 is enabled. The conventional DHCP agent will be forced - # to provide metadata for all networks. - iniset $Q_DHCP_CONF_FILE DEFAULT force_metadata True - fi - if [[ "$ODL_L3" == "True" ]]; then - if is_service_enabled n-cpu; then - iniset $NOVA_CONF DEFAULT force_config_drive True - fi - fi -} - -function configure_neutron_odl_lightweight_testing { - echo "Configuring lightweight testing for OpenDaylight" - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl enable_lightweight_testing=True -} - -# init_opendaylight() - Initialize databases, etc. -function init_opendaylight { - # clean up from previous (possibly aborted) runs - # create required data files - : -} - - -# install_opendaylight() - Collect source and prepare -function install_opendaylight { - echo "Installing OpenDaylight and dependent packages" - if [[ "$ODL_USING_EXISTING_JAVA" != "True" ]] - then - if ! setup_java "${ODL_REQUIRED_JAVA_VERSION:-7}"; then - exit 1 - fi - fi - - # Download OpenDaylight - cd $ODL_DIR - - if [[ "$OFFLINE" != "True" ]]; then - wget -N $ODL_URL/$ODL_PKG - fi - unzip -u -o $ODL_PKG -} - - -# install_networking_odl() - Install the ML2 driver and other plugins/drivers -function install_networking_odl { - echo "Installing the Networking-ODL driver for OpenDaylight" - setup_develop $NETWORKING_ODL_DIR -} - - -# install_opendaylight_compute() - Make sure OVS is installed -function install_opendaylight_compute { - if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then - echo "Skipping OVS installation." - else - # packages are the same as for Neutron OVS agent - _neutron_ovs_base_install_agent_packages - fi -} - - -# start_opendaylight() - Start running processes, including screen -function start_opendaylight { - echo "Starting OpenDaylight" - - # Wipe out the data and journal directories ... grumble grumble grumble - rm -rf $ODL_DIR/$ODL_NAME/{data,journal} - - # The following variables are needed by the running karaf process. - # See the "bin/setenv" file in the OpenDaylight distribution for - # their individual meaning. 
- setup_java_env - export JAVA_MIN_MEM=$ODL_JAVA_MIN_MEM - export JAVA_MAX_MEM=$ODL_JAVA_MAX_MEM - export JAVA_MAX_PERM_MEM=$ODL_JAVA_MAX_PERM_MEM - - # this is a forking process, just start it in the background - $ODL_DIR/$ODL_NAME/bin/start - - if [ -n "$ODL_BOOT_WAIT_URL" ]; then - echo "Waiting for OpenDaylight to start via $ODL_BOOT_WAIT_URL ..." - # Probe ODL restconf for netvirt until it is operational - local testcmd="curl -o /dev/null --fail --silent --head -u \ - ${ODL_USERNAME}:${ODL_PASSWORD} http://${ODL_MGR_HOST}:${ODL_PORT}/${ODL_BOOT_WAIT_URL}" - test_with_retry "$testcmd" "OpenDaylight did not start after $ODL_BOOT_WAIT" \ - $ODL_BOOT_WAIT $ODL_RETRY_SLEEP_INTERVAL - else - echo "Waiting for OpenDaylight to start ..." - # Sleep a bit to let OpenDaylight finish starting up - sleep $ODL_BOOT_WAIT - fi -} - - -# stop_opendaylight() - Stop running processes (non-screen) -function stop_opendaylight { - # Stop the karaf container - $ODL_DIR/$ODL_NAME/bin/stop -} - - -# cleanup_opendaylight_compute() - Remove all OVS ports, bridges and disconnects -# controller from switch -function cleanup_opendaylight_compute { - # Remove the patch ports - for port in $(sudo ovs-vsctl show | grep Port | awk '{print $2}' | cut -d '"' -f 2 | grep patch); do - sudo ovs-vsctl del-port ${port} - done - - # remove all OVS ports that look like Neutron created ports - for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do - sudo ovs-vsctl del-port ${port} - done - - # Remove all the vxlan ports - for port in $(sudo ovs-vsctl list port | grep name | grep vxlan | awk '{print $3}' | cut -d '"' -f 2); do - sudo ovs-vsctl del-port ${port} - done - - # Disconnect controller from switch - unbind_opendaylight_controller - - # remove all OVS bridges created by ODL - for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BR} -e ${PUBLIC_BRIDGE}); do - sudo ovs-vsctl del-br ${bridge} - done -} - -# bind_opendaylight_controller() - set control manager to OVS -function bind_opendaylight_controller { - echo_summary "Initializing OpenDaylight" - ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP} - ODL_MGR_PORT=${ODL_MGR_PORT:-6640} - read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . 
_uuid) - local ODL_MANAGERS_PARAM=() - for manager in $(echo $ODL_OVS_MANAGERS | tr "," "\n"); do - local manager_ip=$(gethostip -d ${manager}) - ODL_MANAGERS_PARAM=( "${ODL_MANAGERS_PARAM[@]}" "tcp:${manager_ip}:$ODL_MGR_PORT" ) - done - # don't overwrite the already existing managers - local ODL_MANAGERS_OLD=$(sudo ovs-vsctl get-manager) - local ODL_MANAGERS=$(echo $ODL_MANAGERS_OLD ${ODL_MANAGERS_PARAM[@]} | tr ' ' '\n' | sort | uniq | tr '\n' ' ') - sudo ovs-vsctl set-manager ${ODL_MANAGERS} - if [[ -n "$PUBLIC_BRIDGE" ]]; then - sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE - fi - if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then - sudo ovs-vsctl set Open_vSwitch $ovstbl \ - other_config:provider_mappings=$ODL_PROVIDER_MAPPINGS - fi - sudo ovs-vsctl set Open_vSwitch $ovstbl other_config:local_ip=$ODL_LOCAL_IP - # for pseudo agent port binding - if [ "$ODL_PORT_BINDING_CONTROLLER" == "pseudo-agentdb-binding" ]; then - ODL_OVS_HOSTCONFIGS_OPTIONS=${ODL_OVS_HOSTCONFIGS_OPTIONS:---debug --noovs_dpdk} - if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then - ODL_OVS_HOSTCONFIGS_OPTIONS="${ODL_OVS_HOSTCONFIGS_OPTIONS} --bridge_mappings=${ODL_PROVIDER_MAPPINGS}" - fi - if [[ -n "$ODL_OVS_HOSTCONFIGS" ]]; then - ODL_OVS_HOSTCONFIGS_OPTIONS=${ODL_OVS_HOSTCONFIGS_OPTIONS} --ovs_hostconfigs="$ODL_OVS_HOSTCONFIGS" - fi - if [[ ! -f $NEUTRON_CONF ]]; then - sudo neutron-odl-ovs-hostconfig $ODL_OVS_HOSTCONFIGS_OPTIONS - else - sudo neutron-odl-ovs-hostconfig --config-file=$NEUTRON_CONF $ODL_OVS_HOSTCONFIGS_OPTIONS - fi - fi -} - -# unbind_opendaylight_controller() - disconnect controller from switch and clear bridges -function unbind_opendaylight_controller { - sudo ovs-vsctl del-manager - BRIDGES=$(sudo ovs-vsctl list-br) - for bridge in $BRIDGES ; do - sudo ovs-vsctl del-controller $bridge - done -} - - -function _configure_veth { - ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 || - sudo ip link add $Q_PUBLIC_VETH_INT type veth \ - peer name $Q_PUBLIC_VETH_EX - sudo ip link set $Q_PUBLIC_VETH_INT up - sudo ip link set $Q_PUBLIC_VETH_EX up - sudo ip addr flush dev $Q_PUBLIC_VETH_EX - if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_OVSDB," ]]; then - local OVSBR_EX=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f1) - sudo ovs-vsctl --may-exist add-port $OVSBR_EX $Q_PUBLIC_VETH_INT - else - sudo ovs-vsctl --may-exist add-port $OVS_BR $Q_PUBLIC_VETH_INT - fi - - local cidr_len=${FLOATING_RANGE#*/} - sudo ip addr replace ${PUBLIC_NETWORK_GATEWAY}/$cidr_len dev $Q_PUBLIC_VETH_EX - sudo ip route replace $FLOATING_RANGE dev $Q_PUBLIC_VETH_EX - if [[ -n "$IPV6_PUBLIC_RANGE" ]] && [[ -n "$IPV6_PUBLIC_NETWORK_GATEWAY" ]] && [[ -n "$FIXED_RANGE_V6" ]] && [[ -n "$IPV6_ROUTER_GW_IP" ]]; then - local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - sudo ip -6 addr replace ${IPV6_PUBLIC_NETWORK_GATEWAY}/$ipv6_cidr_len dev ${Q_PUBLIC_VETH_EX} - sudo ip -6 route replace $IPV6_PUBLIC_RANGE dev $Q_PUBLIC_VETH_EX - fi -} - -function _configure_opendaylight_l3_legacy_netvirt { - wait_for_active_bridge $PUBLIC_BRIDGE $ODL_RETRY_SLEEP_INTERVAL $ODL_BOOT_WAIT - - if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then - _configure_veth - fi -} - -function _configure_opendaylight_l3_new_netvirt { - if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then - _configure_veth - fi -} - - -# configure_opendaylight_l3() - configure bridges for OpenDaylight L3 forwarding -function configure_opendaylight_l3 { - if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_OVSDB," ]]; then - _configure_opendaylight_l3_legacy_netvirt - 
else - _configure_opendaylight_l3_new_netvirt - fi -} diff --git a/devstack/files/debs/networking-odl b/devstack/files/debs/networking-odl deleted file mode 100644 index 098269fe3..000000000 --- a/devstack/files/debs/networking-odl +++ /dev/null @@ -1 +0,0 @@ -syslinux-utils diff --git a/devstack/files/rpms/networking-odl b/devstack/files/rpms/networking-odl deleted file mode 100644 index 7c0641151..000000000 --- a/devstack/files/rpms/networking-odl +++ /dev/null @@ -1 +0,0 @@ -syslinux diff --git a/devstack/functions b/devstack/functions deleted file mode 100644 index 2569df078..000000000 --- a/devstack/functions +++ /dev/null @@ -1,159 +0,0 @@ -#!/bin/bash -# -# functions - OpenDaylight driver utility functions - -function _odl_nexus_path { - local ODL_URL_PREFIX=$1 - echo "${NEXUSPATH:-${ODL_URL_PREFIX}/${ODL_URL_SNAPSHOT_REPOSITORY_PATH}/org/opendaylight/integration/distribution-karaf}" -} - -function _wget { - local MAVENMETAFILE=$1 - local URL=$2 - local $OFFLINE=$3 - - if [[ "$OFFLINE" == "True" ]]; then - return - fi - - # Remove stale MAVENMETAFILE for cases where you switch releases - rm -f $MAVENMETAFILE - - # Acquire the timestamp information from maven-metadata.xml - wget -O $MAVENMETAFILE $URL -} - -function _xpath { - local XPATH=$1 - local MAVENMETAFILE=$2 - local result="" - if is_ubuntu; then - install_package libxml-xpath-perl >/dev/null - result=`xpath -e "$XPATH" $MAVENMETAFILE 2>/dev/null` - elif [ "$os_VENDOR" = "Fedora" ]; then - yum_install perl-XML-XPath >/dev/null - result=`xpath -e "$XPATH" $MAVENMETAFILE 2>/dev/null` - else - yum_install perl-XML-XPath >/dev/null - result=`xpath $MAVENMETAFILE "$XPATH" 2>/dev/null` - fi - echo $result -} - -# get snapshot version . -> .. -function odl_snapshot_full_version { - local ODL_DIR=$1 - local ODL_URL_PREFIX=$2 - local MAJOR_MINOR=$3 - local OFFLINE=$4 - - local MAVENMETAFILE=$ODL_DIR/maven-metadata-snapshot.xml - local NEXUSPATH=$(_odl_nexus_path $ODL_URL_PREFIX) - _wget $MAVENMETAFILE ${NEXUSPATH}/maven-metadata.xml $OFFLINE - if [[ ! -r $MAVENMETAFILE ]]; then - echo "$MAVENMETAFILE doesn't exist. 
Please try with OFFLINE=False and check internet connection to $NEXUSPATH" - exit 1 - fi - - if [[ "$MAJOR_MINOR" == "latest" ]]; then - local ODL_FULL_VERSION=$(_xpath "//latest/text()" $MAVENMETAFILE) - else - local ODL_FULL_VERSION=$(_xpath "//version[starts-with(text(), '$MAJOR_MINOR')][last()]/text()" $MAVENMETAFILE) - fi - ODL_FULL_VERSION=${ODL_FULL_VERSION/-SNAPSHOT/} - echo $ODL_FULL_VERSION -} - -function _odl_export_snapshot_url_pkg { - local ODL_DIR=$1 - local ODL_URL_PREFIX=$2 - local BUNDLEVERSION=$3 - local OFFLINE=$4 - local BUNDLE_TIMESTAMP=$5 - - local MAVENMETAFILE=$ODL_DIR/maven-metadata.xml - local NEXUSPATH=$(_odl_nexus_path $ODL_URL_PREFIX) - - if [ "$BUNDLE_TIMESTAMP" == "latest" ]; then - # Get build information - _wget $MAVENMETAFILE ${NEXUSPATH}/${BUNDLEVERSION}/maven-metadata.xml $OFFLINE - BUNDLE_TIMESTAMP=$(_xpath "//snapshotVersion[extension='zip'][1]/value/text()" $MAVENMETAFILE) - fi - - export ODL_URL=${NEXUSPATH}/${BUNDLEVERSION} - export ODL_PKG=distribution-karaf-${BUNDLE_TIMESTAMP}.zip -} - -function _odl_export_release_url_pkg { - local ODL_URL_PREFIX=$1 - local BUNDLEVERSION=$2 - local NEXUSPATH="${NEXUSPATH:-${ODL_URL_PREFIX}/${ODL_URL_RELEASE_REPOSITORY_PATH}/org/opendaylight/integration/distribution-karaf}" - - export ODL_URL=${NEXUSPATH}/${BUNDLEVERSION} - export ODL_PKG=distribution-karaf-${BUNDLEVERSION}.zip -} - -function setup_opendaylight_package { - if [[ -n "$ODL_SNAPSHOT_VERSION" ]]; then - _odl_export_snapshot_url_pkg ${ODL_DIR} ${ODL_URL_PREFIX} ${ODL_BUNDLEVERSION} ${OFFLINE} ${ODL_SNAPSHOT_VERSION} - else - _odl_export_release_url_pkg ${ODL_URL_PREFIX} ${ODL_BUNDLEVERSION} - fi -} - -# Test if OpenDaylight is enabled -function is_opendaylight_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0 - return 1 -} - - -# Check that the bridge is up and running -function wait_for_active_bridge { - local BRIDGE=$1 - local SLEEP_INTERVAL=$2 - local MAX_WAIT=$3 - - echo "Waiting for bridge $BRIDGE to be available..." - local testcmd="sudo ovs-vsctl list Bridge | grep $BRIDGE" - test_with_retry "$testcmd" \ - "$BRIDGE did not become available in $MAX_WAIT seconds." \ - $MAX_WAIT $SLEEP_INTERVAL - echo "Bridge $BRIDGE is available." 
-} - -# Move the public IP addresses to the OVS bridge on startup, -# or back to the public interface on cleanup -function move_interface_addresses { - local direction=$1 - - if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then - local VETH_INTERFACE=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f1) - local PHYSICAL_INTERFACE=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f2) - - if [[ "$direction" == "into_bridge" ]]; then - _move_neutron_addresses_route "$PHYSICAL_INTERFACE" "$VETH_INTERFACE" True False "inet" - if _has_public_ipv6_address "$PHYSICAL_INTERFACE"; then - _move_neutron_addresses_route "$PHYSICAL_INTERFACE" "$VETH_INTERFACE" False False "inet6" - fi - elif [[ "$direction" == "outof_bridge" ]]; then - _move_neutron_addresses_route "$VETH_INTERFACE" "$PHYSICAL_INTERFACE" False True "inet" - if _has_public_ipv6_address "$VETH_INTERFACE"; then - _move_neutron_addresses_route "$VETH_INTERFACE" "$PHYSICAL_INTERFACE" False False "inet6" - fi - fi - fi -} - -# Check that the interface has an IP v6 address which -# is routable on external network -function _has_public_ipv6_address { - local interface=$1 - local interface_public_ipv6_addresses=$(ip -f inet6 a s dev "$interface" | grep -c 'global') - echo "$interface public IPv6 address count: $interface_public_ipv6_addresses" - if [[ "$interface_public_ipv6_addresses" != 0 ]]; then - return 0 - else - return 1 - fi -} diff --git a/devstack/local.conf.example b/devstack/local.conf.example deleted file mode 100644 index 7fe13f296..000000000 --- a/devstack/local.conf.example +++ /dev/null @@ -1,109 +0,0 @@ -[[local|localrc]] -# This will fetch the latest ODL snapshot -ODL_RELEASE=latest-snapshot - -# Default is V2 driver, uncomment below line to use V1 -#ODL_V2DRIVER=False - -# Default is psuedo-port-binding-controller -#ODL_PORT_BINDING_CONTROLLER= - - -# Set here which ODL openstack service provider to use -# These are core ODL features -ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs - -# Set DLUX Karaf features needed for the ODL GUI at http://:8181/index.html -ODL_NETVIRT_KARAF_FEATURE+=,odl-dluxapps-nodes,odl-dluxapps-topology,odl-dluxapps-yangui,odl-dluxapps-yangvisualizer - -# Set L2 Karaf features needed for the ODL GUI at http://:8181/index.html -ODL_NETVIRT_KARAF_FEATURE+=,odl-l2switch-switch,odl-l2switch-switch-ui,odl-ovsdb-hwvtepsouthbound-ui,odl-ovsdb-southbound-impl-ui,odl-netvirt-ui - -# Set OpenFlow Karaf features needed for the ODL GUI at http://:8181/index.html -ODL_NETVIRT_KARAF_FEATURE+=,odl-openflowplugin-flow-services-ui - -# odl-netvirt-openstack is used for new netvirt -ODL_NETVIRT_KARAF_FEATURE+=,odl-netvirt-openstack - -# optional feature neutron-logger to log changes of neutron yang models -ODL_NETVIRT_KARAF_FEATURE+=,odl-neutron-logger - -# Switch to using the ODL's L3 implementation -ODL_L3=True - -# Set Host IP here. 
It is externally reachable network, set -# below param to use ip from a different network -HOST_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}') - -# public network connectivity -Q_USE_PUBLIC_VETH=True -Q_PUBLIC_VETH_EX=veth-pub-ex -Q_PUBLIC_VETH_INT=veth-pub-int -ODL_PROVIDER_MAPPINGS=public:${Q_PUBLIC_VETH_INT} - -# Enable debug logs for odl ovsdb -ODL_NETVIRT_DEBUG_LOGS=True - -#Q_USE_DEBUG_COMMAND=True - -DEST=/opt/stack/ -# move DATA_DIR outside of DEST to keep DEST a bit cleaner -DATA_DIR=/opt/stack/data - -ADMIN_PASSWORD=password -MYSQL_PASSWORD=${ADMIN_PASSWORD} -RABBIT_PASSWORD=${ADMIN_PASSWORD} -SERVICE_PASSWORD=${ADMIN_PASSWORD} -SERVICE_TOKEN=supersecrettoken - -enable_service dstat -enable_service g-api -enable_service g-reg -enable_service key -enable_service mysql -enable_service n-api -enable_service n-cond -enable_service n-cpu -enable_service n-crt -enable_service n-novnc -enable_service n-sch -enable_service placement-api -enable_service placement-client -enable_service q-dhcp -enable_service q-meta -enable_service q-svc -enable_service rabbit -enable_service tempest - -# These can be enabled if storage is needed to do -# any feature or testing for integration -disable_service c-api -disable_service c-vol -disable_service c-sch - -SKIP_EXERCISES=boot_from_volume,bundle,client-env,euca - -# Screen console logs will capture service logs. -SYSLOG=False -SCREEN_LOGDIR=/opt/stack/new/screen-logs -LOGFILE=/opt/stack/new/devstacklog.txt -VERBOSE=True -FIXED_RANGE=10.1.0.0/20 -FLOATING_RANGE=172.24.5.0/24 -PUBLIC_NETWORK_GATEWAY=172.24.5.1 -FIXED_NETWORK_SIZE=4096 -VIRT_DRIVER=libvirt - -export OS_NO_CACHE=1 - -# Additional repositories need to be cloned can be added here. -#LIBS_FROM_GIT= - -# Enable MySql Logging -DATABASE_QUERY_LOGGING=True - -# set this until all testing platforms have libvirt >= 1.2.11 -# see bug #1501558 -EBTABLES_RACE_FIX=True - -enable_plugin networking-odl git://git.openstack.org/openstack/networking-odl diff --git a/devstack/odl-etc/opendaylight/datastore/initial/config/netvirt-impl-config_netvirt-impl-config.xml b/devstack/odl-etc/opendaylight/datastore/initial/config/netvirt-impl-config_netvirt-impl-config.xml deleted file mode 100644 index bfd91905d..000000000 --- a/devstack/odl-etc/opendaylight/datastore/initial/config/netvirt-impl-config_netvirt-impl-config.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - true - - diff --git a/devstack/odl-releases/boron-0.5.0 b/devstack/odl-releases/boron-0.5.0 deleted file mode 100644 index 65619d5a6..000000000 --- a/devstack/odl-releases/boron-0.5.0 +++ /dev/null @@ -1 +0,0 @@ -export ODL_BUNDLEVERSION='0.5.0-Boron' diff --git a/devstack/odl-releases/boron-0.5.1-SR1 b/devstack/odl-releases/boron-0.5.1-SR1 deleted file mode 100644 index 56735667b..000000000 --- a/devstack/odl-releases/boron-0.5.1-SR1 +++ /dev/null @@ -1 +0,0 @@ -export ODL_BUNDLEVERSION='0.5.1-Boron-SR1' diff --git a/devstack/odl-releases/boron-0.5.2-SR2 b/devstack/odl-releases/boron-0.5.2-SR2 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/boron-0.5.3-SR3 b/devstack/odl-releases/boron-0.5.3-SR3 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/boron-0.5.4-SR4 b/devstack/odl-releases/boron-0.5.4-SR4 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/boron-snapshot-0.5 b/devstack/odl-releases/boron-snapshot-0.5 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/boron-snapshot-0.5.5 
b/devstack/odl-releases/boron-snapshot-0.5.5 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/carbon-0.6.0 b/devstack/odl-releases/carbon-0.6.0 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/carbon-0.6.1-SR1 b/devstack/odl-releases/carbon-0.6.1-SR1 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/carbon-snapshot-0.6 b/devstack/odl-releases/carbon-snapshot-0.6 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/carbon-snapshot-0.6.2 b/devstack/odl-releases/carbon-snapshot-0.6.2 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/common b/devstack/odl-releases/common deleted file mode 100644 index 57d14e122..000000000 --- a/devstack/odl-releases/common +++ /dev/null @@ -1,77 +0,0 @@ -_XTRACE_ODL_RELEASE_COMMON=$(set +o | grep xtrace) -set -o xtrace - -_odl_release=$1 -if [[ "$_odl_release" =~ -snapshot ]]; then - # -snapshot-.. -> ..-SNAPSHOT - _odl_version=${_odl_release/[[:alpha:]]*-snapshot-/} - if [[ "$_odl_release" == "latest-snapshot" ]]; then - # get latest revision of snapshot - _odl_version=$(odl_snapshot_full_version $ODL_DIR $ODL_URL_PREFIX "latest" $OFFLINE) - # update ODL_RELEASE to prevent odl_snapshot_full_version from being called - # every time networking-odl/devstack/plugin.sh is called by devstack - # latest-snapshot -> latest-snapshot-.. - ODL_RELEASE=${ODL_RELEASE}-${_odl_version} - elif [[ "${_odl_version}" =~ ^[[:digit:]]\.[[:digit:]]$ ]]; then - # get latest revision of given major.minor - # . -> .. - _odl_version=$(odl_snapshot_full_version $ODL_DIR $ODL_URL_PREFIX $_odl_version $OFFLINE) - # update ODL_RELEASE to prevent odl_snapshot_full_version from being called - # every time networking-odl/devstack/plugin.sh is called by devstack - # -snapshot-. -> -snapshot-.. - _odl_revision=${_odl_version/[[:digit:]]\.[[:digit:]]\./} - ODL_RELEASE=${ODL_RELEASE}.${_odl_revision} - fi - _odl_bundleversion_default=${_odl_version}-SNAPSHOT - export ODL_BUNDLEVERSION=${ODL_BUNDLEVERSION:-${_odl_bundleversion_default}} - export ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest} -else - # -..[-SR] -> ..-[-SR] - _name=$(echo ${_odl_release} | awk -F- '{print toupper(substr($1, 1, 1))substr($1, 2)}') - _version=$(echo ${_odl_release} | awk -F- '{print $2}') - _sr=$(echo ${_odl_release} | awk -F- '{print $3}') - _odl_bundleversion_default=${_version}-${_name} - if [[ -n $_sr ]]; then - _odl_bundleversion_default=${_odl_bundleversion_default}-${_sr} - fi - export ODL_BUNDLEVERSION=${ODL_BUNDLEVERSION:-${_odl_bundleversion_default}} -fi - - -# Java major version required to run OpenDaylight: 7, 8, ... 
-# by default, ODL uses jdk 8 as of Boron -export ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8} - -# karaf distribution name of ODL to download -export ODL_NAME=${ODL_NAME:-distribution-karaf-${ODL_BUNDLEVERSION}} - -# The network virtualization older feature name (ovsdb based) -export ODL_NETVIRT_KARAF_FEATURE_OVSDB=${ODL_NETVIRT_KARAF_FEATURE_OVSDB:-odl-ovsdb-openstack} - -# The network virtualization newer feature name (vpnservice based) -export ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE=${ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE:-odl-netvirt-openstack} - -ODL_NETVIRT_KARAF_FEATURE_DEFAULT=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs -# new netvirt has been introduced into netvirt from Boron release -# odl-neutron-logger has been introduced from Boron release -case "$ODL_BUNDLEVERSION" in - 0.5.?-*) - # 0.5.?-* - ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE - ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,odl-neutron-logger - ;; - *) - # 0.6.?-* or later - ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE - ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,odl-neutron-hostconfig-ovs - ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,odl-neutron-logger - ;; -esac - -# The network virtualization feature used by opendaylight loaded by Karaf -export ODL_NETVIRT_KARAF_FEATURE=${ODL_NETVIRT_KARAF_FEATURE:-$ODL_NETVIRT_KARAF_FEATURE_DEFAULT} - -# The url that this version of ODL netvirt can use to know ODL is fully up -export ODL_BOOT_WAIT_URL=${ODL_BOOT_WAIT_URL:-restconf/operational/network-topology:network-topology/topology/netvirt:1} - -$_XTRACE_ODL_RELEASE_COMMON diff --git a/devstack/odl-releases/latest-snapshot b/devstack/odl-releases/latest-snapshot deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/nitrogen-snapshot-0.7 b/devstack/odl-releases/nitrogen-snapshot-0.7 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/odl-releases/nitrogen-snapshot-0.7.0 b/devstack/odl-releases/nitrogen-snapshot-0.7.0 deleted file mode 100644 index e69de29bb..000000000 diff --git a/devstack/override-defaults b/devstack/override-defaults deleted file mode 100644 index 2b89b45d6..000000000 --- a/devstack/override-defaults +++ /dev/null @@ -1,38 +0,0 @@ -# Override few things here as early as we can - -# We will enable the opendaylight ML2 MechanismDriver v1 version by default. -# Note we are also enabling the logger driver, which is helpful for -# debugging things on the Neutron side. -if [[ "$ODL_V2DRIVER" == "True" ]] -then - V2_POSTFIX="_v2" -else - V2_POSTFIX="" -fi - -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-"logger,opendaylight${V2_POSTFIX}"} - -# This triggers the provisioning of L3 resources like routers and -# external network, if not overridden. -Q_L3_ENABLED=${Q_L3_ENABLED:-True} - -# We have to disable the neutron L2 agent. OpenDaylight does not use the -# L2 agent, it instead uses a combination of OpenFlow and OVSDB commands -# to program OVS on each compute and network node host. -disable_service q-agt - -# If ODL_L3 is enabled, then we don't need the L3 agent and OpenDaylight -# is going to act as the ML2's L3 service plugin. -# NETVIRT_VPNSERVICE feature enables ODL L3 by default, so ODL_L3 is disregarded. 
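# Illustrative summary only (a sketch of the intent above, not extra
# configuration): either of the following local.conf settings leads into the
# branch below, which disables the q-l3 agent and picks the odl-router
# (or odl-router_v2, when ODL_V2DRIVER=True) service plugin:
#   ODL_L3=True
#   ODL_NETVIRT_KARAF_FEATURE+=,odl-netvirt-openstack   # the vpnservice feature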
-if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE," ]] || [ "$ODL_L3" == "True" ]; -then - disable_service q-l3 - ML2_L3_PLUGIN="${ML2_L3_PLUGIN:-odl-router${V2_POSTFIX}}" -fi - -# bug work around -# https://bugs.launchpad.net/neutron/+bug/1614766 -# ODL ovsdb listens to 6640 and -# neutron agent with native uses also 6640 to connect to ovsdb-server -# If ODL server and neutron agent run in same box, alternative port is needed. -export ODL_OVSDB_ALTPORT=${ODL_OVSDB_ALTPORT:-6641} diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100644 index 9b18525ee..000000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,156 +0,0 @@ -#!/bin/bash -# -# devstack/plugin.sh -# Functions to control the configuration and operation of the opendaylight service - -# Save trace setting -_XTRACE_NETWORKING_ODL=$(set +o | grep xtrace) -set +o xtrace - -# OpenDaylight directories -NETWORKING_ODL_DIR=${NETWORKING_ODL_DIR:-$DEST/networking-odl} -ODL_DIR=$DEST/opendaylight - -# Make sure $ODL_DIR exists -mkdir -p $ODL_DIR - -# Import utility functions -source $TOP_DIR/functions -source $NETWORKING_ODL_DIR/devstack/functions -source $TOP_DIR/lib/neutron-legacy - -# Import bridge data -source $TOP_DIR/lib/neutron_plugins/ovs_base - -# Import ODL settings -source $NETWORKING_ODL_DIR/devstack/settings.odl -if [ -r $NETWORKING_ODL_DIR/devstack/odl-releases/$ODL_RELEASE ]; then - source $NETWORKING_ODL_DIR/devstack/odl-releases/$ODL_RELEASE -fi -source $NETWORKING_ODL_DIR/devstack/odl-releases/common $ODL_RELEASE - -# Utilities functions for setting up Java -source $NETWORKING_ODL_DIR/devstack/setup_java.sh - -# Import Entry Points -# ------------------- -source $NETWORKING_ODL_DIR/devstack/entry_points - -# Restore xtrace -$_XTRACE_NETWORKING_ODL - -if [[ "$ODL_USING_EXISTING_JAVA" == "True" ]]; then - echo 'Using installed java.' 
- java -version || exit 1 -fi - -# main loop -if is_service_enabled odl-server; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then - install_networking_odl - setup_opendaylight_package - install_opendaylight - configure_opendaylight - init_opendaylight - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - configure_neutron_odl - # This has to start before Neutron - start_opendaylight - elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then - # no-op - : - fi - - if [[ "$1" == "unstack" && "$UNSTACK_KEEP_ODL" != "True" ]]; then - stop_opendaylight - cleanup_opendaylight - fi - - if [[ "$1" == "clean" ]]; then - # no-op - : - fi -fi - -if is_service_enabled odl-compute; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then - install_networking_odl - install_opendaylight_compute - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - if is_service_enabled nova; then - create_nova_conf_neutron - fi - bind_opendaylight_controller - sudo ovs-vsctl --may-exist add-br $OVS_BR - wait_for_active_bridge $OVS_BR $ODL_RETRY_SLEEP_INTERVAL $ODL_BOOT_WAIT - - # L3 needs to be configured only for netvirt-ovsdb - in netvirt-vpnservice L3 is configured - # by provider_mappings, and the provider mappings are added to br-int by default - if [[ "${ODL_L3}" == "True" ]]; then - configure_opendaylight_l3 - fi - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # no-op - : - elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then - # no-op - : - fi - - if [[ "$1" == "unstack" && "$UNSTACK_KEEP_ODL" != "True" ]]; then - cleanup_opendaylight_compute - fi - - if [[ "$1" == "clean" ]]; then - # no-op - : - fi -fi - -if is_service_enabled odl-neutron; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then - install_networking_odl - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - configure_neutron_odl - elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then - # no-op - : - fi - - if [[ "$1" == "unstack" ]]; then - # no-op - : - fi - - if [[ "$1" == "clean" ]]; then - # no-op - : - fi -fi - -if is_service_enabled odl-lightweight-testing; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then - install_networking_odl - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - configure_neutron_odl - configure_neutron_odl_lightweight_testing - elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then - # no-op - : - fi - - if [[ "$1" == "unstack" ]]; then - # no-op - : - fi - - if [[ "$1" == "clean" ]]; then - # no-op - : - fi -fi - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/devstack/post_test_hook.sh b/devstack/post_test_hook.sh deleted file mode 100644 index 83b92fcc5..000000000 --- a/devstack/post_test_hook.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -set -xe - -GATE_DEST=$BASE/new -DEVSTACK_PATH=$GATE_DEST/devstack - -source $DEVSTACK_PATH/functions -source $DEVSTACK_PATH/openrc admin admin - -TEMPEST_CODE_DIR="$BASE/new/tempest" -TEMPEST_DATA_DIR="$DATA_DIR/tempest" -NETWORKING_ODL_DIR="${NETWORKING_ODL_DIR:-$BASE/new/networking-odl}" - -owner=stack -sudo_env="TEMPEST_CONFIG_DIR=$TEMPEST_CODE_DIR/etc" - -cd $TEMPEST_CODE_DIR -sudo chown -R $owner:stack $TEMPEST_CODE_DIR -sudo mkdir -p "$TEMPEST_DATA_DIR" -sudo chown -R $owner:stack $TEMPEST_DATA_DIR - -function _odl_show_info { - sudo ip address - sudo ip link - sudo ip route - sudo ovsdb-client dump - sudo ovs-vsctl show - for br in $(sudo ovs-vsctl list-br); do - echo "--- flows on $br ---" - sudo ovs-ofctl --protocols OpenFlow13 
dump-ports $br - sudo ovs-ofctl --protocols OpenFlow13 dump-ports-desc $br - sudo ovs-ofctl --protocols OpenFlow13 dump-flows $br - done - - openstack network list - openstack port list - openstack subnet list - openstack router list - - # ODL_UESRNAME=admin - # ODL_PASSWORD=admin - # ODL_MGR_HOST=$SERVICE_HOST - # ODL_PORT=8087 - # There is no good way to retrieve from setting.odl at the moment - curl --silent --user admin:admin "http://localhost:8087/restconf/config/neutron:neutron?prettyPrint=true" - echo -e "\n" -} - -echo "Some pre-process info" -_odl_show_info - -echo "Running networking-odl test suite" -set +e -sudo -H -u $owner $sudo_env tox -eall -- "$DEVSTACK_GATE_TEMPEST_REGEX" --serial -retval=$? -set -e - -echo "Some post-process info" -_odl_show_info - -return $retval diff --git a/devstack/pre_test_hook.sh b/devstack/pre_test_hook.sh deleted file mode 100644 index c4a91e72b..000000000 --- a/devstack/pre_test_hook.sh +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env bash - -set -xe - -# Drop a token that marks the build as coming from openstack infra -GATE_DEST=$BASE/new -DEVSTACK_PATH=$GATE_DEST/devstack -# for localrc_set -source $DEVSTACK_PATH/inc/ini-config - -case "$ODL_RELEASE_BASE" in - latest-snapshot) - ODL_RELEASE=latest-snapshot - ;; - nitrogen-snapshot) - ODL_RELEASE=nitrogen-snapshot-0.7 - ;; - carbon-snapshot) - ODL_RELEASE=carbon-snapshot-0.6 - ;; - boron-snapshot) - ODL_RELEASE=boron-snapshot-0.5 - ;; - *) - echo "Unknown ODL release base: $ODL_RELEASE_BASE" - exit 1 - ;; -esac - -if [[ -z "$ODL_GATE_V2DRIVER" ]] && [[ -n "${RALLY_SCENARIO}" ]]; then - ODL_GATE_V2DRIVER=v2driver -fi -case "$ODL_GATE_V2DRIVER" in - v2driver) - ODL_V2DRIVER=True - ;; - v1driver|*) - ODL_V2DRIVER=False - ;; -esac - -ODL_PORT_BINDING_CONTROLLER=pseudo-agentdb-binding - -ODL_GATE_SERVICE_PROVIDER=${ODL_GATE_SERVICE_PROVIDER%-} -if [[ -z "$ODL_GATE_SERVICE_PROVIDER" ]] && [[ -n "${RALLY_SCENARIO}" ]]; then - ODL_GATE_SERVICE_PROVIDER=vpnservice -fi - -case "$ODL_GATE_SERVICE_PROVIDER" in - vpnservice) - ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-openstack - # $PUBLIC_PHYSICAL_NETWORK = public by default - ODL_MAPPING_KEY=public - ;; - netvirt|*) - ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-ovsdb-openstack - # $PUBLIC_BRIDGE = br-ex by default - ODL_MAPPING_KEY=br-ex - ;; -esac - -ODL_NETVIRT_KARAF_FEATURE=$ODL_NETVIRT_KARAF_FEATURE,odl-neutron-logger -case "$ODL_RELEASE_BASE" in - carbon-snapshot|nitrogen-snapshot) - ODL_NETVIRT_KARAF_FEATURE=$ODL_NETVIRT_KARAF_FEATURE,odl-neutron-hostconfig-ovs - ;; -esac - -local localrc_file=$DEVSTACK_PATH/local.conf - -localrc_set $localrc_file "IS_GATE" "True" - -# Set here the ODL release to use for the Gate job -localrc_set $localrc_file "ODL_RELEASE" "${ODL_RELEASE}" - -# Set here which driver, v1 or v2 driver -localrc_set $localrc_file "ODL_V2DRIVER" "${ODL_V2DRIVER}" - -# Set timeout in seconds for http client to ODL neutron northbound -localrc_set $localrc_file "ODL_TIMEOUT" "60" - -# Set here which port binding controller -localrc_set $localrc_file "ODL_PORT_BINDING_CONTROLLER" "${ODL_PORT_BINDING_CONTROLLER}" - -# Set here which ODL openstack service provider to use -localrc_set $localrc_file "ODL_NETVIRT_KARAF_FEATURE" "${ODL_NETVIRT_KARAF_FEATURE}" - -# Switch to using the ODL's L3 implementation -localrc_set $localrc_file "ODL_L3" "True" - -# Since localrc_set adds it in reverse order, 
ODL_PROVIDER_MAPPINGS needs to be -# before depending variables - -if [[ "$ODL_GATE_SERVICE_PROVIDER" == "vpnservice" ]]; then - localrc_set $localrc_file "ODL_PROVIDER_MAPPINGS" "public:br-ex" - localrc_set $localrc_file "PUBLIC_PHYSICAL_NETWORK" "public" - localrc_set $localrc_file "PUBLIC_BRIDGE" "br-ex" - localrc_set $localrc_file "Q_USE_PUBLIC_VETH" "False" -else - localrc_set $localrc_file "ODL_PROVIDER_MAPPINGS" "\${ODL_PROVIDER_MAPPINGS:-${ODL_MAPPING_KEY}:\${Q_PUBLIC_VETH_INT}}" - localrc_set $localrc_file "Q_USE_PUBLIC_VETH" "True" - localrc_set $localrc_file "Q_PUBLIC_VETH_EX" "veth-pub-ex" - localrc_set $localrc_file "Q_PUBLIC_VETH_INT" "veth-pub-int" -fi - -# Enable debug logs for odl ovsdb -localrc_set $localrc_file "ODL_NETVIRT_DEBUG_LOGS" "True" - -localrc_set $localrc_file "RALLY_SCENARIO" "${RALLY_SCENARIO}" - -# delete and recreate network to workaroud netvirt bug: -# https://bugs.opendaylight.org/show_bug.cgi?id=7456 -# https://bugs.opendaylight.org/show_bug.cgi?id=8133 -if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]] || [[ "$ODL_GATE_SERVICE_PROVIDER" == "vpnservice" ]]; then - cat <> $DEVSTACK_PATH/local.sh -#!/usr/bin/env bash - -sudo ifconfig br-ex 172.24.5.1/24 up -source $DEVSTACK_PATH/openrc admin -openstack router unset --external-gateway router1 -openstack port list --router router1 -c ID -f value | xargs -I {} openstack router remove port router1 {} -openstack router delete router1 -openstack subnet list | grep -e public -e private | cut -f2 -d'|' | xargs openstack subnet delete -openstack network list | grep -e public -e private | cut -f2 -d'|' | xargs openstack network delete -openstack network create public --external --provider-network-type=flat --provider-physical-network=public -openstack subnet create --network=public --subnet-range=172.24.5.0/24 --gateway 172.24.5.1 public-subnet -EOF - chmod 755 $DEVSTACK_PATH/local.sh -fi diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index c200bcc35..000000000 --- a/devstack/settings +++ /dev/null @@ -1,114 +0,0 @@ -# Devstack settings - -# Each service you enable has the following meaning: -# odl-neutron - Add this config flag if OpenDaylight controller and OpenStack -# Controller are on different nodes. -# odl-server - Add this config flag if OpenDaylight controller and OpenStack -# Controller are on the same node. -# odl-compute - Add this config flag for OpenStack Compute. -# -# odl-lightweight-testing - Add this config flag for testing neutron ODL ML2 -# driver and networking-odl without a real running -# OpenDaylight instance -# -# NOTE: odl-server includes odl-neutron. -# -# An example of enabling all-in-one ODL is below. -#enable_service odl-compute odl-server - -# This can be overridden in the localrc file -ODL_MODE=${ODL_MODE:-allinone} - -# ODL_MODE is used to configure how devstack works with OpenDaylight. You -# can configure this three ways: -# -# ODL_MODE=allinone -# Use this mode if you want to run ODL in this devstack instance. Useful -# for a single node deployment or on the control node of a multi-node -# devstack environment. -# -# ODL_MODE=compute -# Use this for the compute nodes of a multi-node devstack install. -# -# ODL_MODE=externalodl -# This installs the neutron code for ODL, but does not attempt to -# manage ODL in devstack. This is used for development environments -# similar to the allinone case except where you are using bleeding edge ODL -# which is not yet released, and thus don't want it managed by -# devstack. 
-# -# ODL_MODE=lightweight-testing -# Use this for testing neutron ML2 driver plus networking-odl without -# a running OpenDaylight instance. -# -# ODL_MODE=manual -# You're on your own here, and are enabling services outside the scope of -# the ODL_MODE variable. - -case $ODL_MODE in - allinone) - enable_service odl-server odl-compute - ;; - externalodl) - enable_service odl-neutron odl-compute - ;; - compute) - enable_service odl-compute - ;; - lightweight-testing) - enable_service odl-lightweight-testing - ;; - manual) - echo "Manual mode: Enabling services explicitly." - ;; -esac - - -IS_GATE=$(trueorfalse False IS_GATE) -if [[ "$IS_GATE" == "True" ]] -then - NETWORKING_ODL_DIR=${NETWORKING_ODL_DIR:-$DEST/networking-odl} -fi - -# in tempest.conf -# [networking-feature-enabled] api-extensions -# api-extensions=all means any kind of extensions is enabled irrelevant of -# what plugin supports ML2 plugin with ODL driver supports only the following -# extensions, not all Those list must be maintained as ML2 plugin -# with ODL driver supports more extensions -if [[ -z "$NETWORK_API_EXTENSIONS" ]]; then - NETWORK_API_EXTENSIONS=address-scope - NETWORK_API_EXTENSIONS+=,agent - NETWORK_API_EXTENSIONS+=,allowed-address-pairs - NETWORK_API_EXTENSIONS+=,binding - NETWORK_API_EXTENSIONS+=,dhcp_agent_scheduler - NETWORK_API_EXTENSIONS+=,dns-integration - NETWORK_API_EXTENSIONS+=,dvr - NETWORK_API_EXTENSIONS+=,ext-gw-mode - NETWORK_API_EXTENSIONS+=,external-net - NETWORK_API_EXTENSIONS+=,extra_dhcp_opt - NETWORK_API_EXTENSIONS+=,extraroute - NETWORK_API_EXTENSIONS+=,flavors - NETWORK_API_EXTENSIONS+=,multi-provider - NETWORK_API_EXTENSIONS+=,net-mtu - NETWORK_API_EXTENSIONS+=,network-ip-availability - NETWORK_API_EXTENSIONS+=,pagination - NETWORK_API_EXTENSIONS+=,port-security - NETWORK_API_EXTENSIONS+=,project-id - NETWORK_API_EXTENSIONS+=,provider - NETWORK_API_EXTENSIONS+=,qos - NETWORK_API_EXTENSIONS+=,quotas - NETWORK_API_EXTENSIONS+=,rbac-policies - NETWORK_API_EXTENSIONS+=,router - NETWORK_API_EXTENSIONS+=,router-interface-fip - NETWORK_API_EXTENSIONS+=,security-group - NETWORK_API_EXTENSIONS+=,service-type - NETWORK_API_EXTENSIONS+=,sorting - NETWORK_API_EXTENSIONS+=,standard-attr-description - NETWORK_API_EXTENSIONS+=,standard-attr-revisions - NETWORK_API_EXTENSIONS+=,standard-attr-timestamp - NETWORK_API_EXTENSIONS+=,subnet_allocation - NETWORK_API_EXTENSIONS+=,tag - NETWORK_API_EXTENSIONS+=,timestamp_core - NETWORK_API_EXTENSIONS+=,vlan-transparent -fi diff --git a/devstack/settings.odl b/devstack/settings.odl deleted file mode 100644 index 05f737ca6..000000000 --- a/devstack/settings.odl +++ /dev/null @@ -1,134 +0,0 @@ -# Add here any global default values that apply for any ODL release -# ----------------------------------------------------------------- - -# What release to use. Choices are: -# https://wiki.opendaylight.org/view/Release_Plan -# -# latest-snapshot (master latest snapshot) -# nitrogen-snapshot-0.7 (master latest nitrogen snapshot) -# nitrogen-snapshot-0.7.0 (master) -# carbon-snapshot-0.6 (stable/carbon latest carbon snapshot) -# carbon-snapshot-0.6.2 (stable/carbon) -# carbon-0.6.1-SR1 -# carbon-0.6.0 -# boron-snapshot-0.5 (stable/boron latest boron snapshot) -# boron-snapshot-0.5.5 (stable/boron) -# boron-0.5.2-SR4 -# boron-0.5.2-SR3 -# boron-0.5.2-SR2 -# boron-0.5.1-SR1 -# boron-0.5.0 - -ODL_RELEASE=${ODL_RELEASE:-latest-snapshot} - -# The IP address of ODL. Set this in local.conf. 
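# A minimal illustration (the address is a placeholder, not a default shipped
# here): a deployment whose OpenDaylight controller runs on a separate host
# would typically put something like
#     ODL_MGR_IP=192.168.56.10
# in local.conf; the fallbacks below then derive ODL_MGR_HOST from it.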
- -#Set ODL_MGR_HOST to ODL_MGR_IP if ODL_MGR_HOST is not set -ODL_MGR_HOST=${ODL_MGR_HOST:-$ODL_MGR_IP} - -# Set ODL_MGR_HOST to SERVICE_HOST if neither ODL_MGR_HOST nor ODL_MGR_IP is set -ODL_MGR_HOST=${ODL_MGR_HOST:-$SERVICE_HOST} - -# The list of IP addresses used as OVS manager, separated by a comma. -# In non-clustering cases, this is normally the same as ODL_MGR_HOST. However, -# for HA deployments the southbound portion to ODL is expected to -# use the ip addresses of the ODL instances instead of a single vip. That -# enables OVS to simultaneously connect to more than one ODL instance. -# Example of expected format: ODL_OVS_MANAGERS=1.1.1.1,2.2.2.2,3.3.3.3 -ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS:-$ODL_MGR_HOST} - -# The default ODL port for Jetty to use -# NOTE: We make this configurable because by default, ODL uses port 8080 for -# Jetty, and this conflicts with swift which also uses port 8080. -ODL_PORT=${ODL_PORT:-8087} - -# The ODL endpoint URL -ODL_ENDPOINT=${ODL_ENDPOINT:-http://${ODL_MGR_HOST}:${ODL_PORT}/controller/nb/v2/neutron} - -# The ODL username -ODL_USERNAME=${ODL_USERNAME:-admin} - -# The ODL password -ODL_PASSWORD=${ODL_PASSWORD:-admin} - -# The http timeout in seconds for http client to ODL neutron northbound. -# unset or empty string means default. -ODL_TIMEOUT=${ODL_TIMEOUT:-""} - -# use v2 type driver -# this requires post mitaka -ODL_V2DRIVER=${ODL_V2DRIVER:-True} - -# The OpenDaylight URL PREFIX -ODL_URL_PREFIX=${ODL_URL_PREFIX:-https://nexus.opendaylight.org} - -# OpenDaylight snapshot & release repositories paths -# Can be overridden in case you host proxy repositories which have a different directory structure than OpenDaylight's -ODL_URL_SNAPSHOT_REPOSITORY_PATH=${ODL_URL_SNAPSHOT_REPOSITORY_PATH:-content/repositories/opendaylight.snapshot} -ODL_URL_RELEASE_REPOSITORY_PATH=${ODL_URL_RELEASE_REPOSITORY_PATH:-content/repositories/opendaylight.release} - -# How long (in seconds) to pause after ODL starts to let it complete booting -ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-600} - -# Enable conntrack support for legacy netvirt -ODL_LEGACY_NETVIRT_CONNTRACK=${ODL_LEGACY_NETVIRT_CONNTRACK:-False} - -# Enable OpenDaylight l3 forwarding -ODL_L3=${ODL_L3:-False} - -# If you need to route the traffic out of the box, set -# ODL_PROVIDER_MAPPINGS to map br-ex as shown below. Note -# This used to be accomplished via PUBLIC_BRIDGE, but that -# is no longer necessary. -# -# The physical provider network to device mapping. 
Use this -# to instruct ODL to map ports into specific bridges -# Examples: -# ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-br-ex:eth2} -# ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-physnet1:eth1,br-ex:eth2} - -# MAC address for next hop gateway at external network -ODL_L3GW_MAC=${ODL_L3GW_MAC:-''} - -# Enable debug logs for odl ovsdb -ODL_NETVIRT_DEBUG_LOGS=${ODL_NETVIRT_DEBUG_LOGS:-False} - -# Karaf logfile information -ODL_KARAF_LOG_DATE=$(date +%Y-%m-%d-%H%M%S) -ODL_KARAF_LOG_BASE=${ODL_KARAF_LOG_BASE:-screen-karaf.log} -ODL_KARAF_LOG_NAME=$ODL_KARAF_LOG_BASE.$ODL_KARAF_LOG_DATE - -# The bridge to configure -OVS_BR=${OVS_BR:-br-int} - -# Use the existing ready java env -ODL_USING_EXISTING_JAVA=${ODL_USING_EXISTING_JAVA:-False} - -# Allow the min/max/perm Java memory to be configurable -ODL_JAVA_MIN_MEM=${ODL_JAVA_MIN_MEM:-256m} -ODL_JAVA_MAX_MEM=${ODL_JAVA_MAX_MEM:-512m} -ODL_JAVA_MAX_PERM_MEM=${ODL_JAVA_MAX_PERM_MEM:-512m} - -# Interval in test_with_retry calls -ODL_RETRY_SLEEP_INTERVAL=${ODL_RETRY_SLEEP_INTERVAL:-5} - -# Skip installation of distribution provided Open vSwitch -SKIP_OVS_INSTALL=$(trueorfalse False SKIP_OVS_INSTALL) - -# The ODL Restconf URL -# URI to hostconfigs: empty for default value -ODL_HOSTCONF_URI=${ODL_HOSTCONF_URI:-} - -# Port binding controller -# pseudo-agentdb-binding, legacy-port-binding -# pseudo-agentdb-binding is supported by Boron or later -ODL_PORT_BINDING_CONTROLLER=${ODL_PORT_BINDING_CONTROLLER:-pseudo-agentdb-binding} - -# Snapshot version - allows using a specific version e.g. 0.5.0-20160719.101233-3643 -# latest: check the latest snapshot -# specific version: the specific version of the snapshot -# "": odl release -ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-} - -# Set to True to keep odl running after unstack -UNSTACK_KEEP_ODL=${UNSTACK_KEEP_ODL:-False} diff --git a/devstack/setup_java.sh b/devstack/setup_java.sh deleted file mode 100644 index 9c5d28231..000000000 --- a/devstack/setup_java.sh +++ /dev/null @@ -1,207 +0,0 @@ -#!/bin/bash - -ORACLE_JAVA_URL="http://download.oracle.com/otn-pub/java/jdk" -ORACLE_JAVA7_URL="${ORACLE_JAVA7_URL:-$ORACLE_JAVA_URL/7u80-b15/jdk-7u80}" -ORACLE_JAVA7_NAME="jdk1.7.0_80" -ORACLE_JAVA8_URL="${ORACLE_JAVA8_URL:-$ORACLE_JAVA_URL/8u112-b15/jdk-8u112}" -ORACLE_JAVA8_NAME="jdk1.8.0_112" - -function setup_java { - # Java version 8 is the last stable one - local VERSION="${1:-8}" - - echo "Setup Java version: $VERSION" - if test_java_version "$VERSION" && setup_java_env; then - echo "Current Java version is already $VERSION." - elif select_java "$VERSION"; then - echo "Java version $VERSION has been selected." - elif install_openjdk "$VERSION" && select_java "$VERSION"; then - echo "OpenJDK version $VERSION has been installed and selected." - elif install_other_java "$VERSION" && select_java "$VERSION"; then - echo "Some Java version $VERSION has been installed and selected." - else - echo "ERROR: Unable to setup Java version $VERSION." - return 1 - fi - - return 0 -} - -function setup_java_env { - local JAVA_COMMAND="${1:-${JAVA:-java}}" - - JAVA_LINK="$(which $JAVA_COMMAND)" - if [[ "$JAVA_LINK" == "" ]]; then - return 1 - fi - - export JAVA="$(readlink -f $JAVA_LINK)" - export JAVA_HOME=$(echo $JAVA | sed "s:/bin/java::" | sed "s:/jre::") - if [ "$JAVA" != "$(readlink -f $(which java))" ]; then - export PATH="$(dirname $JAVA):$PATH" - if [ "$JAVA" != "$(readlink -f $(which java))" ]; then - echo "Unable to set $JAVA as current." 
- return 1 - fi - fi - - echo "JAVA is: $JAVA" - echo "JAVA_HOME is: $JAVA_HOME" - echo "Java version is:" - $JAVA -version 2>&1 -} - -function select_java { - local VERSION="$1" - local COMMAND - - for COMMAND in $(list_java_commands); do - if test_java_version "$VERSION" "$COMMAND"; then - if setup_java_env "$COMMAND"; then - return 0 - fi - fi - done - - echo 'Required java version not found.' - return 1 -} - -function test_java_version { - local EXPECTED_VERSION="'"*' version "1.'$1'.'*'"'"'" - local COMMAND="${2:-${JAVA:-java}}" - local ACTUAL_VERSION="'"$($COMMAND -version 2>&1 | head -n 1)"'" - - if [[ $ACTUAL_VERSION == $EXPECTED_VERSION ]]; then - echo "Found matching java version: $ACTUAL_VERSION" - return 0 - else - return 1 - fi -} - -if is_ubuntu; then - # --- Ubuntu ------------------------------------------------------------- - - function list_java_commands { - update-alternatives --list java - } - - function install_openjdk { - local REQUIRED_VERSION="$1" - apt_get install "openjdk-$REQUIRED_VERSION-jre-headless" - } - - function install_other_java { - local VERSION="$1" - local PPA_REPOSITORY="ppa:webupd8team/java" - local JAVA_INSTALLER="oracle-java${VERSION}-installer" - local JAVA_SET_DEFAULT="oracle-java${VERSION}-set-default" - - # Accept installer license - echo "$JAVA_INSTALLER" shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections - - # Remove all existing set-default versions - apt_get remove oracle-java*-set-default - if apt_get install $JAVA_INSTALLER ; then - if apt_get install $JAVA_SET_DEFAULT ; then - return 0 # Some PPA was already providing desired packages - fi - fi - - # Add PPA only when package is not available - if apt_get install software-properties-common; then - # I pipe this after echo to emulate an user key-press - if echo | sudo -E add-apt-repository "$PPA_REPOSITORY"; then - if apt_get update; then - if apt_get install $JAVA_INSTALLER ; then - if apt_get install $JAVA_SET_DEFAULT ; then - return 0 - fi - fi - fi - fi - fi - - # Something has gone wrong! - return 1 - } - -else - # --- Red Hat ------------------------------------------------------------- - - function list_java_commands { - alternatives --display java 2>&1 | grep -v '^[[:space:]]' | awk '/[[:space:]]- priority[[:space:]]/{print $1}' - } - - function install_openjdk { - local VERSION="$1" - yum_install java-1.$VERSION.*-openjdk-headless - } - - function install_other_java { - local VERSION="$1" - - if [[ "$(uname -m)" == "x86_64" ]]; then - local ARCH=linux-x64 - else - local ARCH=linux-i586 - fi - - if [[ "$VERSION" == "7" ]]; then - ORIGIN=$ORACLE_JAVA7_URL - TARGET=$ORACLE_JAVA7_NAME - elif [[ "$VERSION" == "8" ]]; then - ORIGIN=$ORACLE_JAVA8_URL - TARGET=$ORACLE_JAVA8_NAME - else - echo "Unsupported Java version: $VERSION." 
- return 1 - fi - - local NEW_JAVA="/usr/java/$TARGET/jre/bin/java" - if test_java_version "$VERSION" "$NEW_JAVA"; then - if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then - return 0 - fi - fi - - local EXT - local WGET_OPTIONS="-c --no-check-certificate --no-cookies" - local HEADER="Cookie: oraclelicense=accept-securebackup-cookie" - - for EXT in "rpm" "tar.gz"; do - local URL="$ORIGIN-$ARCH.$EXT" - local PACKAGE="/tmp/$(basename $URL)" - - if wget $WGET_OPTIONS --header "$HEADER" "$URL" -O "$PACKAGE"; then - case "$EXT" in - "rpm") - sudo rpm -i "$PACKAGE" - ;; - "tar.gz") - sudo mkdir -p /usr/java && sudo tar -C /usr/java -xzf "$PACKAGE" - ;; - *) - echo "Unsupported extension: $EXT" - ;; - esac - - if test_java_version "$VERSION" "$NEW_JAVA"; then - if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then - return 0 - fi - fi - - echo "Unable to register installed java." - - else - echo "Unable to download java archive: $URL" - fi - - done - - return 1 - } - -fi diff --git a/devstack/upgrade/resources.sh b/devstack/upgrade/resources.sh deleted file mode 100755 index e69de29bb..000000000 diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings deleted file mode 100644 index efb3bf07a..000000000 --- a/devstack/upgrade/settings +++ /dev/null @@ -1,34 +0,0 @@ -register_project_for_upgrade networking-odl - -# NOTE(manjeets) Workaround for bug 1648176 to upgrade -# networking-odl before neutron -UPGRADE_PROJECTS="networking-odl ${UPGRADE_PROJECTS/ networking-odl/}" - -# Add karaf features to be enabled for ODL -ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-mdsal-apidocs -ODL_NETVIRT_KARAF_FEATURE+=,odl-l2switch-switch,odl-netvirt-openstack - -# for base it should be enabling recent stable/release -devstack_localrc base enable_plugin networking-odl http://github.com/openstack/networking-odl.git stable/ocata - -devstack_localrc target enable_plugin networking-odl http://github.com/openstack/networking-odl.git - -for w in base target; do - devstack_localrc $w disable_service q-agt - devstack_localrc $w disable_service q-l3 - devstack_localrc $w enable_service q-dhcp - devstack_localrc $w enable_service q-meta - devstack_localrc $w enable_service placement-api - devstack_localrc $w enable_service placement-client - devstack_localrc $w Q_PLUGIN=ml2 - devstack_localrc $w ODL_CONFIG_BRIDGES=True - devstack_localrc $w ODL_L3=True - devstack_localrc $w ODL_V2DRIVER=True - devstack_localrc $w Q_ML2_PLUGIN_TYPE_DRIVERS=flat,vlan,gre,vxlan - devstack_localrc $w Q_USE_PUBLIC_VETH=True - devstack_localrc $w Q_PUBLIC_VETH_EX=veth-pub-ex - devstack_localrc $w Q_PUBLIC_VETH_INT=veth-pub-int - devstack_localrc $w ODL_RELEASE=carbon-snapshot-0.6 - devstack_localrc $w ODL_PROVIDER_MAPPINGS=public:${Q_PUBLIC_VETH_INT} - devstack_localrc $w ODL_NETVIRT_KARAF_FEATURE=${ODL_NETVIRT_KARAF_FEATURE} -done diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh deleted file mode 100755 index e3c7027b9..000000000 --- a/devstack/upgrade/upgrade.sh +++ /dev/null @@ -1,23 +0,0 @@ -echo "*********************************************************************" -echo "Begin $0" -echo "*********************************************************************" - -set -o xtrace - -# Set for DevStack compatibility - -source $GRENADE_DIR/grenaderc -source $TARGET_DEVSTACK_DIR/stackrc - -# Get functions from current DevStack -source $TARGET_DEVSTACK_DIR/inc/python - -NETWORKING_ODL_DIR="$TARGET_RELEASE_DIR/networking-odl" - 
-setup_develop $NETWORKING_ODL_DIR - -set +x -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End $0" -echo "*********************************************************************" diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst deleted file mode 100644 index 2f61fd28f..000000000 --- a/doc/source/admin/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -==================== -Administration Guide -==================== - -.. toctree:: - :maxdepth: 2 - :glob: - - * diff --git a/doc/source/admin/reference_architecture.rst b/doc/source/admin/reference_architecture.rst deleted file mode 100644 index 46473fc0b..000000000 --- a/doc/source/admin/reference_architecture.rst +++ /dev/null @@ -1,116 +0,0 @@ -Reference Architecture -====================== -This document lists the minimum reference architecture to get OpenStack -installed with OpenDayLight. Wherever possible, additional resources will be -stated. - -Cloud Composition ------------------ -The basic cloud will have 3 types of nodes: - -* Controller Node - Runs OpenStack services and the ODL controller. -* Network Node - Runs the DHCP agent, the metadata agent, and the L3 agent (for - SNAT). -* Compute Node - VMs live here. - -Usually each of the first 2 types of nodes will have a cluster of 3 nodes to -support HA. It's also possible to run the ODL controller on separate hardware -than the OpenStack services, but this isn't mandatory. - -The last type of nodes can have as many nodes as scale requirements dictate. - -Networking Requirements ------------------------ -There are several types of networks on the cloud, the most important for the -reference architecture are: - -* Management Network - This is the network used to communicate between the - different management components, i.e. Nova controller to Nova agent, Neutron - to ODL, ODL to OVS, etc. -* External Network - This network provides VMs with external connectivity (i.e. - internet) usually via virtual routers. -* Data Network - This is the network used to connect the VMs to each other and - to network resources such as virtual routers. - -The Control Nodes usually are only connected to the Management Network, unless -they have an externally reachable IP on the External Network. - -The other node types are connected to all the networks since ODL uses a -distributed routing model so that each Compute Node hosts a "virtual router" -responsible for connecting the VMs from that node to other networks (including -the External Network). - -This diagram illustrates how these nodes might be connected:: - - Controller Node - +-----------------+ - | | - +-----------+192.168.0.251 | - | | | - | +-----------------+ - | - | Compute Node +----------------+ - | +---------------+ | Legend | - | | | +----------------+ - +-----------+192.168.0.1 | | | - | | | | --- Management | - | +~~~~~~~~~+10.0.0.1 | | | - | | | | | ~~~ Data | - | | +=======+br-int | | | - | | | | | | === External | - | | | +---------------+ | | - | | | +----------------+ - | | | Network Node - | | | +-----------------+ - | | | | | - +-----------+192.168.0.100 | - | | | | - +~~~~~~~~~+10.0.0.100 | - | | | - |=======+br-int | - | | | - | +-----------------+ - +----+---+ - | | - | Router | - | | - +--------+ - - -Minimal Hardware Requirements ------------------------------ -The rule of thumb is the bigger the better, more RAM and more cores will -translate to a better environment. 
For a POC environment the following is -necessary: - -Management Node -~~~~~~~~~~~~~~~ -CPU: 2 cores - -Memory: 8 GB - -Storage: 100 GB - -Network: 1 * 1 Gbps NIC - -Network Node -~~~~~~~~~~~~ -CPU: 2 cores - -Memory: 2 GB - -Storage: 50 GB - -Network: 1 Gbps NIC (Management Network), 2 * 1+ Gbps NICs - - -Compute Node -~~~~~~~~~~~~ -CPU: 2+ cores - -Memory: 8+ GB - -Storage: 100 GB - -Network: 1 Gbps NIC (Management Network), 2 * 1+ Gbps NICs - diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 5cc53a3d8..000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinx.ext.autodoc', - #'sphinx.ext.intersphinx', - 'openstackdocstheme', - 'oslo_config.sphinxext', -] - -# openstackdocstheme options -repository_name = 'openstack/networking-odl' -bug_project = 'networking-odl' -bug_tag = 'doc' -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'networking-odl' -copyright = u'2013, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -html_theme = 'openstackdocs' -# html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. 
-#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index b5bb41d01..000000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,11 +0,0 @@ - -Configuration options -===================== - -Networking-odl uses the following configuration options -in the Neutron server configuration, which is typically -`/etc/neutron/neutron.conf`. - -.. show-options:: - - ml2_odl diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst deleted file mode 100644 index 2aa070771..000000000 --- a/doc/source/contributor/contributing.rst +++ /dev/null @@ -1,4 +0,0 @@ -============ -Contributing -============ -.. include:: ../../../CONTRIBUTING.rst diff --git a/doc/source/contributor/drivers_architecture.rst b/doc/source/contributor/drivers_architecture.rst deleted file mode 100644 index ce9384c57..000000000 --- a/doc/source/contributor/drivers_architecture.rst +++ /dev/null @@ -1,89 +0,0 @@ -ODL Drivers Architecture -======================== - -This document covers architectural concepts of the ODL drivers. Although -'driver' is an ML2 term, it's used widely in ODL to refer to any -implementation of APIs. Any mention of ML2 in this document is solely for -reference purposes. - -V1 Driver Overview ------------------- - -The first driver version was a naive implementation which synchronously -mirrored all calls to the ODL controller. For example, a create network request -would first get written to the DB by Neutron's ML2 plugin, and then the ODL -driver would send the request to POST the network to the ODL controller. - -Although this implementation is simple, it has a few problems: - -* ODL is not really synchronous, so if the REST call succeeds it doesn't mean - the action really happened on ODL. -* The "synchronous" call can be a bottleneck under load. -* Upon failure the V1 driver would try to "full sync" the entire Neutron DB - over on the next call, so the next call could take a very long time. -* It doesn't really handle race conditions: - - - For example, create subnet and then create port could be sent in parallel - by the driver in an HA Neutron environment, causing the port creation to - fail. - - Full-sync could possibly recreate deleted resources if the deletion happens - in parallel. - -.. _v2_design: - -V2 Driver Design ----------------- - -The V2 driver set upon to tackle problems encountered in the V1 driver while -maintaining feature parity. -The major design concept of the V2 driver is *journaling* - instead of passing -the calls directly to the ODL controller, they get registered -in the journal table which keeps a sort of queue of the various operations that -occurred on Neutron and should be mirrored to the controller. - -The journal is processed mainly by a journaling thread which runs periodically -and checks if the journal table has any entries in need of processing. -Additionally the thread is triggered in the postcommit hook of the operation -(where applicable). - -If we take the example of create network again, after it gets stored in the -Neutron DB by the ML2 plugin, the ODL driver stores a "journal entry" -representing that operation and triggers the journalling thread to take care of -the entry. - -The journal entry is recorded in the pre-commit phase (whenever applicable) so -that in case of a commit failure the journal entry gets aborted along with the -original operation, and there's nothing extra needed. 
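To make the flow above concrete, here is a small, self-contained sketch of the journaling pattern (an in-memory queue stands in for the journal table, and every name is illustrative rather than the actual networking-odl code)::

    import queue
    import threading
    import time

    journal = queue.Queue()          # stand-in for the journal DB table
    sync_event = threading.Event()   # post-commit "trigger" for the thread


    def record_journal_entry(object_type, object_uuid, operation, data):
        # In the real driver this happens in pre-commit, so the entry is
        # rolled back together with the Neutron object if the commit fails.
        journal.put({'object_type': object_type, 'object_uuid': object_uuid,
                     'operation': operation, 'data': data, 'state': 'pending'})


    def journal_worker():
        # Runs periodically and whenever the post-commit hook sets the event.
        while True:
            sync_event.wait(timeout=10)
            sync_event.clear()
            while not journal.empty():
                entry = journal.get()
                # The real driver issues a POST/PUT/DELETE to the ODL REST
                # API at this point.
                print('sending %(operation)s of %(object_type)s '
                      '%(object_uuid)s to ODL' % entry)


    threading.Thread(target=journal_worker, daemon=True).start()

    # "pre-commit": record the operation; "post-commit": wake the thread.
    record_journal_entry('network', 'net-id-1', 'create', {'name': 'demo'})
    sync_event.set()
    time.sleep(1)   # give the worker a moment to process before exiting

The property this sketch mirrors is that recording the operation and sending it to ODL are decoupled: the REST call can be retried later without re-running the original Neutron operation.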
- -Journal Entry Lifecycle ------------------------ - -The first state in which a journal entry is created is the 'pending' state. In -this state, the entry is awaiting a thread to pick it up and process it. -Multiple threads can try to grab the same journal entry, but only one will -succeed since the "selection" is done inside a 'select for update' clause. -Special care is taken for GaleraDB since it reports a deadlock if more than -one thread selects the same row simultaneously. - -Once an entry has been selected it will be put into the 'processing' state -which acts as a lock. This is done in the same transaction so that in case -multiple threads try to "lock" the same entry only one of them will succeed. -When the winning thread succeeds it will continue with processing the entry. - -The first thing the thread does is check for dependencies - if the entry -depends on another one to complete. If a dependency is found, the entry is put -back into the queue and the thread moves on to the next entry. - -When there are no dependencies for the entry, the thread analyzes the operation -that occurred and performs the appropriate call to the ODL controller. The call -is made to the correct resource or collection and the type of call (PUT, POST, -DELETE) is determined by the operation type. At this point if the call was -successful (i.e. got a 200 class HTTP code) the entry is marked 'completed'. - -In case of a failure the thread determines if this is an expected failure (e.g. -network connectivity issue) or an unexpected failure. For unexpected failures -a counter is raised, so that a given entry won't be retried more than a given -amount of times. Expected failures don't change the counter. If the counter -exceeds the configured amount of retries, the entry is marked as 'failed'. -Otherwise, the entry is marked back as 'pending' so that it can later be -retried. diff --git a/doc/source/contributor/hostconfig.rst b/doc/source/contributor/hostconfig.rst deleted file mode 100644 index b344a09e7..000000000 --- a/doc/source/contributor/hostconfig.rst +++ /dev/null @@ -1,148 +0,0 @@ -Host Configuration -================== - -Overview --------- - -ODL is agentless configuration. In this scenario Host Configuration is used -to specify the physical host type and other configurations for the host -system. This information is populated by the Cloud Operator is in OVSDB in -Open_vSwitch configuration data in the external_ids field as a key value pair. -This information is then read by ODL and made available to networking-odl -through REST API. Networking-odl populates this information in agent_db in -Neutron and is then used by Neutron scheduler. This information is required -for features like Port binding and Router scheduling. - -Refer to this link for detailed design for this feature. - -https://docs.google.com/presentation/d/1kq0elysCDEmIWs3omTi5RoXTSBbrewn11Je2d26cI4M/edit?pref=2&pli=1#slide=id.g108988d1e3_0_6 - -Related ODL changes: - -https://git.opendaylight.org/gerrit/#/c/36767/ - -https://git.opendaylight.org/gerrit/#/c/40143/ - -Host Configuration fields -------------------------- - -- **host-id** - - This represents host identification string. This string will be stored in - external_ids field with the key as odl_os_hostconfig_hostid. - Refer to Neutron config definition for host field for details on this field. - - http://docs.openstack.org/kilo/config-reference/content/section_neutron.conf.html - -- **host-type** - - The field is for type of the node. 
This value corresponds to agent_type in - agent_db. Example value are “ODL L2” and “ODL L3” for Compute and Network - node respectively. Same host can be configured to have multiple - configurations and can therefore can have both L2, L3 and other - configurations at the same time. This string will be populated by ODL based - on the configurations available on the host. See example in section below. - -- **config** - - This is the configuration data for the host type. Since same node can be - configured to store multiple configurations different external_ids key value - pair are used to store these configuration. The external_ids with keys as - odl_os_hostconfig_config_odl_XXXXXXXX store different configurations. - 8 characters after the suffix odl_os_hostconfig_config_odl are host type. - ODL extracts these characters and store that as the host-type fields. For - example odl_os_hostconfig_config_odl_l2, odl_os_hostconfig_config_odl_l3 keys - are used to provide L2 and L3 configurations respectively. ODL will extract - "ODL L2" and "ODL L3" as host-type field from these keys and populate - host-type field. - -Config is a Json string. Some examples of config: - -OVS configuration example:: - - {“supported_vnic_types”: [{ - “vnic_type”: “normal”, - “vif_type”: “ovs”, - “vif_details”: “{}” - }] - “allowed_network_types”: ["local", "gre", "vlan", "vxlan"]”, - “bridge_mappings”: {“physnet1":"br-ex”} - }" - -OVS_DPDK configuration example:: - - {“supported_vnic_types”: [{ - “vnic_type”: “normal”, - “vif_type”: “vhostuser”, - “vif_details”: { - "uuid": "TEST_UUID", - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": "vhu_", - # Assumption: /var/run mounted as tmpfs - "vhostuser_socket_dir": "/var/run/openvswitch", - "vhostuser_ovs_plug": True, - "vhostuser_mode": "client", - "vhostuser_socket": "/var/run/openvswitch/vhu_$PORT_ID"} - }] - “allowed_network_types”: ["local", "gre", "vlan", "vxlan"]”, - “bridge_mappings”: {“physnet1":"br-ex”} - }" - -VPP configuration example:: - - { {"supported_vnic_types": [ - {"vnic_type": "normal", - "vif_type": “vhostuser”, - "vif_details": { - "uuid": "TEST_UUID", - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": "socket_", - "vhostuser_socket_dir": "/tmp", - "vhostuser_ovs_plug": True, - "vhostuser_mode": "server", - "vhostuser_socket": "/tmp/socket_$PORT_ID" - }}], - "allowed_network_types": ["local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}}} - -**Host Config URL** - -Url : http://ip:odlport/restconf/operational/neutron:neutron/hostconfigs/ - -**Commands to setup host config in OVSDB** -:: - - export OVSUUID=$(ovs-vsctl get Open_vSwitch . 
_uuid) - ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_hostid=test_host - ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_config_odl_l2 = - "{“supported_vnic_types”: [{“vnic_type”: “normal”, “vif_type”: “ovs”, "vif_details": {} }], “allowed_network_types”: [“local”], “bridge_mappings”: {“physnet1":"br-ex”}}" - -Example for host configuration -------------------------------- - -:: - - { - "hostconfigs": { - "hostconfig": [ - { - "host-id": "test_host1", - "host-type": "ODL L2", - "config": - "{“supported_vnic_types”: [{ - “vnic_type”: “normal”, - “vif_type”: “ovs”, - “vif_details”: {} - }] - “allowed_network_types”: ["local", "gre", "vlan", "vxlan"], - “bridge_mappings”: {“physnet1":"br-ex”}}" - }, - { - "host-id": "test_host2", - "host-type": "ODL L3", - "config": {} - }] - } - } diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index 66d02704a..000000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,39 +0,0 @@ -Contributor Guide -================= - -In the Developer/Contributor Guide, you will find information on -networking-odl's lower level design and implementation details. -We will cover only essential details related to just networking-odl -and we won't repeat neutron devref here, for details in neutron, -neutron's devref can be checked: -https://docs.openstack.org/neutron/latest/contributor/index.html - -For details regarding OpenStack Neutron's Api: -https://developer.openstack.org/api-ref/networking/ - -Contributor's Reference ------------------------ -.. toctree:: - :maxdepth: 2 - - testing - drivers_architecture - maintenance - usage - contributing - specs/index - -Tutorial --------- -.. toctree:: - :maxdepth: 2 - - quickstart.rst - - -Networking OpenDayLight Internals ---------------------------------- -.. toctree:: - :maxdepth: 2 - - hostconfig diff --git a/doc/source/contributor/maintenance.rst b/doc/source/contributor/maintenance.rst deleted file mode 100644 index ae394798a..000000000 --- a/doc/source/contributor/maintenance.rst +++ /dev/null @@ -1,44 +0,0 @@ -Journal Maintenance -=================== - -Overview --------- - -The V2 ODL driver is Journal based [#]_, which means that there's a journal of -entries detailing the various operations done on a Neutron resource. -The driver has a thread which is in charge of processing the journal of -operations which entails communicating the operation forward to the ODL -controller. - -The journal entries can wind up in several states due to various reasons: - -* PROCESSING - Stale lock left by a thread due to thread dying or other error -* COMPLETED - After the operation is processed successfully -* FAILED - If there was an unexpected error during the operation - -These journal entries need to be dealt with appropriately, hence a maintenance -thread was introduced that takes care of journal maintenance and other related -tasks. -This thread runs in a configurable interval and is HA safe using a shared state -kept in the DB. 
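A rough, self-contained sketch of this pattern is shown below (a local lock stands in for the shared DB state, and the internals are illustrative only, not the real implementation)::

    import threading
    import time


    class MaintenanceThread(object):
        """Illustrative sketch only, not the actual networking-odl class."""

        def __init__(self, interval=60):
            self.interval = interval
            self.operations = []
            # The real thread keeps shared state in the DB so that, with
            # several Neutron servers, only one runs maintenance per cycle.
            self._lock = threading.Lock()

        def register_operation(self, operation):
            self.operations.append(operation)

        def start(self):
            threading.Thread(target=self._run, daemon=True).start()

        def _run(self):
            while True:
                if self._lock.acquire(blocking=False):
                    try:
                        for operation in self.operations:
                            # The real code passes a DB session object here.
                            operation(session=None)
                    finally:
                        self._lock.release()
                time.sleep(self.interval)

    # Usage (illustrative): register operations, then start the thread.
    # maintenance = MaintenanceThread(interval=300)
    # maintenance.register_operation(cleanup_completed_rows)
    # maintenance.start()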
- -Currently the maintenance thread performs: - -* Stale lock release -* Completed entries clean up -* Failed entries are handled by the recovery mechanism -* Full sync detect when ODL is "tabula rasa" and syncs all the resources to it - -Creating New Maintenance Operations ------------------------------------ - -Creating a new maintenance operation is as simple as writing a function -that receives the database session object and registering it using a call to:: - - MaintenanceThread.register_operation - -The best place to do so would be at the _start_maintenance_thread method of -the V2 OpenDaylightMechanismDriver class. - -.. [#] See :ref:`v2_design` for details. - diff --git a/doc/source/contributor/quickstart.rst b/doc/source/contributor/quickstart.rst deleted file mode 100644 index 10536c7ac..000000000 --- a/doc/source/contributor/quickstart.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. _quickstart: - -===================== -Developer Quick-Start -===================== - -This is a quick walkthrough to get you started developing code for -networking-odl. This assumes you are already familiar with submitting code -reviews to an OpenStack project. - -.. see also:: - - http://docs.openstack.org/infra/manual/developers.html - -Setup Dev Environment -===================== - -Install OS-specific prerequisites:: - - # Ubuntu/Debian 14.04: - sudo apt-get update - sudo apt-get install -y python-dev libssl-dev libxml2-dev curl \ - libmysqlclient-dev libxslt1-dev libpq-dev git \ - libffi-dev gettext build-essential - - # CentOS/RHEL 7.2: - sudo yum install -y python-devel openssl-devel mysql-devel curl \ - libxml2-devel libxslt-devel postgresql-devel git \ - libffi-devel gettext gcc - - # openSUSE/SLE 12: - sudo zypper --non-interactive install git libffi-devel curl \ - libmysqlclient-devel libopenssl-devel libxml2-devel \ - libxslt-devel postgresql-devel python-devel \ - gettext-runtime - -Install pip:: - - curl -s https://bootstrap.pypa.io/get-pip.py | sudo python - -Install common prerequisites:: - - sudo pip install virtualenv flake8 tox testrepository git-review - -You may need to explicitly upgrade virtualenv if you've installed the one -from your OS distribution and it is too old (tox will complain). You can -upgrade it individually, if you need to:: - - sudo pip install -U virtualenv - -Networking-odl source code should be pulled directly from git:: - - # from your home or source directory - cd ~ - git clone https://git.openstack.org/openstack/networking-odl - cd networking-odl - - -For installation of networking-odl refer to :doc:`/install/index`. -For testing refer to :doc:`Testing ` guide. - -Verifying Successful Installation -================================== - -There are some checks you can run quickly to verify that networking-odl -has been installed sucessfully. - -#. 
Neutron agents must be in runing state, if you are using pseudo-agent - for port binding then output of **openstack network agent list** should - be something like:: - - ubuntu@ubuntu-14:~/devstack$ openstack network agent list - +----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+ - | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | - +----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+ - | 00628905-6550-43a5-9cda- | ODL L2 | ubuntu-14 | None | True | UP | neutron-odlagent- | - | 175a309ea538 | | | | | | portbinding | - | 37491134-df2a- | DHCP agent | ubuntu-14 | nova | True | UP | neutron-dhcp-agent | - | 45ab-8373-e186154aebee | | | | | | | - | 8e0e5614-4d68-4a42-aacb- | Metadata agent | ubuntu-14 | None | True | UP | neutron-metadata-agent | - | d0a10df470fb | | | | | | | - +----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+ - - Your output of this command may vary depending on the your environment, - for example hostname etc. - -#. You can check that opendaylight is running by executing following - command:: - - ubuntu@ubuntu-14:~/devstack$ ps -eaf | grep opendaylight - - - -Launching Instance and floating IP -================================== - -#. Gather paramters required for launching instance. We need flavor Id, - image Id and network id, following comand can be used for launching an - instance:: - - openstack server create --flavor --image \ - --nic net-id= --security-group \ - \ - - - For details on creating instances refer to [#third]_ and - [#fourth]_. - -#. Attaching floating IPs to created server can be done by following command:: - - openstack server add floating ip - - For details on attaching floating IPs refer to [#fifth]_. - - -Useful Commands -================ - -#. For verifying status try following command:: - - ubuntu@ubuntu-14:/distribution-karaf-0.6.0-SNAPSHOT/bin$ ./karaf status - - You should receive following output:: - - Running ... - -#. You can login using available client:: - - ubuntu@ubuntu-14:/distribution-karaf-0.6.0-SNAPSHOT/bin$ ./client - - You will receive output in following format:: - - Logging in as karaf - 3877 [sshd-SshClient[6dbb137d]-nio2-thread-3] WARN org.apache.sshd.client.keyverifier.AcceptAllServerKeyVerifier - Server at [/0.0.0.0:8101, RSA, 56:41:48:1c:38:3b:73:a8:a5:96:8e:69:a5:4c:93:e0] presented unverified {} key: {} - ________ ________ .__ .__ .__ __ - \_____ \ ______ ____ ____ \______ \ _____ ___.__.| | |__| ____ | |___/ |_ - / | \\____ \_/ __ \ / \ | | \\__ \< | || | | |/ ___\| | \ __\ - / | \ |_> > ___/| | \| ` \/ __ \\___ || |_| / /_/ > Y \ | - \_______ / __/ \___ >___| /_______ (____ / ____||____/__\___ /|___| /__| - \/|__| \/ \/ \/ \/\/ /_____/ \/ - - Hit '' for a list of available commands - and '[cmd] --help' for help on a specific command. - Hit '' or type 'system:shutdown' or 'logout' to shutdown OpenDaylight. - - Now you can run commands as per your for example:: - - opendaylight-user@root>subnet-show - No SubnetOpData configured. 
- Following subnetId is present in both subnetMap and subnetOpDataEntry - - - - Following subnetId is present in subnetMap but not in subnetOpDataEntry - - Uuid [_value=2131f292-732d-4ba4-b74e-d70c07eceeb4] - - Uuid [_value=7a03e5d8-3adb-4b19-b1ec-a26691a08f26] - - Uuid [_value=7cd269ea-e06a-4aa3-bc11-697d71be4cbd] - - Uuid [_value=6da591bc-6bba-4c8a-a12b-671265898c4f] - - - Usage 1: To display subnetMaps for a given subnetId subnet-show --subnetmap [] - - Usage 2: To display subnetOpDataEntry for a given subnetId subnet-show --subnetopdata [] - - To get help on some command:: - - opendaylight-user@root>help feature - COMMANDS - info Shows information about selected feature. - install Installs a feature with the specified name and version. - list Lists all existing features available from the defined repositories. - repo-add Add a features repository. - repo-list Displays a list of all defined repositories. - repo-refresh Refresh a features repository. - repo-remove Removes the specified repository features service. - uninstall Uninstalls a feature with the specified name and version. - version-list Lists all versions of a feature available from the currently available repositories. - - There are other helpfull commands, for example, log:tail, log:set, shutdown - to get tail of logs, set log levels and shutdown. - - For checking neutron bundle is installed:: - - opendaylight-user@root>feature:list -i | grep neutron - odl-neutron-service | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: API - odl-neutron-northbound-api | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Northbound - odl-neutron-spi | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: API - odl-neutron-transcriber | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Implementation - odl-neutron-logger | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Logger - - For checking netvirt bundle is installed:: - - opendaylight-user@root>feature:list -i | grep netvirt - odl-netvirt-api | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: api - odl-netvirt-impl | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: impl - odl-netvirt-openstack | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: OpenStack - - -#. For exploration of API's following links can be used:: - - API explorer: - http://localhost:8080/apidoc/explorer - - Karaf: - http://localhost:8181/apidoc/explorer/index.html - - Detailed information can be found [#sixth]_. - -.. rubric:: References - -.. [#third] https://docs.openstack.org/mitaka/install-guide-rdo/launch-instance-selfservice.html -.. [#fourth] https://docs.openstack.org/draft/install-guide-rdo/launch-instance.html -.. [#fifth] https://docs.openstack.org/user-guide/cli-manage-ip-addresses.html -.. [#sixth] https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Restconf_API_Explorer diff --git a/doc/source/contributor/specs/index.rst b/doc/source/contributor/specs/index.rst deleted file mode 100644 index 0f5dee1d0..000000000 --- a/doc/source/contributor/specs/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. networking-odl specs documentation index - -============== -Specifications -============== - -Pike specs -========== - -.. toctree:: - :glob: - :maxdepth: 1 - - pike/* - -Ocata specs -=========== - -.. toctree:: - :glob: - :maxdepth: 1 - - ocata/* - -Newton specs -============ - -.. 
toctree:: - :glob: - :maxdepth: 1 - - newton/* - diff --git a/doc/source/contributor/specs/newton/qos-driver.rst b/doc/source/contributor/specs/newton/qos-driver.rst deleted file mode 100644 index c95d5c37e..000000000 --- a/doc/source/contributor/specs/newton/qos-driver.rst +++ /dev/null @@ -1,104 +0,0 @@ -========================================== -Quality of Service Driver for OpenDaylight -========================================== - -This spec describes the plan to implement quality of service driver for -OpenDaylight Controller. - -Problem Statement -================= -OpenStack networking project (neutron [1]) have a extension plugin implemented -and which expose api for quality of service that can be also be implemented by -any backend networking service provider to support QoS. These APIs provide a -way to integrate OpenStack Neutron QoS with any of the backend QoS providers. -OpenDaylight will provide backend for existing functionalities in neutron-QoS. -A notification driver is needed for integration of existing api in Openstack -neutron for QoS with OpenDaylight backend. - -Proposed Change -=============== -This change will introduce a new notification driver in networking-odl that -will take CRUD requests data for QoS policies from OpenStack neutron and notify -the OpenDaylight controller about the respective operation. - -Detailed Design -=============== -To enable the formal end to end integration between OpenStack QoS and -OpenDaylight requires an networking-odl QoS notification driver. QoS driver -will act as a shim layer between OpenStack and OpenDaylight that will carry -out following task: - -#. After getting QoS policy request data from neutron, It will log a operation - request in opendaylightjournal table. - -#. The operation will be picked from opendaylightjournal table and a rest call - for notifying OpenDaylight server will be prepared and sent. - -#. This request will processed by neutron northbound in OpenDaylight. -The OpenDaylight neutron northbound project. These models will be based -on the existing neutron qos plugin APIs. - -QoS providers in OpenDaylight can listen to these OpenDaylight Neutron -Northbound QoS models and translate it to their specific yang models for QoS. -The following diagram shows the high level integration between OpenStack and -the OpenDaylight QoS provider:: - - +---------------------------------------------+ - | OpenStack Network Server (neutron qos) | - | | - | +---------------------+ | - | | networking-odl | | - | | | | - | | +---------------| | - | | | Notification | | - | | | driver QoS | | - +----------------------|----------------------+ - | - | Rest Communication - | - OpenDaylight Controller | - +-----------------------|------------+ - | +----------V----+ | - | ODL | QoS Yang Model| | - | Northbound | | | - | (neutron) +---------------+ | - | | | - | | | - | ODL +----V----+ | - | Southbound | QoS | | - | (neutron) +---------+ | - +-----------------|------------------+ - | - | - +------------------------------------+ - | Network/OVS | - | | - +------------------------------------+ - -In the above diagram, the OpenDaylight components are shown just to understand -the overall architecture, but it's out of scope of this spec's work items. -This spec will only track progress related to networking-odl notification QoS -driver work. - -Dependencies -============ -It has a dependency on OpenDaylight Neutron Northbound QoS yang models, but -that is out of scope of this spec. 
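To illustrate the notification step of the design above, a simplified version of the REST call that would eventually be issued for a QoS policy create could look as follows. The ``qos/policies`` resource path, the payload shape and the credentials are assumptions for this sketch only; the real driver builds the request from its journal entry and configuration, against the neutron northbound base URL (``http://<odl-ip>:<port>/controller/nb/v2/neutron``)::

    import json

    import requests

    # Base URL of the ODL neutron northbound API (illustrative values).
    ODL_ENDPOINT = 'http://192.168.0.10:8087/controller/nb/v2/neutron'


    def notify_qos_policy_create(policy):
        # 'qos/policies' and the payload shape are assumed for illustration.
        url = '%s/qos/policies' % ODL_ENDPOINT
        body = json.dumps({'policy': {'id': policy['id'],
                                      'tenant_id': policy['tenant_id'],
                                      'name': policy['name']}})
        resp = requests.post(url, data=body,
                             headers={'Content-Type': 'application/json'},
                             auth=('admin', 'admin'), timeout=10)
        resp.raise_for_status()
        return resp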
- -Impact -====== -None - -Assignee(s) -=========== - -Following developers will be the initial contributor to the driver, but we -will be happy to have more contributor on board. - -* Manjeet Singh Bhatia (manjeet.s.bhatia@intel.com, irc: manjeets) - -References -========== - -* [1] https://docs.openstack.org/neutron/latest/contributor/internals/quality_of_service.html -* [2] https://wiki.opendaylight.org/view/NeutronNorthbound:Main diff --git a/doc/source/contributor/specs/newton/sfc-driver.rst b/doc/source/contributor/specs/newton/sfc-driver.rst deleted file mode 100644 index ded1e82d3..000000000 --- a/doc/source/contributor/specs/newton/sfc-driver.rst +++ /dev/null @@ -1,139 +0,0 @@ -================================================= -Service Function Chaining Driver for OpenDaylight -================================================= - -This spec describes the plan to implement OpenStack networking-sfc[1] driver -for OpenDaylight Controller. - -Problem Statement -=================== -OpenStack SFC project (networking-sfc [1]) exposes generic APIs[2] for Service -Function Chaining (SFC) that can be implemented by any backend networking -service provider to support SFC. These APIs provide a way to integrate -OpenStack SFC with any of the backend SFC providers. OpenDaylight SFC project -provides a very mature implementation of SFC [3], but currently there is no -formal integration mechanism present to consume OpenDaylight as an SFC provider -for networking-sfc. - -Recently Tacker project [4] has been approved as an official project in -OpenStack, that opens many possibilities to realize the NFV use cases (e.g SFC) -using OpenStack as a platform. Providing a formal end to end integration -between OpenStack and OpenDaylight for SFC use case will help NFV users -leverage OpenStack, Tacker and OpenDaylight as a solution. A POC for this -integration work has already been implemented [5][6] by Tim Rozet, but in -this POC work, Tacker directly communicates to OpenDaylight SFC & classifier -providers and not through OpenStack SFC APIs (networking-sfc). - -Proposed Change -=============== -Implementation of this spec will introduce a networking-sfc[1] driver for -OpenDaylight Controller in networking-odl project that will pass through -the networking-sfc API's call to the OpenDaylight Controller. - -Detailed Design -=============== -To enable the formal end to end integration between OpenStack SFC and -OpenDaylight requires an SFC driver for OpenDaylight. ODL SFC driver will -act as a shim layer between OpenStack and OpenDaylight that will carry out -following two main tasks: - -* Translation of OpenStack SFC Classifier API to ODL SFC classifier yang - models**. - -* Translation of OpenStack SFC API's to OpenDaylight Neutron Northbound - SFC models** [8]. - -** This work is not yet done, but the OpenDaylight neutron northbound project -needs to come up with yang models for SFC classification/chain. These models -will be based on the existing networking-sfc APIs. This work is out of scope -of networking-odl work and will be collaborated in the scope of OpenDaylight -Neutron Northbound project. - -SFC providers (E.g Net-Virt, GBP, SFC ) in OpenDaylight can listen to these -OpenDaylight Neutron Northbound SFC models and translate it to their specific -yang models for classification/sfc. 
The following diagram shows the high level -integration between OpenStack and the OpenDaylight SFC provider:: - - +---------------------------------------------+ - | OpenStack Network Server (networking-sfc) | - | +-------------------+ | - | | networking-odl | | - | | SFC Driver | | - | +-------------------+ | - +----------------------|----------------------+ - | REST Communication - | - ----------------------- - OpenDaylight Controller | | - +-----------------------|-----------------------|---------------+ - | +----------v----+ +---v---+ | - | Neutron | SFC Classifier| |SFC | Neutron | - | Northbound | Models | |Models | Northbound| - | Project +---------------+ +-------+ Project | - | / \ | | - | / \ | | - | / \ | | - | +-----V--+ +---V----+ +---V---+ | - | |Net-Virt| ... | GBP | | SFC | ... | - | +---------+ +--------+ +-------+ | - +-----------|----------------|------------------|---------------+ - | | | - | | | - +-----------V----------------V------------------V---------------+ - | Network/OVS | - | | - +---------------------------------------------------------------+ - -In the above architecture, the opendaylight components are shown just to -understand the overall architecture, but it's out of scope of this spec's -work items. This spec will only track progress related to networking-odl -OpenStack sfc driver work. - -Given that OpenStack SFC APIs are port-pair based API's and OpenDaylight SFC -API's are based on IETF SFC yang models[8], there might be situations where -translation might requires API enhancement from OpenStack SFC. Networking SFC -team is open for these new enhancement requirements given that they are generic -enough to be leveraged by other backend SFC providers[9]. This work will be -leveraging the POC work done by Tim [10] to come up with the first version of -SFC driver. - -Dependencies -============ -It has a dependency on OpenDaylight Neutron Northbound SFC classifier and chain -yang models, but that is out of scope of this spec. - -Impact -====== -None - -Assignee(s) -=========== - -Following developers will be the initial contributor to the driver, but we will -be happy to have more contributor on board. - -* Anil Vishnoi (vishnoianil@gmail.com, irc: vishnoianil) -* Tim Rozet (trozet@redhat.com, irc: trozet) - -References -========== - -[1] https://docs.openstack.org/networking-sfc/latest/ - -[2] https://github.com/openstack/networking-sfc/blob/master/doc/source/api.rst - -[3] https://wiki.opendaylight.org/view/Service_Function_Chaining:Main - -[4] https://wiki.openstack.org/wiki/Tacker - -[5] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc - -[6] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc_classifier - -[7] https://tools.ietf.org/html/draft-ietf-netmod-acl-model-05 - -[8] https://wiki.opendaylight.org/view/NeutronNorthbound:Main - -[9] http://eavesdrop.openstack.org/meetings/service_chaining/2016/service_chaining.2016-03-31-17.00.log.html - -[10] https://github.com/trozet/tacker/blob/SFC_brahmaputra/tacker/sfc/drivers/opendaylight.py diff --git a/doc/source/contributor/specs/ocata/journal-recovery.rst b/doc/source/contributor/specs/ocata/journal-recovery.rst deleted file mode 100644 index a805d5e5e..000000000 --- a/doc/source/contributor/specs/ocata/journal-recovery.rst +++ /dev/null @@ -1,152 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -================ -Journal Recovery -================ - -https://blueprints.launchpad.net/networking-odl/+spec/journal-recovery - -Journal entries in the failed state need to be handled somehow. This spec will -try to address the issue and propose a solution. - -Problem Description -=================== - -Currently there is no handling for Journal entries that reach the failed state. -A journal entry can reach the failed state for several reasons, some of which -are: - -* Reached maximum failed attempts for retrying the operation. - -* Inconsistency between ODL and the Neutron DB. - - * For example: An update fails because the resource doesn't exist in ODL. - -* Bugs that can lead to failure to sync up. - -These entries will be left in the journal table forever which is a bit wasteful -since they take up some space on the DB storage and also affect the performance -of the journal table. -Albeit each entry has a negligble effect on it's own, the impact of a large -number of such entries can become quite significant. - -Proposed Change -=============== - -A "journal recovery" routine will run as part of the current journal -maintenance process. -This routine will scan the journal table for rows in the "failed" state and -will try to sync the resource for that entry. - -The procedure can be best described by the following flow chart: - -asciiflow:: - - +-----------------+ - | For each entry | - | in failed state | - +-------+---------+ - | - +-------v--------+ - | Query resource | - | on ODL (REST) | - +-----+-----+----+ - | | +-----------+ - Resource | | Determine | - exists +--Resource doesn't exist--> operation | - | | type | - +-----v-----+ +-----+-----+ - | Determine | | - | operation | | - | type | | - +-----+-----+ | - | +------------+ | - +--Create------> Mark entry <--Delete--+ - | | completed | | - | +----------^-+ Create/ - | | Update - | | | - | +------------+ | +-----v-----+ - +--Delete--> Mark entry | | | Determine | - | | pending | | | parent | - | +---------^--+ | | relation | - | | | +-----+-----+ - +-----v------+ | | | - | Compare to +--Different--+ | | - | resource | | | - | in DB +--Same------------+ | - +------------+ | - | - +-------------------+ | - | Create entry for <-----Has no parent------+ - | resource creation | | - +--------^----------+ Has a parent - | | - | +---------v-----+ - +------Parent exists------+ Query parent | - | on ODL (REST) | - +---------+-----+ - +------------------+ | - | Create entry for <---Parent doesn't exist--+ - | parent creation | - +------------------+ - -For every error during the process the entry will remain in failed state but -the error shouldn't stop processing of further entries. - - -The implementation could be done in two phases where the parent handling is -done in a second phase. -For the first phase if we detect an entry that is in failed for a create/update -operation and the resource doesn't exist on ODL we create a new "create -resource" journal entry for the resource. - -This proposal utilises the journal mechanism for it's operation while the only -part that deviates from the standard mode of operation is when it queries ODL -directly. This direct query has to be done to get ODL's representation of the -resource. - -Performance Impact ------------------- - -The maintenance thread will have another task to handle. This can lead to -longer processing time and even cause the thread to skip an iteration. 
-This is not an issue since the maintenance thread runs in parallel and doesn't -directly impact the responsiveness of the system. - -Since most operations here involve I/O then CPU probably won't be impacted. - -Network traffic would be impacted slightly since we will attempt to fetch the -resource each time from ODL and we might attempt to fetch it's parent. -This is however negligble as we do this only for failed entries, which are -expected to appear rarely. - - -Alternatives ------------- - -The partial sync process could make this process obsolete (along with full -sync), but it's a far more complicated and problematic process. -It's better to start with this process which is more lightweight and doable -and consider partial sync in the future. - - -Assignee(s) -=========== - -Primary assignee: - mkolesni - -Other contributors: - None - - -References -========== - -https://goo.gl/IOMpzJ - diff --git a/doc/source/contributor/specs/pike/dep-validations-on-create.rst b/doc/source/contributor/specs/pike/dep-validations-on-create.rst deleted file mode 100644 index 664ef6dcb..000000000 --- a/doc/source/contributor/specs/pike/dep-validations-on-create.rst +++ /dev/null @@ -1,129 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================ -Dependency Validations on Create -================================ - -https://blueprints.launchpad.net/networking-odl/+spec/dep-validations-on-create - -Right now V2 driver entry dependency validations happen when a journal entry is -picked for processing. This spec proposes that this be moved to entry creation -time, in order to have a clear understanding of the entry dependencies and -conserve journal resources. - - -Problem Description -=================== - -Dependency validations are necessary in the V2 driver because each operation -gets recorded in a journal entry and sent to ODL asynchronously. Thus, a -consecutive operation might be sent to ODL before the first one finishes, while -relying on the first operation. -For example, when a subnet gets created it references a network, but if the -network was created right before the subnet was then the subnet create -shouldn't be sent over until the network create was sent. - -Currently these checks are performed each time an entry is selected for -processing - if the entry passes the dependency checks then it gets processed -and if the dependency check fails (i.e. finds a previous unhandled entry that -needs to execute before this one) then the entry gets sent back to the queue. - -Generally this is not optimal for several reasons: - * No clear indication of relations between the entries. - - * The logic is hidden in the code and there's no good way to know why an - entry fails a dependency check. - * Difficult to debug in case of problems. - * Difficult to spot phenomenon such as a cyclic dependency. - - * Wasted CPU effort. - - * An entry can be checked multiple times for dependencies. - * Lots of redundant DB queries to determine dependencies each time. - - -Proposed Change -=============== - -The proposed solution is to move the dependency calculation to entry creation -time. - -When a journal entry is created the dependency management system will calculate -the dependencies on other entries (Similarly to how it does now) and if there -are journal entries the new entry should depend on, their IDs will be inserted -into a link table. 
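As a minimal SQLAlchemy sketch of such a link table (the authoritative column layout, foreign keys and cascade behaviour are spelled out below; the table and column names follow the spec text and the column type is assumed to mirror the journal's ``seqnum`` column)::

    import sqlalchemy as sa
    from sqlalchemy.ext import declarative

    BASE = declarative.declarative_base()


    class OpenDaylightJournalDependency(BASE):
        """Each row records that dependent_id must wait for parent_id."""

        __tablename__ = 'odl_journal_dependency'

        parent_id = sa.Column(
            sa.BigInteger,
            sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'),
            primary_key=True)
        dependent_id = sa.Column(
            sa.BigInteger,
            sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'),
            primary_key=True)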
- -Thus, when the journal looks for an entry to pick up it will only look for -entries that no other entry depends on by making sure there aren't any entries -in the dependency table. - -When a journal entry is done processing (either successfully or reaches failed -state), the dependency links will be removed from the dependency table so that -dependent rows can be processed. - -The proposed table:: - - +------------------------+ - | odl_journal_dependency | - +------------------------+ - | parent_id | - | dependent_id | - +------------------------+ - -The table columns will be foreign keys to the seqnum column in the journal -table. The constraints will be defined as "ON DELETE CASCADE" so that when a -journal entry is removed any possible rows will be removed as well. -The primary key will be made from both columns of the table as this is a link -table and not an actual entity. -If we face DB performance issues (highly unlikely, since this table should -normally have a very small amount of rows if any at all) then an index can be -constructed on the dependent_id column. - -The dependency management mechanism will locate parent entries for the given -entry and will populate the table so that the parent entry's seqnum will be -set as the parent_id, and the dependent entry id will be set as dependent_id. -When the journal picks up an entry for processing it will condition it on not -having any rows with the parent_id in the dependency table. This will ensure -that dependent rows get handled after the parent rows have finished processing. - - -Performance Considerations -========================== - -Generally the performance shouldn't be impacted as we're moving the part of -code that does dependency calculations from the entry selection time to entry -creation time. This will assure that dependency calculations happen only once -per journal entry. - -However, some simple benchmarks should be performed before & after the change: - * Average Tempest run time. - * Average CPU consumption on Tempest. - * Full sync run time (Start to finish of all entries). - -If performance suffers a severe degradation then we should consider -alternative solutions. - - -Questions -========= - -Q: Should entries in "failed" state block other entries? - -A: Currently "failed" rows are not considered as blocking for dependency - validations, but we might want to change this as it makes little sense to - process a dependent entry that failed processing. - -Q: How will this help debug-ability? - -A: It will be easy to query the table contents at any time to figure out which - entries depend on which other entries. - -Q: How will we be able to spot cyclic dependencies? - -A: Currently this isn't planned as part of the spec, but a DB query (or a - series of them) can help determine if this problem exists. - diff --git a/doc/source/contributor/specs/pike/neutron-port-dhcp.rst b/doc/source/contributor/specs/pike/neutron-port-dhcp.rst deleted file mode 100644 index 37676ec14..000000000 --- a/doc/source/contributor/specs/pike/neutron-port-dhcp.rst +++ /dev/null @@ -1,210 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
-
- http://creativecommons.org/licenses/by/3.0/legalcode
-
-======================================================================
-Neutron Port Allocation per Subnet for OpenDaylight DHCP Proxy Service
-======================================================================
-
-This spec describes the proposal to allocate a Neutron DHCP Port just for
-use by the OpenDaylight Controller on Subnets that are created or updated with
-enable-dhcp set to True.
-
-When the "controller-dhcp-enabled" configuration flag is set to true in the
-OpenDaylight controller, these Neutron DHCP Ports will be used by the
-OpenDaylight Controller to provide DHCP Service instead of using the
-subnet-gateway-ip as the DHCP Server IP as it stands today.
-
-The networking-odl driver is not aware of the above OpenDaylight controller
-parameter configuration. When the controller-dhcp-enabled configuration flag
-is set to false, the DHCP port will be created and destroyed without causing
-any harm to either the OpenDaylight controller or the networking-odl driver.
-
-Problem Statement
-=================
-
-The DHCP service within OpenDaylight currently assumes availability of the
-subnet gateway IP address. The subnet gateway IP is not a mandatory parameter
-for an OpenStack subnet, and so it might not be available from OpenStack
-orchestration. This leaves the DHCP service in OpenDaylight unable to serve
-DHCP offers to virtual endpoints requesting IP addresses, thereby resulting
-in service unavailability. Even if the subnet-gateway-ip is available in the
-subnet, it is not a good design in OpenDaylight to hijack that IP address and
-use it as the DHCP Server IP Address.
-
-Problem - 1: L2 Deployment with 3PP gateway
--------------------------------------------
-
-There can be a deployment scenario in which an L2 network is created with no
-distributed Router/VPN functionality. This deployment can have a separate
-gateway for the network, such as a 3PP LB VM, which acts as a TCP termination
-point, and this LB VM is configured with a default gateway IP. This means all
-inter-subnet traffic is terminated on this VM, which takes the responsibility
-of forwarding the traffic.
-
-But the current DHCP service in the OpenDaylight controller hijacks the
-gateway IP address for serving DHCP discover/request messages. If the LB is
-up, this can continue to work: DHCP broadcasts will get hijacked by
-OpenDaylight, and responses sent as PKT_OUTs with SIP = GW IP.
-
-However, if the LB is down and the VM ARPs for the same IP as part of a DHCP
-renew workflow, the ARP resolution can fail, so the renew request will not be
-generated. This can cause the DHCP lease to lapse.
-
-Problem - 2: Designated DHCP for SR-IOV VMs via HWVTEP
-------------------------------------------------------
-
-In this deployment scenario, an L2 network is created with no distributed
-Router/VPN functionality, and HWVTEP for SR-IOV VMs. DHCP flood requests from
-SR-IOV VMs (DHCP discover, request during bootup) are flooded by the HWVTEP
-on the L2 broadcast domain and punted to the controller by the designated
-vswitch. DHCP offers are sent as unicast responses from the Controller, which
-are forwarded by the HWVTEP to the VM. DHCP renews can be unicast requests,
-which the HWVTEP may forward to an external Gateway VM (3PP LB VM) as unicast
-packets. The designated vswitch will never receive these packets and thus
-cannot punt them to the controller, so renews will fail.
- -Proposed Change -=============== -In general as part of implementation of this spec, we are introducing a new -configuration parameter 'create_opendaylight_dhcp_port' whose truth value -determines whether the dhcp-proxy-service within the openstack-odl framework -need to be made functional. This service will be responsible for managing the -create/update/delete lifecycle for a new set of Neutron DHCP Ports which will -be provisioned specifically for use by the OpenDaylight Controller's existing -DHCP Service Module. - -Detailed Design -=============== -Introduce a driver config parameter(create_opendaylight_dhcp_port) to determine -if OpenDaylight based DHCP service is being used. Default setting for the -parameter is false. - -When 'create_opendaylight_dhcp_port' is set to True, it triggers the networking --odl ml2 driver to hook on to OpenStack subnet resource lifecycle and use that -to manage a special DHCP port per subnet for OpenDaylight Controller use. These -special DHCP ports will be shipped to OpenDaylight controller, so that DHCP -Service within the OpenDaylight controller can make use of these as DHCP -Server ports themselves. The port will be used to service DHCP requests for -virtual end points belonging to that subnet. - -These special DHCP Ports (one per subnet), will carry unique device-id and -device-owner values. - -* device-owner(network:dhcp) -* device-id(OpenDaylight-) - -OpenDaylight DHCP service will also introduce a new config parameter controller --dhcp-mode to indicate if the above DHCP port should be used for servicing DHCP -requests. When the parameter is set to use-odl-dhcp-neutron-port, it is -recommended to enable the create_opendaylight_dhcp_port flag for the networking --odl driver. - -Alternative 1 --------------- -The creation of Neutron OpenDaylight DHCP port will be invoked within the -OpenDaylight mechanism Driver subnet-postcommit execution. - -Any failures during the neutron dhcp port creation or allocation for the subnet -should trigger failure of the subnet create operation with an appropriate -failure message in logs. On success the subnet and port information will be -persisted to Journal DB and will subsequently synced with the OpenDaylight -controller. - -The plugin should initiate the removal of allocated dhcp neutron port at the -time of subnet delete. The port removal will be handled in a subnet-delete- -post-commit execution and any failure during this process should rollback the -subnet delete operation. The subnet delete operation will be allowed only when -all other VMs launched on this subnet are already removed as per existing -Neutron behavior. - -A subnet update operation configuring the DHCP state as enabled should allocate -such a port if not previously allocated for the subnet. Similarly a subnet -update operation configuring DHCP state to disabled should remove any -previously allocated OpenDaylight DHCP neutron ports. - -Since the invocation of create/delete port will be synchronous within subnet -post-commit, a failure to create/delete port will result in an exception being -thrown which makes the ML2 Plugin to fail the subnet operation and not alter -Openstack DB. - -Alternative 2 -------------- -The OpenDaylight Neutron DHCP Port creation/deletion is invoked asyncronously -driven by a journal entry callback for any Subnet resource state changes as -part of create/update/delete. A generic journal callback mechanism to be -implemented. 
Initial consumer of this callback would be the OpenDaylight -DHCP proxy service but this could be used by other services in future. - -The Neutron DHCP Port (for OpenDaylight use) creation is triggered when the -subnet journal-entry is moved from PENDING to PROCESSING. On a failure of -port-creation, the journal will be retained in PENDING state and the subnet -itself won't be synced to the OpenDaylight controller. The journal-entry state -is marked as COMPLETED only on successful port creation and successful -synchronization of that subnet resource to OpenDaylight controller. The same -behavior is applicable for subnet update and delete operations too. - -The subnet create/update operation that allocates an OpenDaylight DHCP port -to always check if a port exists and allocate new port only if none exists -for the subnet. - -Since the invocation of create/delete port will be within the journal callback -and asynchronous to subnet-postcommit, the failure to create/delete port -will result in the created (or updated) subnet to remain in PENDING state. Next -journal sync of this pending subnet will again retry creation/deletion of port -and this cycle will happen until either create/delete port succeeds or the -subnet is itself deleted by the orchestrating tenant. This could result in -piling up of journal PENDING entries for these subnets when there is an -unexpected failure in create/delete DHCP port operation. It is recommended to -not keep retrying the port operation and instead failures would be indicated -in OpenDaylight as DHCP offers/renews will not be honored by the dhcp service -within the OpenDaylight controller, for that subnet. - -Recommended Alternative ------------------------ - -All of the following cases will need to be addressed by the design. - -* Neutron server can crash after submitting information to DB but before - invoking post-commit during a subnet create/update/delete operation. The - dhcp-proxy-service should handle the DHCP port creation/deletion during - such failures when the service is enabled. -* A subnet update operation to disable-dhcp can be immediately followed by - a subnet update operation to enable-dhcp, and such a situation should end up - in creating the neutron-dhcp-port for consumption by OpenDaylight. -* A subnet update operation to enable-dhcp can be immediately followed by a - subnet update operation to disable-dhcp, and such a situation should end up - in deleting the neutron-dhcp-port that was created for use by OpenDaylight. -* A subnet update operation to enable-dhcp can be immediately followed by a - subnet delete operation,and such a situation should end up deleting the - neutron-dhcp-port that was about to be provided for use by OpenDaylight. -* A subnet create operation (with dhcp enabled) can be immediately followed - by a subnet update operation to disable-dhcp, and such a situation should - end up in deleting the neutron-dhcp-port that was created for use by - OpenDaylight. - -Design as per Alternative 2 meets the above cases better and is what we propose -to take as the approach that we will pursue for this spec. 
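To make the recommended flow concrete, here is a minimal, purely illustrative sketch of the journal callback described above. ``port_api`` and its methods are placeholders rather than an API this spec defines; only the ``network:dhcp`` device-owner and the ``OpenDaylight-`` device-id prefix come from the text above, and the exact device-id suffix is an assumption.

.. code-block:: python

    # Hypothetical sketch of the subnet journal callback in the recommended
    # alternative.  "port_api" stands in for whatever port-management helper
    # the implementation ends up using; its methods are not defined here.
    DEVICE_OWNER_DHCP = 'network:dhcp'


    def on_subnet_entry_processing(port_api, operation, subnet):
        """Keep the ODL DHCP port in step with the subnet's enable_dhcp state.

        Invoked when a subnet journal entry moves from PENDING to PROCESSING.
        Any exception leaves the entry in PENDING so the next journal run
        retries, as described above.
        """
        existing = port_api.find_odl_dhcp_port(subnet['id'])

        if operation in ('create', 'update') and subnet['enable_dhcp']:
            if existing is None:
                port_api.create_port(
                    network_id=subnet['network_id'],
                    fixed_ips=[{'subnet_id': subnet['id']}],
                    device_owner=DEVICE_OWNER_DHCP,
                    # The spec fixes only the "OpenDaylight-" prefix; using the
                    # subnet id as the suffix is an assumption made here.
                    device_id='OpenDaylight-' + subnet['id'])
        elif existing is not None:
            # DHCP disabled or subnet deleted: remove the ODL-owned port.
            port_api.delete_port(existing['id'])

Checking for an existing port before creating one, and removing it only when present, keeps the callback idempotent so the repeated journal retries described above can converge.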
- -Dependencies -============ -Feature is dependent on enhancement in OpenDaylight DHCP Service as per the -Spec in [1] - -Impact -====== -None - -Assignee(s) -=========== - -* Achuth Maniyedath (achuth.m@altencalsoftlabs.com) -* Karthik Prasad(karthik.p@altencalsoftlabs.com) - -References -========== - -* [1] OpenDaylight spec to cover this feature - https://git.opendaylight.org/gerrit/#/c/52298/ diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst deleted file mode 100644 index 4735fa614..000000000 --- a/doc/source/contributor/testing.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../../TESTING.rst diff --git a/doc/source/contributor/usage.rst b/doc/source/contributor/usage.rst deleted file mode 100644 index 003ed666d..000000000 --- a/doc/source/contributor/usage.rst +++ /dev/null @@ -1,7 +0,0 @@ -======== -Usage -======== - -To use networking-odl in a project:: - - import networking_odl diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 70f63c997..000000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. cover title comes from README.rst - -.. include:: ../../README.rst - -Installation ------------- -.. toctree:: - :maxdepth: 2 - - install/index - -Configuration options ---------------------- -.. toctree:: - :maxdepth: 2 - - configuration/index - -Administration Guide --------------------- -.. toctree:: - :maxdepth: 2 - - admin/index - -Contributor Guide ------------------ -.. toctree:: - :maxdepth: 2 - - contributor/index - -Indices and tables ------------------- - -* :ref:`genindex` -* :ref:`search` - diff --git a/doc/source/install/devstack.rst b/doc/source/install/devstack.rst deleted file mode 100644 index da85f63d0..000000000 --- a/doc/source/install/devstack.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../../devstack/README.rst diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index dd9097ad6..000000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Installation Guide -================== - -.. toctree:: - :maxdepth: 2 - - installation - DevStack plugin diff --git a/doc/source/install/installation.rst b/doc/source/install/installation.rst deleted file mode 100644 index 914e33348..000000000 --- a/doc/source/install/installation.rst +++ /dev/null @@ -1,220 +0,0 @@ -.. _installation: - -Installation -============ - -The ``networking-odl`` repository includes integration with DevStack that -enables creation of a simple OpenDaylight (ODL) development and test -environment. This document discusses what is required for manual installation -and integration into a production OpenStack deployment tool of conventional -architectures that include the following types of nodes: - -* Controller - Runs OpenStack control plane services such as REST APIs - and databases. - -* Network - Provides connectivity between provider (public) and project - (private) networks. Services provided include layer-3 (routing), DHCP, and - metadata agents. Layer-3 agent is optional. When using netvirt (vpnservice) - DHCP/metadata are optional. - -* Compute - Runs the hypervisor and layer-2 agent for the Networking - service. - -ODL Installation ----------------- - -http://docs.opendaylight.org provides manual and general documentation for ODL - -Review the following documentation regardless of install scenario: - -* `ODL installation `_. - -* `OpenDaylight with OpenStack `_. 
- -Choose and review one of the following installation scenarios: - -* `GBP with OpenStack `_. - OpenDaylight Group Based Policy allows users to express network configuration - in a declarative rather than imperative way. Often described as asking for - "what you want", rather than "how you can do it", Group Based Policy achieves - this by implementing an Intent System. The Intent System is a process around - an intent driven data model and contains no domain specifics but is capable - of addressing multiple semantic definitions of intent. - -* `OVSDB with OpenStack `_. - OpenDaylight OVSDB allows users to take advantage of Network Virtualization - using OpenDaylight SDN capabilities whilst utilizing OpenvSwitch. The stack - includes a Neutron Northbound, a Network Virtualization layer, an OVSDB - southbound plugin, and an OpenFlow southbound plugin. - -* `VTN with OpenStack `_. - OpenDaylight Virtual Tenant Network (VTN) is an application that provides - multi-tenant virtual network on an SDN controller. VTN Manager is - implemented as one plugin to the OpenDaylight controller and provides a REST - interface to create/update/delete VTN components. It provides an - implementation of Openstack L2 Network Functions API. - -Networking-odl Installation ---------------------------- - -.. code-block:: console - - # sudo pip install networking-odl - -.. note:: - - pip need to be installed before running above command. - - -Networking-odl Configuration ----------------------------- - -All related neutron services need to be restarted after configuration change. - -#. Configure Openstack neutron server. The neutron server implements ODL as an - ML2 driver. Edit the ``/etc/neutron/neutron.conf`` file: - - * Enable the ML2 core plug-in. - - .. code-block:: ini - - [DEFAULT] - ... - core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin - - * (Optional) Enable ODL L3 router, if QoS feature is desired, - then qos should be appended to service_plugins - - .. code-block:: ini - - [DEFAULT] - ... - service_plugins = odl-router - - -#. Configure the ML2 plug-in. Edit the - ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file: - - * Configure the ODL mechanism driver, network type drivers, self-service - (tenant) network types, and enable extension drivers(optional). - - .. code-block:: ini - - [ml2] - ... - mechanism_drivers = opendaylight_v2 - type_drivers = local,flat,vlan,vxlan - tenant_network_types = vxlan - extension_drivers = port_security, qos - - .. note:: - - The enabling of extension_driver qos is optional, it should be - enabled if service_plugins for qos is also enabled. - - * Configure the vxlan range. - - .. code-block:: ini - - [ml2_type_vxlan] - ... - vni_ranges = 1:1000 - - * Optionally, enable support for VLAN provider and self-service - networks on one or more physical networks. If you specify only - the physical network, only administrative (privileged) users can - manage VLAN networks. Additionally specifying a VLAN ID range for - a physical network enables regular (non-privileged) users to - manage VLAN networks. The Networking service allocates the VLAN ID - for each self-service network using the VLAN ID range for the - physical network. - - .. code-block:: ini - - [ml2_type_vlan] - ... - network_vlan_ranges = PHYSICAL_NETWORK:MIN_VLAN_ID:MAX_VLAN_ID - - Replace ``PHYSICAL_NETWORK`` with the physical network name and - optionally define the minimum and maximum VLAN IDs. Use a comma - to separate each physical network. 
- - For example, to enable support for administrative VLAN networks - on the ``physnet1`` network and self-service VLAN networks on - the ``physnet2`` network using VLAN IDs 1001 to 2000: - - .. code-block:: ini - - network_vlan_ranges = physnet1,physnet2:1001:2000 - - * Enable security groups. - - .. code-block:: ini - - [securitygroup] - ... - enable_security_group = true - - * Configure ML2 ODL - - .. code-block:: ini - - [ml2_odl] - - ... - username = - password = - url = http://:/controller/nb/v2/neutron - port_binding_controller = pseudo-agentdb-binding - - -Compute/network nodes ---------------------- - -Each compute/network node runs the OVS services. If compute/network nodes are -already configured to run with Neutron ML2 OVS driver, more steps are -necessary. `OVSDB with OpenStack `_ can be referred to. - -#. Install the ``openvswitch`` packages. - -#. Start the OVS service. - - Using the *systemd* unit: - - .. code-block:: console - - # systemctl start openvswitch - - Using the ``ovs-ctl`` script: - - .. code-block:: console - - # /usr/share/openvswitch/scripts/ovs-ctl start - -#. Configure OVS to use ODL as a manager. - - .. code-block:: console - - # ovs-vsctl set-manager tcp:${ODL_IP_ADDRESS}:6640 - - Replace ``ODL_IP_ADDRESS`` with the IP address of ODL controller node - -#. Set host OVS configurations if port_binding_controller is pseudo-agent - - .. code-block:: console - - # sudo neutron-odl-ovs-hostconfig - -#. Verify the OVS service. - - .. code-block:: console - - # ovs-vsctl show - -.. note:: - - After setting config files, you have to restart the neutron server - if you are using screen then it can be directly started from q-svc - window or you can use service neutron-server restart, latter may or - may not work depending on OS you are using. diff --git a/etc/neutron/plugins/ml2/ml2_conf_odl.ini b/etc/neutron/plugins/ml2/ml2_conf_odl.ini deleted file mode 100644 index 8218073c4..000000000 --- a/etc/neutron/plugins/ml2/ml2_conf_odl.ini +++ /dev/null @@ -1,61 +0,0 @@ -# Configuration for the OpenDaylight MechanismDriver - -[ml2_odl] -# (StrOpt) OpenDaylight REST URL -# If this is not set then no HTTP requests will be made. -# -# url = -# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron - -# (StrOpt) Username for HTTP basic authentication to ODL. -# -# username = -# Example: username = admin - -# (StrOpt) Password for HTTP basic authentication to ODL. -# -# password = -# Example: password = admin - -# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion. -# This is an optional parameter, default value is 10 seconds. -# -# timeout = 10 -# Example: timeout = 15 - -# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout. -# This is an optional parameter, default value is 30 minutes. -# -# session_timeout = 30 -# Example: session_timeout = 60 - -# (IntOpt) Timeout in seconds for the V2 driver thread to fire off -# another thread run through the journal database. -# -# sync_timeout = 10 -# Example: sync_timeout = 10 - -# (IntOpt) Number of times to retry a journal transaction before -# marking it 'failed'. -# -# retry_count = 5 -# Example: retry_count = 5 - -# (IntOpt) (V2 driver) Journal maintenance operations interval in seconds. -# -# maintenance_interval = 300 -# Example: maintenance_interval = 30 - -# (IntOpt) (V2 driver) Time to keep completed rows in seconds. -# Completed rows retention will be checked every maintenance_interval by the -# cleanup thread. 
-# To disable completed rows deletion value should be -1 -# -# completed_rows_retention = 600 -# Example: completed_rows_retention = 30 - -# (IntOpt) (V2 driver) Timeout in seconds to wait before marking a processing -# row back to pending state. -# -# processing_timeout = 100 -# Example: maintenance_interval = 200 diff --git a/etc/policy.json b/etc/policy.json deleted file mode 100644 index 4c7f00368..000000000 --- a/etc/policy.json +++ /dev/null @@ -1,143 +0,0 @@ -{ - "context_is_admin": "role:admin", - "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", - "context_is_advsvc": "role:advsvc", - "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", - "admin_only": "rule:context_is_admin", - "regular_user": "", - "shared": "field:networks:shared=True", - "shared_firewalls": "field:firewalls:shared=True", - "external": "field:networks:router:external=True", - "default": "rule:admin_or_owner", - - "create_subnet": "rule:admin_or_network_owner", - "get_subnet": "rule:admin_or_owner or rule:shared", - "update_subnet": "rule:admin_or_network_owner", - "delete_subnet": "rule:admin_or_network_owner", - - "create_network": "", - "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", - "get_network:router:external": "rule:regular_user", - "get_network:segments": "rule:admin_only", - "get_network:provider:network_type": "rule:admin_only", - "get_network:provider:physical_network": "rule:admin_only", - "get_network:provider:segmentation_id": "rule:admin_only", - "get_network:queue_id": "rule:admin_only", - "create_network:shared": "rule:admin_only", - "create_network:router:external": "rule:admin_only", - "create_network:segments": "rule:admin_only", - "create_network:provider:network_type": "rule:admin_only", - "create_network:provider:physical_network": "rule:admin_only", - "create_network:provider:segmentation_id": "rule:admin_only", - "update_network": "rule:admin_or_owner", - "update_network:segments": "rule:admin_only", - "update_network:shared": "rule:admin_only", - "update_network:provider:network_type": "rule:admin_only", - "update_network:provider:physical_network": "rule:admin_only", - "update_network:provider:segmentation_id": "rule:admin_only", - "update_network:router:external": "rule:admin_only", - "delete_network": "rule:admin_or_owner", - - "create_port": "", - "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc", - "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", - "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", - "create_port:binding:host_id": "rule:admin_only", - "create_port:binding:profile": "rule:admin_only", - "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", - "get_port": "rule:admin_or_owner or rule:context_is_advsvc", - "get_port:queue_id": "rule:admin_only", - "get_port:binding:vif_type": "rule:admin_only", - "get_port:binding:vif_details": "rule:admin_only", - "get_port:binding:host_id": "rule:admin_only", - "get_port:binding:profile": "rule:admin_only", - "update_port": "rule:admin_or_owner or rule:context_is_advsvc", - "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", - "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", - "update_port:binding:host_id": "rule:admin_only", - "update_port:binding:profile": "rule:admin_only", - "update_port:mac_learning_enabled": 
"rule:admin_or_network_owner or rule:context_is_advsvc", - "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", - - "get_router:ha": "rule:admin_only", - "create_router": "rule:regular_user", - "create_router:external_gateway_info:enable_snat": "rule:admin_only", - "create_router:distributed": "rule:admin_only", - "create_router:ha": "rule:admin_only", - "get_router": "rule:admin_or_owner", - "get_router:distributed": "rule:admin_only", - "update_router:external_gateway_info:enable_snat": "rule:admin_only", - "update_router:distributed": "rule:admin_only", - "update_router:ha": "rule:admin_only", - "delete_router": "rule:admin_or_owner", - - "add_router_interface": "rule:admin_or_owner", - "remove_router_interface": "rule:admin_or_owner", - - "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only", - "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only", - - "create_firewall": "", - "get_firewall": "rule:admin_or_owner", - "create_firewall:shared": "rule:admin_only", - "get_firewall:shared": "rule:admin_only", - "update_firewall": "rule:admin_or_owner", - "update_firewall:shared": "rule:admin_only", - "delete_firewall": "rule:admin_or_owner", - - "create_firewall_policy": "", - "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", - "create_firewall_policy:shared": "rule:admin_or_owner", - "update_firewall_policy": "rule:admin_or_owner", - "delete_firewall_policy": "rule:admin_or_owner", - - "create_firewall_rule": "", - "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", - "update_firewall_rule": "rule:admin_or_owner", - "delete_firewall_rule": "rule:admin_or_owner", - - "create_qos_queue": "rule:admin_only", - "get_qos_queue": "rule:admin_only", - - "update_agent": "rule:admin_only", - "delete_agent": "rule:admin_only", - "get_agent": "rule:admin_only", - - "create_dhcp-network": "rule:admin_only", - "delete_dhcp-network": "rule:admin_only", - "get_dhcp-networks": "rule:admin_only", - "create_l3-router": "rule:admin_only", - "delete_l3-router": "rule:admin_only", - "get_l3-routers": "rule:admin_only", - "get_dhcp-agents": "rule:admin_only", - "get_l3-agents": "rule:admin_only", - "get_loadbalancer-agent": "rule:admin_only", - "get_loadbalancer-pools": "rule:admin_only", - - "create_floatingip": "rule:regular_user", - "create_floatingip:floating_ip_address": "rule:admin_only", - "update_floatingip": "rule:admin_or_owner", - "delete_floatingip": "rule:admin_or_owner", - "get_floatingip": "rule:admin_or_owner", - - "create_network_profile": "rule:admin_only", - "update_network_profile": "rule:admin_only", - "delete_network_profile": "rule:admin_only", - "get_network_profiles": "", - "get_network_profile": "", - "update_policy_profiles": "rule:admin_only", - "get_policy_profiles": "", - "get_policy_profile": "", - - "create_metering_label": "rule:admin_only", - "delete_metering_label": "rule:admin_only", - "get_metering_label": "rule:admin_only", - - "create_metering_label_rule": "rule:admin_only", - "delete_metering_label_rule": "rule:admin_only", - "get_metering_label_rule": "rule:admin_only", - - "get_service_provider": "rule:regular_user", - "get_lsn": "rule:admin_only", - "create_lsn": "rule:admin_only" -} diff --git a/networking_odl/__init__.py b/networking_odl/__init__.py deleted file mode 100644 index ed4f82c59..000000000 --- a/networking_odl/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import gettext - -import six - - -if six.PY2: - gettext.install('networking_odl', unicode=1) -else: - gettext.install('networking_odl') diff --git a/networking_odl/_i18n.py b/networking_odl/_i18n.py deleted file mode 100644 index 08c4d7be6..000000000 --- a/networking_odl/_i18n.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See http://docs.openstack.org/developer/oslo.i18n/usage.html . - -""" - -import oslo_i18n - -DOMAIN = "networking_odl" - -_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) - -# The translation function using the well-known name "_" -_ = _translators.primary - -# The contextual translation function using the name "_C" -# requires oslo.i18n >=2.1.0 -_C = _translators.contextual_form - -# The plural translation function using the name "_P" -# requires oslo.i18n >=2.1.0 -_P = _translators.plural_form - - -def get_available_languages(): - return oslo_i18n.get_available_languages(DOMAIN) diff --git a/networking_odl/bgpvpn/__init__.py b/networking_odl/bgpvpn/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/bgpvpn/odl_v2.py b/networking_odl/bgpvpn/odl_v2.py deleted file mode 100644 index 3909d895d..000000000 --- a/networking_odl/bgpvpn/odl_v2.py +++ /dev/null @@ -1,116 +0,0 @@ -# -# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from oslo_config import cfg -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from networking_bgpvpn.neutron.extensions import bgpvpn as bgpvpn_ext -from networking_bgpvpn.neutron.services.service_drivers import driver_api -from neutron_lib.api.definitions import bgpvpn as bgpvpn_const - -from networking_odl.common import constants as odl_const -from networking_odl.common import postcommit -from networking_odl.journal import full_sync -from networking_odl.journal import journal - - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') - -LOG = logging.getLogger(__name__) - -BGPVPN_RESOURCES = { - odl_const.ODL_BGPVPN: odl_const.ODL_BGPVPNS, - odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION: - odl_const.ODL_BGPVPN_NETWORK_ASSOCIATIONS, - - odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION: - odl_const.ODL_BGPVPN_ROUTER_ASSOCIATIONS -} - - -@postcommit.add_postcommit('bgpvpn', 'net_assoc', 'router_assoc') -class OpenDaylightBgpvpnDriver(driver_api.BGPVPNDriver): - - """OpenDaylight BGPVPN Driver - - This code is the backend implementation for the OpenDaylight BGPVPN - driver for Openstack Neutron. - """ - - @log_helpers.log_method_call - def __init__(self, service_plugin): - LOG.info("Initializing OpenDaylight BGPVPN v2 driver") - super(OpenDaylightBgpvpnDriver, self).__init__(service_plugin) - self.journal = journal.OpenDaylightJournalThread() - full_sync.register(bgpvpn_const.LABEL, BGPVPN_RESOURCES) - - @log_helpers.log_method_call - def create_bgpvpn_precommit(self, context, bgpvpn): - journal.record(context, odl_const.ODL_BGPVPN, - bgpvpn['id'], odl_const.ODL_CREATE, bgpvpn) - - @log_helpers.log_method_call - def update_bgpvpn_precommit(self, context, bgpvpn): - journal.record(context, odl_const.ODL_BGPVPN, - bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn) - - @log_helpers.log_method_call - def delete_bgpvpn_precommit(self, context, bgpvpn): - journal.record(context, odl_const.ODL_BGPVPN, - bgpvpn['id'], odl_const.ODL_DELETE, []) - - @log_helpers.log_method_call - def create_net_assoc_precommit(self, context, net_assoc): - our_bgpvpn = None - bgpvpns = self.get_bgpvpns(context) - for bgpvpn in bgpvpns: - # ODL only allows a network to be associated with one BGPVPN - if bgpvpn['id'] == net_assoc['bgpvpn_id']: - our_bgpvpn = bgpvpn - else: - if bgpvpn['networks'] and (net_assoc['network_id'] in - bgpvpn['networks']): - raise bgpvpn_ext.BGPVPNNetworkAssocExistsAnotherBgpvpn( - driver="OpenDaylight V2", - network=net_assoc['network_id'], - bgpvpn=bgpvpn['id']) - journal.record(context, odl_const.ODL_BGPVPN, - our_bgpvpn['id'], odl_const.ODL_UPDATE, our_bgpvpn) - - @log_helpers.log_method_call - def delete_net_assoc_precommit(self, context, net_assoc): - bgpvpn = self.get_bgpvpn(context, net_assoc['bgpvpn_id']) - journal.record(context, odl_const.ODL_BGPVPN, - bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn) - - @log_helpers.log_method_call - def create_router_assoc_precommit(self, context, router_assoc): - associated_routers = self.get_router_assocs(context, - router_assoc['bgpvpn_id']) - for assoc_router in associated_routers: - if(router_assoc["router_id"] != assoc_router["router_id"]): - raise bgpvpn_ext.BGPVPNMultipleRouterAssocNotSupported( - driver="OpenDaylight V2") - bgpvpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id']) - journal.record(context, odl_const.ODL_BGPVPN, - bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn) - - @log_helpers.log_method_call - def delete_router_assoc_precommit(self, context, router_assoc): - bgpvpn = self.get_bgpvpn(context, 
router_assoc['bgpvpn_id']) - journal.record(context, odl_const.ODL_BGPVPN, - bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn) diff --git a/networking_odl/ceilometer/__init__.py b/networking_odl/ceilometer/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/ceilometer/network/__init__.py b/networking_odl/ceilometer/network/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/ceilometer/network/statistics/__init__.py b/networking_odl/ceilometer/network/statistics/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/ceilometer/network/statistics/opendaylight_v2/__init__.py b/networking_odl/ceilometer/network/statistics/opendaylight_v2/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/ceilometer/network/statistics/opendaylight_v2/client.py b/networking_odl/ceilometer/network/statistics/opendaylight_v2/client.py deleted file mode 100644 index 1d0d635dd..000000000 --- a/networking_odl/ceilometer/network/statistics/opendaylight_v2/client.py +++ /dev/null @@ -1,137 +0,0 @@ -# -# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_log import log -import requests -from requests import auth -import six - -from ceilometer.i18n import _ - - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class _Base(object): - """Base class of OpenDaylight REST APIs Clients.""" - - @abc.abstractproperty - def base_url(self): - """Returns base url for each REST API.""" - - def __init__(self, client): - self.client = client - - def get_statistics(self): - return self.client.request(self.base_url) - - -class OpenDaylightRESTAPIFailed(Exception): - pass - - -class SwitchStatisticsAPIClient(_Base): - """OpenDaylight Switch Statistics REST API Client - - Base URL: - {endpoint}/flow-capable-switches - """ - - base_url = '/flow-capable-switches' - - -class Client(object): - - def __init__(self, conf, endpoint, params): - self.switch_statistics = SwitchStatisticsAPIClient(self) - self._endpoint = endpoint - self.conf = conf - - self._req_params = self._get_req_params(params) - self.session = requests.Session() - - def _get_req_params(self, params): - req_params = { - 'headers': { - 'Accept': 'application/json' - }, - 'timeout': self.conf.http_timeout, - } - - auth_way = params.get('auth') - if auth_way in ['basic', 'digest']: - user = params.get('user') - password = params.get('password') - - if auth_way == 'basic': - auth_class = auth.HTTPBasicAuth - else: - auth_class = auth.HTTPDigestAuth - - req_params['auth'] = auth_class(user, password) - return req_params - - def _log_req(self, url): - - curl_command = ['REQ: curl -i -X GET', '"%s"' % (url)] - - if 'auth' in self._req_params: - auth_class = self._req_params['auth'] - if isinstance(auth_class, auth.HTTPBasicAuth): - curl_command.append('--basic') - else: - curl_command.append('--digest') - - 
curl_command.append('--user "%s":"***"' % auth_class.username) - - for name, value in six.iteritems(self._req_params['headers']): - curl_command.append('-H "%s: %s"' % (name, value)) - - LOG.debug(' '.join(curl_command)) - - @staticmethod - def _log_res(resp): - - dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, - resp.status_code, - resp.reason)] - dump.extend('%s: %s\n' % (k, v) - for k, v in six.iteritems(resp.headers)) - dump.append('\n') - if resp.content: - dump.extend([resp.content, '\n']) - - LOG.debug(''.join(dump)) - - def _http_request(self, url): - if self.conf.debug: - self._log_req(url) - resp = self.session.get(url, **self._req_params) - if self.conf.debug: - self._log_res(resp) - if resp.status_code // 100 != 2: - raise OpenDaylightRESTAPIFailed( - _('OpenDaylight API returned %(status)s %(reason)s') % - {'status': resp.status_code, 'reason': resp.reason}) - - return resp.json() - - def request(self, path): - - url = self._endpoint + path - return self._http_request(url) diff --git a/networking_odl/ceilometer/network/statistics/opendaylight_v2/driver.py b/networking_odl/ceilometer/network/statistics/opendaylight_v2/driver.py deleted file mode 100644 index 828e02c80..000000000 --- a/networking_odl/ceilometer/network/statistics/opendaylight_v2/driver.py +++ /dev/null @@ -1,296 +0,0 @@ -# -# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log -from six.moves.urllib import parse as urlparse - -from ceilometer.network.statistics import driver -from networking_odl.ceilometer.network.statistics.opendaylight_v2 import client - - -LOG = log.getLogger(__name__) -INT64_MAX_VALUE = (2 ** 64 / 2 - 1) - - -class OpenDaylightDriver(driver.Driver): - """Driver of network info collector from OpenDaylight. - - This driver uses resources in "/etc/ceilometer/polling.yaml". - Resource requires below conditions: - - * resource is url - * scheme is "opendaylight.v2" - - This driver can be configured via query parameters. - Supported parameters: - - * scheme: - The scheme of request url to OpenDaylight REST API endpoint. - (default http) - * auth: - Auth strategy of http. 
- This parameter can be set basic or digest.(default None) - * user: - This is username that is used by auth.(default None) - * password: - This is password that is used by auth.(default None) - - e.g.:: - - opendaylight.v2://127.0.0.1:8080/controller/statistics - ?auth=basic&user=admin&password=admin&scheme=http - - In this case, the driver send request to below URLs: - - http://127.0.0.1:8080/controller/statistics/flow-capable-switches - - Example JSON response from OpenDaylight - { - flow_capable_switches: [{ - packet_in_messages_received: 501, - packet_out_messages_sent: 300, - ports: 1, - flow_datapath_id: 55120148545607, - tenant_id: ADMIN_ID, - switch_port_counters: [{ - bytes_received: 1000, - bytes_sent: 1000, - duration: 600, - packets_internal_received: 100, - packets_internal_sent: 200, - packets_received: 100, - packets_received_drop: 0, - packets_received_error: 0, - packets_sent: 100, - port_id: 4, - tenant_id: PORT_1_TENANT_ID, - uuid: PORT_1_ID - }], - table_counters: [{ - flow_count: 90, - table_id: 0 - }] - }] - } - - """ - - @staticmethod - def _get_int_sample(key, statistic, resource_id, - resource_meta, tenant_id): - if key not in statistic: - return None - value = int(statistic[key]) - if not (0 <= value <= INT64_MAX_VALUE): - value = 0 - return value, resource_id, resource_meta, tenant_id - - def _prepare_cache(self, endpoint, params, cache): - - if 'network.statistics.opendaylight_v2' in cache: - return cache['network.statistics.opendaylight_v2'] - - data = {} - - odl_params = {} - if 'auth' in params: - odl_params['auth'] = params['auth'][0] - if 'user' in params: - odl_params['user'] = params['user'][0] - if 'password' in params: - odl_params['password'] = params['password'][0] - cs = client.Client(self.conf, endpoint, odl_params) - - try: - # get switch statistics - data['switch'] = cs.switch_statistics.get_statistics() - except Exception: - LOG.exception('Request failed to connect to OpenDaylight' - ' with NorthBound REST API') - - cache['network.statistics.opendaylight_v2'] = data - - return data - - def get_sample_data(self, meter_name, parse_url, params, cache): - - extractor = self._get_extractor(meter_name) - if extractor is None: - # The way to getting meter is not implemented in this driver or - # OpenDaylight REST API has not api to getting meter. - return None - - iter = self._get_iter(meter_name) - if iter is None: - # The way to getting meter is not implemented in this driver or - # OpenDaylight REST API has not api to getting meter. 
- return None - - parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], - parse_url.netloc, - parse_url.path, - None, - None, - None) - endpoint = urlparse.urlunparse(parts) - - data = self._prepare_cache(endpoint, params, cache) - - samples = [] - if data: - for sample in iter(extractor, data): - if sample is not None: - # set controller name to resource_metadata - sample[2]['controller'] = 'OpenDaylight_V2' - samples.append(sample) - - return samples - - def _get_iter(self, meter_name): - if meter_name == 'switch' or meter_name == 'switch.ports': - return self._iter_switch - elif meter_name.startswith('switch.table'): - return self._iter_table - elif meter_name.startswith('switch.port'): - return self._iter_switch_port - elif meter_name.startswith('port'): - return self._iter_port - - def _get_extractor(self, meter_name): - if (meter_name == 'switch.port' or - meter_name.startswith('switch.port.')): - meter_name = meter_name.split('.', 1)[1] - method_name = '_' + meter_name.replace('.', '_') - return getattr(self, method_name, None) - - @staticmethod - def _iter_switch(extractor, data): - for switch in data['switch']['flow_capable_switches']: - yield (extractor(switch, str(switch['flow_datapath_id']), - {}, switch['tenant_id'])) - - @staticmethod - def _switch(statistic, resource_id, - resource_meta, tenant_id): - return 1, resource_id, resource_meta, tenant_id - - @staticmethod - def _switch_ports(statistic, resource_id, - resource_meta, tenant_id): - return OpenDaylightDriver._get_int_sample( - 'ports', statistic, resource_id, - resource_meta, tenant_id) - - @staticmethod - def _iter_switch_port(extractor, data): - for switch in data['switch']['flow_capable_switches']: - if 'switch_port_counters' in switch: - switch_id = str(switch['flow_datapath_id']) - tenant_id = switch['tenant_id'] - for port_statistic in switch['switch_port_counters']: - port_id = port_statistic['port_id'] - resource_id = '%s:%d' % (switch_id, port_id) - resource_meta = {'switch': switch_id, - 'port_number_on_switch': port_id} - if 'uuid' in port_statistic: - neutron_port_id = port_statistic['uuid'] - resource_meta['neutron_port_id'] = neutron_port_id - yield extractor(port_statistic, resource_id, - resource_meta, tenant_id) - - @staticmethod - def _iter_port(extractor, data): - resource_meta = {} - for switch in data['switch']['flow_capable_switches']: - if 'switch_port_counters' in switch: - for port_statistic in switch['switch_port_counters']: - if 'uuid' in port_statistic: - resource_id = port_statistic['uuid'] - yield extractor(port_statistic, - resource_id, resource_meta, - port_statistic['tenant_id']) - - @staticmethod - def _port(statistic, resource_id, resource_meta, tenant_id): - return 1, resource_id, resource_meta, tenant_id - - @staticmethod - def _port_uptime(statistic, resource_id, - resource_meta, tenant_id): - return OpenDaylightDriver._get_int_sample( - 'duration', statistic, resource_id, - resource_meta, tenant_id) - - @staticmethod - def _port_receive_packets(statistic, resource_id, - resource_meta, tenant_id): - return OpenDaylightDriver._get_int_sample( - 'packets_received', statistic, resource_id, - resource_meta, tenant_id) - - @staticmethod - def _port_transmit_packets(statistic, resource_id, - resource_meta, tenant_id): - return OpenDaylightDriver._get_int_sample( - 'packets_sent', statistic, resource_id, - resource_meta, tenant_id) - - @staticmethod - def _port_receive_bytes(statistic, resource_id, - resource_meta, tenant_id): - return OpenDaylightDriver._get_int_sample( - 
'bytes_received', statistic, resource_id, - resource_meta, tenant_id) - - @staticmethod - def _port_transmit_bytes(statistic, resource_id, - resource_meta, tenant_id): - return OpenDaylightDriver._get_int_sample( - 'bytes_sent', statistic, resource_id, - resource_meta, tenant_id) - - @staticmethod - def _port_receive_drops(statistic, resource_id, - resource_meta, tenant_id): - return OpenDaylightDriver._get_int_sample( - 'packets_received_drop', statistic, resource_id, - resource_meta, tenant_id) - - @staticmethod - def _port_receive_errors(statistic, resource_id, - resource_meta, tenant_id): - return OpenDaylightDriver._get_int_sample( - 'packets_received_error', statistic, - resource_id, resource_meta, tenant_id) - - @staticmethod - def _iter_table(extractor, data): - for switch_statistic in data['switch']['flow_capable_switches']: - if 'table_counters' in switch_statistic: - switch_id = str(switch_statistic['flow_datapath_id']) - tenant_id = switch_statistic['tenant_id'] - for table_statistic in switch_statistic['table_counters']: - resource_meta = {'switch': switch_id} - resource_id = ("%s:table:%d" % - (switch_id, table_statistic['table_id'])) - yield extractor(table_statistic, resource_id, - resource_meta, tenant_id) - - @staticmethod - def _switch_table_active_entries(statistic, resource_id, - resource_meta, tenant_id): - return OpenDaylightDriver._get_int_sample( - 'flow_count', statistic, resource_id, - resource_meta, tenant_id) diff --git a/networking_odl/cmd/__init__.py b/networking_odl/cmd/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/cmd/set_ovs_hostconfigs.py b/networking_odl/cmd/set_ovs_hostconfigs.py deleted file mode 100755 index b1022aa0b..000000000 --- a/networking_odl/cmd/set_ovs_hostconfigs.py +++ /dev/null @@ -1,473 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -""" -Command line script to set host OVS configurations (it requires ovsctl) - -Examples: - NOTE: bash accepts new line characters between quotes - - To give a full custom json - - python set_ovs_hostconfigs.py --ovs_hostconfigs='{ - "ODL L2": { - "allowed_network_types": - ["local","vlan", "vxlan","gre"], - "bridge_mappings": {"physnet1":"br-ex"} - "supported_vnic_types": [ - { - "vnic_type":"normal", - "vif_type":"ovs", - "vif_details":{} - } - ], - }, - "ODL L3": {} - }' - - To make sure to use system data path (Kernel) - - python set_ovs_hostconfigs.py --noovs_dpdk - - To make sure to use user space data path (vhostuser) - - python set_ovs_hostconfigs.py --ovs_dpdk - - To give bridge mappings - - python --bridge_mapping=physnet1:br-ex,physnet2:br-eth0 - -""" - - -import os -import socket -import subprocess -import sys - -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils - -from networking_odl._i18n import _ - - -LOG = log.getLogger(__name__) - -USERSPACE_DATAPATH_TYPES = ['netdev', 'dpdkvhostuser'] - -COMMAND_LINE_OPTIONS = [ - - cfg.ListOpt( - 'allowed_network_types', - default=['local', 'vlan', 'vxlan', 'gre'], - help=_(""" - Specifies allowed network types given as a Comma-separated list of - types. - - Default: --allowed_network_types=local,vlan,vxlan,gre - """)), - - cfg.DictOpt( - 'bridge_mappings', - default={}, - help=_(""" - Comma-separated list of : tuples mapping - physical network names to the agent's node-specific Open vSwitch - bridge names to be used for flat and VLAN networks. The length of - bridge names should be no more than 11. Each bridge must exist, and - should have a physical network interface configured as a port. All - physical networks configured on the server should have mappings to - appropriate bridges on each agent. - - Note: If you remove a bridge from this mapping, make sure to - disconnect it from the integration bridge as it won't be managed by - the agent anymore. - - Default: --bridge_mappings= - """)), - - cfg.StrOpt( - 'datapath_type', - choices=['system', 'netdev', 'dpdkvhostuser'], - default=None, - help=_(""" - It specifies the OVS data path to use. - - If this value is given then --ovs_dpdk will be ignored. - If neither this option or --ovs_dpdk are given then it will use a - valid value for current host. - - Choices: --datapath_type= - --datapath_type=system # kernel data path - --datapath_type=netdev # userspace data path - --datapath_type=dpdkvhostuser # userspace data path - - Default: --datapath_type=netdev # if support is detected - --datapath_type=system # in all other cases - """)), - - cfg.BoolOpt( - 'debug', - default=False, - help=_(""" - It shows debugging informations. - - Default: --nodebug - """)), - - cfg.StrOpt( - 'host', - default=socket.gethostname(), # pylint: disable=no-member - help=_(""" - It specifies the host name of the target machine. - - Default: --host=$HOSTNAME # running machine host name - """)), - - cfg.IPOpt( - 'local_ip', - help=_(""" - IP address of local overlay (tunnel) network end-point. - It accepts either an IPv4 or IPv6 address that resides on one - of the host network interfaces. The IP version of this - value must match the value of the 'overlay_ip_version' - option in the ML2 plug-in configuration file on the Neutron - server node(s). - - Default: local_ip= - """)), - - cfg.BoolOpt( - 'ovs_dpdk', - default=None, - help=_(""" - It uses user-space type of virtual interface (vhostuser) instead of - the system based one (ovs). 
- - If this option is not specified it tries to detect vhostuser - support on running host and in case of positive match it uses it. - - NOTE: if --datapath_type is given then this option is ignored. - - Default: - """)), - - cfg.StrOpt( - 'ovs_hostconfigs', - help=_(""" - Fives pre-made host configuration for OpenDaylight as a JSON - string. - - NOTE: when specified all other options are ignored! - - An entry should look like: - --ovs_hostconfigs='{ - "ODL L2": { - "allowed_network_types": - ["local","vlan", "vxlan","gre"], - "bridge_mappings": {"physnet1":"br-ex"} - "supported_vnic_types": [ - { - "vnic_type":"normal", - "vif_type":"ovs", - "vif_details":{} - } - ], - }, - "ODL L3": {} - }' - - Default: --ovs_hostconfigs= - """)), - - cfg.StrOpt( - 'vhostuser_mode', - choices=['client', 'server'], - default='client', - help=_(""" - It specifies the OVS VHostUser mode. - - Choices: --vhostuser_mode=client - --vhostuser_mode=server - - Default: --vhostuser_mode=client - """)), - - cfg.BoolOpt( - 'vhostuser_ovs_plug', - default=True, - help=_(""" - Enable VHostUser OVS Plug. - - Default: --vhostuser_ovs_plug - """)), - - cfg.StrOpt( - 'vhostuser_port_prefix', - choices=['vhu', 'socket'], - default='vhu', - help=_(""" - VHostUser socket port prefix. - - Choices: --vhostuser_socket_dir=vhu - --vhostuser_socket_dir=socket - - Default: --vhostuser_socket_dir=vhu - """)), - - cfg.StrOpt( - 'vhostuser_socket_dir', - default='/var/run/openvswitch', - help=_(""" - OVS VHostUser socket directory. - - Default: --vhostuser_socket_dir=/var/run/openvswitch - """)), -] - - -DEFAULT_COMMAND_LINE_OPTIONS = tuple(sys.argv[1:]) - - -def set_ovs_extid_hostconfigs(conf, ovs_vsctl): - if conf.ovs_hostconfigs: - json_str = conf.ovs_hostconfigs.replace("\'", "\"") - LOG.debug("SET-HOSTCONFIGS: JSON String %s", json_str) - hostconfigs = jsonutils.loads(json_str) - - else: - uuid = ovs_vsctl.uuid() - userspace_datapath_types = ovs_vsctl.userspace_datapath_types() - hostconfigs = _hostconfigs_from_conf( - conf=conf, uuid=uuid, - userspace_datapath_types=userspace_datapath_types) - - ovs_vsctl.set_host_name(conf.host) - for name in sorted(hostconfigs): - ovs_vsctl.set_host_config(name, hostconfigs[name]) - - # for new netvirt - if conf.local_ip: - ovs_vsctl.set_local_ip(conf.local_ip) - if conf.bridge_mappings: - provider_mappings = ",".join( - "{}:{}".format(k, v) for k, v in conf.bridge_mappings.items()) - ovs_vsctl.set_provider_mappings(provider_mappings) - - -def _hostconfigs_from_conf(conf, uuid, userspace_datapath_types): - vif_type = _vif_type_from_conf( - conf=conf, userspace_datapath_types=userspace_datapath_types) - datapath_type = conf.datapath_type or ( - 'system' if vif_type == 'ovs' else userspace_datapath_types[0]) - vif_details = _vif_details_from_conf( - conf=conf, uuid=uuid, vif_type=vif_type) - - return { - "ODL L2": { - "allowed_network_types": conf.allowed_network_types, - "bridge_mappings": conf.bridge_mappings, - "datapath_type": datapath_type, - "supported_vnic_types": [ - { - "vif_details": vif_details, - "vif_type": vif_type, - "vnic_type": "normal", - } - ] - } - } - - -def _vif_type_from_conf(conf, userspace_datapath_types): - - # take vif_type from datapath_type ------------------------------------ - if conf.datapath_type: - # take it from datapath_type - if conf.datapath_type in USERSPACE_DATAPATH_TYPES: - if conf.datapath_type not in userspace_datapath_types: - LOG.warning( - "Using user space data path type '%s' even if no " - "support was detected.", conf.datapath_type) - return 
'vhostuser' - else: - return 'ovs' - - # take vif_type from ovs_dpdk ----------------------------------------- - if conf.ovs_dpdk is True: - if userspace_datapath_types: - return 'vhostuser' - - raise ValueError(_( - "--ovs_dpdk option was specified but the 'netdev' datapath_type " - "was not enabled. " - "To override use option --datapath_type=netdev")) - - elif conf.ovs_dpdk is False: - return 'ovs' - - # take detected dtype ------------------------------------------------- - if userspace_datapath_types: - return 'vhostuser' - - return 'ovs' - - -def _vif_details_from_conf(conf, uuid, vif_type): - host_addresses = [conf.local_ip or conf.host] - if vif_type == 'ovs': - # OVS legacy mode - return {"uuid": uuid, - "host_addresses": host_addresses, - "has_datapath_type_netdev": False, - "support_vhost_user": False} - - elif vif_type == 'vhostuser': - # enable VHOSTUSER - return {"uuid": uuid, - "host_addresses": host_addresses, - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": conf.vhostuser_port_prefix, - "vhostuser_socket_dir": conf.vhostuser_socket_dir, - "vhostuser_ovs_plug": conf.vhostuser_ovs_plug, - "vhostuser_mode": conf.vhostuser_mode, - "vhostuser_socket": os.path.join( - conf.vhostuser_socket_dir, - conf.vhostuser_port_prefix + '$PORT_ID')} - - -def setup_conf(args=None): - """setup cmdline options.""" - - if args is None: - args = DEFAULT_COMMAND_LINE_OPTIONS - - conf = cfg.ConfigOpts() - if '-h' in args or '--help' in args: - # Prints out script documentation." - print(__doc__) - - conf.register_cli_opts(COMMAND_LINE_OPTIONS) - conf(args=args) - return conf - - -class OvsVsctl(object): - """Wrapper class for ovs-vsctl command tool - - """ - - COMMAND = 'ovs-vsctl' - TABLE = 'Open_vSwitch' - - _uuid = None - - def uuid(self): - uuid = self._uuid - if uuid is None: - self._uuid = uuid = self._get('.', '_uuid') - return uuid - - _datapath_types = None - - def datapath_types(self): - datapath_types = self._datapath_types - if datapath_types is None: - try: - datapath_types = self._get('.', 'datapath_types') - except subprocess.CalledProcessError: - datapath_types = 'system' - self._datapath_types = datapath_types - return datapath_types - - _userspace_datapath_types = None - - def userspace_datapath_types(self): - userspace_datapath_types = self._userspace_datapath_types - if userspace_datapath_types is None: - datapath_types = self.datapath_types() - userspace_datapath_types = tuple( - datapath_type - for datapath_type in USERSPACE_DATAPATH_TYPES - if datapath_types.find(datapath_type) >= 0) - self._userspace_datapath_types = userspace_datapath_types - return userspace_datapath_types - - def set_host_name(self, host_name): - self._set_external_ids('odl_os_hostconfig_hostid', host_name) - - def set_host_config(self, name, value): - self._set_external_ids( - name='odl_os_hostconfig_config_' + name.lower().replace(' ', '_'), - value=jsonutils.dumps(value)) - - def set_local_ip(self, local_ip): - self._set_other_config("local_ip", local_ip) - - def set_provider_mappings(self, provider_mappings): - self._set_other_config("provider_mappings", provider_mappings) - - # --- implementation details ---------------------------------------------- - - def _set_external_ids(self, name, value): - # Refer below for ovs ext-id strings - # https://review.openstack.org/#/c/309630/ - value = 'external_ids:{}={}'.format(name, value) - self._set(record=self.uuid(), value=value) - - def _set_other_config(self, name, value): - value = 'other_config:{}={}'.format(name, 
value) - self._set(record=self.uuid(), value=value) - - def _get(self, record, name): - return self._execute('get', self.TABLE, record, name) - - def _set(self, record, value): - self._execute('set', self.TABLE, record, value) - - def _execute(self, *args): - command_line = (self.COMMAND,) + args - LOG.info( - "SET-HOSTCONFIGS: Executing cmd: %s", ' '.join(command_line)) - return subprocess.check_output(command_line).strip() - - -def main(args=None): - """Main.""" - - conf = setup_conf(args) - - if os.geteuid() != 0: - LOG.error('Root permissions are required to configure ovsdb.') - return 1 - - try: - set_ovs_extid_hostconfigs(conf=conf, ovs_vsctl=OvsVsctl()) - - except Exception as ex: # pylint: disable=broad-except - LOG.error("Fatal error: %s", ex, exc_info=conf.debug) - return 1 - - else: - return 0 - - -if __name__ == '__main__': - exit(main()) diff --git a/networking_odl/cmd/test_setup_hostconfigs.sh b/networking_odl/cmd/test_setup_hostconfigs.sh deleted file mode 100755 index 1651d0ed5..000000000 --- a/networking_odl/cmd/test_setup_hostconfigs.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -python set_ovs_hostconfigs.py --debug --ovs_hostconfigs='{"ODL L2": {"supported_vnic_types":[{"vnic_type":"normal", "vif_type":"ovs", "vif_details":{}}], "allowed_network_types":["local","vlan", "vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}}, "ODL L3": {"some_details": "dummy_details"}}' diff --git a/networking_odl/common/__init__.py b/networking_odl/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/common/callback.py b/networking_odl/common/callback.py deleted file mode 100644 index d669d21a7..000000000 --- a/networking_odl/common/callback.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections - -from neutron_lib.callbacks import events -from neutron_lib.callbacks import registry -from neutron_lib.callbacks import resources -from oslo_log import log as logging - -from networking_odl.common import constants as odl_const - -LOG = logging.getLogger(__name__) - -ODLResource = collections.namedtuple('ODLResource', ('singular', 'plural')) -_RESOURCE_MAPPING = { - resources.SECURITY_GROUP: ODLResource(odl_const.ODL_SG, odl_const.ODL_SGS), - resources.SECURITY_GROUP_RULE: ODLResource(odl_const.ODL_SG_RULE, - odl_const.ODL_SG_RULES), -} -_OPERATION_MAPPING = { - events.PRECOMMIT_CREATE: odl_const.ODL_CREATE, - events.PRECOMMIT_UPDATE: odl_const.ODL_UPDATE, - events.PRECOMMIT_DELETE: odl_const.ODL_DELETE, - events.AFTER_CREATE: odl_const.ODL_CREATE, - events.AFTER_UPDATE: odl_const.ODL_UPDATE, - events.AFTER_DELETE: odl_const.ODL_DELETE, -} - - -class OdlSecurityGroupsHandler(object): - - def __init__(self, precommit, postcommit): - assert postcommit is not None - self._precommit = precommit - self._postcommit = postcommit - self._subscribe() - - def _subscribe(self): - if self._precommit is not None: - for event in (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE): - registry.subscribe(self.sg_callback_precommit, - resources.SECURITY_GROUP, event) - registry.subscribe(self.sg_callback_precommit, - resources.SECURITY_GROUP_RULE, event) - registry.subscribe( - self.sg_callback_precommit, resources.SECURITY_GROUP, - events.PRECOMMIT_UPDATE) - - for event in (events.AFTER_CREATE, events.AFTER_DELETE): - registry.subscribe(self.sg_callback_postcommit, - resources.SECURITY_GROUP, event) - registry.subscribe(self.sg_callback_postcommit, - resources.SECURITY_GROUP_RULE, event) - - registry.subscribe(self.sg_callback_postcommit, - resources.SECURITY_GROUP, events.AFTER_UPDATE) - - def _sg_callback(self, callback, resource, event, trigger, **kwargs): - context = kwargs['context'] - res = kwargs.get(resource) - res_id = kwargs.get("%s_id" % resource) - if res_id is None: - res_id = res.get('id') - odl_res_type = _RESOURCE_MAPPING[resource] - - odl_ops = _OPERATION_MAPPING[event] - odl_res_dict = None if res is None else {odl_res_type.singular: res} - - LOG.debug("Calling sync_from_callback with ODL_OPS (%(odl_ops)s) " - "ODL_RES_TYPE (%(odl_res_type)s) RES_ID (%(res_id)s) " - "ODL_RES_DICT (%(odl_res_dict)s) KWARGS (%(kwargs)s)", - {'odl_ops': odl_ops, 'odl_res_type': odl_res_type, - 'res_id': res_id, 'odl_res_dict': odl_res_dict, - 'kwargs': kwargs}) - - copy_kwargs = kwargs.copy() - copy_kwargs.pop('context') - callback(context, odl_ops, odl_res_type, res_id, odl_res_dict, - **copy_kwargs) - - def sg_callback_precommit(self, resource, event, trigger, **kwargs): - self._sg_callback(self._precommit, resource, event, trigger, **kwargs) - - def sg_callback_postcommit(self, resource, event, trigger, **kwargs): - self._sg_callback(self._postcommit, resource, event, trigger, **kwargs) diff --git a/networking_odl/common/client.py b/networking_odl/common/client.py deleted file mode 100644 index 51307a233..000000000 --- a/networking_odl/common/client.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (c) 2014 Red Hat Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import threading - -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import excutils -import requests -from requests import sessions - -from networking_odl.common import constants as odl_const -from networking_odl.common import utils - -LOG = log.getLogger(__name__) -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') - - -class OpenDaylightRestClient(object): - @staticmethod - def _check_opt(url): - if not url: - raise cfg.RequiredOptError('url', cfg.OptGroup('ml2_odl')) - required_opts = ('url', 'username', 'password') - for opt in required_opts: - if not getattr(cfg.CONF.ml2_odl, opt): - raise cfg.RequiredOptError(opt, cfg.OptGroup('ml2_odl')) - - @classmethod - def create_client(cls, url=None): - if cfg.CONF.ml2_odl.enable_lightweight_testing: - LOG.debug("ODL lightweight testing is enabled, " - "returning a OpenDaylightLwtClient instance") - - # Have to import at here, otherwise we create a dependency loop - from networking_odl.common import lightweight_testing as lwt - cls = lwt.OpenDaylightLwtClient - - url = url or cfg.CONF.ml2_odl.url - cls._check_opt(url) - return cls( - url, - cfg.CONF.ml2_odl.username, - cfg.CONF.ml2_odl.password, - cfg.CONF.ml2_odl.timeout) - - def __init__(self, url, username, password, timeout): - super(OpenDaylightRestClient, self).__init__() - self.url = url - self.timeout = timeout - self.session = sessions.Session() - self.session.auth = (username, password) - - def get_resource(self, resource_type, resource_id): - response = self.get(utils.make_url_object(resource_type) + '/' + - resource_id) - if response.status_code == requests.codes.not_found: - return None - - return self._check_response(response).json() - - def get(self, urlpath='', data=None): - return self.request('get', urlpath, data) - - def put(self, urlpath='', data=None): - return self.request('put', urlpath, data) - - def delete(self, urlpath='', data=None): - return self.request('delete', urlpath, data) - - def request(self, method, urlpath='', data=None): - headers = {'Content-Type': 'application/json'} - url = '/'.join([self.url, urlpath]) - LOG.debug( - "Sending METHOD (%(method)s) URL (%(url)s) JSON (%(data)s)", - {'method': method, 'url': url, 'data': data}) - return self.session.request( - method, url=url, headers=headers, data=data, timeout=self.timeout) - - def sendjson(self, method, urlpath, obj): - """Send json to the OpenDaylight controller.""" - data = jsonutils.dumps(obj, indent=2) if obj else None - try: - return self._check_response( - self.request(method, urlpath, data)) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("REST request ( %(method)s ) to " - "url ( %(urlpath)s ) is failed. 
" - "Request body : [%(body)s] service", - {'method': method, - 'urlpath': urlpath, - 'body': obj}) - - def send_request(self, operation, service_type, object_type, data): - """Wrapper method for sendjson()""" - obj_id = data['id'] - base_path = service_type + '/' + object_type + 's' - if operation == odl_const.ODL_DELETE: - urlpath = base_path + '/' + obj_id - self.try_delete(urlpath) - return - elif operation == odl_const.ODL_CREATE: - urlpath = base_path - method = 'post' - elif operation == odl_const.ODL_UPDATE: - urlpath = base_path + '/' + obj_id - method = 'put' - self.sendjson(method, urlpath, {object_type: data}) - - def try_delete(self, urlpath): - response = self.delete(urlpath) - if response.status_code == requests.codes.not_found: - # The resource is already removed. ignore 404 gracefully - LOG.debug("%(urlpath)s doesn't exist", {'urlpath': urlpath}) - return False - - self._check_response(response) - return True - - def _check_response(self, response): - try: - response.raise_for_status() - except requests.HTTPError as error: - with excutils.save_and_reraise_exception(): - LOG.debug("Exception from ODL: %(e)s %(text)s", - {'e': error, 'text': response.text}, exc_info=1) - else: - LOG.debug("Got response:\n" - "(%(response)s)", {'response': response.text}) - return response - - -class OpenDaylightRestClientGlobal(object): - """ODL Rest client as global variable - - The creation of OpenDaylightRestClient needs to be delayed until - configuration values need to be configured at first. - """ - def __init__(self): - super(OpenDaylightRestClientGlobal, self).__init__() - self._lock = threading.Lock() - self._client = None - - def get_client(self): - with self._lock: - if self._client is None: - self._client = OpenDaylightRestClient.create_client() - return self._client diff --git a/networking_odl/common/config.py b/networking_odl/common/config.py deleted file mode 100644 index 093bc4494..000000000 --- a/networking_odl/common/config.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2014 Red Hat Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg - -from networking_odl._i18n import _ - - -odl_opts = [ - cfg.StrOpt('url', - help=_("HTTP URL of OpenDaylight REST interface.")), - cfg.StrOpt('username', - help=_("HTTP username for authentication.")), - cfg.StrOpt('password', secret=True, - help=_("HTTP password for authentication.")), - cfg.IntOpt('timeout', default=10, - help=_("HTTP timeout in seconds.")), - cfg.IntOpt('session_timeout', default=30, - help=_("Tomcat session timeout in minutes.")), - cfg.IntOpt('sync_timeout', default=10, - help=_("(V2 driver) Sync thread timeout in seconds.")), - cfg.IntOpt('retry_count', default=5, - help=_("(V2 driver) Number of times to retry a row " - "before failing.")), - cfg.IntOpt('maintenance_interval', default=300, - help=_("(V2 driver) Journal maintenance operations interval " - "in seconds.")), - cfg.IntOpt('completed_rows_retention', default=600, - help=_("(V2 driver) Time to keep completed rows in seconds." - "Completed rows retention will be checked every " - "maintenance_interval by the cleanup thread." - "To disable completed rows deletion " - "value should be -1")), - cfg.BoolOpt('enable_lightweight_testing', - default=False, - help=_('Test without real ODL.')), - cfg.StrOpt('port_binding_controller', - default='pseudo-agentdb-binding', - help=_('Name of the controller to be used for port binding.')), - cfg.IntOpt('processing_timeout', default='100', - help=_("(V2 driver) Time in seconds to wait before a " - "processing row is marked back to pending.")), - cfg.StrOpt('odl_hostconf_uri', - help=_("Path for ODL host configuration REST interface"), - default="/restconf/operational/neutron:neutron/hostconfigs"), - cfg.IntOpt('restconf_poll_interval', default=30, - help=_("Poll interval in seconds for getting ODL hostconfig")), - cfg.BoolOpt('enable_websocket_pseudo_agentdb', default=False, - help=_('Enable websocket for pseudo-agent-port-binding.')), - cfg.IntOpt('odl_features_retry_interval', default=5, - help=_("Wait this many seconds before retrying the odl features" - " fetch")), - cfg.ListOpt('odl_features', item_type=str, - help='A list of features supported by ODL') -] - -cfg.CONF.register_opts(odl_opts, "ml2_odl") - - -def list_opts(): - return [('ml2_odl', odl_opts)] diff --git a/networking_odl/common/constants.py b/networking_odl/common/constants.py deleted file mode 100644 index b02400e07..000000000 --- a/networking_odl/common/constants.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
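
The [ml2_odl] options above are plain oslo.config options; a small sketch of how a consumer reads them (the helper function is invented for illustration, the group and option names come from the removed config.py):

    from oslo_config import cfg

    # Importing the group registers the [ml2_odl] options shown above.
    cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')

    def journal_tuning():
        # V2-driver knobs: sync thread wake-up interval and the number of
        # retries before a journal row is marked 'failed'.
        ml2_odl = cfg.CONF.ml2_odl
        return ml2_odl.sync_timeout, ml2_odl.retry_count
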
- -ODL_NETWORK = 'network' -ODL_NETWORKS = 'networks' -ODL_SUBNET = 'subnet' -ODL_SUBNETS = 'subnets' -ODL_PORT = 'port' -ODL_PORTS = 'ports' -ODL_SG = 'security_group' -ODL_SGS = 'security_groups' -ODL_SG_RULE = 'security_group_rule' -ODL_SG_RULES = 'security_group_rules' -ODL_ROUTER = 'router' -ODL_ROUTERS = 'routers' -ODL_FLOATINGIP = 'floatingip' -ODL_FLOATINGIPS = 'floatingips' - -ODL_LOADBALANCER = 'loadbalancer' -ODL_LOADBALANCERS = 'loadbalancers' -ODL_LISTENER = 'listener' -ODL_LISTENERS = 'listeners' -ODL_POOL = 'pool' -ODL_POOLS = 'pools' -ODL_MEMBER = 'member' -ODL_MEMBERS = 'members' -ODL_HEALTHMONITOR = 'healthmonitor' -ODL_HEALTHMONITORS = 'healthmonitors' - -ODL_QOS = 'qos' -ODL_QOS_POLICY = 'policy' -ODL_QOS_POLICIES = 'policies' - -ODL_SFC = 'sfc' -ODL_SFC_FLOW_CLASSIFIER = 'flowclassifier' -ODL_SFC_FLOW_CLASSIFIERS = 'flowclassifiers' -ODL_SFC_PORT_PAIR = 'portpair' -ODL_SFC_PORT_PAIRS = 'portpairs' -ODL_SFC_PORT_PAIR_GROUP = 'portpairgroup' -ODL_SFC_PORT_PAIR_GROUPS = 'portpairgroups' -ODL_SFC_PORT_CHAIN = 'portchain' -ODL_SFC_PORT_CHAINS = 'portchains' - -ODL_TRUNK = 'trunk' -ODL_TRUNKS = 'trunks' - -ODL_L2GATEWAY = 'l2_gateway' -ODL_L2GATEWAYS = 'l2_gateways' -ODL_L2GATEWAY_CONNECTION = 'l2gateway_connection' -ODL_L2GATEWAY_CONNECTIONS = 'l2_gateway_connections' - -ODL_BGPVPN = 'bgpvpn' -ODL_BGPVPNS = 'bgpvpns' -ODL_BGPVPN_NETWORK_ASSOCIATION = 'bgpvpn_network_association' -ODL_BGPVPN_NETWORK_ASSOCIATIONS = 'bgpvpn_network_associations' -ODL_BGPVPN_ROUTER_ASSOCIATION = 'bgpvpn_network_association' -ODL_BGPVPN_ROUTER_ASSOCIATIONS = 'bgpvpn_network_associations' - -ODL_ML2_MECH_DRIVER_V1 = "opendaylight" -ODL_ML2_MECH_DRIVER_V2 = "opendaylight_v2" - -ODL_CREATE = 'create' -ODL_UPDATE = 'update' -ODL_DELETE = 'delete' - -# Constants for journal operation states -PENDING = 'pending' -PROCESSING = 'processing' -FAILED = 'failed' -COMPLETED = 'completed' - -# dict to store url mappings -RESOURCE_URL_MAPPINGS = {ODL_QOS_POLICY: "%s/%s" % (ODL_QOS, ODL_QOS_POLICIES)} diff --git a/networking_odl/common/filters.py b/networking_odl/common/filters.py deleted file mode 100644 index 7d11e7266..000000000 --- a/networking_odl/common/filters.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron_lib import constants as n_const - -from networking_odl.common import constants as odl_const -from networking_odl.common import utils as odl_utils - - -# NOTE(yamahata): As neutron keyston v3 support, tenant_id would be renamed to -# project_id. 
In order to keep compatibility, populate both -# 'project_id' and 'tenant_id' -# for details refer to -# https://specs.openstack.org/openstack/neutron-specs/specs/newton/moving-to-keystone-v3.html -def _populate_project_id_and_tenant_id(resource_dict): - # NOTE(yamahata): l3 plugin passes data as dependency_list as python list - # delete_router, delete_floatingip - if not isinstance(resource_dict, dict): - return - - project_id = resource_dict.get('project_id', - resource_dict.get('tenant_id')) - if project_id is not None: - # NOTE(yamahata): project_id can be ""(empty string) - resource_dict.setdefault('project_id', project_id) - resource_dict.setdefault('tenant_id', project_id) - - -def _filter_unmapped_null(resource_dict, unmapped_keys): - # NOTE(yamahata): bug work around - # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475 - # Null-value for an unmapped element causes next mapped - # collection to contain a null value - # JSON: { "unmappedField": null, "mappedCollection": [ "a" ] } - # - # Java Object: - # class Root { - # Collection mappedCollection = new ArrayList; - # } - # - # Result: - # Field B contains one element; null - # - # TODO(yamahata): update along side with neutron and ODL - # add when neutron adds more extensions - # delete when ODL neutron northbound supports it - # TODO(yamahata): do same thing for other resources - keys_to_del = [key for key in unmapped_keys - if resource_dict.get(key) is None] - if keys_to_del: - odl_utils.try_del(resource_dict, keys_to_del) - - -_NETWORK_UNMAPPED_KEYS = ['qos_policy_id'] -_SUBNET_UNMAPPED_KEYS = ['segment_id', 'subnetpool_id'] -_PORT_UNMAPPED_KEYS = ['binding:profile', 'dns_name', - 'port_security_enabled', 'qos_policy_id'] - - -def _filter_network_create(network): - odl_utils.try_del(network, ['status', 'subnets']) - _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS) - - -def _filter_network_update(network): - odl_utils.try_del(network, ['id', 'status', 'subnets', - 'tenant_id', 'project_id']) - _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS) - - -def _filter_subnet_create(subnet): - _filter_unmapped_null(subnet, _SUBNET_UNMAPPED_KEYS) - - -def _filter_subnet_update(subnet): - odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr', - 'allocation_pools', 'tenant_id', 'project_id']) - _filter_unmapped_null(subnet, _SUBNET_UNMAPPED_KEYS) - - -def _filter_port_create(port): - """Filter out port attributes not required for a create.""" - odl_utils.try_del(port, ['status']) - _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS) - - -def _filter_port_update(port): - """Filter out port attributes for an update operation.""" - odl_utils.try_del(port, ['network_id', 'id', 'status', 'tenant_id', - 'project_id']) - _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS) - - -def _filter_router_update(router): - """Filter out attributes for an update operation.""" - odl_utils.try_del(router, ['id', 'tenant_id', 'project_id', 'status']) - - -# neutron has multiple ICMPv6 names -# https://bugs.launchpad.net/tempest/+bug/1671366 -# REVISIT(yamahata): once neutron upstream is fixed to store unified form, -# this can be removed. -_ICMPv6_NAMES = ( - n_const.PROTO_NAME_ICMP, - n_const.PROTO_NAME_IPV6_ICMP, - n_const.PROTO_NAME_IPV6_ICMP_LEGACY, -) - - -def _sgrule_scrub_icmpv6_name(sgrule): - if (sgrule.get('ethertype') == n_const.IPv6 and - sgrule.get('protocol') in _ICMPv6_NAMES): - sgrule['protocol'] = n_const.PROTO_NAME_IPV6_ICMP_LEGACY - - -# ODL boron neturon northbound knows the following protocol names. 
-# It's safe to pass those names -_ODL_KNOWN_PROTOCOL_NAMES = ( - n_const.PROTO_NAME_TCP, - n_const.PROTO_NAME_UDP, - n_const.PROTO_NAME_ICMP, - n_const.PROTO_NAME_IPV6_ICMP_LEGACY, -) - - -def _sgrule_scrub_unknown_protocol_name(protocol): - """Convert unknown protocol name to actual interger. - - OpenDaylight does't want to keep catching up list of protocol names. - So networking-odl converts unknown protcol name into integer - """ - if protocol in _ODL_KNOWN_PROTOCOL_NAMES: - return protocol - if protocol in n_const.IP_PROTOCOL_MAP: - return n_const.IP_PROTOCOL_MAP[protocol] - return protocol - - -# TODO(yamahata): used by mech_driver. -# make this private when v1 mech_driver is removed -def filter_security_group_rule(sg_rule): - _sgrule_scrub_icmpv6_name(sg_rule) - if sg_rule.get('protocol'): - sg_rule['protocol'] = _sgrule_scrub_unknown_protocol_name( - sg_rule['protocol']) - - -_FILTER_MAP = { - (odl_const.ODL_NETWORK, odl_const.ODL_CREATE): _filter_network_create, - (odl_const.ODL_NETWORK, odl_const.ODL_UPDATE): _filter_network_update, - (odl_const.ODL_SUBNET, odl_const.ODL_CREATE): _filter_subnet_create, - (odl_const.ODL_SUBNET, odl_const.ODL_UPDATE): _filter_subnet_update, - (odl_const.ODL_PORT, odl_const.ODL_CREATE): _filter_port_create, - (odl_const.ODL_PORT, odl_const.ODL_UPDATE): _filter_port_update, - (odl_const.ODL_ROUTER, odl_const.ODL_UPDATE): _filter_router_update, - (odl_const.ODL_SG_RULE, odl_const.ODL_CREATE): filter_security_group_rule, - (odl_const.ODL_SG_RULE, odl_const.ODL_UPDATE): filter_security_group_rule, -} - - -def filter_for_odl(object_type, operation, data): - """Filter out the attributed before sending the data to ODL""" - filter_key = (object_type, operation) - if filter_key in _FILTER_MAP: - _FILTER_MAP[filter_key](data) - _populate_project_id_and_tenant_id(data) diff --git a/networking_odl/common/lightweight_testing.py b/networking_odl/common/lightweight_testing.py deleted file mode 100644 index 1c0f5d4a1..000000000 --- a/networking_odl/common/lightweight_testing.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (c) 2015 Intel Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
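
To make the filter dispatch above concrete, a short sketch using the removed filters module; the sample port dict is made up:

    from networking_odl.common import constants as odl_const
    from networking_odl.common import filters

    port = {'id': 'p1', 'status': 'DOWN', 'tenant_id': 't1'}
    # _filter_port_create() drops 'status', and the project/tenant helper
    # mirrors 'tenant_id' into 'project_id' before the dict goes to ODL.
    filters.filter_for_odl(odl_const.ODL_PORT, odl_const.ODL_CREATE, port)
    # port is now {'id': 'p1', 'tenant_id': 't1', 'project_id': 't1'}
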
- -from copy import deepcopy - -import requests -import six - -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from networking_odl._i18n import _ -from networking_odl.common import client -from networking_odl.common import constants as odl_const - - -LOG = logging.getLogger(__name__) - -OK = requests.codes.ok -NO_CONTENT = requests.codes.no_content -NOT_ALLOWED = requests.codes.not_allowed -NOT_FOUND = requests.codes.not_found -BAD_REQUEST = requests.codes.bad_request - - -class OpenDaylightLwtClient(client.OpenDaylightRestClient): - """Lightweight testing client""" - - lwt_dict = {odl_const.ODL_NETWORKS: {}, - odl_const.ODL_SUBNETS: {}, - odl_const.ODL_PORTS: {}, - odl_const.ODL_SGS: {}, - odl_const.ODL_SG_RULES: {}, - odl_const.ODL_LOADBALANCERS: {}, - odl_const.ODL_LISTENERS: {}, - odl_const.ODL_POOLS: {}, - odl_const.ODL_MEMBERS: {}, - odl_const.ODL_HEALTHMONITORS: {}} - - @classmethod - def _make_response(cls, status_code=OK, content=None): - """Only supports 'content-type': 'application/json'""" - response = requests.models.Response() - response.status_code = status_code - if content: - response.raw = six.BytesIO( - jsonutils.dumps(content).encode('utf-8')) - - return response - - @classmethod - def _get_resource_id(cls, urlpath): - # resouce ID is the last element of urlpath - return str(urlpath).rsplit('/', 1)[-1] - - @classmethod - def post(cls, resource_type, resource_dict, urlpath, resource_list): - """No ID in URL, elements in resource_list must have ID""" - - if resource_list is None: - raise ValueError(_("resource_list can not be None")) - - for resource in resource_list: - if resource['id'] in resource_dict: - LOG.debug("%s %s already exists", resource_type, - resource['id']) - response = cls._make_response(NOT_ALLOWED) - raise requests.exceptions.HTTPError(response=response) - - resource_dict[resource['id']] = deepcopy(resource) - - return cls._make_response(NO_CONTENT) - - @classmethod - def put(cls, resource_type, resource_dict, urlpath, resource_list): - - resource_id = cls._get_resource_id(urlpath) - - if resource_list is None: - raise ValueError(_("resource_list can not be None")) - - if resource_id and len(resource_list) != 1: - LOG.debug("Updating %s with multiple resources", urlpath) - response = cls._make_response(BAD_REQUEST) - raise requests.exceptions.HTTPError(response=response) - - for resource in resource_list: - res_id = resource_id or resource['id'] - if res_id in resource_dict: - resource_dict[res_id].update(deepcopy(resource)) - else: - LOG.debug("%s %s does not exist", resource_type, res_id) - response = cls._make_response(NOT_FOUND) - raise requests.exceptions.HTTPError(response=response) - - return cls._make_response(NO_CONTENT) - - @classmethod - def delete(cls, resource_type, resource_dict, urlpath, resource_list): - - if resource_list is None: - resource_id = cls._get_resource_id(urlpath) - id_list = [resource_id] - else: - id_list = [res['id'] for res in resource_list] - - for res_id in id_list: - removed = resource_dict.pop(res_id, None) - if removed is None: - LOG.debug("%s %s does not exist", resource_type, res_id) - response = cls._make_response(NOT_FOUND) - raise requests.exceptions.HTTPError(response=response) - - return cls._make_response(NO_CONTENT) - - @classmethod - def get(cls, resource_type, resource_dict, urlpath, resource_list=None): - - resource_id = cls._get_resource_id(urlpath) - - if resource_id: - resource = resource_dict.get(resource_id) - if resource is None: - LOG.debug("%s %s does not exist", 
resource_type, resource_id) - response = cls._make_response(NOT_FOUND) - raise requests.exceptions.HTTPError(response=response) - else: - # When getting single resource, return value is a dict - r_list = {resource_type[:-1]: deepcopy(resource)} - return cls._make_response(OK, r_list) - - r_list = [{resource_type[:-1]: deepcopy(res)} - for res in resource_dict.values()] - - return cls._make_response(OK, r_list) - - def sendjson(self, method, urlpath, obj=None): - """Lightweight testing without ODL""" - - if '/' not in urlpath: - urlpath += '/' - - resource_type = str(urlpath).split('/', 1)[0] - resource_type = resource_type.replace('-', '_') - - resource_dict = self.lwt_dict.get(resource_type) - - if resource_dict is None: - LOG.debug("Resource type %s is not supported", resource_type) - response = self._make_response(NOT_FOUND) - raise requests.exceptions.HTTPError(response=response) - - func = getattr(self, str(method).lower()) - - resource_list = None - if obj: - # If obj is not None, it can only have one entry - assert len(obj) == 1, "Obj can only have one entry" - - key, resource_list = list(obj.items())[0] - - if not isinstance(resource_list, list): - # Need to transform resource_list to a real list, i.e. [res] - resource_list = [resource_list] - - return func(resource_type, resource_dict, urlpath, resource_list) diff --git a/networking_odl/common/odl_features.py b/networking_odl/common/odl_features.py deleted file mode 100644 index cafe8cc0a..000000000 --- a/networking_odl/common/odl_features.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools -import time - -from oslo_config import cfg -from oslo_log import log -from requests import exceptions - -from networking_odl.common import client as odl_client -from networking_odl.common import utils - - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') -LOG = log.getLogger(__name__) - -OPERATIONAL_PORT_STATUS = 'operational-port-status' - -feature_set = set() - - -def init(): - '''initialize odl_features. - - Initialize odl_features. Try first from configuration and then try pulling - via rest call from ODL. 
- ''' - - global feature_set - feature_set = None - - if cfg.CONF.ml2_odl.odl_features is not None: - feature_set = set(cfg.CONF.ml2_odl.odl_features) - return - - wait_interval = cfg.CONF.ml2_odl.odl_features_retry_interval - - for times_tried in itertools.count(): - feature_set = _fetch_features() - if feature_set is not None: - break - LOG.warning('Failed to retrieve ODL features, attempt %i', times_tried) - time.sleep(wait_interval) - - -def has(feature): - return feature in feature_set - - -def deinit(): - '''Set odl_features back to it's pre-initlialized ''' - global feature_set - feature_set = set() - - -def _load_features(json): - """parse and save features from json""" - features = json['features'] - if 'feature' not in features: - return - - LOG.info('Retrieved ODL features %s', features) - response = set() - for feature in features['feature']: - response.add(feature['service-provider-feature'].split(':')[1]) - return response - - -def _fetch_features(): - '''Fetch the list of features declared by ODL. - - This function should be called once during initialization - ''' - - path = 'restconf/operational/neutron:neutron/neutron:features' - features_url = utils.get_odl_url(path) - - client = odl_client.OpenDaylightRestClient.create_client(features_url) - try: - response = client.request('get') - except exceptions.ConnectionError: - LOG.error("Error connecting to ODL to retrieve features", - exc_info=True) - return None - - if response.status_code == 400: - LOG.debug('ODL does not support feature negotiation') - return set() - - if response.status_code == 404: - LOG.debug('No features configured') - return set() - - if response.status_code != 200: - LOG.warning('error fetching features: %i', - response.status_code) - return None - - return _load_features(response.json()) diff --git a/networking_odl/common/postcommit.py b/networking_odl/common/postcommit.py deleted file mode 100644 index 974c9f7f0..000000000 --- a/networking_odl/common/postcommit.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import types - -from oslo_log import helpers as log_helpers -import six - - -def _build_func(client_method): - @log_helpers.log_method_call - def f(self, *args, **kwargs): - self.journal.set_sync_event() - - f.__name__ = client_method - return f - - -def _unboundmethod(func, cls): - if six.PY3: - # python 3.x doesn't have unbound methods - func.__qualname__ = cls.__qualname__ + '.' 
+ func.__name__ # PEP 3155 - return func - - # python 2.x - return types.MethodType(func, None, cls) - - -def _get_method_name(op, resource): - return op + '_' + resource + '_postcommit' - - -def _build_method(cls, resource): - # add methods like the following: - # - # @log_helpers.log_method_call - # def __postcommit(self, *args, **kwargs): - # self.journal.set_sync_event() - - operations = ['create', 'update', 'delete'] - for op in operations: - client_method = _get_method_name(op, resource) - if hasattr(cls, client_method) and client_method not in cls.__dict__: - f = _build_func(client_method) - unbound = _unboundmethod(f, cls) - setattr(cls, client_method, unbound) - - -def _build_methods(cls, *resources): - for resource in resources: - _build_method(cls, resource) - - -def add_postcommit(*args): - def postcommit(cls): - _build_methods(cls, *args) - return cls - - return postcommit diff --git a/networking_odl/common/utils.py b/networking_odl/common/utils.py deleted file mode 100644 index fa9d56e5a..000000000 --- a/networking_odl/common/utils.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2014 Red Hat Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import six.moves.urllib.parse as urlparse - -from networking_odl.common import constants as odl_const - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') - - -def try_del(d, keys): - """Ignore key errors when deleting from a dictionary.""" - for key in keys: - try: - del d[key] - except KeyError: - pass - - -def make_url_object(object_type): - obj_pl = odl_const.RESOURCE_URL_MAPPINGS.get(object_type, None) - if obj_pl is None: - obj_pl = neutronify(object_type + 's') - return obj_pl - - -# TODO(manjeets) consolidate this method with make_url_object -def neutronify(name): - """Adjust the resource name for use with Neutron's API""" - return name.replace('_', '-') - - -def get_odl_url(path=''): - '''Make a URL for some ODL resource (path)''' - purl = urlparse.urlsplit(cfg.CONF.ml2_odl.url) - features_url = urlparse.urlunparse(( - purl.scheme, purl.netloc, path, '', '', '')) - return features_url diff --git a/networking_odl/common/websocket_client.py b/networking_odl/common/websocket_client.py deleted file mode 100644 index 2cce78312..000000000 --- a/networking_odl/common/websocket_client.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
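
To illustrate the decorator above, a hedged sketch; BaseDriver and the journal attribute are invented for illustration, only add_postcommit comes from the removed module:

    from networking_odl.common import postcommit

    class BaseDriver(object):
        # Stand-in for a driver base class that already declares the hook
        # (for example neutron's MechanismDriver).
        def create_port_postcommit(self, context):
            pass

    @postcommit.add_postcommit('port')
    class ExampleDriver(BaseDriver):
        # add_postcommit replaces inherited *_postcommit hooks with thin
        # wrappers that only call self.journal.set_sync_event().
        def __init__(self, journal):
            self.journal = journal
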
- -import re -import threading -import time - -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import excutils -from requests import codes -from requests import exceptions -import websocket - -from networking_odl._i18n import _ -from networking_odl.common import client as odl_client - - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') -LOG = log.getLogger(__name__) - -ODL_OPERATIONAL_DATASTORE = "OPERATIONAL" -ODL_CONFIGURATION_DATASTORE = "CONFIGURATION" -ODL_NOTIFICATION_SCOPE_BASE = "BASE" -ODL_NOTIFICATION_SCOPE_ONE = "ONE" -ODL_NOTIFICATION_SCOPE_SUBTREE = "SUBTREE" - -ODL_WEBSOCKET_DISCONNECTED = "ODL_WEBSOCKET_DISCONNECTED" -ODL_WEBSOCKET_CONNECTING = "ODL_WEBSOCKET_CONNECTING" -ODL_WEBSOCKET_CONNECTED = "ODL_WEBSOCKET_CONNECTED" - - -class OpenDaylightWebsocketClient(object): - """Thread for the OpenDaylight Websocket """ - - def __init__(self, odl_rest_client, path, datastore, scope, leaf_node_only, - packet_handler, timeout, status_cb=None): - self.odl_rest_client = odl_rest_client - self.path = path - self.datastore = datastore - self.scope = scope - self.leaf_node_only = leaf_node_only - self.packet_handler = packet_handler - self.timeout = timeout - self.exit_websocket_thread = False - self.status_cb = status_cb - self.current_status = ODL_WEBSOCKET_DISCONNECTED - self._odl_sync_thread = self.start_odl_websocket_thread() - - @classmethod - def odl_create_websocket(cls, odl_url, path, datastore, scope, - packet_handler, status_cb=None, - leaf_node_only=False): - """Create a websocket connection with ODL. - - This method will create a websocket client based on path, - datastore and scope params. On data recv from websocket - packet_handler callback is called. status_cb callback can be - provided if notifications are requried for socket status - changes - """ - - if odl_url is None: - LOG.error("invalid odl url", exc_info=True) - raise ValueError(_("Invalid ODL URL")) - - odl_rest_client = odl_client.OpenDaylightRestClient.create_client( - odl_url) - return cls( - odl_rest_client, path, datastore, scope, leaf_node_only, - packet_handler, cfg.CONF.ml2_odl.timeout, status_cb - ) - - def start_odl_websocket_thread(self): - # Start the websocket thread - LOG.debug("starting a new websocket thread") - odl_websocket_thread = threading.Thread( - name='websocket', - target=self.run_websocket_thread) - odl_websocket_thread.start() - return odl_websocket_thread - - def set_exit_flag(self, value=True): - # set flag to exit - self.exit_websocket_thread = value - - def run_websocket_thread(self, exit_after_run=False): - # TBD connections are persistent so there is really no way to know - # when it is a "first connection". 
We need to wait for the - # dis/reconnect logic to be able to know this - first_connection = True - ws = None - while not self.exit_websocket_thread: - if exit_after_run: - # Permanently waiting thread model breaks unit tests - # Adding this arg to exit after one run for unit tests - self.set_exit_flag() - # connect if necessary - if ws is None: - try: - ws = self._connect_ws() - except ValueError: - LOG.error("websocket irrecoverable error ") - return - if ws is None: - time.sleep(cfg.CONF.ml2_odl.restconf_poll_interval) - continue - # read off the websocket - try: - data = ws.recv() - if not data: - LOG.warning("websocket received 0 bytes") - continue - except websocket.WebSocketTimeoutException: - continue - except websocket.WebSocketConnectionClosedException: - # per websocket-client, "If remote host closed the connection - # or some network error happened" - LOG.warning("websocket connection closed or IO error", - exc_info=True) - self._close_ws(ws) - ws = None - continue - except Exception: - # Connection closed trigger reconnection - LOG.error("websocket unexpected exception, " - "closing and restarting...", exc_info=True) - # TODO(rsood): Websocket reconnect can cause race conditions - self._close_ws(ws) - ws = None - continue - - # Call handler for data received - try: - self.packet_handler(data, first_connection) - first_connection = False - except Exception: - LOG.error("Error in packet_handler callback", - exc_info=True) - - self._close_ws(ws) - - def _set_websocket_status(self, status): - try: - if self.status_cb: - self.status_cb(status) - except Exception: - LOG.error("Error in status_cb", exc_info=True) - - def _subscribe_websocket(self): - """ODL Websocket change notification subscription""" - # Check ODL URL for details on this process - # https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Restconf:Change_event_notification_subscription#rpc_create-data-change-event-subscription # noqa: E501 # pylint: disable=line-too-long - - # Invoke rpc create-data-change-event-subscription - ws_create_dce_subs_url = ("restconf/operations/sal-remote:" - "create-data-change-event-subscription") - odl_subscription_data = {'input': { - 'path': self.path, - 'sal-remote-augment:datastore': self.datastore, - 'sal-remote-augment:scope': self.scope, - 'sal-remote-augment:notification-output-type': 'JSON' - }} - try: - response = self.odl_rest_client.sendjson('post', - ws_create_dce_subs_url, - odl_subscription_data) - response.raise_for_status() - except exceptions.ConnectionError: - LOG.error("cannot connect to the opendaylight controller") - return None - except exceptions.HTTPError as e: - # restconf returns 400 on operation when path is not available - if e.response.status_code == codes.bad_request: - LOG.debug("response code bad_request (400)" - "check path for websocket connection") - raise ValueError(_("bad_request (http400),check path.")) - else: - LOG.warning("websocket connection failed", - exc_info=True) - return None - except Exception: - LOG.error("websocket subscription failed", exc_info=True) - return None - - # Subscribing to stream. 
Returns websocket URL to listen to - ws_dce_subs_url = """restconf/streams/stream/""" - try: - stream_name = response.json() - stream_name = stream_name['output']['stream-name'] - url = ws_dce_subs_url + stream_name - if self.leaf_node_only: - url += "?odl-leaf-nodes-only=true" - response = self.odl_rest_client.get(url) - response.raise_for_status() - stream_url = response.headers['location'] - LOG.debug("websocket stream URL: %s", stream_url) - return stream_url - except exceptions.ConnectionError: - LOG.error("cannot connect to the opendaylight controller") - return None - except exceptions.HTTPError as e: - # restconf returns 404 on operation when there is no entry - if e.response.status_code == codes.not_found: - LOG.debug("response code not_found (404)" - "unable to websocket connection url") - raise ValueError(_("bad_request (http400),check path")) - else: - LOG.warning("websocket connection failed") - return None - except ValueError: - with excutils.save_and_reraise_exception(): - LOG.error("websocket subscribe got invalid stream name") - except KeyError: - LOG.error("websocket subscribe got bad stream data") - raise ValueError(_("websocket subscribe bad stream data")) - except Exception: - LOG.error("websocket subscription failed", exc_info=True) - return None - - def _socket_create_connection(self, stream_url): - ws = None - try: - ws = websocket.create_connection(stream_url, - timeout=self.timeout) - except ValueError: - with excutils.save_and_reraise_exception(): - LOG.error("websocket create connection invalid URL") - except websocket.WebSocketBadStatusException: - LOG.error("webSocket bad status exception", exc_info=True) - return None - except Exception: - LOG.exception("websocket create connection failed", - exc_info=True) - return None - if ws is None or not ws.connected: - LOG.error("websocket create connection unsuccessful") - return None - - LOG.debug("websocket connection established") - return ws - - def _connect_ws(self): - self._set_websocket_status(ODL_WEBSOCKET_CONNECTING) - stream_url = self._subscribe_websocket() - if stream_url is None: - return None - # Delay here causes websocket notification lose (ODL Bug 8299) - ws = self._socket_create_connection(stream_url) - if ws is not None: - self._set_websocket_status(ODL_WEBSOCKET_CONNECTED) - return ws - - def _close_ws(self, ws): - LOG.debug("closing websocket") - try: - if ws is not None: - ws.close() - except Exception: - LOG.error("Error while closing websocket", exc_info=True) - self._set_websocket_status(ODL_WEBSOCKET_DISCONNECTED) - - -class EventDataParser(object): - """Helper class to parse websocket notification data""" - - NOTIFICATION_TAG = 'notification' - DC_NOTIFICATION_TAG = 'data-changed-notification' - DC_EVENT_TAG = 'data-change-event' - OPERATION_DELETE = 'deleted' - OPERATION_CREATE = 'created' - OPERATION_UPDATE = 'updated' - - def __init__(self, item): - self.item = item - - @classmethod - def get_item(cls, payload): - try: - data = jsonutils.loads(payload) - except ValueError: - LOG.warning("invalid websocket notification") - return - try: - dn_events = (data[cls.NOTIFICATION_TAG] - [cls.DC_NOTIFICATION_TAG] - [cls.DC_EVENT_TAG]) - - if not isinstance(dn_events, list): - dn_events = [dn_events] - - for e in dn_events: - yield cls(e) - except KeyError: - LOG.warning("invalid JSON for websocket notification") - - def get_fields(self): - return (self.get_operation(), - self.get_path(), - self.get_data()) - - def get_path(self): - return self.item.get('path') - - def get_data(self): - return 
self.item.get('data') - - def get_operation(self): - return self.item.get('operation') - - @staticmethod - def extract_field(text, key): - pattern = r'\[' + key + r'=(.*?)\]' - match = re.search(pattern, text) - if match: - return match.group(1) - - return None diff --git a/networking_odl/db/__init__.py b/networking_odl/db/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/db/db.py b/networking_odl/db/db.py deleted file mode 100644 index ab0e61ff1..000000000 --- a/networking_odl/db/db.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime - -from sqlalchemy import asc -from sqlalchemy import func -from sqlalchemy import or_ -from sqlalchemy.orm import aliased - -from networking_odl.common import constants as odl_const -from networking_odl.db import models - -from neutron.db import api as db_api - -from oslo_db import api as oslo_db_api -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) - - -def get_pending_or_processing_ops(session, object_uuid, operation=None): - q = session.query(models.OpenDaylightJournal).filter( - or_(models.OpenDaylightJournal.state == odl_const.PENDING, - models.OpenDaylightJournal.state == odl_const.PROCESSING), - models.OpenDaylightJournal.object_uuid == object_uuid) - - if operation: - if isinstance(operation, (list, tuple)): - q = q.filter(models.OpenDaylightJournal.operation.in_(operation)) - else: - q = q.filter(models.OpenDaylightJournal.operation == operation) - - return q.all() - - -def get_pending_delete_ops_with_parent(session, object_type, parent_id): - rows = session.query(models.OpenDaylightJournal).filter( - or_(models.OpenDaylightJournal.state == odl_const.PENDING, - models.OpenDaylightJournal.state == odl_const.PROCESSING), - models.OpenDaylightJournal.object_type == object_type, - models.OpenDaylightJournal.operation == odl_const.ODL_DELETE - ).all() - - return (row for row in rows if parent_id in row.data) - - -def get_all_db_rows(session): - return session.query(models.OpenDaylightJournal).all() - - -def get_all_db_rows_by_state(session, state): - return session.query(models.OpenDaylightJournal).filter_by( - state=state).all() - - -# Retry deadlock exception for Galera DB. -# If two (or more) different threads call this method at the same time, they -# might both succeed in changing the same row to pending, but at least one -# of them will get a deadlock from Galera and will have to retry the operation. 
-@db_api.retry_db_errors -def get_oldest_pending_db_row_with_lock(session): - with session.begin(): - journal_dep = aliased(models.OpenDaylightJournal) - dep_query = session.query(journal_dep).filter( - models.OpenDaylightJournal.seqnum == journal_dep.seqnum - ).outerjoin( - journal_dep.depending_on, aliased=True).filter( - or_(models.OpenDaylightJournal.state == odl_const.PENDING, - models.OpenDaylightJournal.state == odl_const.PROCESSING)) - row = session.query(models.OpenDaylightJournal).filter( - models.OpenDaylightJournal.state == odl_const.PENDING, - ~ dep_query.exists() - ).order_by( - asc(models.OpenDaylightJournal.last_retried)).first() - if row: - update_db_row_state(session, row, odl_const.PROCESSING) - - return row - - -def delete_dependency(session, entry): - """Delete dependency upon the given ID""" - conn = session.connection() - stmt = models.journal_dependencies.delete( - models.journal_dependencies.c.depends_on == entry.seqnum) - conn.execute(stmt) - session.expire_all() - - -@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES) -def update_db_row_state(session, row, state): - row.state = state - session.merge(row) - session.flush() - - -def update_pending_db_row_retry(session, row, retry_count): - if row.retry_count >= retry_count: - update_db_row_state(session, row, odl_const.FAILED) - else: - row.retry_count += 1 - update_db_row_state(session, row, odl_const.PENDING) - - -# This function is currently not used. -# Deleted resources are marked as 'deleted' in the database. -@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES) -def delete_row(session, row=None, row_id=None): - if row_id: - row = session.query(models.OpenDaylightJournal).filter_by( - id=row_id).one() - if row: - session.delete(row) - session.flush() - - -@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES) -def create_pending_row(session, object_type, object_uuid, - operation, data, depending_on=None): - if depending_on is None: - depending_on = [] - row = models.OpenDaylightJournal(object_type=object_type, - object_uuid=object_uuid, - operation=operation, data=data, - created_at=func.now(), - state=odl_const.PENDING, - depending_on=depending_on) - session.add(row) - # Keep session flush for unit tests. NOOP for L2/L3 events since calls are - # made inside database session transaction with subtransactions=True. 
- session.flush() - - -@db_api.retry_db_errors -def delete_pending_rows(session, operations_to_delete): - with session.begin(): - session.query(models.OpenDaylightJournal).filter( - models.OpenDaylightJournal.operation.in_(operations_to_delete), - models.OpenDaylightJournal.state == odl_const.PENDING).delete( - synchronize_session=False) - session.expire_all() - - -@db_api.retry_db_errors -def _update_periodic_task_state(session, expected_state, state, task): - with session.begin(): - row = session.query(models.OpenDaylightPeriodicTask).filter_by( - state=expected_state, - task=task).with_for_update().one_or_none() - - if row is None: - return False - - row.state = state - return True - - -def was_periodic_task_executed_recently(session, task, interval): - now = session.execute(func.now()).scalar() - delta = datetime.timedelta(seconds=interval) - row = session.query(models.OpenDaylightPeriodicTask).filter( - models.OpenDaylightPeriodicTask.task == task, - (now - delta >= (models.OpenDaylightPeriodicTask.lock_updated)) - ).one_or_none() - - return bool(row is None) - - -def lock_periodic_task(session, task): - return _update_periodic_task_state(session, odl_const.PENDING, - odl_const.PROCESSING, task) - - -def unlock_periodic_task(session, task): - return _update_periodic_task_state(session, odl_const.PROCESSING, - odl_const.PENDING, task) - - -def update_periodic_task(session, task, operation=None): - """Update the current periodic task details. - - The function assumes the lock is held, so it mustn't be run outside of a - locked context. - """ - op_text = None - if operation: - op_text = operation.__name__ - - with session.begin(): - row = session.query(models.OpenDaylightPeriodicTask).filter_by( - task=task).one() - row.processing_operation = op_text - - -def delete_rows_by_state_and_time(session, state, time_delta): - with session.begin(): - now = session.execute(func.now()).scalar() - session.query(models.OpenDaylightJournal).filter( - models.OpenDaylightJournal.state == state, - models.OpenDaylightJournal.last_retried < now - time_delta).delete( - synchronize_session=False) - session.expire_all() - - -def reset_processing_rows(session, max_timedelta): - with session.begin(): - now = session.execute(func.now()).scalar() - max_timedelta = datetime.timedelta(seconds=max_timedelta) - rows = session.query(models.OpenDaylightJournal).filter( - models.OpenDaylightJournal.last_retried < now - max_timedelta, - models.OpenDaylightJournal.state == odl_const.PROCESSING, - ).update({'state': odl_const.PENDING}) - - return rows diff --git a/networking_odl/db/head.py b/networking_odl/db/head.py deleted file mode 100644 index ef9f3b9a0..000000000 --- a/networking_odl/db/head.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2016 Intel Corporation. -# Copyright 2016 Isaku Yamahata -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
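
A rough sketch of the journal row life cycle these helpers implement; session acquisition and the ODL call are simplified placeholders, only the db/constants names come from the removed code:

    from networking_odl.common import constants as odl_const
    from networking_odl.db import db

    def process_one_row(session, send_to_odl, retry_count=5):
        # The oldest unblocked PENDING row is atomically moved to PROCESSING.
        row = db.get_oldest_pending_db_row_with_lock(session)
        if row is None:
            return
        try:
            send_to_odl(row)  # placeholder for the REST call to ODL
        except Exception:
            # Back to PENDING, or FAILED once retries are exhausted.
            db.update_pending_db_row_retry(session, row, retry_count)
        else:
            db.update_db_row_state(session, row, odl_const.COMPLETED)
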
- -from networking_odl.db import models # noqa - -from neutron.db.migration.models import head - - -def get_metadata(): - return head.model_base.BASEV2.metadata diff --git a/networking_odl/db/migration/__init__.py b/networking_odl/db/migration/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/db/migration/alembic_migrations/README b/networking_odl/db/migration/alembic_migrations/README deleted file mode 100644 index 5d89e570d..000000000 --- a/networking_odl/db/migration/alembic_migrations/README +++ /dev/null @@ -1 +0,0 @@ -This directory contains the migration scripts for the networking_odl project. diff --git a/networking_odl/db/migration/alembic_migrations/__init__.py b/networking_odl/db/migration/alembic_migrations/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/db/migration/alembic_migrations/env.py b/networking_odl/db/migration/alembic_migrations/env.py deleted file mode 100644 index 370c5e88c..000000000 --- a/networking_odl/db/migration/alembic_migrations/env.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from logging import config as logging_config - -from alembic import context -from neutron_lib.db import model_base -from oslo_config import cfg -from oslo_db.sqlalchemy import session -import sqlalchemy as sa -from sqlalchemy import event - -from neutron.db.migration.alembic_migrations import external -from neutron.db.migration.models import head # noqa - -MYSQL_ENGINE = None -ODL_VERSION_TABLE = 'odl_alembic_version' -config = context.config -neutron_config = config.neutron_config -logging_config.fileConfig(config.config_file_name) -target_metadata = model_base.BASEV2.metadata - - -def set_mysql_engine(): - try: - mysql_engine = neutron_config.command.mysql_engine - except cfg.NoSuchOptError: - mysql_engine = None - - global MYSQL_ENGINE - MYSQL_ENGINE = (mysql_engine or - model_base.BASEV2.__table_args__['mysql_engine']) - - -def include_object(object, name, type_, reflected, compare_to): - if type_ == 'table' and name in external.TABLES: - return False - - return True - - -def run_migrations_offline(): - set_mysql_engine() - - kwargs = dict() - if neutron_config.database.connection: - kwargs['url'] = neutron_config.database.connection - else: - kwargs['dialect_name'] = neutron_config.database.engine - kwargs['include_object'] = include_object - kwargs['version_table'] = ODL_VERSION_TABLE - context.configure(**kwargs) - - with context.begin_transaction(): - context.run_migrations() - - -@event.listens_for(sa.Table, 'after_parent_attach') -def set_storage_engine(target, parent): - if MYSQL_ENGINE: - target.kwargs['mysql_engine'] = MYSQL_ENGINE - - -def run_migrations_online(): - set_mysql_engine() - engine = session.create_engine(neutron_config.database.connection) - - connection = engine.connect() - context.configure( - connection=connection, - target_metadata=target_metadata, - include_object=include_object, - 
version_table=ODL_VERSION_TABLE - ) - - try: - with context.begin_transaction(): - context.run_migrations() - finally: - connection.close() - engine.dispose() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/networking_odl/db/migration/alembic_migrations/script.py.mako b/networking_odl/db/migration/alembic_migrations/script.py.mako deleted file mode 100644 index 4f9605736..000000000 --- a/networking_odl/db/migration/alembic_migrations/script.py.mako +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -% if branch_labels: -branch_labels = ${repr(branch_labels)} -%endif - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -def upgrade(): - ${upgrades if upgrades else "pass"} diff --git a/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD b/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD deleted file mode 100644 index 5713e5bcb..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD +++ /dev/null @@ -1 +0,0 @@ -eccd865b7d3a diff --git a/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD b/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD deleted file mode 100644 index 892797a84..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD +++ /dev/null @@ -1 +0,0 @@ -6f7dfb241354 diff --git a/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py b/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py deleted file mode 100644 index d80815d2c..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Initial odl db, branchpoint - -Revision ID: b89a299e19f9 -Revises: None -Create Date: 2015-09-03 22:22:22.222222 - -""" - -# revision identifiers, used by Alembic. 
-revision = 'b89a299e19f9' -down_revision = None - - -def upgrade(): - pass diff --git a/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py b/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py deleted file mode 100644 index 43959c0a4..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Start of odl contract branch - -Revision ID: 383acb0d38a0 -Revises: b89a299e19f9 -Create Date: 2015-09-03 22:27:49.306394 - -""" - -from neutron.db import migration -from neutron.db.migration import cli - - -# revision identifiers, used by Alembic. -revision = '383acb0d38a0' -down_revision = 'b89a299e19f9' -branch_labels = (cli.CONTRACT_BRANCH,) - -# milestone identifier, used by neutron-db-manage -neutron_milestone = [migration.MITAKA] - - -def upgrade(): - pass diff --git a/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py b/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py deleted file mode 100644 index 71d24b3ea..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Start of odl expand branch - -Revision ID: 247501328046 -Revises: b89a299e19f9 -Create Date: 2015-09-03 22:27:49.292238 - -""" - -from neutron.db.migration import cli - - -# revision identifiers, used by Alembic. -revision = '247501328046' -down_revision = 'b89a299e19f9' -branch_labels = (cli.EXPAND_BRANCH,) - - -def upgrade(): - pass diff --git a/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py b/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py deleted file mode 100644 index ccde47271..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""OpenDaylight Neutron mechanism driver refactor - -Revision ID: 37e242787ae5 -Revises: 247501328046 -Create Date: 2015-10-30 22:09:27.221767 - -""" -from neutron.db import migration - - -# revision identifiers, used by Alembic. -revision = '37e242787ae5' -down_revision = '247501328046' - -# milestone identifier, used by neutron-db-manage -neutron_milestone = [migration.MITAKA] - - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'opendaylightjournal', - sa.Column('id', sa.String(36), primary_key=True), - sa.Column('object_type', sa.String(36), nullable=False), - sa.Column('object_uuid', sa.String(36), nullable=False), - sa.Column('operation', sa.String(36), nullable=False), - sa.Column('data', sa.PickleType, nullable=True), - sa.Column('state', - sa.Enum('pending', 'processing', 'failed', 'completed', - name='state'), - nullable=False, default='pending'), - sa.Column('retry_count', sa.Integer, default=0), - sa.Column('created_at', sa.DateTime, default=sa.func.now()), - sa.Column('last_retried', sa.TIMESTAMP, server_default=sa.func.now(), - onupdate=sa.func.now()) - ) diff --git a/networking_odl/db/migration/alembic_migrations/versions/newton/contract/fa0c536252a5_update_opendayligut_journal.py b/networking_odl/db/migration/alembic_migrations/versions/newton/contract/fa0c536252a5_update_opendayligut_journal.py deleted file mode 100644 index fe653c9c8..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/newton/contract/fa0c536252a5_update_opendayligut_journal.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2016 Isaku Yamahata -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""update opendayligut journal - -Revision ID: fa0c536252a5 -Revises: 383acb0d38a0 -Create Date: 2016-08-05 23:03:46.470595 - -""" - -# revision identifiers, used by Alembic. -revision = 'fa0c536252a5' -down_revision = '383acb0d38a0' -depends_on = ('3d560427d776', ) - -from alembic import op - - -def upgrade(): - # Since a new primary key is introduced and alembic doesn't allow to - # add new primary key, create a new table with new primary key and - # rename it. 
- op.execute("INSERT INTO opendaylightjournal_new " - "(object_type, object_uuid, operation, data, " - "state, retry_count, created_at, last_retried) " - "SELECT object_type, object_uuid, operation, data, " - "state, retry_count, created_at, last_retried " - "FROM opendaylightjournal " - "WHERE state != 'completed' " - "ORDER BY created_at ASC") - op.drop_table('opendaylightjournal') - op.rename_table('opendaylightjournal_new', 'opendaylightjournal') diff --git a/networking_odl/db/migration/alembic_migrations/versions/newton/expand/3d560427d776_add_sequence_number_to_journal.py b/networking_odl/db/migration/alembic_migrations/versions/newton/expand/3d560427d776_add_sequence_number_to_journal.py deleted file mode 100644 index 30924aa48..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/newton/expand/3d560427d776_add_sequence_number_to_journal.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2016 Isaku Yamahata -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""add sequence number to journal - -Revision ID: 3d560427d776 -Revises: 703dbf02afde -Create Date: 2016-08-05 15:50:22.151078 - -""" - -# revision identifiers, used by Alembic. -revision = '3d560427d776' -down_revision = '703dbf02afde' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'opendaylightjournal_new', - sa.Column('seqnum', sa.BigInteger(), - primary_key=True, autoincrement=True), - sa.Column('object_type', sa.String(36), nullable=False), - sa.Column('object_uuid', sa.String(36), nullable=False), - sa.Column('operation', sa.String(36), nullable=False), - sa.Column('data', sa.PickleType, nullable=True), - sa.Column('state', - sa.Enum('pending', 'processing', 'failed', 'completed', - name='state'), - nullable=False, default='pending'), - sa.Column('retry_count', sa.Integer, default=0), - sa.Column('created_at', sa.DateTime, default=sa.func.now()), - sa.Column('last_retried', sa.TIMESTAMP, server_default=sa.func.now(), - onupdate=sa.func.now()), - ) diff --git a/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py b/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py deleted file mode 100644 index bbe0c4612..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2016 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""Add journal maintenance table - -Revision ID: 703dbf02afde -Revises: 37e242787ae5 -Create Date: 2016-04-12 10:49:31.802663 - -""" - -# revision identifiers, used by Alembic. -revision = '703dbf02afde' -down_revision = '37e242787ae5' - -from alembic import op -from oslo_utils import uuidutils -import sqlalchemy as sa - -from networking_odl.common import constants as odl_const - - -def upgrade(): - maint_table = op.create_table( - 'opendaylight_maintenance', - sa.Column('id', sa.String(36), primary_key=True), - sa.Column('state', sa.Enum(odl_const.PENDING, odl_const.PROCESSING, - name='state'), - nullable=False), - sa.Column('processing_operation', sa.String(70)), - sa.Column('lock_updated', sa.TIMESTAMP, nullable=False, - server_default=sa.func.now(), - onupdate=sa.func.now()) - ) - - # Insert the only row here that is used to synchronize the lock between - # different Neutron processes. - op.bulk_insert(maint_table, - [{'id': uuidutils.generate_uuid(), - 'state': odl_const.PENDING}]) diff --git a/networking_odl/db/migration/alembic_migrations/versions/pike/contract/eccd865b7d3a_drop_opendaylight_maintenance_table.py b/networking_odl/db/migration/alembic_migrations/versions/pike/contract/eccd865b7d3a_drop_opendaylight_maintenance_table.py deleted file mode 100644 index 9e0bf519a..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/pike/contract/eccd865b7d3a_drop_opendaylight_maintenance_table.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2017 NEC Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""drop opendaylight_maintenance table - -Revision ID: eccd865b7d3a -Revises: fa0c536252a5 -Create Date: 2017-05-24 03:00:40.194278 - -""" - -# revision identifiers, used by Alembic. -revision = 'eccd865b7d3a' -down_revision = 'fa0c536252a5' - -from alembic import op - - -def upgrade(): - op.drop_table('opendaylight_maintenance') diff --git a/networking_odl/db/migration/alembic_migrations/versions/pike/expand/0472f56ff2fb_add_journal_dependencies_table.py b/networking_odl/db/migration/alembic_migrations/versions/pike/expand/0472f56ff2fb_add_journal_dependencies_table.py deleted file mode 100644 index acf632731..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/pike/expand/0472f56ff2fb_add_journal_dependencies_table.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2017 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""Add journal dependencies table - -Revision ID: 0472f56ff2fb -Revises: 43af357fd638 -Create Date: 2017-04-02 11:02:01.622548 - -""" - -# revision identifiers, used by Alembic. -revision = '0472f56ff2fb' -down_revision = '43af357fd638' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'opendaylight_journal_deps', - sa.Column('depends_on', sa.BigInteger(), - sa.ForeignKey('opendaylightjournal.seqnum', - ondelete='CASCADE'), - primary_key=True), - sa.Column('dependent', sa.BigInteger(), - sa.ForeignKey('opendaylightjournal.seqnum', - ondelete='CASCADE'), - primary_key=True)) diff --git a/networking_odl/db/migration/alembic_migrations/versions/pike/expand/43af357fd638_added_version_id_for_optimistic_locking.py b/networking_odl/db/migration/alembic_migrations/versions/pike/expand/43af357fd638_added_version_id_for_optimistic_locking.py deleted file mode 100644 index 02511762f..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/pike/expand/43af357fd638_added_version_id_for_optimistic_locking.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2017 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Added version_id for optimistic locking - -Revision ID: 43af357fd638 -Revises: 3d560427d776 -Create Date: 2016-03-24 10:14:56.408413 - -""" - -# revision identifiers, used by Alembic. -revision = '43af357fd638' -down_revision = '3d560427d776' -depends_on = ('fa0c536252a5',) - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('opendaylightjournal', - sa.Column('version_id', sa.Integer, server_default='0', - nullable=False)) diff --git a/networking_odl/db/migration/alembic_migrations/versions/pike/expand/6f7dfb241354_create_opendaylight_preiodic_task_table.py b/networking_odl/db/migration/alembic_migrations/versions/pike/expand/6f7dfb241354_create_opendaylight_preiodic_task_table.py deleted file mode 100644 index e9e35720a..000000000 --- a/networking_odl/db/migration/alembic_migrations/versions/pike/expand/6f7dfb241354_create_opendaylight_preiodic_task_table.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2017 NEC Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""create opendaylight_preiodic_task table - -Revision ID: 6f7dfb241354 -Revises: 0472f56ff2fb -Create Date: 2017-05-24 03:01:00.755796 - -""" - -# revision identifiers, used by Alembic. 
-revision = '6f7dfb241354' -down_revision = '0472f56ff2fb' - -from alembic import op -import sqlalchemy as sa - -from networking_odl.common import constants as odl_const - - -def upgrade(): - periodic_table = op.create_table( - 'opendaylight_periodic_task', - sa.Column('state', sa.Enum(odl_const.PENDING, odl_const.PROCESSING, - name='state'), - nullable=False), - sa.Column('processing_operation', sa.String(70)), - sa.Column('task', sa.String(70), primary_key=True), - sa.Column('lock_updated', sa.TIMESTAMP, nullable=False, - server_default=sa.func.now(), - onupdate=sa.func.now()) - ) - op.bulk_insert(periodic_table, - [{'task': 'maintenance', - 'state': odl_const.PENDING}, - {'task': 'hostconfig', - 'state': odl_const.PENDING}]) diff --git a/networking_odl/db/models.py b/networking_odl/db/models.py deleted file mode 100644 index ec92adc35..000000000 --- a/networking_odl/db/models.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa -from sqlalchemy.dialects import sqlite - -from neutron_lib.db import model_base - -from networking_odl.common import constants as odl_const - - -IdType = sa.BigInteger().with_variant(sa.Integer(), 'sqlite') - -journal_dependencies = sa.Table( - 'opendaylight_journal_deps', model_base.BASEV2.metadata, - sa.Column('depends_on', IdType, - sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'), - primary_key=True), - sa.Column('dependent', IdType, - sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'), - primary_key=True)) - - -class OpenDaylightJournal(model_base.BASEV2): - __tablename__ = 'opendaylightjournal' - - seqnum = sa.Column(IdType, primary_key=True, autoincrement=True) - object_type = sa.Column(sa.String(36), nullable=False) - object_uuid = sa.Column(sa.String(36), nullable=False) - operation = sa.Column(sa.String(36), nullable=False) - data = sa.Column(sa.PickleType, nullable=True) - state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.FAILED, - odl_const.PROCESSING, odl_const.COMPLETED), - nullable=False, default=odl_const.PENDING) - retry_count = sa.Column(sa.Integer, default=0) - created_at = sa.Column( - sa.DateTime().with_variant( - sqlite.DATETIME(truncate_microseconds=True), 'sqlite'), - server_default=sa.func.now()) - last_retried = sa.Column(sa.TIMESTAMP, server_default=sa.func.now(), - onupdate=sa.func.now()) - version_id = sa.Column(sa.Integer, server_default='0', nullable=False) - dependencies = sa.orm.relationship( - "OpenDaylightJournal", secondary=journal_dependencies, - primaryjoin=seqnum == journal_dependencies.c.depends_on, - secondaryjoin=seqnum == journal_dependencies.c.dependent, - backref="depending_on" - ) - - __mapper_args__ = { - 'version_id_col': version_id - } - - -class OpenDaylightPeriodicTask(model_base.BASEV2): - __tablename__ = 'opendaylight_periodic_task' - - state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.PROCESSING), - nullable=False) - processing_operation = 
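The journal_dependencies table and the dependencies/depending_on relationship removed above model "entry B must wait for entry A" as a self-referential many-to-many. A minimal sketch of that mapping, assuming SQLAlchemy 1.x and hypothetical names:

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

# Association table: (depends_on, dependent) pairs of sequence numbers.
deps = sa.Table(
    'deps', Base.metadata,
    sa.Column('depends_on', sa.Integer,
              sa.ForeignKey('entry.seqnum', ondelete='CASCADE'),
              primary_key=True),
    sa.Column('dependent', sa.Integer,
              sa.ForeignKey('entry.seqnum', ondelete='CASCADE'),
              primary_key=True))


class Entry(Base):
    __tablename__ = 'entry'
    seqnum = sa.Column(sa.Integer, primary_key=True)
    # X.dependencies lists the entries that wait for X; the backref
    # Y.depending_on lists the entries Y waits for.
    dependencies = orm.relationship(
        'Entry', secondary=deps,
        primaryjoin=seqnum == deps.c.depends_on,
        secondaryjoin=seqnum == deps.c.dependent,
        backref='depending_on')


engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = orm.sessionmaker(bind=engine)()

network, port = Entry(), Entry()
port.depending_on.append(network)   # the port entry waits for the network
session.add_all([network, port])
session.commit()
print([e.seqnum for e in network.dependencies])   # the port's seqnum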
sa.Column(sa.String(70)) - task = sa.Column(sa.String(70), primary_key=True) - lock_updated = sa.Column(sa.TIMESTAMP, nullable=False, - server_default=sa.func.now(), - onupdate=sa.func.now()) diff --git a/networking_odl/fwaas/__init__.py b/networking_odl/fwaas/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/fwaas/driver.py b/networking_odl/fwaas/driver.py deleted file mode 100644 index a9de4f251..000000000 --- a/networking_odl/fwaas/driver.py +++ /dev/null @@ -1,69 +0,0 @@ -# -# Copyright (C) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log as logging - -from neutron_fwaas.services.firewall.drivers import fwaas_base - -from networking_odl.common import client as odl_client -from networking_odl.common import config # noqa - -LOG = logging.getLogger(__name__) - - -class OpenDaylightFwaasDriver(fwaas_base.FwaasDriverBase): - - """OpenDaylight FWaaS Driver - - This code is the backend implementation for the OpenDaylight FWaaS - driver for OpenStack Neutron. - """ - - def __init__(self): - LOG.debug("Initializing OpenDaylight FWaaS driver") - self.client = odl_client.OpenDaylightRestClient.create_client() - - def create_firewall(self, apply_list, firewall): - """Create the Firewall with default (drop all) policy. - - The default policy will be applied on all the interfaces of - trusted zone. - """ - pass - - def delete_firewall(self, apply_list, firewall): - """Delete firewall. - - Removes all policies created by this instance and frees up - all the resources. - """ - pass - - def update_firewall(self, apply_list, firewall): - """Apply the policy on all trusted interfaces. - - Remove previous policy and apply the new policy on all trusted - interfaces. - """ - pass - - def apply_default_policy(self, apply_list, firewall): - """Apply the default policy on all trusted interfaces. - - Remove current policy and apply the default policy on all trusted - interfaces. - """ - pass diff --git a/networking_odl/hacking/__init__.py b/networking_odl/hacking/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/hacking/checks.py b/networking_odl/hacking/checks.py deleted file mode 100644 index 7fddd5a0f..000000000 --- a/networking_odl/hacking/checks.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2017 Intel Corporation. -# Copyright 2017 Isaku Yamahata -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import tokenize - -from hacking.checks import docstrings - -# TODO(yamahata): enable neutron checking -# from neutron.hacking import checks -from neutron_lib.hacking import checks - -_ND01_MSG = "ND01: use OpenDaylight (capital D) instead of Opendaylight" -_ND01_OPENDAYLIGHT = 'Opendaylight' - - -def check_opendaylight_lowercase(logical_line, filename): - """ND01 - Enforce using OpenDaylight.""" - if _ND01_OPENDAYLIGHT in logical_line: - pos = logical_line.find(_ND01_OPENDAYLIGHT) - yield (pos, _ND01_MSG) - - -def check_opendaylight_lowercase_comment( - physical_line, previous_logical, tokens): - """ND01 - Enforce using OpenDaylight in comment.""" - for token_type, text, start_index, _, _ in tokens: - if token_type == tokenize.COMMENT: - pos = physical_line.find(_ND01_OPENDAYLIGHT) - if pos >= 0: - return (pos, _ND01_MSG + " in comment") - - -def check_opendaylight_lowercase_docstring( - physical_line, previous_logical, tokens): - """ND01 - Enforce using OpenDaylight in docstring.""" - docstring = docstrings.is_docstring(tokens, previous_logical) - if docstring and _ND01_OPENDAYLIGHT in docstring: - pos = physical_line.find(_ND01_OPENDAYLIGHT) - return (pos, _ND01_MSG + " in docstring") - - -def factory(register): - checks.factory(register) - register(check_opendaylight_lowercase) - register(check_opendaylight_lowercase_comment) - register(check_opendaylight_lowercase_docstring) diff --git a/networking_odl/journal/__init__.py b/networking_odl/journal/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/journal/cleanup.py b/networking_odl/journal/cleanup.py deleted file mode 100644 index baa9f2937..000000000 --- a/networking_odl/journal/cleanup.py +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
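The ND01 check removed above is a generator-style hacking/flake8 check: given a logical line, it yields an (offset, message) pair when the lowercase spelling appears. A pure-Python sketch of how such a check behaves, re-implemented locally rather than imported from the removed module:

_ND01_MSG = "ND01: use OpenDaylight (capital D) instead of Opendaylight"
_ND01_OPENDAYLIGHT = 'Opendaylight'


def check_opendaylight_lowercase(logical_line, filename):
    # Yields (offset, message) for each offending logical line,
    # mirroring the removed hacking check.
    if _ND01_OPENDAYLIGHT in logical_line:
        yield (logical_line.find(_ND01_OPENDAYLIGHT), _ND01_MSG)


# Nothing is yielded for the correct spelling...
print(list(check_opendaylight_lowercase('x = "OpenDaylight"', 'f.py')))   # []
# ...and the lowercase variant is flagged with its column offset.
print(list(check_opendaylight_lowercase('x = "Opendaylight"', 'f.py')))
# [(5, 'ND01: use OpenDaylight (capital D) instead of Opendaylight')]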
-# - -from datetime import timedelta - -from oslo_config import cfg -from oslo_log import log as logging - -from networking_odl.common import constants as odl_const -from networking_odl.db import db - -LOG = logging.getLogger(__name__) - - -class JournalCleanup(object): - """Journal maintenance operation for deleting completed rows.""" - def __init__(self): - self._rows_retention = cfg.CONF.ml2_odl.completed_rows_retention - self._processing_timeout = cfg.CONF.ml2_odl.processing_timeout - - def delete_completed_rows(self, session): - if self._rows_retention != -1: - LOG.debug("Deleting completed rows") - db.delete_rows_by_state_and_time( - session, odl_const.COMPLETED, - timedelta(seconds=self._rows_retention)) - - def cleanup_processing_rows(self, session): - row_count = db.reset_processing_rows(session, self._processing_timeout) - if row_count: - LOG.info("Reset %(num)s orphaned rows back to pending", - {"num": row_count}) diff --git a/networking_odl/journal/dependency_validations.py b/networking_odl/journal/dependency_validations.py deleted file mode 100644 index 243a4e01e..000000000 --- a/networking_odl/journal/dependency_validations.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from networking_odl._i18n import _ -from networking_odl.common import constants as odl_const -from networking_odl.db import db - - -def _get_delete_dependencies(session, object_type, object_uuid): - """Get dependent operations for a delete operation. - - Return any operations that pertain to the delete: Either create - or update operations on the same object, or delete operations on other - objects that depend on the deleted object. - """ - # Get any pending or processing create or update ops on the row itself - deps = db.get_pending_or_processing_ops( - session, object_uuid, operation=(odl_const.ODL_UPDATE, - odl_const.ODL_CREATE)) - - # Get dependent operations of other dependent types - dependent_resource_types = _DELETE_DEPENDENCIES.get(object_type) - if dependent_resource_types is not None: - for resource_type in dependent_resource_types: - deps.extend(db.get_pending_delete_ops_with_parent( - session, resource_type, object_uuid)) - - return deps - - -def _get_older_operations(session, object_ids): - """Get any older operations. - - Return any operations still in the queue for the given ID(s). 
- """ - if not isinstance(object_ids, (list, tuple)): - object_ids = (object_ids,) - - deps = [] - for object_id in object_ids: - deps.extend( - db.get_pending_or_processing_ops(session, object_id)) - - return deps - - -def _generate_subnet_deps(data): - return data['network_id'] - - -def _generate_port_deps(data): - object_ids = [fixed_ip['subnet_id'] for fixed_ip in data['fixed_ips']] - object_ids.append(data['network_id']) - qos_policy_id = data.get('qos_policy_id') - if qos_policy_id is not None: - object_ids.append(qos_policy_id) - return object_ids - - -def _generate_network_deps(data): - return data.get('qos_policy_id') - - -def _generate_sg_rule_deps(data): - return data['security_group_id'] - - -def _generate_router_deps(data): - return data['gw_port_id'] - - -def _generate_floatingip_deps(data): - object_ids = [] - network_id = data.get('floating_network_id') - if network_id is not None: - object_ids.append(network_id) - - port_id = data.get('port_id') - if port_id is not None: - object_ids.append(port_id) - - router_id = data.get('router_id') - if router_id is not None: - object_ids.append(router_id) - - return object_ids - - -def _generate_trunk_deps(data): - portids = [subport['port_id'] for subport in data['sub_ports']] - portids.append(data['port_id']) - return portids - - -def _generate_l2gateway_connection_deps(data): - object_ids = [] - network_id = data.get('network_id') - if network_id is not None: - object_ids.append(network_id) - - gateway_id = data.get('gateway_id') - if gateway_id is not None: - object_ids.append(gateway_id) - - return object_ids - - -def _generate_sfc_port_pair_deps(data): - object_ids = [] - ingress_port = data.get('ingress') - if ingress_port is not None: - object_ids.append(ingress_port) - - egress_port = data.get('egress') - if egress_port is not None: - object_ids.append(egress_port) - - return object_ids - - -def _generate_sfc_port_pair_group_deps(data): - port_pairs = [port_pair['id'] for port_pair in data['port_pairs']] - return port_pairs - - -def _generate_sfc_port_chain_deps(data): - object_ids = [port_pair_group['id'] for port_pair_group in - data['port_pair_groups']] - flow_classifiers = [flow_classifier['id'] for flow_classifier in - data['flow_classifiers']] - object_ids.extend(flow_classifiers) - - return object_ids - - -def _generate_bgpvpn_deps(data): - object_ids = [] - - network_ids = data.get('networks') - if network_ids is not None: - object_ids.extend(network_ids) - - router_ids = data.get('routers') - if router_ids is not None: - object_ids.extend(router_ids) - - return object_ids - - -_CREATE_OR_UPDATE_DEP_GENERATOR = { - odl_const.ODL_NETWORK: _generate_network_deps, - odl_const.ODL_SUBNET: _generate_subnet_deps, - odl_const.ODL_PORT: _generate_port_deps, - # TODO(yamahata): dependency between SG and PORT - odl_const.ODL_SG_RULE: _generate_sg_rule_deps, - odl_const.ODL_ROUTER: _generate_router_deps, - odl_const.ODL_FLOATINGIP: _generate_floatingip_deps, - odl_const.ODL_TRUNK: _generate_trunk_deps, - odl_const.ODL_L2GATEWAY_CONNECTION: _generate_l2gateway_connection_deps, - odl_const.ODL_SFC_PORT_PAIR: _generate_sfc_port_pair_deps, - odl_const.ODL_SFC_PORT_PAIR_GROUP: _generate_sfc_port_pair_group_deps, - odl_const.ODL_SFC_PORT_CHAIN: _generate_sfc_port_chain_deps, - odl_const.ODL_BGPVPN: _generate_bgpvpn_deps, -} - - -_DELETE_DEPENDENCIES = { - odl_const.ODL_NETWORK: (odl_const.ODL_SUBNET, odl_const.ODL_PORT, - odl_const.ODL_ROUTER, - odl_const.ODL_L2GATEWAY_CONNECTION, - odl_const.ODL_BGPVPN), - odl_const.ODL_SUBNET: 
(odl_const.ODL_PORT,), - odl_const.ODL_ROUTER: (odl_const.ODL_PORT, odl_const.ODL_FLOATINGIP, - odl_const.ODL_BGPVPN), - odl_const.ODL_PORT: (odl_const.ODL_TRUNK,), - # TODO(yamahata): dependency between SG and PORT - odl_const.ODL_SG: (odl_const.ODL_SG_RULE,), - odl_const.ODL_L2GATEWAY: (odl_const.ODL_L2GATEWAY_CONNECTION,), - odl_const.ODL_SFC_FLOW_CLASSIFIER: (odl_const.ODL_SFC_PORT_CHAIN,), - odl_const.ODL_SFC_PORT_PAIR: (odl_const.ODL_SFC_PORT_PAIR_GROUP,), - odl_const.ODL_SFC_PORT_PAIR_GROUP: (odl_const.ODL_SFC_PORT_CHAIN,), - odl_const.ODL_QOS_POLICY: (odl_const.ODL_PORT, odl_const.ODL_NETWORK), -} - - -def calculate(session, operation, object_type, object_uuid, data): - """Calculate resource deps in journaled operations. - - As a rule of thumb validation takes into consideration only operations in - pending or processing state, other states are irrelevant. - :param session: db session - :param row: entry in journal entry to be validated - """ - deps = [] - if operation == odl_const.ODL_DELETE: - return _get_delete_dependencies(session, object_type, object_uuid) - elif operation == odl_const.ODL_UPDATE: - deps.extend( - db.get_pending_or_processing_ops( - session, object_uuid, - operation=(odl_const.ODL_CREATE, odl_const.ODL_UPDATE))) - elif operation != odl_const.ODL_CREATE: - raise ValueError(_("unsupported operation {}").format(operation)) - - # Validate deps if there are any to validate. - dep_generator = _CREATE_OR_UPDATE_DEP_GENERATOR.get(object_type) - if dep_generator is not None: - object_ids = dep_generator(data) - if object_ids is not None: - deps.extend(_get_older_operations(session, object_ids)) - - return deps diff --git a/networking_odl/journal/full_sync.py b/networking_odl/journal/full_sync.py deleted file mode 100644 index a99caf5ab..000000000 --- a/networking_odl/journal/full_sync.py +++ /dev/null @@ -1,126 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import requests - -from neutron_lib import context as neutron_context -from neutron_lib.plugins import directory - -from networking_odl.common import client -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.journal import journal - -# Define which pending operation types should be deleted -_CANARY_NETWORK_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142" -_CANARY_TENANT_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142" -_CANARY_NETWORK_DATA = {'id': _CANARY_NETWORK_ID, - 'tenant_id': _CANARY_TENANT_ID, - 'name': 'Sync Canary Network', - 'admin_state_up': False} -_OPS_TO_DELETE_ON_SYNC = (odl_const.ODL_CREATE, odl_const.ODL_UPDATE) -_CLIENT = client.OpenDaylightRestClientGlobal() - -# TODO(yamahata): to add more resources. -# e.g. 
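The _generate_port_deps helper above determines which earlier journal entries a port operation must wait for: the subnet of each fixed IP, the port's network, and an optional QoS policy. A pure-Python illustration of that calculation, with made-up identifiers standing in for UUIDs:

def generate_port_deps(port):
    # Mirrors the removed _generate_port_deps: subnets from fixed_ips,
    # then the network, then the optional QoS policy.
    object_ids = [fixed_ip['subnet_id'] for fixed_ip in port['fixed_ips']]
    object_ids.append(port['network_id'])
    if port.get('qos_policy_id') is not None:
        object_ids.append(port['qos_policy_id'])
    return object_ids


port = {
    'network_id': 'net-1',
    'fixed_ips': [{'subnet_id': 'subnet-1'}, {'subnet_id': 'subnet-2'}],
    'qos_policy_id': None,
}
print(generate_port_deps(port))   # ['subnet-1', 'subnet-2', 'net-1']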
bgpvpn, fwaas, l2gw, lbaas, qos, sfc -# and as more services are added -_ORDERED_ODL_RESOURCES = ( - odl_const.ODL_SG, - odl_const.ODL_SG_RULE, - odl_const.ODL_NETWORK, - odl_const.ODL_SUBNET, - odl_const.ODL_ROUTER, - odl_const.ODL_PORT, - odl_const.ODL_FLOATINGIP, - odl_const.ODL_LOADBALANCER, - odl_const.ODL_LISTENER, - odl_const.ODL_POOL, - odl_const.ODL_MEMBER, - odl_const.ODL_HEALTHMONITOR, - odl_const.ODL_QOS_POLICY, - odl_const.ODL_TRUNK, - odl_const.ODL_BGPVPN, - odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION, - odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION, - odl_const.ODL_SFC_FLOW_CLASSIFIER, - odl_const.ODL_SFC_PORT_PAIR, - odl_const.ODL_SFC_PORT_PAIR_GROUP, - odl_const.ODL_SFC_PORT_CHAIN, - odl_const.ODL_L2GATEWAY, - odl_const.ODL_L2GATEWAY_CONNECTION, -) - -ALL_RESOURCES = {} - - -def register(driver, resources): - ALL_RESOURCES[driver] = resources - - -def full_sync(session): - if not _full_sync_needed(session): - return - - db.delete_pending_rows(session, _OPS_TO_DELETE_ON_SYNC) - - dbcontext = neutron_context.get_admin_context() - for resource_type in _ORDERED_ODL_RESOURCES: - for plugin_alias, resource in ALL_RESOURCES.items(): - collection_name = resource.get(resource_type) - if collection_name is not None: - plugin = directory.get_plugin(plugin_alias) - _sync_resources(session, plugin, dbcontext, resource_type, - collection_name) - break - - journal.record(dbcontext, odl_const.ODL_NETWORK, _CANARY_NETWORK_ID, - odl_const.ODL_CREATE, _CANARY_NETWORK_DATA) - - -def _full_sync_needed(session): - return (_canary_network_missing_on_odl() and - _canary_network_not_in_journal(session)) - - -def _canary_network_missing_on_odl(): - # Try to reach the ODL server, sometimes it might be up & responding to - # HTTP calls but inoperative.. - client = _CLIENT.get_client() - response = client.get(odl_const.ODL_NETWORKS) - response.raise_for_status() - - response = client.get(odl_const.ODL_NETWORKS + "/" + _CANARY_NETWORK_ID) - if response.status_code == requests.codes.not_found: - return True - - # In case there was an error raise it up because we don't know how to deal - # with it.. - response.raise_for_status() - return False - - -def _canary_network_not_in_journal(session): - return not db.get_pending_or_processing_ops( - session, _CANARY_NETWORK_ID, operation=odl_const.ODL_CREATE) - - -def _sync_resources(session, plugin, dbcontext, object_type, collection_name): - obj_getter = getattr(plugin, 'get_%s' % collection_name) - resources = obj_getter(dbcontext) - - for resource in resources: - journal.record(dbcontext, object_type, resource['id'], - odl_const.ODL_CREATE, resource) diff --git a/networking_odl/journal/journal.py b/networking_odl/journal/journal.py deleted file mode 100644 index c02020ab0..000000000 --- a/networking_odl/journal/journal.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
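The _canary_network_missing_on_odl check above decides whether a full sync is needed by asking ODL for a well-known "canary" network; a 404 means the controller's state was wiped or never populated. A standalone sketch of that probe with plain requests; the base URL and credentials below are assumptions for illustration, not values taken from this tree:

import requests

CANARY_NETWORK_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142"
# Assumed endpoint and credentials, for illustration only.
BASE_URL = "http://odl.example.com:8080/controller/nb/v2/neutron"
AUTH = ("admin", "admin")


def canary_network_missing(http=None):
    http = http or requests.Session()
    # Probe the collection first: if ODL itself is unreachable or broken,
    # raise instead of concluding that a full sync is needed.
    resp = http.get(BASE_URL + "/networks", auth=AUTH)
    resp.raise_for_status()

    resp = http.get(BASE_URL + "/networks/" + CANARY_NETWORK_ID, auth=AUTH)
    if resp.status_code == requests.codes.not_found:
        return True          # canary gone: trigger a full sync
    resp.raise_for_status()  # any other error: surface it
    return False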
- -import copy -import threading -import time - -from neutron.db import api as neutron_db_api -from neutron_lib.plugins import directory -from oslo_config import cfg -from oslo_log import log as logging -from requests import exceptions - -from networking_odl.common import client -from networking_odl.common import constants as odl_const -from networking_odl.common import filters -from networking_odl.common import utils -from networking_odl.db import db -from networking_odl.journal import dependency_validations - - -LOG = logging.getLogger(__name__) - -MAKE_URL = {} - - -def call_thread_on_end(func): - def new_func(obj, *args, **kwargs): - return_value = func(obj, *args, **kwargs) - obj.journal.set_sync_event() - return return_value - return new_func - - -def _enrich_port(plugin_context, ml2_context, object_type, operation, data): - """Enrich the port with additional information needed by ODL""" - - # NOTE(yamahata): work around of ODL neutron northbound - # It passes security groups in port as list of dict for historical reasons. - # keep its format for compatibility. - # TODO(yamahata): drop this format conversion. - if data[odl_const.ODL_SGS]: - groups = [{'id': id_} for id_ in data['security_groups']] - else: - groups = [] - new_data = copy.deepcopy(data) - new_data[odl_const.ODL_SGS] = groups - - # NOTE(yamahata): work around for port creation for router - # tenant_id=''(empty string) is passed when port is created - # by l3 plugin internally for router. - # On the other hand, ODL doesn't accept empty string for tenant_id. - # In that case, deduce tenant_id from network_id for now. - # Right fix: modify Neutron so that don't allow empty string - # for tenant_id even for port for internal use. - # TODO(yamahata): eliminate this work around when neutron side - # is fixed - # assert port['tenant_id'] != '' - if ('tenant_id' not in new_data or new_data['tenant_id'] == ''): - if ml2_context: - network = ml2_context._network_context._network - else: - plugin = directory.get_plugin() - network = plugin.get_network(plugin_context, - new_data['network_id']) - new_data['tenant_id'] = network['tenant_id'] - - return new_data - - -def record(plugin_context, object_type, object_uuid, operation, data, - ml2_context=None): - if (object_type == odl_const.ODL_PORT and - operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)): - data = _enrich_port( - plugin_context, ml2_context, object_type, operation, data) - - # Calculate depending_on on other journal entries - depending_on = dependency_validations.calculate( - plugin_context.session, operation, object_type, object_uuid, data) - db.create_pending_row(plugin_context.session, object_type, object_uuid, - operation, data, depending_on=depending_on) - - -def _make_url(row): - url_object = utils.make_url_object(row.object_type) - urlpath = '' - if row.operation == odl_const.ODL_CREATE: - urlpath = url_object - else: - urlpath = url_object + '/' + row.object_uuid - - return urlpath - - -def register_url_builder(object_type, method): - MAKE_URL[object_type] = method - - -def _build_url(row): - return MAKE_URL.get(row.object_type, _make_url)(row) - - -class OpenDaylightJournalThread(object): - """Thread worker for the OpenDaylight Journal Database.""" - - # make those parameter configurable? 
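The _make_url helper above maps a journal row to a REST path: create operations target the collection, while update and delete address collection/<uuid>. A small pure-Python illustration of that convention, using a hypothetical row type and a simple pluralization map in place of utils.make_url_object():

import collections

# Hypothetical, minimal stand-in for a journal row.
Row = collections.namedtuple('Row', 'object_type object_uuid operation')


def make_url(row, collection_map=None):
    # e.g. 'network' -> 'networks'; the real code derives this mapping
    # elsewhere, so a small dict stands in for it here.
    collection_map = collection_map or {'network': 'networks',
                                        'port': 'ports'}
    collection = collection_map[row.object_type]
    if row.operation == 'create':
        return collection
    return collection + '/' + row.object_uuid


print(make_url(Row('network', 'net-1', 'create')))   # networks
print(make_url(Row('port', 'port-1', 'delete')))     # ports/port-1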
- _RETRY_SLEEP_MIN = 0.1 - _RETRY_SLEEP_MAX = 60 - - def __init__(self): - self.client = client.OpenDaylightRestClient.create_client() - self._odl_sync_timeout = cfg.CONF.ml2_odl.sync_timeout - self._max_retry_count = cfg.CONF.ml2_odl.retry_count - self._sleep_time = self._RETRY_SLEEP_MIN - self.event = threading.Event() - self.lock = threading.Lock() - self._odl_sync_thread = self.start_odl_sync_thread() - self._start_sync_timer() - - def start_odl_sync_thread(self): - # Start the sync thread - LOG.debug("Starting a new sync thread") - odl_sync_thread = threading.Thread( - name='sync', - target=self.run_sync_thread) - odl_sync_thread.start() - return odl_sync_thread - - def set_sync_event(self): - # Prevent race when starting the timer - with self.lock: - self._timer.cancel() - self._start_sync_timer() - self.event.set() - - def _start_sync_timer(self): - self._timer = threading.Timer(self._odl_sync_timeout, - self.set_sync_event) - self._timer.start() - - @staticmethod - def _json_data(row): - data = copy.deepcopy(row.data) - filters.filter_for_odl(row.object_type, row.operation, data) - - if row.operation == odl_const.ODL_CREATE: - method = 'post' - to_send = {row.object_type: data} - elif row.operation == odl_const.ODL_UPDATE: - method = 'put' - to_send = {row.object_type: data} - elif row.operation == odl_const.ODL_DELETE: - method = 'delete' - to_send = None - - return method, _build_url(row), to_send - - def run_sync_thread(self): - while True: - try: - self.event.wait() - self.event.clear() - - self.sync_pending_entries() - except Exception: - # Catch exceptions to protect the thread while running - LOG.exception("Error on run_sync_thread") - - def sync_pending_entries(self): - LOG.debug("Start processing journal entries") - session = neutron_db_api.get_writer_session() - entry = db.get_oldest_pending_db_row_with_lock(session) - if entry is None: - LOG.debug("No journal entries to process") - return - - while entry is not None: - stop_processing = self._sync_entry(session, entry) - if stop_processing: - break - - entry = db.get_oldest_pending_db_row_with_lock(session) - LOG.debug("Finished processing journal entries") - - def _retry_sleep(self): - # When something happened in the connection to ODL, don't busy loop - # because it's likely to hit same issue. 
- # Wait for a while for recovery - time.sleep(self._sleep_time) - self._sleep_time = min(self._sleep_time * 2, self._RETRY_SLEEP_MAX) - - def _retry_reset(self): - self._sleep_time = self._RETRY_SLEEP_MIN - - def _sync_entry(self, session, entry): - log_dict = {'op': entry.operation, 'type': entry.object_type, - 'id': entry.object_uuid} - LOG.info("Processing - %(op)s %(type)s %(id)s", log_dict) - method, urlpath, to_send = self._json_data(entry) - - try: - self.client.sendjson(method, urlpath, to_send) - with session.begin(): - db.update_db_row_state(session, entry, odl_const.COMPLETED) - db.delete_dependency(session, entry) - self._retry_reset() - except exceptions.ConnectionError: - # Don't raise the retry count, just log an error & break - db.update_db_row_state(session, entry, odl_const.PENDING) - LOG.error("Cannot connect to the OpenDaylight Controller," - " will not process additional entries") - self._retry_sleep() - return True - except Exception: - LOG.error("Error while processing %(op)s %(type)s %(id)s", - log_dict, exc_info=True) - db.update_pending_db_row_retry( - session, entry, self._max_retry_count) - self._retry_sleep() - - return False diff --git a/networking_odl/journal/periodic_task.py b/networking_odl/journal/periodic_task.py deleted file mode 100644 index 948c732dd..000000000 --- a/networking_odl/journal/periodic_task.py +++ /dev/null @@ -1,104 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
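The _retry_sleep/_retry_reset pair above gives the journal thread a capped exponential backoff so it does not busy-loop while ODL is unreachable: the delay doubles on every consecutive failure, starting from 0.1 s and capped at 60 s, and resets to the minimum after a success. A minimal standalone sketch of the same policy:

import time


class Backoff(object):
    """Capped exponential backoff, mirroring the removed journal thread."""

    def __init__(self, minimum=0.1, maximum=60):
        self._min = minimum
        self._max = maximum
        self._sleep = minimum

    def sleep(self):
        # Called after a failure: wait, then double the next delay.
        time.sleep(self._sleep)
        self._sleep = min(self._sleep * 2, self._max)

    def reset(self):
        # Called after a success: start again from the minimum delay.
        self._sleep = self._min


backoff = Backoff(minimum=0.0)   # zero delay so the demo finishes instantly
for _ in range(3):
    backoff.sleep()
backoff.reset()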
-# - -from neutron.db import api as neutron_db_api -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall - -from networking_odl.db import db - - -LOG = logging.getLogger(__name__) - - -class PeriodicTask(object): - def __init__(self, task, interval=None): - self.task = task - self.phases = [] - if interval is None: - interval = cfg.CONF.ml2_odl.maintenance_interval - - self.timer = loopingcall.FixedIntervalLoopingCall(self.execute_ops) - self.interval = interval - - def start(self): - self.timer.start(self.interval, stop_on_exception=False) - - def cleanup(self): - # this method is used for unit test to tear down - self.timer.stop() - try: - self.timer.wait() - except AttributeError: - # NOTE(yamahata): workaround - # some tests call this cleanup without calling start - pass - - def _execute_op(self, operation, db_session): - op_details = operation.__name__ - if operation.__doc__: - op_details += " (%s)" % operation.func_doc - - try: - LOG.info("Starting %s phase of periodic task %s.", - op_details, self.task) - db.update_periodic_task(db_session, task=self.task, - operation=operation) - operation(session=db_session) - LOG.info("Finished %s phase of %s task.", op_details, self.task) - except Exception: - LOG.exception("Failed during periodic task operation %s.", - op_details) - - def task_already_executed_recently(self): - db_session = neutron_db_api.get_reader_session() - return db.was_periodic_task_executed_recently(db_session, self.task, - self.interval) - - def execute_ops(self): - LOG.info("Starting %s periodic task.", self.task) - # Lock make sure that periodic task is executed only after - # specified interval. It makes sure that maintenance tasks - # are not executed back to back. - if self.task_already_executed_recently(): - LOG.info("Periodic %s task executed after periodic interval " - "Skipping execution.", self.task) - return - - db_session = neutron_db_api.get_writer_session() - if not db.lock_periodic_task(db_session, self.task): - LOG.info("Periodic %s task already running task", self.task) - return - - try: - for phase in self.phases: - self._execute_op(phase, db_session) - finally: - db.update_periodic_task(db_session, task=self.task, - operation=None) - db.unlock_periodic_task(db_session, self.task) - - LOG.info("%s task has been finished", self.task) - - def register_operation(self, phase): - """Register a function to be run by the periodic task. - - :param phase: Function to call when the thread runs. The function will - receive a DB session to use for DB operations. - """ - self.phases.append(phase) - LOG.info("%s phase has been registered in %s task", phase, self.task) diff --git a/networking_odl/journal/recovery.py b/networking_odl/journal/recovery.py deleted file mode 100644 index b058a1a85..000000000 --- a/networking_odl/journal/recovery.py +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
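PeriodicTask above runs registered "phases" on a fixed-interval looping call, taking the DB-level lock shown earlier so only one Neutron worker executes them per interval. The code that registered the maintenance phases lived elsewhere in this tree; judging only from the signatures above, the wiring would look roughly like the following. This is illustrative only, since the modules it imports are removed by this very patch:

from networking_odl.journal import cleanup
from networking_odl.journal import periodic_task

# Illustrative wiring only; the real registration is not shown in this diff.
maintenance = periodic_task.PeriodicTask('maintenance')

journal_cleanup = cleanup.JournalCleanup()
# Each phase is invoked as phase(session=<writer session>) by _execute_op().
maintenance.register_operation(journal_cleanup.delete_completed_rows)
maintenance.register_operation(journal_cleanup.cleanup_processing_rows)

maintenance.start()   # runs the phases every maintenance_interval seconds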
-# - -from neutron_lib import context as neutron_context -from neutron_lib import exceptions as nexc -from neutron_lib.plugins import directory -from oslo_log import log as logging - -from networking_odl._i18n import _ -from networking_odl.common import client -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.journal import full_sync -from networking_odl.journal import journal - -_CLIENT = client.OpenDaylightRestClientGlobal() - -LOG = logging.getLogger(__name__) - - -class UnsupportedResourceType(Exception): - pass - - -def journal_recovery(session): - for row in db.get_all_db_rows_by_state(session, odl_const.FAILED): - try: - LOG.debug("Attempting recovery of journal entry %s.", row) - odl_resource = _CLIENT.get_client().get_resource(row.object_type, - row.object_uuid) - if odl_resource is not None: - _handle_existing_resource(session, row) - else: - _handle_non_existing_resource(session, row) - except UnsupportedResourceType: - LOG.warning('Unsupported resource %s', row.object_type) - except Exception: - LOG.exception("Failure while recovering journal entry %s.", row) - - -def _get_latest_resource(row): - object_type = row.object_type - - for plugin_alias, resources in full_sync.ALL_RESOURCES.items(): - if object_type in resources: - plugin = directory.get_plugin(plugin_alias) - break - else: - raise UnsupportedResourceType( - _("unsupported resource type: {}").format(object_type)) - - obj_getter = getattr(plugin, 'get_{}'.format(object_type)) - return obj_getter(neutron_context.get_admin_context(), row.object_uuid) - - -def _sync_resource_to_odl(session, row, operation_type, exists_on_odl): - resource = None - try: - resource = _get_latest_resource(row) - except nexc.NotFound: - if exists_on_odl: - journal.record( - neutron_context.get_admin_context(), row.object_type, - row.object_uuid, odl_const.ODL_DELETE, []) - else: - journal.record( - neutron_context.get_admin_context(), row.object_type, - row.object_uuid, operation_type, resource) - - db.update_db_row_state(session, row, odl_const.COMPLETED) - - -def _handle_existing_resource(session, row): - if row.operation == odl_const.ODL_CREATE: - db.update_db_row_state(session, row, odl_const.COMPLETED) - elif row.operation == odl_const.ODL_DELETE: - db.update_db_row_state(session, row, odl_const.PENDING) - else: - _sync_resource_to_odl(session, row, odl_const.ODL_UPDATE, True) - - -def _handle_non_existing_resource(session, row): - if row.operation == odl_const.ODL_DELETE: - db.update_db_row_state(session, row, odl_const.COMPLETED) - else: - _sync_resource_to_odl(session, row, odl_const.ODL_CREATE, False) - # TODO(mkolesni): Handle missing parent resources somehow. diff --git a/networking_odl/l2gateway/__init__.py b/networking_odl/l2gateway/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/l2gateway/driver.py b/networking_odl/l2gateway/driver.py deleted file mode 100644 index 75625c02b..000000000 --- a/networking_odl/l2gateway/driver.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) 2016 Ericsson India Global Service Pvt Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
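The recovery logic above boils down to a small decision table: what happens to a FAILED journal row depends on its recorded operation and on whether the resource still exists on the ODL side. A hedged, dependency-free sketch of just that decision (plain strings stand in for the driver's state and operation constants):

    def recovery_action(operation, exists_on_odl):
        """Return the action taken for a FAILED journal row.

        Mirrors _handle_existing_resource / _handle_non_existing_resource:
        either flip the row's state directly or re-sync the latest copy of
        the resource with the given operation.
        """
        if exists_on_odl:
            if operation == 'create':
                return ('set_state', 'completed')
            if operation == 'delete':
                return ('set_state', 'pending')
            return ('resync', 'update')
        if operation == 'delete':
            return ('set_state', 'completed')
        return ('resync', 'create')


    assert recovery_action('create', True) == ('set_state', 'completed')
    assert recovery_action('delete', True) == ('set_state', 'pending')
    assert recovery_action('update', False) == ('resync', 'create')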
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from oslo_config import cfg -from oslo_log import log as logging - -from networking_l2gw.services.l2gateway.common import constants -from networking_l2gw.services.l2gateway import service_drivers -from networking_odl.common import client as odl_client - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') - -LOG = logging.getLogger(__name__) - -L2GATEWAYS = 'l2-gateways' -L2GATEWAY_CONNECTIONS = 'l2gateway-connections' - - -class OpenDaylightL2gwDriver(service_drivers.L2gwDriver): - """OpenDaylight L2Gateway Service Driver - - This code is the openstack driver for exciting the OpenDaylight L2GW - facility. - """ - - def __init__(self, service_plugin, validator=None): - super(OpenDaylightL2gwDriver, self).__init__(service_plugin, validator) - self.service_plugin = service_plugin - self.client = odl_client.OpenDaylightRestClient.create_client() - LOG.warning( - "ODL: OpenDaylight L2gateway driver has been deprecated " - "and will be removed. Switch to driver_v2.") - - @property - def service_type(self): - return constants.L2GW - - def create_l2_gateway_postcommit(self, context, l2_gateway): - LOG.info("ODL: Create L2Gateway %(l2gateway)s", - {'l2gateway': l2_gateway}) - request = {'l2_gateway': l2_gateway} - self.client.sendjson('post', L2GATEWAYS, request) - - def delete_l2_gateway_postcommit(self, context, l2_gateway_id): - LOG.info("ODL: Delete L2Gateway %(l2gatewayid)s", - {'l2gatewayid': l2_gateway_id}) - url = L2GATEWAYS + '/' + l2_gateway_id - self.client.try_delete(url) - - def update_l2_gateway_postcommit(self, context, l2_gateway): - LOG.info("ODL: Update L2Gateway %(l2gateway)s", - {'l2gateway': l2_gateway}) - request = {'l2_gateway': l2_gateway} - url = L2GATEWAYS + '/' + l2_gateway['id'] - self.client.sendjson('put', url, request) - - def create_l2_gateway_connection_postcommit(self, context, - l2_gateway_connection): - LOG.info("ODL: Create L2Gateway connection %(l2gwconn)s", - {'l2gwconn': l2_gateway_connection}) - odl_l2_gateway_connection = copy.deepcopy(l2_gateway_connection) - odl_l2_gateway_connection['gateway_id'] = ( - l2_gateway_connection['l2_gateway_id']) - odl_l2_gateway_connection.pop('l2_gateway_id') - request = {'l2gateway_connection': odl_l2_gateway_connection} - self.client.sendjson('post', L2GATEWAY_CONNECTIONS, request) - - def delete_l2_gateway_connection_postcommit(self, context, - l2_gateway_connection_id): - LOG.info("ODL: Delete L2Gateway connection %(l2gwconnid)s", - {'l2gwconnid': l2_gateway_connection_id}) - url = L2GATEWAY_CONNECTIONS + '/' + l2_gateway_connection_id - self.client.try_delete(url) diff --git a/networking_odl/l2gateway/driver_v2.py b/networking_odl/l2gateway/driver_v2.py deleted file mode 100644 index 132b5ef39..000000000 --- a/networking_odl/l2gateway/driver_v2.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
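Both L2 gateway drivers reshape the connection dict before it goes to ODL: the Neutron-side l2_gateway_id key becomes gateway_id in the payload. A minimal sketch of that transformation (the l2gateway_connection wrapper matches the v1 REST driver; the sample IDs are made up):

    import copy


    def to_odl_l2gw_connection(l2_gateway_connection):
        """Rename l2_gateway_id -> gateway_id for the ODL northbound payload."""
        odl_conn = copy.deepcopy(l2_gateway_connection)
        odl_conn['gateway_id'] = odl_conn.pop('l2_gateway_id')
        return {'l2gateway_connection': odl_conn}


    conn = {'id': 'conn-1', 'l2_gateway_id': 'gw-1', 'network_id': 'net-1'}
    print(to_odl_l2gw_connection(conn))
    # {'l2gateway_connection': {'id': 'conn-1', 'gateway_id': 'gw-1',
    #                           'network_id': 'net-1'}}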
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from oslo_config import cfg -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from networking_l2gw.services.l2gateway.common import constants -from networking_l2gw.services.l2gateway import service_drivers -from networking_odl.common import constants as odl_const -from networking_odl.common import postcommit -from networking_odl.journal import full_sync -from networking_odl.journal import journal - - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') - -LOG = logging.getLogger(__name__) - -L2GW_RESOURCES = { - odl_const.ODL_L2GATEWAY: odl_const.ODL_L2GATEWAYS, - odl_const.ODL_L2GATEWAY_CONNECTION: odl_const.ODL_L2GATEWAY_CONNECTIONS -} - - -@postcommit.add_postcommit('l2_gateway', 'l2_gateway_connection') -class OpenDaylightL2gwDriver(service_drivers.L2gwDriver): - """OpenDaylight L2Gateway Service Driver - - This code is the openstack driver for exciting the OpenDaylight L2GW - facility. - """ - - def __init__(self, service_plugin, validator=None): - super(OpenDaylightL2gwDriver, self).__init__(service_plugin, validator) - self.service_plugin = service_plugin - self.journal = journal.OpenDaylightJournalThread() - full_sync.register(constants.L2GW, L2GW_RESOURCES) - LOG.info("ODL: Started OpenDaylight L2Gateway V2 driver") - - @property - def service_type(self): - return constants.L2GW - - @log_helpers.log_method_call - def create_l2_gateway_precommit(self, context, l2_gateway): - journal.record(context, odl_const.ODL_L2GATEWAY, - l2_gateway['id'], odl_const.ODL_CREATE, - l2_gateway) - - @log_helpers.log_method_call - def update_l2_gateway_precommit(self, context, l2_gateway): - journal.record(context, odl_const.ODL_L2GATEWAY, - l2_gateway['id'], odl_const.ODL_UPDATE, - l2_gateway) - - @log_helpers.log_method_call - def delete_l2_gateway_precommit(self, context, l2_gateway_id): - journal.record(context, odl_const.ODL_L2GATEWAY, - l2_gateway_id, odl_const.ODL_DELETE, - l2_gateway_id) - - @log_helpers.log_method_call - def create_l2_gateway_connection_precommit(self, context, - l2_gateway_connection): - odl_l2_gateway_connection = copy.deepcopy(l2_gateway_connection) - odl_l2_gateway_connection['gateway_id'] = ( - l2_gateway_connection['l2_gateway_id']) - odl_l2_gateway_connection.pop('l2_gateway_id') - journal.record(context, odl_const.ODL_L2GATEWAY_CONNECTION, - odl_l2_gateway_connection['id'], - odl_const.ODL_CREATE, - odl_l2_gateway_connection) - - @log_helpers.log_method_call - def delete_l2_gateway_connection_precommit(self, context, - l2_gateway_connection_id): - journal.record(context, odl_const.ODL_L2GATEWAY_CONNECTION, - l2_gateway_connection_id, - odl_const.ODL_DELETE, - l2_gateway_connection_id) diff --git a/networking_odl/l3/__init__.py b/networking_odl/l3/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/l3/l3_odl.py b/networking_odl/l3/l3_odl.py deleted file mode 100644 index 8a705f8cd..000000000 --- a/networking_odl/l3/l3_odl.py +++ /dev/null @@ -1,149 +0,0 @@ -# -# Copyright (C) 2013 Red Hat, Inc. 
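The v2 driver above never calls ODL in-line; each precommit hook only records the pending operation, and the journal thread replays it later. A toy, in-memory illustration of what gets recorded (the real implementation persists rows in the Neutron database via journal.record):

    import collections

    JournalEntry = collections.namedtuple(
        'JournalEntry', ['object_type', 'object_uuid', 'operation', 'data'])

    journal_rows = []


    def record(object_type, object_uuid, operation, data):
        # Precommit hooks only enqueue work; nothing is sent to ODL here.
        journal_rows.append(JournalEntry(object_type, object_uuid, operation, data))


    # Roughly what the create/delete precommit hooks translate to:
    record('l2_gateway', 'gw-1', 'create', {'id': 'gw-1', 'devices': []})
    record('l2_gateway', 'gw-1', 'delete', 'gw-1')

    for row in journal_rows:
        print('%s %s %s' % (row.operation, row.object_type, row.object_uuid))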
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_config import cfg -from oslo_log import log as logging - -from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api -from neutron.api.rpc.handlers import l3_rpc -from neutron.common import rpc as n_rpc -from neutron.common import topics -from neutron.db import common_db_mixin -from neutron.db import extraroute_db -from neutron.db import l3_agentschedulers_db -from neutron.db import l3_dvr_db -from neutron.db import l3_gwmode_db -from neutron_lib import constants as q_const -from neutron_lib.plugins import constants as plugin_constants - -from networking_odl.common import client as odl_client -from networking_odl.common import utils as odl_utils - - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') -LOG = logging.getLogger(__name__) -ROUTERS = 'routers' -FLOATINGIPS = 'floatingips' - - -class OpenDaylightL3RouterPlugin( - common_db_mixin.CommonDbMixin, - extraroute_db.ExtraRoute_db_mixin, - l3_dvr_db.L3_NAT_with_dvr_db_mixin, - l3_gwmode_db.L3_NAT_db_mixin, - l3_agentschedulers_db.L3AgentSchedulerDbMixin): - - """Implementation of the OpenDaylight L3 Router Service Plugin. - - This class implements a L3 service plugin that provides - router and floatingip resources and manages associated - request/response. 
- """ - supported_extension_aliases = ["dvr", "router", "ext-gw-mode", - "extraroute"] - - def __init__(self): - super(OpenDaylightL3RouterPlugin, self).__init__() - self.setup_rpc() - self.client = odl_client.OpenDaylightRestClient.create_client() - - def setup_rpc(self): - self.topic = topics.L3PLUGIN - self.conn = n_rpc.create_connection() - self.agent_notifiers.update( - {q_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) - self.endpoints = [l3_rpc.L3RpcCallback()] - self.conn.create_consumer(self.topic, self.endpoints, - fanout=False) - self.conn.consume_in_threads() - - def get_plugin_type(self): - return plugin_constants.L3 - - def get_plugin_description(self): - """returns string description of the plugin.""" - return ("L3 Router Service Plugin for basic L3 forwarding" - " using OpenDaylight") - - def filter_update_router_attributes(self, router): - """Filter out router attributes for an update operation.""" - odl_utils.try_del(router, ['id', 'tenant_id', 'status']) - - def create_router(self, context, router): - router_dict = super(OpenDaylightL3RouterPlugin, self).create_router( - context, router) - url = ROUTERS - self.client.sendjson('post', url, {ROUTERS[:-1]: router_dict}) - return router_dict - - def update_router(self, context, id, router): - router_dict = super(OpenDaylightL3RouterPlugin, self).update_router( - context, id, router) - url = ROUTERS + "/" + id - resource = router_dict.copy() - self.filter_update_router_attributes(resource) - self.client.sendjson('put', url, {ROUTERS[:-1]: resource}) - return router_dict - - def delete_router(self, context, id): - super(OpenDaylightL3RouterPlugin, self).delete_router(context, id) - url = ROUTERS + "/" + id - self.client.sendjson('delete', url, None) - - def create_floatingip(self, context, floatingip, - initial_status=q_const.FLOATINGIP_STATUS_ACTIVE): - fip = floatingip['floatingip'] - if fip.get('port_id') is None: - initial_status = q_const.FLOATINGIP_STATUS_DOWN - fip_dict = super(OpenDaylightL3RouterPlugin, self).create_floatingip( - context, floatingip, initial_status) - url = FLOATINGIPS - self.client.sendjson('post', url, {FLOATINGIPS[:-1]: fip_dict}) - return fip_dict - - def update_floatingip(self, context, id, floatingip): - with context.session.begin(subtransactions=True): - fip_dict = super(OpenDaylightL3RouterPlugin, - self).update_floatingip(context, id, floatingip) - # Update status based on association - if fip_dict['port_id'] is None: - status = q_const.FLOATINGIP_STATUS_DOWN - else: - status = q_const.FLOATINGIP_STATUS_ACTIVE - fip_dict['status'] = status - self.update_floatingip_status(context, id, fip_dict['status']) - - url = FLOATINGIPS + "/" + id - self.client.sendjson('put', url, {FLOATINGIPS[:-1]: fip_dict}) - return fip_dict - - def delete_floatingip(self, context, id): - super(OpenDaylightL3RouterPlugin, self).delete_floatingip(context, id) - url = FLOATINGIPS + "/" + id - self.client.sendjson('delete', url, None) - - dvr_deletens_if_no_port_warned = False - - def dvr_deletens_if_no_port(self, context, port_id): - # TODO(yamahata): implement this method or delete this logging - # For now, this is defined to avoid attribute exception - # Since ODL L3 does not create namespaces, this is always going to - # be a noop. When it is confirmed, delete this comment and logging - if not self.dvr_deletens_if_no_port_warned: - LOG.debug('dvr is not suported yet. 
' - 'this method needs to be implemented') - self.dvr_deletens_if_no_port_warned = True - return [] diff --git a/networking_odl/l3/l3_odl_v2.py b/networking_odl/l3/l3_odl_v2.py deleted file mode 100644 index f92e32791..000000000 --- a/networking_odl/l3/l3_odl_v2.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log as logging - -from neutron.db import api as db_api -from neutron.db import common_db_mixin -from neutron.db import extraroute_db -from neutron.db import l3_agentschedulers_db -from neutron.db import l3_dvr_db -from neutron.db import l3_gwmode_db -from neutron_lib import constants as q_const -from neutron_lib.plugins import constants as plugin_constants - -from networking_odl.common import config # noqa -from networking_odl.common import constants as odl_const -from networking_odl.journal import full_sync -from networking_odl.journal import journal - -LOG = logging.getLogger(__name__) - -L3_RESOURCES = { - odl_const.ODL_ROUTER: odl_const.ODL_ROUTERS, - odl_const.ODL_FLOATINGIP: odl_const.ODL_FLOATINGIPS -} - - -class OpenDaylightL3RouterPlugin( - common_db_mixin.CommonDbMixin, - extraroute_db.ExtraRoute_db_mixin, - l3_dvr_db.L3_NAT_with_dvr_db_mixin, - l3_gwmode_db.L3_NAT_db_mixin, - l3_agentschedulers_db.L3AgentSchedulerDbMixin): - - """Implementation of the OpenDaylight L3 Router Service Plugin. - - This class implements a L3 service plugin that provides - router and floatingip resources and manages associated - request/response. - """ - supported_extension_aliases = ["dvr", "router", "ext-gw-mode", - "extraroute"] - - def __init__(self): - super(OpenDaylightL3RouterPlugin, self).__init__() - - # TODO(rcurran): Continue investigation into how many journal threads - # to run per neutron controller deployment. 
- self.journal = journal.OpenDaylightJournalThread() - full_sync.register(plugin_constants.L3, L3_RESOURCES) - - def get_plugin_type(self): - return plugin_constants.L3 - - def get_plugin_description(self): - """Returns string description of the plugin.""" - return ("L3 Router Service Plugin for basic L3 forwarding " - "using OpenDaylight.") - - @journal.call_thread_on_end - def create_router(self, context, router): - session = db_api.get_writer_session() - with session.begin(subtransactions=True): - router_dict = super( - OpenDaylightL3RouterPlugin, self).create_router(context, - router) - journal.record(context, odl_const.ODL_ROUTER, router_dict['id'], - odl_const.ODL_CREATE, router_dict) - return router_dict - - @journal.call_thread_on_end - def update_router(self, context, router_id, router): - session = db_api.get_writer_session() - with session.begin(subtransactions=True): - router_dict = super( - OpenDaylightL3RouterPlugin, self).update_router( - context, router_id, router) - journal.record(context, odl_const.ODL_ROUTER, - router_id, odl_const.ODL_UPDATE, router_dict) - return router_dict - - @journal.call_thread_on_end - def delete_router(self, context, router_id): - session = db_api.get_writer_session() - router_dict = self.get_router(context, router_id) - dependency_list = [router_dict['gw_port_id']] - with session.begin(subtransactions=True): - super(OpenDaylightL3RouterPlugin, self).delete_router(context, - router_id) - journal.record(context, odl_const.ODL_ROUTER, router_id, - odl_const.ODL_DELETE, dependency_list) - - @journal.call_thread_on_end - def create_floatingip(self, context, floatingip, - initial_status=q_const.FLOATINGIP_STATUS_ACTIVE): - fip = floatingip['floatingip'] - if fip.get('port_id') is None: - initial_status = q_const.FLOATINGIP_STATUS_DOWN - session = db_api.get_writer_session() - with session.begin(subtransactions=True): - fip_dict = super( - OpenDaylightL3RouterPlugin, self).create_floatingip( - context, floatingip, initial_status) - journal.record(context, odl_const.ODL_FLOATINGIP, fip_dict['id'], - odl_const.ODL_CREATE, fip_dict) - return fip_dict - - @journal.call_thread_on_end - def update_floatingip(self, context, floatingip_id, floatingip): - session = db_api.get_writer_session() - with session.begin(subtransactions=True): - fip_dict = super( - OpenDaylightL3RouterPlugin, self).update_floatingip( - context, floatingip_id, floatingip) - - # Update status based on association - if fip_dict.get('port_id') is None: - fip_dict['status'] = q_const.FLOATINGIP_STATUS_DOWN - else: - fip_dict['status'] = q_const.FLOATINGIP_STATUS_ACTIVE - self.update_floatingip_status(context, floatingip_id, - fip_dict['status']) - - journal.record(context, odl_const.ODL_FLOATINGIP, floatingip_id, - odl_const.ODL_UPDATE, fip_dict) - return fip_dict - - @journal.call_thread_on_end - def delete_floatingip(self, context, floatingip_id): - session = db_api.get_writer_session() - floatingip_dict = self.get_floatingip(context, floatingip_id) - dependency_list = [floatingip_dict['router_id']] - dependency_list.append(floatingip_dict['floating_network_id']) - with session.begin(subtransactions=True): - super(OpenDaylightL3RouterPlugin, self).delete_floatingip( - context, floatingip_id) - journal.record(context, odl_const.ODL_FLOATINGIP, floatingip_id, - odl_const.ODL_DELETE, dependency_list) - - @journal.call_thread_on_end - def add_router_interface(self, context, router_id, interface_info): - session = db_api.get_writer_session() - with session.begin(subtransactions=True): - 
new_router = super( - OpenDaylightL3RouterPlugin, self).add_router_interface( - context, router_id, interface_info) - return new_router - - @journal.call_thread_on_end - def remove_router_interface(self, context, router_id, interface_info): - session = db_api.get_writer_session() - with session.begin(subtransactions=True): - new_router = super( - OpenDaylightL3RouterPlugin, self).remove_router_interface( - context, router_id, interface_info) - return new_router - - dvr_deletens_if_no_port_warned = False - - def dvr_deletens_if_no_port(self, context, port_id): - # TODO(yamahata): implement this method or delete this logging - # For now, this is defined to avoid attribute exception - # Since ODL L3 does not create namespaces, this is always going to - # be a noop. When it is confirmed, delete this comment and logging - if not self.dvr_deletens_if_no_port_warned: - LOG.debug('dvr is not suported yet. ' - 'this method needs to be implemented') - self.dvr_deletens_if_no_port_warned = True - return [] diff --git a/networking_odl/lbaas/__init__.py b/networking_odl/lbaas/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/lbaas/driver_v2.py b/networking_odl/lbaas/driver_v2.py deleted file mode 100644 index c8a923086..000000000 --- a/networking_odl/lbaas/driver_v2.py +++ /dev/null @@ -1,183 +0,0 @@ -# -# Copyright (C) 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_config import cfg -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from neutron_lbaas.drivers import driver_base -from neutron_lbaas.drivers import driver_mixins - -from networking_odl.common import client as odl_client -from networking_odl.common import constants as odl_const - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') -LOG = logging.getLogger(__name__) - -LBAAS = "lbaas" - - -class OpenDaylightLbaasDriverV2(driver_base.LoadBalancerBaseDriver): - - @log_helpers.log_method_call - def __init__(self, plugin): - LOG.debug("Initializing OpenDaylight LBaaS driver") - self.plugin = plugin - self.client = odl_client.OpenDaylightRestClient.create_client() - self.load_balancer = ODLLoadBalancerManager(self, self.client) - self.listener = ODLListenerManager(self, self.client) - self.pool = ODLPoolManager(self, self.client) - self.member = ODLMemberManager(self, self.client) - self.health_monitor = ODLHealthMonitorManager(self, self.client) - - -class OpenDaylightManager(driver_mixins.BaseManagerMixin): - - out_of_sync = True - url_path = "" - obj_type = "" - obj_name = "" - - """OpenDaylight LBaaS Driver for the V2 API - - This code is the backend implementation for the OpenDaylight - LBaaS V2 driver for OpenStack Neutron. 
- """ - - @log_helpers.log_method_call - def __init__(self, driver, client, obj_type): - super(OpenDaylightManager, self).__init__(driver) - self.client = client - self.obj_type = obj_type - self.url_path = LBAAS + '/' + obj_type - self.obj_name = obj_type[:-1] - - @log_helpers.log_method_call - @driver_base.driver_op - def create(self, context, obj): - self.client.sendjson('post', self.url_path, - {self.obj_name: obj.to_api_dict()}) - - @log_helpers.log_method_call - @driver_base.driver_op - def update(self, context, obj): - self.client.sendjson('put', self.url_path + '/' + obj.id, - {self.obj_name: obj.to_api_dict()}) - - @log_helpers.log_method_call - @driver_base.driver_op - def delete(self, context, obj): - self.client.sendjson('delete', self.url_path + '/' + obj.id, None) - - -class ODLLoadBalancerManager(OpenDaylightManager, - driver_base.BaseLoadBalancerManager): - - @log_helpers.log_method_call - def __init__(self, driver, client): - super(ODLLoadBalancerManager, self).__init__( - driver, client, odl_const.ODL_LOADBALANCERS) - - @log_helpers.log_method_call - @driver_base.driver_op - def refresh(self, context, lb): - # TODO(lijingjing): implement this method - # This is intended to trigger the backend to check and repair - # the state of this load balancer and all of its dependent objects - pass - - @log_helpers.log_method_call - @driver_base.driver_op - def stats(self, context, lb): - # TODO(lijingjing): implement this method - pass - - # NOTE(yamahata): workaround for pylint - # pylint raise false positive of abstract-class-instantiated. - # method resolution order is as follows and db_delete_method is resolved - # by BaseLoadBalancerManager. However pylint complains as this - # class is still abstract class - # mro: - # ODLLoadBalancerManager - # OpenDaylightManager - # neutron_lbaas.drivers.driver_base.BaseLoadBalancerManager - # neutron_lbaas.drivers.driver_mixins.BaseRefreshMixin - # neutron_lbaas.drivers.driver_mixins.BaseStatsMixin - # neutron_lbaas.drivers.driver_mixins.BaseManagerMixin - # __builtin__.object - @property - def db_delete_method(self): - return driver_base.BaseLoadBalancerManager.db_delete_method - - -class ODLListenerManager(OpenDaylightManager, - driver_base.BaseListenerManager): - - @log_helpers.log_method_call - def __init__(self, driver, client): - super(ODLListenerManager, self).__init__( - driver, client, odl_const.ODL_LISTENERS) - - -class ODLPoolManager(OpenDaylightManager, - driver_base.BasePoolManager): - - @log_helpers.log_method_call - def __init__(self, driver, client): - super(ODLPoolManager, self).__init__( - driver, client, odl_const.ODL_POOLS) - - -class ODLMemberManager(OpenDaylightManager, - driver_base.BaseMemberManager): - - # NOTE:It is for lbaas v2 api but using v1 mechanism of networking-odl. 
- - @log_helpers.log_method_call - def __init__(self, driver, client): - super(ODLMemberManager, self).__init__( - driver, client, odl_const.ODL_MEMBERS) - - @log_helpers.log_method_call - @driver_base.driver_op - def create(self, context, obj): - self.client.sendjson('post', self._member_url(obj), - {self.obj_name: obj.to_api_dict()}) - - @log_helpers.log_method_call - @driver_base.driver_op - def update(self, context, obj): - self.client.sendjson('put', self._member_url(obj) + '/' + obj.id, - {self.obj_name: obj.to_api_dict()}) - - @log_helpers.log_method_call - @driver_base.driver_op - def delete(self, context, obj): - self.client.sendjson('delete', - self._member_url(obj) + '/' + obj.id, None) - - def _member_url(self, obj): - return (LBAAS + '/' + odl_const.ODL_POOLS + '/' + obj.pool_id + '/' + - odl_const.ODL_MEMBERS) - - -class ODLHealthMonitorManager(OpenDaylightManager, - driver_base.BaseHealthMonitorManager): - - @log_helpers.log_method_call - def __init__(self, driver, client): - super(ODLHealthMonitorManager, self).__init__( - driver, client, odl_const.ODL_HEALTHMONITORS) diff --git a/networking_odl/lbaas/lbaasv2_driver_v2.py b/networking_odl/lbaas/lbaasv2_driver_v2.py deleted file mode 100644 index 896884b60..000000000 --- a/networking_odl/lbaas/lbaasv2_driver_v2.py +++ /dev/null @@ -1,142 +0,0 @@ -# -# Copyright (C) 2017 NEC, Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_config import cfg -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from neutron_lbaas.drivers import driver_base -from neutron_lib.plugins import constants as nlib_const - -from networking_odl.common import constants as odl_const -from networking_odl.journal import full_sync -from networking_odl.journal import journal - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') -LOG = logging.getLogger(__name__) - -LBAAS_RESOURCES = { - odl_const.ODL_LOADBALANCER: odl_const.ODL_LOADBALANCERS, - odl_const.ODL_LISTENER: odl_const.ODL_LISTENERS, - odl_const.ODL_POOL: odl_const.ODL_POOLS, - odl_const.ODL_MEMBER: odl_const.ODL_MEMBERS, - odl_const.ODL_HEALTHMONITOR: odl_const.ODL_HEALTHMONITORS -} - - -class OpenDaylightManager(driver_base.LoadBalancerBaseDriver): - """OpenDaylight LBaaS Driver for the V2 API - - This code is the backend implementation for the OpenDaylight - LBaaS V2 driver for OpenStack Neutron. 
- """ - - @log_helpers.log_method_call - def __init__(self, driver, obj_type): - LOG.debug("Initializing OpenDaylight LBaaS driver") - super(OpenDaylightManager, self).__init__(driver) - self.journal = journal.OpenDaylightJournalThread() - self.obj_type = obj_type - full_sync.register(nlib_const.LOADBALANCERV2, LBAAS_RESOURCES) - - def _journal_record(self, context, obj_type, obj_id, operation, obj): - obj_type = ("lbaas/%s" % obj_type) - journal.record(context, obj_type, obj_id, operation, obj) - self.journal.set_sync_event() - - @log_helpers.log_method_call - @driver_base.driver_op - def create(self, context, obj): - self._journal_record(context, self.obj_type, obj.id, - odl_const.ODL_CREATE, obj) - - @log_helpers.log_method_call - @driver_base.driver_op - def update(self, context, obj): - self._journal_record(context, self.obj_type, obj.id, - odl_const.ODL_UPDATE, obj) - - @log_helpers.log_method_call - @driver_base.driver_op - def delete(self, context, obj): - self._journal_record(context, self.obj_type, obj.id, - odl_const.ODL_DELETE, obj) - - -class ODLLoadBalancerManager(OpenDaylightManager, - driver_base.BaseLoadBalancerManager): - - @log_helpers.log_method_call - def __init__(self, driver): - super(ODLLoadBalancerManager, self).__init__( - driver, odl_const.ODL_LOADBALANCER) - - @log_helpers.log_method_call - @driver_base.driver_op - def refresh(self, context, lb): - # TODO(lijingjing): implement this method - # This is intended to trigger the backend to check and repair - # the state of this load balancer and all of its dependent objects - pass - - @log_helpers.log_method_call - @driver_base.driver_op - def stats(self, context, lb): - # TODO(rajivk): implement this method - pass - - -class ODLListenerManager(OpenDaylightManager, - driver_base.BaseListenerManager): - - @log_helpers.log_method_call - def __init__(self, driver): - super(ODLListenerManager, self).__init__( - driver, odl_const.ODL_LISTENER) - - -class ODLPoolManager(OpenDaylightManager, - driver_base.BasePoolManager): - - @log_helpers.log_method_call - def __init__(self, driver): - super(ODLPoolManager, self).__init__( - driver, odl_const.ODL_POOL) - - -class ODLMemberManager(OpenDaylightManager, - driver_base.BaseMemberManager): - - @log_helpers.log_method_call - def __init__(self, driver): - super(ODLMemberManager, self).__init__( - driver, odl_const.ODL_MEMBER) - - journal.register_url_builder(odl_const.ODL_MEMBER, - self.lbaas_member_url_builder) - - @staticmethod - def lbaas_member_url_builder(row): - return ("lbaas/pools/%s/member" % row.data.pool.id) - - -class ODLHealthMonitorManager(OpenDaylightManager, - driver_base.BaseHealthMonitorManager): - - @log_helpers.log_method_call - def __init__(self, driver): - super(ODLHealthMonitorManager, self).__init__( - driver, odl_const.ODL_HEALTHMONITOR) diff --git a/networking_odl/ml2/README.odl b/networking_odl/ml2/README.odl deleted file mode 100644 index eef8d4441..000000000 --- a/networking_odl/ml2/README.odl +++ /dev/null @@ -1,41 +0,0 @@ -OpenDaylight ML2 MechanismDriver -================================ -OpenDaylight is an Open Source SDN Controller developed by a plethora of -companies and hosted by the Linux Foundation. The OpenDaylight website -contains more information on the capabilities OpenDaylight provides: - - http://www.opendaylight.org - -Theory of operation -=================== -The OpenStack Neutron integration with OpenDaylight consists of the ML2 -MechanismDriver which acts as a REST proxy and passess all Neutron API -calls into OpenDaylight. 
OpenDaylight contains a NB REST service (called -the NeutronAPIService) which caches data from these proxied API calls and -makes it available to other services inside of OpenDaylight. One current -user of the SB side of the NeutronAPIService is the OVSDB code in -OpenDaylight. OVSDB uses the neutron information to isolate tenant networks -using GRE or VXLAN tunnels. - -How to use the OpenDaylight ML2 MechanismDriver -=============================================== -To use the ML2 MechanismDriver, you need to ensure you have it configured -as one of the "mechanism_drivers" in ML2: - - mechanism_drivers=opendaylight - -The next step is to setup the "[ml2_odl]" section in either the ml2_conf.ini -file or in a separate ml2_conf_odl.ini file. An example is shown below: - - [ml2_odl] - password = admin - username = admin - url = http://192.168.100.1:8080/controller/nb/v2/neutron - -When starting OpenDaylight, ensure you have the SimpleForwarding application -disabled or remove the .jar file from the plugins directory. Also ensure you -start OpenDaylight before you start OpenStack Neutron. - -There is devstack support for this which will automatically pull down OpenDaylight -and start it as part of devstack as well. The patch for this will likely merge -around the same time as this patch merges. diff --git a/networking_odl/ml2/__init__.py b/networking_odl/ml2/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/ml2/legacy_port_binding.py b/networking_odl/ml2/legacy_port_binding.py deleted file mode 100644 index 4df0f541c..000000000 --- a/networking_odl/ml2/legacy_port_binding.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
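The url option shown above is the base of ODL's Neutron northbound API; the driver appends the collection name (with underscores converted to dashes) and, for updates and deletes, the object id. A rough sketch of that URL composition, reusing the README's example base URL (the helper name is hypothetical):

    def odl_url(base_url, collection, obj_id=None):
        """Build the northbound URL the REST client posts/puts/deletes against."""
        # Collection names use dashes on the ODL side (e.g. security-groups).
        path = collection.replace('_', '-')
        if obj_id:
            path = path + '/' + obj_id
        return base_url.rstrip('/') + '/' + path


    base = 'http://192.168.100.1:8080/controller/nb/v2/neutron'
    print(odl_url(base, 'networks'))
    # http://192.168.100.1:8080/controller/nb/v2/neutron/networks
    print(odl_url(base, 'security_groups', 'a1b2c3'))
    # http://192.168.100.1:8080/controller/nb/v2/neutron/security-groups/a1b2c3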
- - -from oslo_log import log - -from neutron_lib.api.definitions import portbindings -from neutron_lib import constants as n_const -from neutron_lib.plugins.ml2 import api - -from networking_odl.ml2 import port_binding - - -LOG = log.getLogger(__name__) - - -class LegacyPortBindingManager(port_binding.PortBindingController): - - def __init__(self): - self.vif_details = {portbindings.CAP_PORT_FILTER: True} - self.supported_vnic_types = [portbindings.VNIC_NORMAL] - - def bind_port(self, port_context): - """Set binding for all valid segments - - """ - vnic_type = port_context.current.get(portbindings.VNIC_TYPE, - portbindings.VNIC_NORMAL) - if vnic_type not in self.supported_vnic_types: - LOG.debug("Refusing to bind due to unsupported vnic_type: %s", - vnic_type) - return - - valid_segment = None - for segment in port_context.segments_to_bind: - if self._check_segment(segment): - valid_segment = segment - break - - if valid_segment: - vif_type = self._get_vif_type(port_context) - LOG.debug("Bind port %(port)s on network %(network)s with valid " - "segment %(segment)s and VIF type %(vif_type)r.", - {'port': port_context.current['id'], - 'network': port_context.network.current['id'], - 'segment': valid_segment, 'vif_type': vif_type}) - - port_context.set_binding( - valid_segment[api.ID], vif_type, - self.vif_details, - status=n_const.PORT_STATUS_ACTIVE) - - def _check_segment(self, segment): - """Verify a segment is valid for the OpenDaylight MechanismDriver. - - Verify the requested segment is supported by ODL and return True or - False to indicate this to callers. - """ - - network_type = segment[api.NETWORK_TYPE] - return network_type in [n_const.TYPE_FLAT, n_const.TYPE_LOCAL, - n_const.TYPE_GRE, n_const.TYPE_VXLAN, - n_const.TYPE_VLAN] - - def _get_vif_type(self, port_context): - """Get VIF type string for given PortContext - - Dummy implementation: it always returns following constant. - neutron_lib.api.definitions.portbindings.VIF_TYPE_OVS - """ - - return portbindings.VIF_TYPE_OVS diff --git a/networking_odl/ml2/mech_driver.py b/networking_odl/ml2/mech_driver.py deleted file mode 100644 index 829713a19..000000000 --- a/networking_odl/ml2/mech_driver.py +++ /dev/null @@ -1,512 +0,0 @@ -# Copyright (c) 2013-2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
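The legacy binding controller above accepts only a fixed set of network types and always reports the OVS VIF type. A dependency-free sketch of its segment filter (plain strings stand in for the neutron_lib constants):

    SUPPORTED_NETWORK_TYPES = ('flat', 'local', 'gre', 'vxlan', 'vlan')


    def pick_segment(segments_to_bind):
        """Return the first segment whose network_type ODL can handle, else None."""
        for segment in segments_to_bind:
            if segment.get('network_type') in SUPPORTED_NETWORK_TYPES:
                return segment
        return None


    segments = [{'id': 'seg-1', 'network_type': 'geneve'},
                {'id': 'seg-2', 'network_type': 'vxlan'}]
    print(pick_segment(segments))   # -> the vxlan segment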
- -import abc -import copy - -import six - -import netaddr -from neutron_lib.api.definitions import provider_net as providernet -from neutron_lib.callbacks import resources -from neutron_lib import constants as p_const -from neutron_lib import context as neutron_context -from neutron_lib import exceptions as n_exc -from neutron_lib.plugins.ml2 import api -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -import requests - -from neutron.common import utils -from neutron.extensions import allowedaddresspairs as addr_pair -from neutron.extensions import multiprovidernet as mpnet -from neutron.extensions import securitygroup as sg -from neutron.plugins.ml2 import driver_context - -from networking_odl.common import callback as odl_call -from networking_odl.common import client as odl_client -from networking_odl.common import constants as odl_const -from networking_odl.common import filters -from networking_odl.common import odl_features -from networking_odl.common import utils as odl_utils -from networking_odl.ml2 import port_binding -from networking_odl.ml2 import port_status_update -from networking_odl.trunk import trunk_driver_v1 as trunk_driver - - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') -LOG = logging.getLogger(__name__) - -not_found_exception_map = {odl_const.ODL_NETWORKS: n_exc.NetworkNotFound, - odl_const.ODL_SUBNETS: n_exc.SubnetNotFound, - odl_const.ODL_PORTS: n_exc.PortNotFound, - odl_const.ODL_SGS: sg.SecurityGroupNotFound, - odl_const.ODL_SG_RULES: - sg.SecurityGroupRuleNotFound} - - -@six.add_metaclass(abc.ABCMeta) -class ResourceFilterBase(object): - @staticmethod - @abc.abstractmethod - def filter_create_attributes(resource, context): - pass - - @staticmethod - @abc.abstractmethod - def filter_update_attributes(resource, context): - pass - - @staticmethod - @abc.abstractmethod - def filter_create_attributes_with_plugin(resource, plugin, dbcontext): - pass - - @staticmethod - def _filter_unmapped_null(resource_dict, unmapped_keys): - # NOTE(yamahata): bug work around - # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475 - # Null-value for an unmapped element causes next mapped - # collection to contain a null value - # JSON: { "unmappedField": null, "mappedCollection": [ "a" ] } - # - # Java Object: - # class Root { - # Collection mappedCollection = new ArrayList; - # } - # - # Result: - # Field B contains one element; null - # - # TODO(yamahata): update along side with neutron and ODL - # add when neutron adds more extensions - # delete when ODL neutron northbound supports it - # TODO(yamahata): do same thing for other resources - keys_to_del = [key for key in unmapped_keys - if resource_dict.get(key) is None] - if keys_to_del: - odl_utils.try_del(resource_dict, keys_to_del) - - -class NetworkFilter(ResourceFilterBase): - _UNMAPPED_KEYS = ['qos_policy_id'] - - @classmethod - def filter_create_attributes(cls, network, context): - """Filter out network attributes not required for a create.""" - odl_utils.try_del(network, ['status', 'subnets']) - cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS) - - @classmethod - def filter_update_attributes(cls, network, context): - """Filter out network attributes for an update operation.""" - odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id']) - cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS) - - @classmethod - def filter_create_attributes_with_plugin(cls, network, plugin, dbcontext): - context = driver_context.NetworkContext(plugin, 
dbcontext, network) - cls.filter_create_attributes(network, context) - - -class SubnetFilter(ResourceFilterBase): - _UNMAPPED_KEYS = ['segment_id', 'subnetpool_id'] - - @classmethod - def filter_create_attributes(cls, subnet, context): - """Filter out subnet attributes not required for a create.""" - cls._filter_unmapped_null(subnet, cls._UNMAPPED_KEYS) - - @classmethod - def filter_update_attributes(cls, subnet, context): - """Filter out subnet attributes for an update operation.""" - odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr', - 'allocation_pools', 'tenant_id']) - cls._filter_unmapped_null(subnet, cls._UNMAPPED_KEYS) - - @classmethod - def filter_create_attributes_with_plugin(cls, subnet, plugin, dbcontext): - network = plugin.get_network(dbcontext, subnet['network_id']) - context = driver_context.SubnetContext(plugin, dbcontext, subnet, - network) - cls.filter_create_attributes(subnet, context) - cls._filter_unmapped_null(subnet, cls._UNMAPPED_KEYS) - - -class PortFilter(ResourceFilterBase): - _UNMAPPED_KEYS = ['binding:profile', 'dns_name', - 'port_security_enabled', 'qos_policy_id'] - - @staticmethod - def _add_security_groups(port, context): - """Populate the 'security_groups' field with entire records.""" - dbcontext = context._plugin_context - groups = [context._plugin.get_security_group(dbcontext, sg) - for sg in port['security_groups']] - port['security_groups'] = groups - - @classmethod - def _fixup_allowed_ipaddress_pairs(cls, allowed_address_pairs): - """unify (ip address or network address) into network address""" - for address_pair in allowed_address_pairs: - ip_address = address_pair['ip_address'] - network_address = str(netaddr.IPNetwork(ip_address)) - address_pair['ip_address'] = network_address - - @classmethod - def filter_create_attributes(cls, port, context): - """Filter out port attributes not required for a create.""" - cls._add_security_groups(port, context) - cls._fixup_allowed_ipaddress_pairs(port[addr_pair.ADDRESS_PAIRS]) - cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS) - odl_utils.try_del(port, ['status']) - - # NOTE(yamahata): work around for port creation for router - # tenant_id=''(empty string) is passed when port is created - # by l3 plugin internally for router. - # On the other hand, ODL doesn't accept empty string for tenant_id. - # In that case, deduce tenant_id from network_id for now. - # Right fix: modify Neutron so that don't allow empty string - # for tenant_id even for port for internal use. 
- # TODO(yamahata): eliminate this work around when neutron side - # is fixed - # assert port['tenant_id'] != '' - if port['tenant_id'] == '': - LOG.debug('empty string was passed for tenant_id: %s(port)', port) - port['tenant_id'] = context._network_context._network['tenant_id'] - - @classmethod - def filter_update_attributes(cls, port, context): - """Filter out port attributes for an update operation.""" - cls._add_security_groups(port, context) - cls._fixup_allowed_ipaddress_pairs(port[addr_pair.ADDRESS_PAIRS]) - cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS) - odl_utils.try_del(port, ['network_id', 'id', 'status', 'tenant_id']) - - @classmethod - def filter_create_attributes_with_plugin(cls, port, plugin, dbcontext): - network = plugin.get_network(dbcontext, port['network_id']) - port_db = plugin._get_port(dbcontext, port['id']) - context = driver_context.PortContext( - plugin, dbcontext, port, network, port_db.port_binding, None) - cls.filter_create_attributes(port, context) - - -class SecurityGroupFilter(ResourceFilterBase): - @staticmethod - def filter_create_attributes(sg, context): - """Filter out security-group attributes not required for a create.""" - pass - - @staticmethod - def filter_update_attributes(sg, context): - """Filter out security-group attributes for an update operation.""" - pass - - @staticmethod - def filter_create_attributes_with_plugin(sg, plugin, dbcontext): - pass - - -class SecurityGroupRuleFilter(ResourceFilterBase): - @staticmethod - def filter_create_attributes(sg_rule, context): - """Filter out sg-rule attributes not required for a create.""" - filters.filter_security_group_rule(sg_rule) - - @staticmethod - def filter_update_attributes(sg_rule, context): - """Filter out sg-rule attributes for an update operation.""" - filters.filter_security_group_rule(sg_rule) - - @staticmethod - def filter_create_attributes_with_plugin(sg_rule, plugin, dbcontext): - filters.filter_security_group_rule(sg_rule) - - -class OpenDaylightDriver(object): - - """OpenDaylight Python Driver for Neutron. - - This code is the backend implementation for the OpenDaylight ML2 - MechanismDriver for OpenStack Neutron. - """ - FILTER_MAP = { - odl_const.ODL_NETWORKS: NetworkFilter, - odl_const.ODL_SUBNETS: SubnetFilter, - odl_const.ODL_PORTS: PortFilter, - odl_const.ODL_SGS: SecurityGroupFilter, - odl_const.ODL_SG_RULES: SecurityGroupRuleFilter, - } - out_of_sync = True - - def __init__(self): - LOG.debug("Initializing OpenDaylight ML2 driver") - self.client = odl_client.OpenDaylightRestClient.create_client() - self.sec_handler = odl_call.OdlSecurityGroupsHandler( - None, self.sync_from_callback) - self.port_binding_controller = port_binding.PortBindingManager.create() - # TODO(rzang): Each port binding controller should have any necessary - # parameter passed in from configuration files. - # BTW, CAP_PORT_FILTER seems being obsoleted. - # Leave the code commmeted out for now for future reference. - # - # self.vif_details = {portbindings.CAP_PORT_FILTER: True} - # self._network_topology = network_topology.NetworkTopologyManager( - # vif_details=self.vif_details) - - def synchronize(self, operation, object_type, context): - """Synchronize ODL with Neutron following a configuration change.""" - if self.out_of_sync: - self.sync_full(context._plugin) - if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]: - # NOTE(yamahata): work around that sync_full doesn't know - # how to handle UPDATE and DELETE at the moment. 
- # TODO(yamahata): implement TODOs in sync_full and remove this - # work around - self.sync_single_resource(operation, object_type, context) - else: - self.sync_single_resource(operation, object_type, context) - - def sync_resources(self, plugin, dbcontext, collection_name): - """Sync objects from Neutron over to OpenDaylight. - - This will handle syncing networks, subnets, and ports from Neutron to - OpenDaylight. It also filters out the requisite items which are not - valid for create API operations. - """ - filter_cls = self.FILTER_MAP[collection_name] - to_be_synced = [] - obj_getter = getattr(plugin, 'get_%s' % collection_name) - if collection_name == odl_const.ODL_SGS: - resources = obj_getter(dbcontext, default_sg=True) - else: - resources = obj_getter(dbcontext) - for resource in resources: - try: - # Convert underscores to dashes in the URL for ODL - collection_name_url = odl_utils.neutronify(collection_name) - urlpath = collection_name_url + '/' + resource['id'] - self.client.sendjson('get', urlpath, None) - except requests.exceptions.HTTPError as e: - with excutils.save_and_reraise_exception() as ctx: - if e.response.status_code == requests.codes.not_found: - filter_cls.filter_create_attributes_with_plugin( - resource, plugin, dbcontext) - to_be_synced.append(resource) - ctx.reraise = False - else: - # TODO(yamahata): compare result with resource. - # If they don't match, update it below - pass - - if to_be_synced: - key = collection_name[:-1] if len(to_be_synced) == 1 else ( - collection_name) - # Convert underscores to dashes in the URL for ODL - collection_name_url = odl_utils.neutronify(collection_name) - self.client.sendjson('post', collection_name_url, - {key: to_be_synced}) - - # https://bugs.launchpad.net/networking-odl/+bug/1371115 - # TODO(yamahata): update resources with unsyned attributes - # TODO(yamahata): find dangling ODL resouce that was deleted in - # neutron db - - @utils.synchronized('odl-sync-full') - def sync_full(self, plugin): - """Resync the entire database to ODL. - - Transition to the in-sync state on success. - Note: we only allow a single thread in here at a time. - """ - if not self.out_of_sync: - return - dbcontext = neutron_context.get_admin_context() - for collection_name in [odl_const.ODL_SGS, - odl_const.ODL_SG_RULES, - odl_const.ODL_NETWORKS, - odl_const.ODL_SUBNETS, - odl_const.ODL_PORTS]: - self.sync_resources(plugin, dbcontext, collection_name) - self.out_of_sync = False - - def sync_single_resource(self, operation, object_type, context): - """Sync over a single resource from Neutron to OpenDaylight. - - Handle syncing a single operation over to OpenDaylight, and correctly - filter attributes out which are not required for the requisite - operation (create or update) being handled. 
- """ - # Convert underscores to dashes in the URL for ODL - object_type_url = odl_utils.neutronify(object_type) - try: - obj_id = context.current['id'] - if operation == odl_const.ODL_DELETE: - self.out_of_sync |= not self.client.try_delete( - object_type_url + '/' + obj_id) - else: - filter_cls = self.FILTER_MAP[object_type] - if operation == odl_const.ODL_CREATE: - urlpath = object_type_url - method = 'post' - attr_filter = filter_cls.filter_create_attributes - elif operation == odl_const.ODL_UPDATE: - urlpath = object_type_url + '/' + obj_id - method = 'put' - attr_filter = filter_cls.filter_update_attributes - resource = copy.deepcopy(context.current) - attr_filter(resource, context) - self.client.sendjson(method, urlpath, - {object_type_url[:-1]: resource}) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Unable to perform %(operation)s on " - "%(object_type)s %(object_id)s", - {'operation': operation, - 'object_type': object_type, - 'object_id': obj_id}) - self.out_of_sync = True - - def sync_from_callback(self, context, operation, res_type, - res_id, resource_dict, **kwrags): - object_type = odl_utils.neutronify(res_type.plural) - try: - if operation == odl_const.ODL_DELETE: - self.out_of_sync |= not self.client.try_delete( - object_type + '/' + res_id) - else: - if operation == odl_const.ODL_CREATE: - urlpath = object_type - method = 'post' - elif operation == odl_const.ODL_UPDATE: - urlpath = object_type + '/' + res_id - method = 'put' - self.client.sendjson(method, urlpath, resource_dict) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Unable to perform %(operation)s on " - "%(object_type)s %(res_id)s %(resource_dict)s", - {'operation': operation, - 'object_type': object_type, - 'res_id': res_id, - 'resource_dict': resource_dict}) - self.out_of_sync = True - - # NOTE(yamahata) when security group is created, default rules - # are also created. - if (operation == odl_const.ODL_CREATE and - res_type.singular == odl_const.ODL_SG): - for rule in resource_dict[odl_const.ODL_SG][ - odl_const.ODL_SG_RULES]: - self.sync_from_callback( - context, odl_const.ODL_CREATE, - odl_call._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE], - rule['id'], {odl_const.ODL_SG_RULE: rule}) - - def bind_port(self, port_context): - """Set binding for a valid segments - - """ - self.port_binding_controller.bind_port(port_context) - - def check_vlan_transparency(self, context): - """Check VLAN transparency - - """ - # VLAN and FLAT cases, we don't know if the underlying network - # supports QinQ or VLAN. - # For now, netvirt supports only vxlan tunneling. - VLAN_TRANSPARENT_NETWORK_TYPES = [p_const.TYPE_VXLAN] - network = context.current - # see TypeManager._extend_network_dict_provider() - # single providernet - if providernet.NETWORK_TYPE in network: - return (network[providernet.NETWORK_TYPE] in - VLAN_TRANSPARENT_NETWORK_TYPES) - # multi providernet - segments = network.get(mpnet.SEGMENTS) - if segments is None: - return True - return all(segment[providernet.NETWORK_TYPE] - in VLAN_TRANSPARENT_NETWORK_TYPES - for segment in segments) - - -class OpenDaylightMechanismDriver(api.MechanismDriver): - - """Mechanism Driver for OpenDaylight. - - This driver was a port from the NCS MechanismDriver. The API - exposed by ODL is slightly different from the API exposed by NCS, - but the general concepts are the same. 
- """ - - def initialize(self): - self.url = cfg.CONF.ml2_odl.url - self.timeout = cfg.CONF.ml2_odl.timeout - self.username = cfg.CONF.ml2_odl.username - self.password = cfg.CONF.ml2_odl.password - self.odl_drv = OpenDaylightDriver() - self.trunk_driver = trunk_driver.OpenDaylightTrunkDriverV1.create() - odl_features.init() - - def get_workers(self): - return [port_status_update.OdlPortStatusUpdate()] - - # Postcommit hooks are used to trigger synchronization. - - def create_network_postcommit(self, context): - self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_NETWORKS, - context) - - def update_network_postcommit(self, context): - self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_NETWORKS, - context) - - def delete_network_postcommit(self, context): - self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_NETWORKS, - context) - - def create_subnet_postcommit(self, context): - self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_SUBNETS, - context) - - def update_subnet_postcommit(self, context): - self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_SUBNETS, - context) - - def delete_subnet_postcommit(self, context): - self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_SUBNETS, - context) - - def create_port_postcommit(self, context): - self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_PORTS, - context) - - def update_port_postcommit(self, context): - self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_PORTS, - context) - - def delete_port_postcommit(self, context): - self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_PORTS, - context) - - def bind_port(self, context): - self.odl_drv.bind_port(context) - - def check_vlan_transparency(self, context): - return self.odl_drv.check_vlan_transparency(context) diff --git a/networking_odl/ml2/mech_driver_v2.py b/networking_odl/ml2/mech_driver_v2.py deleted file mode 100644 index 72f67477e..000000000 --- a/networking_odl/ml2/mech_driver_v2.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (c) 2013-2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
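For single-resource sync, the v1 driver above maps the operation onto an HTTP verb and URL: create posts to the collection, update puts to the object URL, and delete targets the object URL. A small sketch of that mapping, assuming collection names have already been dash-converted (the helper name is hypothetical):

    def rest_call_for(operation, collection, obj_id):
        """Return (method, urlpath) the way sync_single_resource chooses them."""
        if operation == 'create':
            return 'post', collection
        if operation == 'update':
            return 'put', collection + '/' + obj_id
        if operation == 'delete':
            return 'delete', collection + '/' + obj_id
        raise ValueError('unknown operation: %s' % operation)


    print(rest_call_for('create', 'networks', 'net-1'))   # ('post', 'networks')
    print(rest_call_for('update', 'ports', 'port-1'))     # ('put', 'ports/port-1')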
- -from oslo_config import cfg -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from neutron.extensions import multiprovidernet as mpnet -from neutron_lib.api.definitions import provider_net as providernet -from neutron_lib import constants as p_const -from neutron_lib.plugins import constants as nlib_const -from neutron_lib.plugins.ml2 import api - -from networking_odl.common import callback -from networking_odl.common import config as odl_conf -from networking_odl.common import constants as odl_const -from networking_odl.common import odl_features -from networking_odl.common import postcommit -from networking_odl.journal import cleanup -from networking_odl.journal import full_sync -from networking_odl.journal import journal -from networking_odl.journal import periodic_task -from networking_odl.journal import recovery -from networking_odl.ml2 import port_binding -from networking_odl.ml2 import port_status_update -from networking_odl.qos import qos_driver_v2 as qos_driver -from networking_odl.trunk import trunk_driver_v2 as trunk_driver - -LOG = logging.getLogger(__name__) - -L2_RESOURCES = { - odl_const.ODL_SG: odl_const.ODL_SGS, - odl_const.ODL_SG_RULE: odl_const.ODL_SG_RULES, - odl_const.ODL_NETWORK: odl_const.ODL_NETWORKS, - odl_const.ODL_SUBNET: odl_const.ODL_SUBNETS, - odl_const.ODL_PORT: odl_const.ODL_PORTS -} - - -@postcommit.add_postcommit('network', 'subnet', 'port') -class OpenDaylightMechanismDriver(api.MechanismDriver): - """OpenDaylight Python Driver for Neutron. - - This code is the backend implementation for the OpenDaylight ML2 - MechanismDriver for OpenStack Neutron. - """ - - def initialize(self): - LOG.debug("Initializing OpenDaylight ML2 driver") - cfg.CONF.register_opts(odl_conf.odl_opts, "ml2_odl") - self.sg_handler = callback.OdlSecurityGroupsHandler( - self.sync_from_callback_precommit, - self.sync_from_callback_postcommit) - self.journal = journal.OpenDaylightJournalThread() - self.port_binding_controller = port_binding.PortBindingManager.create() - self.trunk_driver = trunk_driver.OpenDaylightTrunkDriverV2.create() - if odl_const.ODL_QOS in cfg.CONF.ml2.extension_drivers: - qos_driver.OpenDaylightQosDriver.create() - self._start_periodic_task() - full_sync.register(nlib_const.CORE, L2_RESOURCES) - odl_features.init() - - def get_workers(self): - return [port_status_update.OdlPortStatusUpdate()] - - def _start_periodic_task(self): - # start the periodic task and register all the phases - # operations : - # (1) JournalCleanup - Delete completed rows from journal - # (2) CleanupProcessing - Mark orphaned processing rows to pending - # (3) Full sync - Re-sync when detecting an ODL "cold reboot" - cleanup_obj = cleanup.JournalCleanup() - interval = cfg.CONF.ml2_odl.restconf_poll_interval - self._periodic_task = periodic_task.PeriodicTask('maintenance', - interval) - self._periodic_task.register_operation( - cleanup_obj.delete_completed_rows) - self._periodic_task.register_operation( - cleanup_obj.cleanup_processing_rows) - self._periodic_task.register_operation(full_sync.full_sync) - self._periodic_task.register_operation(recovery.journal_recovery) - self._periodic_task.start() - - @staticmethod - def _record_in_journal(context, object_type, operation, data=None): - if data is None: - data = context.current - journal.record(context._plugin_context, object_type, - context.current['id'], operation, data, - ml2_context=context) - - @log_helpers.log_method_call - def create_network_precommit(self, context): - 
OpenDaylightMechanismDriver._record_in_journal( - context, odl_const.ODL_NETWORK, odl_const.ODL_CREATE) - - @log_helpers.log_method_call - def create_subnet_precommit(self, context): - OpenDaylightMechanismDriver._record_in_journal( - context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) - - @log_helpers.log_method_call - def create_port_precommit(self, context): - OpenDaylightMechanismDriver._record_in_journal( - context, odl_const.ODL_PORT, odl_const.ODL_CREATE) - - @log_helpers.log_method_call - def update_network_precommit(self, context): - OpenDaylightMechanismDriver._record_in_journal( - context, odl_const.ODL_NETWORK, odl_const.ODL_UPDATE) - - @log_helpers.log_method_call - def update_subnet_precommit(self, context): - OpenDaylightMechanismDriver._record_in_journal( - context, odl_const.ODL_SUBNET, odl_const.ODL_UPDATE) - - @log_helpers.log_method_call - def update_port_precommit(self, context): - OpenDaylightMechanismDriver._record_in_journal( - context, odl_const.ODL_PORT, odl_const.ODL_UPDATE) - - @log_helpers.log_method_call - def delete_network_precommit(self, context): - OpenDaylightMechanismDriver._record_in_journal( - context, odl_const.ODL_NETWORK, odl_const.ODL_DELETE, data=[]) - - @log_helpers.log_method_call - def delete_subnet_precommit(self, context): - # Use the journal row's data field to store parent object - # uuids. This information is required for validation checking - # when deleting parent objects. - new_context = [context.current['network_id']] - OpenDaylightMechanismDriver._record_in_journal( - context, odl_const.ODL_SUBNET, odl_const.ODL_DELETE, - data=new_context) - - @log_helpers.log_method_call - def delete_port_precommit(self, context): - # Use the journal row's data field to store parent object - # uuids. This information is required for validation checking - # when deleting parent objects. - new_context = [context.current['network_id']] - for subnet in context.current['fixed_ips']: - new_context.append(subnet['subnet_id']) - OpenDaylightMechanismDriver._record_in_journal( - context, odl_const.ODL_PORT, odl_const.ODL_DELETE, - data=new_context) - - def _sync_security_group_create_precommit( - self, context, operation, object_type, res_id, sg_dict): - - journal.record(context, object_type, sg_dict['id'], operation, sg_dict) - - # NOTE(yamahata): when security group is created, default rules - # are also created. - for rule in sg_dict['security_group_rules']: - journal.record(context, odl_const.ODL_SG_RULE, rule['id'], - odl_const.ODL_CREATE, rule) - - @log_helpers.log_method_call - def sync_from_callback_precommit(self, context, operation, res_type, - res_id, resource_dict, **kwargs): - object_type = res_type.singular - if resource_dict is not None: - resource_dict = resource_dict[object_type] - - if (operation == odl_const.ODL_CREATE and - object_type == odl_const.ODL_SG): - self._sync_security_group_create_precommit( - context, operation, object_type, res_id, resource_dict) - return - - object_uuid = (resource_dict.get('id') - if operation == 'create' else res_id) - - # NOTE(yamahata): DB auto deletion - # Security Group Rule under this Security Group needs to - # be deleted. At NeutronDB layer rules are auto deleted with - # cascade='all,delete'. 
- if (object_type == odl_const.ODL_SG and - operation == odl_const.ODL_DELETE): - for rule_id in kwargs['security_group_rule_ids']: - journal.record(context, odl_const.ODL_SG_RULE, - rule_id, odl_const.ODL_DELETE, [object_uuid]) - - assert object_uuid is not None - journal.record(context, object_type, object_uuid, - operation, resource_dict) - - def sync_from_callback_postcommit(self, context, operation, res_type, - res_id, resource_dict, **kwargs): - self._postcommit(context) - - def _postcommit(self, context): - self.journal.set_sync_event() - - @log_helpers.log_method_call - def bind_port(self, port_context): - """Set binding for a valid segments - - """ - return self.port_binding_controller.bind_port(port_context) - - def check_vlan_transparency(self, context): - """Check VLAN transparency - - """ - # TODO(yamahata): This should be odl service provider dependent - # introduce ODL yang model for ODL to report which network types - # are vlan-transparent. - # VLAN and FLAT cases, we don't know if the underlying network - # supports QinQ or VLAN. - # For now, netvirt supports only vxlan tunneling. - VLAN_TRANSPARENT_NETWORK_TYPES = [p_const.TYPE_VXLAN] - network = context.current - # see TypeManager._extend_network_dict_provider() - # single providernet - if providernet.NETWORK_TYPE in network: - return (network[providernet.NETWORK_TYPE] in - VLAN_TRANSPARENT_NETWORK_TYPES) - # multi providernet - segments = network.get(mpnet.SEGMENTS) - if segments is None: - return True - return all(segment[providernet.NETWORK_TYPE] - in VLAN_TRANSPARENT_NETWORK_TYPES - for segment in segments) diff --git a/networking_odl/ml2/port_binding.py b/networking_odl/ml2/port_binding.py deleted file mode 100644 index ac93b9b20..000000000 --- a/networking_odl/ml2/port_binding.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import six -import stevedore - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import excutils - - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class PortBindingController(object): - - @abc.abstractmethod - def bind_port(self, port_context): - """Attempt to bind a port. - - :param context: PortContext instance describing the port - - This method is called outside any transaction to attempt to - establish a port binding using calling mechanism driver. Bindings - may be created at each of multiple levels of a hierarchical - network, and are established from the top level downward. At - each level, the mechanism driver determines whether it can - bind to any of the network segments in the - context.segments_to_bind property, based on the value of the - context.host property, any relevant port or network - attributes, and its own knowledge of the network topology. At - the top level, context.segments_to_bind contains the static - segments of the port's network. 
At each lower level of - binding, it contains static or dynamic segments supplied by - the driver that bound at the level above. If the driver is - able to complete the binding of the port to any segment in - context.segments_to_bind, it must call context.set_binding - with the binding details. If it can partially bind the port, - it must call context.continue_binding with the network - segments to be used to bind at the next lower level. - If the binding results are committed after bind_port returns, - they will be seen by all mechanism drivers as - update_port_precommit and update_port_postcommit calls. But if - some other thread or process concurrently binds or updates the - port, these binding results will not be committed, and - update_port_precommit and update_port_postcommit will not be - called on the mechanism drivers with these results. Because - binding results can be discarded rather than committed, - drivers should avoid making persistent state changes in - bind_port, or else must ensure that such state changes are - eventually cleaned up. - Implementing this method explicitly declares the mechanism - driver as having the intention to bind ports. This is inspected - by the QoS service to identify the available QoS rules you - can use with ports. - """ - - -class PortBindingManager(PortBindingController): - # At this point, there is no requirement to have multiple - # port binding controllers at the same time. - # Stay with single controller until there is a real requirement - - def __init__(self, name, controller): - self.name = name - self.controller = controller - - @classmethod - def create( - cls, namespace='networking_odl.ml2.port_binding_controllers', - name=None): - - name = name or cfg.CONF.ml2_odl.port_binding_controller - ext_mgr = stevedore.named.NamedExtensionManager( - namespace, [name], invoke_on_load=True) - - assert len(ext_mgr.extensions) == 1, ( - "Wrong port binding controller is specified") - - extension = ext_mgr.extensions[0] - if isinstance(extension.obj, PortBindingController): - return cls(extension.name, extension.obj) - else: - raise ValueError( - ("Port binding controller '%(name)s (%(controller)r)' " - "doesn't implement PortBindingController interface."), - {'name': extension.name, 'controller': extension.obj}) - - def bind_port(self, port_context): - controller_details = {'name': self.name, 'controller': self.controller} - try: - self.controller.bind_port(port_context) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception( - "Controller '%(name)s (%(controller)r)' had an error " - "when binding port.", controller_details) - else: - if port_context._new_bound_segment: - LOG.info( - "Controller '%(name)s (%(controller)r)' has bound port.", - controller_details) - else: - LOG.debug( - "Controller %(name)s (%(controller)r) hasn't bound " - "port.", controller_details) diff --git a/networking_odl/ml2/port_status_update.py b/networking_odl/ml2/port_status_update.py deleted file mode 100644 index 146e2bf25..000000000 --- a/networking_odl/ml2/port_status_update.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import threading - -from neutron.callbacks import resources -from neutron.db import provisioning_blocks -from neutron_lib import constants as n_const -from neutron_lib import context -from neutron_lib.plugins import directory -from neutron_lib import worker -from oslo_log import log - -from networking_odl.common import client as odl_client -from networking_odl.common import odl_features -from networking_odl.common import utils -from networking_odl.common import websocket_client as odl_ws_client - -LOG = log.getLogger(__name__) - - -class OdlPortStatusUpdate(worker.BaseWorker): - """Class to register and handle port status update""" - PORT_PATH = "restconf/operational/neutron:neutron/ports/port" - - def __init__(self): - super(OdlPortStatusUpdate, self).__init__() - self.odl_websocket_client = None - - def start(self): - super(OdlPortStatusUpdate, self).start() - LOG.debug('OdlPortStatusUpdate worker running') - if odl_features.has(odl_features.OPERATIONAL_PORT_STATUS): - self.run_websocket() - - def stop(self): - if self.odl_websocket_client: - self.odl_websocket_client.set_exit_flag() - - def wait(self): - """Wait for service to complete.""" - - @staticmethod - def reset(): - pass - - def run_websocket(self): - # OpenDaylight path to recieve websocket notifications on - neutron_ports_path = "/neutron:neutron/neutron:ports" - - self.path_uri = utils.get_odl_url() - - self.odl_websocket_client = ( - odl_ws_client.OpenDaylightWebsocketClient.odl_create_websocket( - self.path_uri, neutron_ports_path, - odl_ws_client.ODL_OPERATIONAL_DATASTORE, - odl_ws_client.ODL_NOTIFICATION_SCOPE_SUBTREE, - self._process_websocket_recv, - self._process_websocket_reconnect, - True - )) - - def _process_websocket_recv(self, payload, reconnect): - # Callback for websocket notification - LOG.debug("Websocket notification for port status update") - for event in odl_ws_client.EventDataParser.get_item(payload): - operation, path, data = event.get_fields() - if ((operation in [event.OPERATION_UPDATE, - event.OPERATION_CREATE])): - port_id = event.extract_field(path, "neutron:uuid") - port_id = str(port_id).strip("'") - status_field = data.get('status') - if status_field is not None: - status = status_field.get('content') - LOG.debug("Update port for port id %s %s", port_id, status) - # for now we only support transition from DOWN->ACTIVE - # https://bugs.launchpad.net/networking-odl/+bug/1686023 - if status == n_const.PORT_STATUS_ACTIVE: - provisioning_blocks.provisioning_complete( - context.get_admin_context(), - port_id, resources.PORT, - provisioning_blocks.L2_AGENT_ENTITY) - if operation == event.OPERATION_DELETE: - LOG.debug("PortStatus: Ignoring delete operation") - - def _process_websocket_reconnect(self, status): - if status == odl_ws_client.ODL_WEBSOCKET_CONNECTED: - # Get port data using restconf - LOG.debug("Websocket notification on reconnection") - reconn_thread = threading.Thread( - name='websocket', target=self._pull_missed_statuses) - reconn_thread.start() - - def _pull_missed_statuses(self): - LOG.debug("starting to pull pending statuses...") - plugin = directory.get_plugin() - filter = 
{"status": [n_const.PORT_STATUS_DOWN], - "vif_type": ["unbound"]} - ports = plugin.get_ports(context.get_admin_context(), filter) - - if not ports: - LOG.debug("no down ports found, done") - return - - port_fetch_url = utils.get_odl_url(self.PORT_PATH) - client = odl_client.OpenDaylightRestClient.create_client( - url=port_fetch_url) - - for port in ports: - id = port["id"] - response = client.get(id) - if response.status_code != 200: - LOG.warning("Non-200 response code %s", str(response)) - continue - odl_status = response.json()['port'][0]['status'] - if odl_status == n_const.PORT_STATUS_ACTIVE: - # for now we only support transition from DOWN->ACTIVE - # See https://bugs.launchpad.net/networking-odl/+bug/1686023 - provisioning_blocks.provisioning_complete( - context.get_admin_context(), - id, resources.PORT, - provisioning_blocks.L2_AGENT_ENTITY) - LOG.debug("done pulling pending statuses") diff --git a/networking_odl/ml2/pseudo_agentdb_binding.py b/networking_odl/ml2/pseudo_agentdb_binding.py deleted file mode 100644 index 057375843..000000000 --- a/networking_odl/ml2/pseudo_agentdb_binding.py +++ /dev/null @@ -1,378 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -from string import Template - -from neutron.callbacks import resources -from neutron.db import provisioning_blocks -from neutron_lib.api.definitions import portbindings -from neutron_lib import constants as nl_const -from neutron_lib import context -from neutron_lib.plugins import directory -from neutron_lib.plugins.ml2 import api -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils -from requests import codes -from requests import exceptions -import six.moves.urllib.parse as urlparse - -from networking_odl.common import client as odl_client -from networking_odl.common import odl_features -from networking_odl.common import utils -from networking_odl.common import websocket_client as odl_ws_client -from networking_odl.journal import periodic_task -from networking_odl.ml2 import port_binding - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') -LOG = log.getLogger(__name__) - - -class PseudoAgentDBBindingController(port_binding.PortBindingController): - """Switch agnostic Port binding controller for OpenDayLight.""" - - AGENTDB_BINARY = 'neutron-odlagent-portbinding' - L2_TYPE = "ODL L2" - - # TODO(mzmalick): binary, topic and resource_versions to be provided - # by ODL, Pending ODL NB patches. 
- _AGENTDB_ROW = { - 'binary': AGENTDB_BINARY, - 'host': '', - 'topic': nl_const.L2_AGENT_TOPIC, - 'configurations': {}, - 'resource_versions': '', - 'agent_type': L2_TYPE, - 'start_flag': True} - - def __init__(self, hostconf_uri=None, db_plugin=None): - """Initialization.""" - LOG.debug("Initializing ODL Port Binding Controller") - - if not hostconf_uri: - # extract host/port from ODL URL and append hostconf_uri path - hostconf_uri = self._make_hostconf_uri( - cfg.CONF.ml2_odl.url, cfg.CONF.ml2_odl.odl_hostconf_uri) - - LOG.debug("ODLPORTBINDING hostconfigs URI: %s", hostconf_uri) - - # TODO(mzmalick): disable port-binding for ODL lightweight testing - self.odl_rest_client = odl_client.OpenDaylightRestClient.create_client( - url=hostconf_uri) - - # Neutron DB plugin instance - self.agents_db = db_plugin - self._known_agents = set() - - if cfg.CONF.ml2_odl.enable_websocket_pseudo_agentdb: - # Update hostconfig once for the configurations already present - self._get_and_update_hostconfigs() - odl_url = utils.get_odl_url() - self._start_websocket(odl_url) - else: - # Start polling ODL restconf using periodic task. - # default: 30s (should be <= agent keep-alive poll interval) - self._start_periodic_task(cfg.CONF.ml2_odl.restconf_poll_interval) - - def _make_hostconf_uri(self, odl_url=None, path=''): - """Make ODL hostconfigs URI with host/port extraced from ODL_URL.""" - # NOTE(yamahata): for unit test. - odl_url = odl_url or 'http://localhost:8080/' - - # extract ODL_IP and ODL_PORT from ODL_ENDPOINT and append path - # urlsplit and urlunparse don't throw exceptions - purl = urlparse.urlsplit(odl_url) - return urlparse.urlunparse((purl.scheme, purl.netloc, - path, '', '', '')) - - def _start_periodic_task(self, poll_interval): - self._periodic = periodic_task.PeriodicTask('hostconfig', - poll_interval) - self._periodic.register_operation(self._get_and_update_hostconfigs) - self._periodic.start() - - def _rest_get_hostconfigs(self): - try: - response = self.odl_rest_client.get() - response.raise_for_status() - hostconfigs = response.json()['hostconfigs']['hostconfig'] - except exceptions.ConnectionError: - LOG.error("Cannot connect to the OpenDaylight Controller", - exc_info=True) - return None - except exceptions.HTTPError as e: - # restconf returns 404 on operation when there is no entry - if e.response.status_code == codes.not_found: - LOG.debug("Response code not_found (404)" - " treated as an empty list") - return [] - - LOG.warning("REST/GET odl hostconfig failed, ", - exc_info=True) - return None - except KeyError: - LOG.error("got invalid hostconfigs", exc_info=True) - return None - except Exception: - LOG.warning("REST/GET odl hostconfig failed, ", - exc_info=True) - return None - else: - if LOG.isEnabledFor(logging.DEBUG): - _hconfig_str = jsonutils.dumps( - response, sort_keys=True, indent=4, separators=(',', ': ')) - LOG.debug("ODLPORTBINDING hostconfigs:\n%s", _hconfig_str) - - return hostconfigs - - def _get_and_update_hostconfigs(self, session=None): - LOG.info("REST/GET hostconfigs from ODL") - - hostconfigs = self._rest_get_hostconfigs() - - if not hostconfigs: - LOG.warning("ODL hostconfigs REST/GET failed, " - "will retry on next poll") - return # retry on next poll - - self._update_agents_db(hostconfigs=hostconfigs) - - def _get_neutron_db_plugin(self): - if not self.agents_db: - self.agents_db = directory.get_plugin() - return self.agents_db - - def _update_agents_db(self, hostconfigs): - LOG.debug("ODLPORTBINDING Updating agents DB with ODL hostconfigs") - - 
self._old_agents = self._known_agents - self._known_agents = set() - for host_config in hostconfigs: - self._update_agents_db_row(host_config) - - def _update_agents_db_row(self, host_config): - # Update one row in agent db - agents_db = self._get_neutron_db_plugin() - if not agents_db: # if ML2 is still initializing - LOG.error("ML2 still initializing, Missed an update") - # TODO(rsood): Neutron worker can be used - return - host_id = host_config['host-id'] - host_type = host_config['host-type'] - config = host_config['config'] - try: - agentdb_row = self._AGENTDB_ROW.copy() - agentdb_row['host'] = host_id - agentdb_row['agent_type'] = host_type - agentdb_row['configurations'] = jsonutils.loads(config) - if (host_id, host_type) in self._old_agents: - agentdb_row.pop('start_flag', None) - agents_db.create_or_update_agent( - context.get_admin_context(), agentdb_row) - self._known_agents.add((host_id, host_type)) - except Exception: - LOG.exception("Unable to update agentdb.") - - def _delete_agents_db_row(self, host_id, host_type): - """Delete agent row.""" - agents_db = self._get_neutron_db_plugin() - if not agents_db: # if ML2 is still initializing - LOG.error("ML2 still initializing, Missed an update") - return None - try: - filters = {'agent_type': [host_type], - 'host': [host_id]} - # TODO(rsood): get_agent can be used here - agent = agents_db.get_agents_db( - context.get_admin_context(), filters=filters) - if not agent: - return - - LOG.debug("Deleting Agent with Agent id: %s", agent[0]['id']) - agents_db.delete_agent(context.get_admin_context(), agent[0]['id']) - self._known_agents.remove((host_id, host_type)) - except Exception: - LOG.exception("Unable to delete from agentdb.") - - def _substitute_hconfig_tmpl(self, port_context, hconfig): - # TODO(mzmalick): Explore options for inlines string splicing of - # port-id to 14 bytes as required by vhostuser types - port_id = port_context.current['id'] - conf = hconfig.get('configurations') - vnics = conf.get('supported_vnic_types') - if vnics is None: - return hconfig - for vnic in vnics: - if vnic.get('vif_type') == portbindings.VIF_TYPE_VHOST_USER: - details = vnic.get('vif_details') - if details is None: - continue - port_prefix = details.get('port_prefix') - port_prefix = port_prefix[:14] - subs_ids = { - # $IDENTIFER string substitution in hostconfigs JSON string - 'PORT_ID': port_id[:(14 - len(port_prefix))], - } - # Substitute identifiers and Convert JSON string to dict - hconfig_conf_json = Template(jsonutils.dumps(details)) - substituted_str = hconfig_conf_json.safe_substitute(subs_ids) - vnic['vif_details'] = jsonutils.loads(substituted_str) - return hconfig - - def bind_port(self, port_context): - """bind port using ODL host configuration.""" - # Get all ODL hostconfigs for this host and type - agentdb = port_context.host_agents(self.L2_TYPE) - - if not agentdb: - LOG.warning("No valid hostconfigs in agentsdb for host %s", - port_context.host) - return - - for raw_hconfig in agentdb: - # do any $identifier substitution - hconfig = self._substitute_hconfig_tmpl(port_context, raw_hconfig) - - # Found ODL hostconfig for this host in agentdb - LOG.debug("ODLPORTBINDING bind port with hostconfig: %s", hconfig) - - if self._hconfig_bind_port(port_context, hconfig): - break # Port binding suceeded! - else: # Port binding failed! 
- LOG.warning( - "Failed to bind Port %(pid)s devid %(device_id)s " - "owner %(owner)s for host %(host)s " - "on network %(network)s.", { - 'pid': port_context.current['id'], - 'devce_id': port_context.current['device_id'], - 'owner': port_context.current['device_owner'], - 'host': port_context.host, - 'network': port_context.network.current['id']}) - else: # No hostconfig found for host in agentdb. - LOG.warning("No ODL hostconfigs for host %s found in agentdb", - port_context.host) - - def _hconfig_bind_port(self, port_context, hconfig): - """bind port after validating odl host configuration.""" - valid_segment = None - - for segment in port_context.segments_to_bind: - if self._is_valid_segment(segment, hconfig['configurations']): - valid_segment = segment - break - else: - LOG.debug("No valid segments found!") - return False - - confs = hconfig['configurations']['supported_vnic_types'] - - # nova provides vnic_type in port_context to neutron. - # neutron provides supported vif_type for binding based on vnic_type - # in this case ODL hostconfigs has the vif_type to bind for vnic_type - vnic_type = port_context.current.get(portbindings.VNIC_TYPE) - - if vnic_type != portbindings.VNIC_NORMAL: - LOG.error("Binding failed: unsupported VNIC %s", vnic_type) - return False - - vif_details = None - for conf in confs: - if conf["vnic_type"] == vnic_type: - vif_type = conf.get('vif_type', portbindings.VIF_TYPE_OVS) - LOG.debug("Binding vnic:'%s' to vif:'%s'", vnic_type, vif_type) - vif_details = conf.get('vif_details', {}) - break - else: - vif_type = portbindings.VIF_TYPE_OVS # default: OVS - LOG.warning("No supported vif type found for host %s!, " - "defaulting to OVS", port_context.host) - - if not vif_details: # empty vif_details could be trouble, warn. 
- LOG.warning("hostconfig:vif_details was empty!") - - LOG.debug("Bind port %(port)s on network %(network)s with valid " - "segment %(segment)s and VIF type %(vif_type)r " - "VIF details %(vif_details)r.", - {'port': port_context.current['id'], - 'network': port_context.network.current['id'], - 'segment': valid_segment, 'vif_type': vif_type, - 'vif_details': vif_details}) - - port_status = self._prepare_initial_port_status(port_context) - port_context.set_binding(valid_segment[api.ID], vif_type, - vif_details, status=port_status) - - return True - - def _prepare_initial_port_status(self, port_context): - port_status = nl_const.PORT_STATUS_ACTIVE - if odl_features.has(odl_features.OPERATIONAL_PORT_STATUS): - port_status = nl_const.PORT_STATUS_DOWN - provisioning_blocks.add_provisioning_component( - port_context._plugin_context, port_context.current['id'], - resources.PORT, provisioning_blocks.L2_AGENT_ENTITY) - return port_status - - def _is_valid_segment(self, segment, conf): - """Verify a segment is supported by ODL.""" - network_type = segment[api.NETWORK_TYPE] - return network_type in conf['allowed_network_types'] - - def _start_websocket(self, odl_url): - # OpenDaylight path to recieve websocket notifications on - neutron_hostconfigs_path = """/neutron:neutron/neutron:hostconfigs""" - - self.odl_websocket_client = ( - odl_ws_client.OpenDaylightWebsocketClient.odl_create_websocket( - odl_url, neutron_hostconfigs_path, - odl_ws_client.ODL_OPERATIONAL_DATASTORE, - odl_ws_client.ODL_NOTIFICATION_SCOPE_SUBTREE, - self._process_websocket_recv, - self._process_websocket_reconnect - )) - if self.odl_websocket_client is None: - LOG.error("Error starting websocket thread") - - def _process_websocket_recv(self, payload, reconnect): - # Callback for websocket notification - LOG.debug("Websocket notification for hostconfig update") - for event in odl_ws_client.EventDataParser.get_item(payload): - try: - operation, path, data = event.get_fields() - if operation == event.OPERATION_DELETE: - host_id = event.extract_field(path, "neutron:host-id") - host_type = event.extract_field(path, "neutron:host-type") - if not host_id or not host_type: - LOG.warning("Invalid delete notification") - continue - self._delete_agents_db_row(host_id.strip("'"), - host_type.strip("'")) - elif operation == event.OPERATION_CREATE: - if 'hostconfig' in data: - hostconfig = data['hostconfig'] - self._old_agents = self._known_agents - self._update_agents_db_row(hostconfig) - except KeyError: - LOG.warning("Invalid JSON for websocket notification", - exc_info=True) - continue - - # TODO(rsood): Mixing restconf and websocket can cause race conditions - def _process_websocket_reconnect(self, status): - if status == odl_ws_client.ODL_WEBSOCKET_CONNECTED: - # Get hostconfig data using restconf - LOG.debug("Websocket notification on reconnection") - self._get_and_update_hostconfigs() diff --git a/networking_odl/qos/__init__.py b/networking_odl/qos/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/qos/qos_driver_v2.py b/networking_odl/qos/qos_driver_v2.py deleted file mode 100644 index 0265946a1..000000000 --- a/networking_odl/qos/qos_driver_v2.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.common import constants as n_consts -from neutron.services.qos.drivers import base -from neutron.services.qos import qos_consts -from neutron_lib.api.definitions import portbindings -from neutron_lib.plugins import constants as nlib_const -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from networking_odl.common import constants as odl_const -from networking_odl.journal import full_sync -from networking_odl.journal import journal -from networking_odl.qos import qos_utils - -LOG = logging.getLogger(__name__) - -# TODO(manjeets) fetch these from Neutron NB -# Only bandwidth limit is supported so far. -SUPPORTED_RULES = { - qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { - qos_consts.MAX_KBPS: { - 'type:range': [0, n_consts.DB_INTEGER_MAX_VALUE]}, - qos_consts.MAX_BURST: { - 'type:range': [0, n_consts.DB_INTEGER_MAX_VALUE]}, - qos_consts.DIRECTION: { - 'type:values': [n_consts.EGRESS_DIRECTION]} - }, -} -VIF_TYPES = [portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_VHOST_USER] -VNIC_TYPES = [portbindings.VNIC_NORMAL] - -QOS_RESOURCES = { - odl_const.ODL_QOS_POLICY: odl_const.ODL_QOS_POLICIES -} - - -class OpenDaylightQosDriver(base.DriverBase): - - """OpenDaylight QOS Driver - - This code is backend implementation for OpenDaylight Qos - driver for Openstack Neutron. - """ - - @staticmethod - def create(): - return OpenDaylightQosDriver() - - def __init__(self, name='OpenDaylightQosDriver', - vif_types=VIF_TYPES, - vnic_types=VNIC_TYPES, - supported_rules=SUPPORTED_RULES, - requires_rpc_notifications=False): - super(OpenDaylightQosDriver, self).__init__( - name, vif_types, vnic_types, supported_rules, - requires_rpc_notifications) - LOG.debug("Initializing OpenDaylight Qos driver") - self.journal = journal.OpenDaylightJournalThread() - full_sync.register(nlib_const.QOS, QOS_RESOURCES) - - def _record_in_journal(self, context, op_const, qos_policy): - data = qos_utils.convert_rules_format(qos_policy.to_dict()) - journal.record(context, odl_const.ODL_QOS_POLICY, - data['id'], op_const, data) - - @log_helpers.log_method_call - def create_policy_precommit(self, context, qos_policy): - self._record_in_journal(context, odl_const.ODL_CREATE, qos_policy) - - @log_helpers.log_method_call - def update_policy_precommit(self, context, qos_policy): - self._record_in_journal(context, odl_const.ODL_UPDATE, qos_policy) - - @log_helpers.log_method_call - def delete_policy_precommit(self, context, qos_policy): - self._record_in_journal(context, odl_const.ODL_DELETE, qos_policy) - - @log_helpers.log_method_call - def create_policy(self, context, policy): - self.journal.set_sync_event() - - @log_helpers.log_method_call - def update_policy(self, context, policy): - self.journal.set_sync_event() - - @log_helpers.log_method_call - def delete_policy(self, context, policy): - self.journal.set_sync_event() diff --git a/networking_odl/qos/qos_utils.py b/networking_odl/qos/qos_utils.py deleted file mode 100644 index efb1aafc7..000000000 --- a/networking_odl/qos/qos_utils.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2016 Intel Corporation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - - -def enforce_policy_format(policy): - if 'bandwidth_limit_rules' not in policy.keys(): - policy['bandwidth_limit_rules'] = [] - if 'dscp_marking_rules' not in policy.keys(): - policy['dscp_marking_rules'] = [] - return policy - - -# NOTE(manjeets) keeping common methods for formatting -# qos data in qos_utils for code reuse. -def convert_rules_format(data): - policy = copy.deepcopy(data) - policy.pop('tenant_id', None) - policy.pop('rules', None) - for rule in data.get('rules', []): - rule_type = rule['type'] + '_rules' - rule.pop('type', None) - rule.pop('qos_policy_id', None) - rule['tenant_id'] = data['tenant_id'] - policy[rule_type] = [rule] - return enforce_policy_format(policy) diff --git a/networking_odl/sfc/__init__.py b/networking_odl/sfc/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/sfc/flowclassifier/__init__.py b/networking_odl/sfc/flowclassifier/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/sfc/flowclassifier/sfc_flowclassifier_v1.py b/networking_odl/sfc/flowclassifier/sfc_flowclassifier_v1.py deleted file mode 100644 index 931f6953b..000000000 --- a/networking_odl/sfc/flowclassifier/sfc_flowclassifier_v1.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2016 Brocade Communication Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from networking_sfc.services.flowclassifier.drivers import base as fc_driver - -from networking_odl.common import client as odl_client -from networking_odl.common import constants as odl_const - -LOG = logging.getLogger(__name__) - - -class OpenDaylightSFCFlowClassifierDriverV1( - fc_driver.FlowClassifierDriverBase): - - """OpenDaylight SFC Flow Classifier Driver for networking-sfc. - - This Driver pass through SFC Flow Classifier API calls to - OpenDaylight Neutron Northbound Project by using the REST - API's exposed by the project. 
- """ - - def initialize(self): - LOG.debug("Initializing OpenDaylight Networking " - "SFC Flow Classifier driver") - self.client = odl_client.OpenDaylightRestClient.create_client() - - @log_helpers.log_method_call - def create_flow_classifier(self, context): - self.client.send_request(odl_const.ODL_CREATE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_FLOW_CLASSIFIER, - context.current) - - @log_helpers.log_method_call - def update_flow_classifier(self, context): - self.client.send_request(odl_const.ODL_UPDATE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_FLOW_CLASSIFIER, - context.current) - - @log_helpers.log_method_call - def delete_flow_classifier(self, context): - self.client.send_request(odl_const.ODL_DELETE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_FLOW_CLASSIFIER, - context.current) - - @log_helpers.log_method_call - def create_flow_classifier_precommit(self, context): - LOG.info("Skipping precommit check.") diff --git a/networking_odl/sfc/flowclassifier/sfc_flowclassifier_v2.py b/networking_odl/sfc/flowclassifier/sfc_flowclassifier_v2.py deleted file mode 100644 index 649e653d8..000000000 --- a/networking_odl/sfc/flowclassifier/sfc_flowclassifier_v2.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) 2016 Brocade Communication Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from networking_sfc.extensions import flowclassifier as fc_const -from networking_sfc.services.flowclassifier.drivers import base as fc_driver - -from networking_odl.common import constants as odl_const -from networking_odl.common import postcommit -from networking_odl.journal import full_sync -from networking_odl.journal import journal - -LOG = logging.getLogger(__name__) - -SFC_FC_RESOURCES = { - odl_const.ODL_SFC_FLOW_CLASSIFIER: odl_const.ODL_SFC_FLOW_CLASSIFIERS, -} - - -@postcommit.add_postcommit('flow_classifier') -class OpenDaylightSFCFlowClassifierDriverV2( - fc_driver.FlowClassifierDriverBase): - - """OpenDaylight SFC Flow Classifier Driver (Version 2) for networking-sfc. - - This Driver pass through SFC Flow Classifier API calls to - OpenDaylight Neutron Northbound Project by using the REST - API's exposed by the project. 
- """ - - def initialize(self): - LOG.debug("Initializing OpenDaylight Networking " - "SFC Flow Classifier driver Version 2") - self.journal = journal.OpenDaylightJournalThread() - full_sync.register(fc_const.FLOW_CLASSIFIER_EXT, SFC_FC_RESOURCES) - - @staticmethod - def _record_in_journal(context, object_type, operation, data=None): - if data is None: - data = context.current - journal.record(context._plugin_context, object_type, - context.current['id'], operation, data) - - @log_helpers.log_method_call - def create_flow_classifier_precommit(self, context): - OpenDaylightSFCFlowClassifierDriverV2._record_in_journal( - context, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_CREATE) - - @log_helpers.log_method_call - def update_flow_classifier_precommit(self, context): - OpenDaylightSFCFlowClassifierDriverV2._record_in_journal( - context, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_UPDATE) - - @log_helpers.log_method_call - def delete_flow_classifier_precommit(self, context): - OpenDaylightSFCFlowClassifierDriverV2._record_in_journal( - context, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_DELETE, - data=[]) - - # Need to implement these methods, else driver loading fails with error - # complaining about no abstract method implementation present. - @log_helpers.log_method_call - def create_flow_classifier(self, context): - super(OpenDaylightSFCFlowClassifierDriverV2, - self).create_flow_classifier(context) - - @log_helpers.log_method_call - def update_flow_classifier(self, context): - super(OpenDaylightSFCFlowClassifierDriverV2, - self).update_flow_classifier(context) - - @log_helpers.log_method_call - def delete_flow_classifier(self, context): - super(OpenDaylightSFCFlowClassifierDriverV2, - self).delete_flow_classifier(context) diff --git a/networking_odl/sfc/sfc_driver_v1.py b/networking_odl/sfc/sfc_driver_v1.py deleted file mode 100644 index 4d9949191..000000000 --- a/networking_odl/sfc/sfc_driver_v1.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) 2016 Brocade Communication Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from networking_sfc.services.sfc.drivers import base as sfc_driver - -from networking_odl.common import client as odl_client -from networking_odl.common import constants as odl_const - -LOG = logging.getLogger(__name__) - - -class OpenDaylightSFCDriverV1(sfc_driver.SfcDriverBase): - """OpenDaylight SFC Driver for networking-sfc. - - Driver sends REST request for Networking SFC Resources (Port Pair, - Port Pair Group & Port Chain) to OpenDaylight Neutron Northbound. - OpenDaylight Neutron Northbound has API's defined for these resources - based on the Networking SFC APIs. 
- """ - - def initialize(self): - LOG.debug("Initializing OpenDaylight Networking SFC driver") - self.client = odl_client.OpenDaylightRestClient.create_client() - - @log_helpers.log_method_call - def create_port_pair(self, context): - self.client.send_request(odl_const.ODL_CREATE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_PORT_PAIR, - context.current) - - @log_helpers.log_method_call - def update_port_pair(self, context): - self.client.send_request(odl_const.ODL_UPDATE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_PORT_PAIR, - context.current) - - @log_helpers.log_method_call - def delete_port_pair(self, context): - self.client.send_request(odl_const.ODL_DELETE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_PORT_PAIR, - context.current) - - @log_helpers.log_method_call - def create_port_pair_group(self, context): - self.client.send_request(odl_const.ODL_CREATE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_PORT_PAIR_GROUP, - context.current) - - @log_helpers.log_method_call - def update_port_pair_group(self, context): - self.client.send_request(odl_const.ODL_UPDATE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_PORT_PAIR_GROUP, - context.current) - - @log_helpers.log_method_call - def delete_port_pair_group(self, context): - self.client.send_request(odl_const.ODL_DELETE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_PORT_PAIR_GROUP, - context.current) - - @log_helpers.log_method_call - def create_port_chain(self, context): - self.client.send_request(odl_const.ODL_CREATE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_PORT_CHAIN, - context.current) - - @log_helpers.log_method_call - def update_port_chain(self, context): - self.client.send_request(odl_const.ODL_UPDATE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_PORT_CHAIN, - context.current) - - @log_helpers.log_method_call - def delete_port_chain(self, context): - self.client.send_request(odl_const.ODL_DELETE, - odl_const.ODL_SFC, - odl_const.ODL_SFC_PORT_CHAIN, - context.current) diff --git a/networking_odl/sfc/sfc_driver_v2.py b/networking_odl/sfc/sfc_driver_v2.py deleted file mode 100644 index 024dc511a..000000000 --- a/networking_odl/sfc/sfc_driver_v2.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2017 Brocade Communication Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from networking_sfc.extensions import sfc as sfc_const -from networking_sfc.services.sfc.drivers import base as sfc_driver - -from networking_odl.common import constants as odl_const -from networking_odl.common import postcommit -from networking_odl.journal import full_sync -from networking_odl.journal import journal - -LOG = logging.getLogger(__name__) - -SFC_RESOURCES = { - odl_const.ODL_SFC_PORT_PAIR: odl_const.ODL_SFC_PORT_PAIRS, - odl_const.ODL_SFC_PORT_PAIR_GROUP: odl_const.ODL_SFC_PORT_PAIR_GROUPS, - odl_const.ODL_SFC_PORT_CHAIN: odl_const.ODL_SFC_PORT_CHAINS -} - - -@postcommit.add_postcommit('port_pair', 'port_pair_group', 'port_chain') -class OpenDaylightSFCDriverV2(sfc_driver.SfcDriverBase): - """OpenDaylight SFC Driver (Version 2) for networking-sfc. - - Driver sends REST request for Networking SFC Resources (Port Pair, - Port Pair Group & Port Chain) to OpenDaylight Neutron Northbound. - OpenDaylight Neutron Northbound has API's defined for these resources - based on the Networking SFC APIs. - """ - - def initialize(self): - LOG.debug("Initializing OpenDaylight Networking SFC driver(Version 2)") - self.journal = journal.OpenDaylightJournalThread() - full_sync.register(sfc_const.SFC_EXT, SFC_RESOURCES) - - @staticmethod - def _record_in_journal(context, object_type, operation, data=None): - if data is None: - data = context.current - journal.record(context._plugin_context, object_type, - context.current['id'], operation, data) - - @log_helpers.log_method_call - def create_port_pair_precommit(self, context): - OpenDaylightSFCDriverV2._record_in_journal( - context, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_CREATE) - - @log_helpers.log_method_call - def create_port_pair_group_precommit(self, context): - OpenDaylightSFCDriverV2._record_in_journal( - context, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_CREATE) - - @log_helpers.log_method_call - def create_port_chain_precommit(self, context): - OpenDaylightSFCDriverV2._record_in_journal( - context, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_CREATE) - - @log_helpers.log_method_call - def update_port_pair_precommit(self, context): - OpenDaylightSFCDriverV2._record_in_journal( - context, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_UPDATE) - - @log_helpers.log_method_call - def update_port_pair_group_precommit(self, context): - OpenDaylightSFCDriverV2._record_in_journal( - context, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_UPDATE) - - @log_helpers.log_method_call - def update_port_chain_precommit(self, context): - OpenDaylightSFCDriverV2._record_in_journal( - context, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_UPDATE) - - @log_helpers.log_method_call - def delete_port_pair_precommit(self, context): - OpenDaylightSFCDriverV2._record_in_journal( - context, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_DELETE, - data=[]) - - @log_helpers.log_method_call - def delete_port_pair_group_precommit(self, context): - OpenDaylightSFCDriverV2._record_in_journal( - context, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_DELETE, - data=[]) - - @log_helpers.log_method_call - def delete_port_chain_precommit(self, context): - OpenDaylightSFCDriverV2._record_in_journal( - context, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_DELETE, - data=[]) - - # Need to implement these methods, else driver loading fails with error - # complaining about no abstract method implementation present. 
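The V2 drivers above all share the same precommit/postcommit split: precommit only records a journal row while still inside the database transaction, and postcommit merely wakes the journal thread that later replays the rows against ODL. A simplified, self-contained sketch of that split; MiniJournal is a stand-in, not the networking_odl.journal implementation::

    import threading


    class MiniJournal(object):
        def __init__(self):
            self.rows = []
            self._event = threading.Event()

        def record(self, object_type, object_id, operation, data):
            # Called from *_precommit, i.e. still inside the DB transaction.
            self.rows.append((object_type, object_id, operation, data))

        def set_sync_event(self):
            # Called from *_postcommit; the real thread syncs rows to ODL.
            self._event.set()


    journal = MiniJournal()


    def create_port_pair_precommit(current):
        journal.record('port_pair', current['id'], 'create', current)


    def create_port_pair_postcommit(current):
        journal.set_sync_event()


    create_port_pair_precommit({'id': 'pp-1', 'name': 'demo'})
    create_port_pair_postcommit({'id': 'pp-1', 'name': 'demo'})
    print(journal.rows)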
- @log_helpers.log_method_call - def create_port_pair(self, context): - super(OpenDaylightSFCDriverV2, self).create_port_pair(context) - - @log_helpers.log_method_call - def create_port_pair_group(self, context): - super(OpenDaylightSFCDriverV2, self).create_port_pair_group(context) - - @log_helpers.log_method_call - def create_port_chain(self, context): - super(OpenDaylightSFCDriverV2, self).create_port_chain(context) - - @log_helpers.log_method_call - def update_port_pair(self, context): - super(OpenDaylightSFCDriverV2, self).update_port_pair(context) - - @log_helpers.log_method_call - def update_port_pair_group(self, context): - super(OpenDaylightSFCDriverV2, self).update_port_pair_group(context) - - @log_helpers.log_method_call - def update_port_chain(self, context): - super(OpenDaylightSFCDriverV2, self).update_port_chain(context) - - @log_helpers.log_method_call - def delete_port_pair(self, context): - super(OpenDaylightSFCDriverV2, self).delete_port_pair(context) - - @log_helpers.log_method_call - def delete_port_pair_group(self, context): - super(OpenDaylightSFCDriverV2, self).delete_port_pair_group(context) - - @log_helpers.log_method_call - def delete_port_chain(self, context): - super(OpenDaylightSFCDriverV2, self).delete_port_chain(context) diff --git a/networking_odl/tests/__init__.py b/networking_odl/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/base.py b/networking_odl/tests/base.py deleted file mode 100644 index 8eb9547cc..000000000 --- a/networking_odl/tests/base.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) 2015-2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import fixtures -import mock - -from oslo_config import cfg - -from networking_odl.common import odl_features -from networking_odl.journal import periodic_task -from neutron.tests import base - -from networking_odl.journal import journal - - -class DietTestCase(base.DietTestCase): - - def patch(self, target, name, *args, **kwargs): - context = mock.patch.object(target, name, *args, **kwargs) - patch = context.start() - self.addCleanup(context.stop) - return patch - - -class OpenDaylightRestClientFixture(fixtures.Fixture): - # Set URL/user/pass so init doesn't throw a cfg required error. - # They are not used in these tests since requests.request is overwritten. 
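The test helpers in the removed tests/base.py all follow one shape: patch whatever would reach out to ODL (or block), and register the undo with addCleanup() so the patch never leaks between tests. A self-contained sketch of that shape, using only unittest.mock and the requests library that the real fixture also stubs::

    import unittest
    from unittest import mock

    import requests


    class PatchHelperExample(unittest.TestCase):
        def patch(self, target, name, **kwargs):
            # Start a patch and guarantee it is undone when the test ends,
            # mirroring DietTestCase.patch() in the removed base module.
            patcher = mock.patch.object(target, name, **kwargs)
            patched = patcher.start()
            self.addCleanup(patcher.stop)
            return patched

        def test_requests_is_stubbed(self):
            self.patch(requests.sessions.Session, 'request',
                       return_value=mock.Mock(status_code=200))
            response = requests.get('http://localhost:8080/does-not-matter')
            self.assertEqual(200, response.status_code)


    if __name__ == '__main__':
        unittest.main()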
- def _setUp(self): - super(OpenDaylightRestClientFixture, self)._setUp() - mock.patch('requests.sessions.Session.request').start() - cfg.CONF.set_override('url', - 'http://localhost:8080' - '/controller/nb/v2/neutron', 'ml2_odl') - cfg.CONF.set_override('username', 'someuser', 'ml2_odl') - cfg.CONF.set_override('password', 'somepass', 'ml2_odl') - cfg.CONF.set_override('port_binding_controller', - 'legacy-port-binding', 'ml2_odl') - - -class OpenDaylightRestClientGlobalFixture(fixtures.Fixture): - def __init__(self, global_client): - super(OpenDaylightRestClientGlobalFixture, self).__init__() - self._global_client = global_client - - def _setUp(self): - super(OpenDaylightRestClientGlobalFixture, self)._setUp() - mock.patch.object(self._global_client, 'get_client').start() - - -class OpenDaylightFeaturesFixture(fixtures.Fixture): - def _setUp(self): - super(OpenDaylightFeaturesFixture, self)._setUp() - if cfg.CONF.ml2_odl.url is None: - cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl') - if cfg.CONF.ml2_odl.username is None: - cfg.CONF.set_override('username', 'someuser', 'ml2_odl') - if cfg.CONF.ml2_odl.password is None: - cfg.CONF.set_override('password', 'somepass', 'ml2_odl') - # make sure init is not called, it'll block the main thread - self.mock_odl_features_init = mock.patch.object( - odl_features, 'init', side_effect=self.fake_init) - self.mock_odl_features_init.start() - self.addCleanup(odl_features.deinit) - - @staticmethod - def fake_init(): - odl_features.feature_set = set() - - -class OpenDaylightJournalThreadFixture(fixtures.Fixture): - def _setUp(self): - super(OpenDaylightJournalThreadFixture, self)._setUp() - mock.patch.object(journal.OpenDaylightJournalThread, - 'start_odl_sync_thread').start() - - -class OpenDaylightPeriodicTaskFixture(fixtures.Fixture): - def _setUp(self): - super(OpenDaylightPeriodicTaskFixture, self)._setUp() - mock.patch.object(periodic_task.PeriodicTask, 'start').start() diff --git a/networking_odl/tests/contrib/gate_hook.sh b/networking_odl/tests/contrib/gate_hook.sh deleted file mode 100644 index e576d9114..000000000 --- a/networking_odl/tests/contrib/gate_hook.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -VENV=${1:-"dsvm-functional"} - -GATE_DEST=$BASE/new -DEVSTACK_PATH=$GATE_DEST/devstack -NETWORKING_ODL_DIR="${NETWORKING_ODL_DIR:-$BASE/new/networking-odl}" - -case $VENV in -"dsvm-functional" | "dsvm-fullstack") - # The following need to be set before sourcing - # configure_for_func_testing. - PROJECT_NAME=networking-odl - IS_GATE=True - - source $NETWORKING_ODL_DIR/tools/configure_for_func_testing.sh - configure_host_for_func_testing - ;; -*) - echo "Unrecognized environment $VENV". - exit 1 -esac diff --git a/networking_odl/tests/contrib/post_test_hook.sh b/networking_odl/tests/contrib/post_test_hook.sh deleted file mode 100644 index d17ed0aca..000000000 --- a/networking_odl/tests/contrib/post_test_hook.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash - -set -xe - -NETWORKING_ODL_DIR="${NETWORKING_ODL_DIR:-$BASE/new/networking-odl}" -SCRIPTS_DIR="/usr/os-testr-env/bin/" -GATE_STACK_USER=stack - -venv=${1:-"dsvm-functional"} - -function generate_testr_results { - # Give job user rights to access tox logs - sudo -H -u $owner chmod o+rw . 
- sudo -H -u $owner chmod o+rw -R .testrepository - if [ -f ".testrepository/0" ] ; then - # Some tests have python-subunit installed globally - # and in gate we specified sitepackages=True - if [ -x .tox/$venv/bin/subunit-1to2 ]; then - SUBUNIT1TO2=.tox/$venv/bin/subunit-1to2 - else - # Use system subunit-1to2 - SUBUNIT1TO2=subunit-1to2 - fi - $SUBUNIT1TO2 < .testrepository/0 > ./testrepository.subunit - $SCRIPTS_DIR/subunit2html ./testrepository.subunit testr_results.html - gzip -9 ./testrepository.subunit - gzip -9 ./testr_results.html - sudo mv ./*.gz /opt/stack/logs/ - fi -} - -case $venv in - dsvm-functional*|dsvm-fullstack) - owner=$GATE_STACK_USER - sudo_env= - - # Set owner permissions according to job's requirements. - sudo chown -R $owner:stack $BASE/new - cd $NETWORKING_ODL_DIR - - # Run tests - echo "Running networking-odl $venv test suite" - set +e - sudo -H -u $owner $sudo_env tox -e $venv - testr_exit_code=$? - # stop ODL server for complete log - $BASE/new/opendaylight/distribution-karaf-*/bin/stop - sleep 3 - set -e - - # Collect and parse results - generate_testr_results - exit $testr_exit_code - ;; - *) - echo "Unrecognized test suite $venv". - exit 1 -esac diff --git a/networking_odl/tests/fullstack/__init__.py b/networking_odl/tests/fullstack/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/fullstack/base.py b/networking_odl/tests/fullstack/base.py deleted file mode 100644 index 7787bb009..000000000 --- a/networking_odl/tests/fullstack/base.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
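
The fixtures defined in networking_odl/tests/base.py above all follow the same pattern: start a mock.patch inside _setUp() and keep it scoped to a single test. A minimal sketch of that pattern, with the fixture and test classes below being illustrative stand-ins rather than networking-odl code:

    import fixtures
    import mock
    import requests


    class NoopRequestsFixture(fixtures.Fixture):
        """Replace requests.Session.request for the lifetime of one test."""

        def _setUp(self):
            patcher = mock.patch('requests.sessions.Session.request')
            patcher.start()
            # Registering the matching stop keeps the patch from leaking
            # into other tests.
            self.addCleanup(patcher.stop)


    class ExampleTest(fixtures.TestWithFixtures):
        def test_request_is_patched(self):
            self.useFixture(NoopRequestsFixture())
            result = requests.Session().request('GET', 'http://example.test')
            # No network I/O happened; the call returned a MagicMock.
            self.assertIsInstance(result, mock.MagicMock)
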
- -from neutron.plugins.ml2 import config -from neutron.plugins.ml2.drivers import type_vxlan # noqa -from neutron.tests.unit.plugins.ml2 import test_plugin - -from networking_odl.common import config as odl_config - - -class TestODLFullStackBase(test_plugin.Ml2PluginV2TestCase): - - _mechanism_drivers = ['logger', 'opendaylight'] - _extension_drivers = ['port_security'] - - def setUp(self): - config.cfg.CONF.set_override('extension_drivers', - self._extension_drivers, - group='ml2') - config.cfg.CONF.set_override('tenant_network_types', - ['vxlan'], - group='ml2') - config.cfg.CONF.set_override('vni_ranges', - ['1:1000'], - group='ml2_type_vxlan') - - odl_url = 'http://127.0.0.1:8087/controller/nb/v2/neutron' - odl_config.cfg.CONF.set_override('url', - odl_url, - group='ml2_odl') - odl_config.cfg.CONF.set_override('username', - 'admin', - group='ml2_odl') - odl_config.cfg.CONF.set_override('password', - 'admin', - group='ml2_odl') - odl_config.cfg.CONF.set_override('port_binding_controller', - 'legacy-port-binding', - group='ml2_odl') - - super(TestODLFullStackBase, self).setUp() diff --git a/networking_odl/tests/fullstack/requirements.txt b/networking_odl/tests/fullstack/requirements.txt deleted file mode 100644 index ccaa3895b..000000000 --- a/networking_odl/tests/fullstack/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -tempest>=12.1.0 # Apache-2.0 - diff --git a/networking_odl/tests/fullstack/test_mech_driver.py b/networking_odl/tests/fullstack/test_mech_driver.py deleted file mode 100644 index 14165dc8d..000000000 --- a/networking_odl/tests/fullstack/test_mech_driver.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
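
TestODLFullStackBase above drives its whole environment through oslo.config group overrides. A rough standalone illustration of how grouped options behave; the 'ml2_odl_example' group and its options are made up for the example and only 'url' mirrors a real ml2_odl option:

    from oslo_config import cfg

    CONF = cfg.ConfigOpts()
    # Options must be registered under their group before they can be
    # overridden or read.
    CONF.register_opts(
        [cfg.StrOpt('url'), cfg.StrOpt('username'), cfg.StrOpt('password')],
        group='ml2_odl_example')
    CONF([])  # parse an empty command line; no config files needed

    CONF.set_override('url',
                      'http://127.0.0.1:8087/controller/nb/v2/neutron',
                      group='ml2_odl_example')
    assert CONF.ml2_odl_example.url.endswith('/neutron')
    CONF.clear_override('url', group='ml2_odl_example')
    assert CONF.ml2_odl_example.url is None  # back to the StrOpt default
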
- -import re - -from oslo_utils import uuidutils -from tempest.lib.common.utils import test_utils - -from neutron.agent.common import utils -from neutron.tests.common import net_helpers - -from networking_odl.tests.fullstack import base - - -class TestMechDriver(base.TestODLFullStackBase): - - def setUp(self): - super(TestMechDriver, self).setUp() - - def _get_ovs_system_id(self): - # Example: - # ('{system-id="f1487b2f-b103-4ce0-b4ab-1a94258405dd"}\n', '') - system_id = utils.execute(['ovs-vsctl', 'get', 'Open_Vswitch', - '.', 'external_ids'], - run_as_root=True) - return re.findall('".*"', system_id)[0] - - def _check_device_existence(self, tap): - def _callback(): - ports = utils.execute( - ['ovs-vsctl', 'list-ports', 'br-int'], - run_as_root=True) - - return bool(re.search(tap, ports)) - return test_utils.call_until_true(_callback, 30, 2) - - def _create_ovs_vif_port(self, bridge, dev, iface_id, mac, instance_id): - return utils.execute(['ovs-vsctl', 'add-port', bridge, dev, - '--', 'set', 'Interface', dev, - 'external-ids:iface-id=%s' % iface_id, - 'external-ids:iface-status=active', - 'external-ids:attached-mac=%s' % mac, - 'external-ids:vm-uuid=%s' % instance_id, - 'type=tap'], run_as_root=True) - - def test_port_plugging(self): - # Step1: create test network - resp = self._create_network(self.fmt, "test_fullstack_net", True) - resp = self.deserialize(self.fmt, resp) - net_id = resp['network']['id'] - - # Step2: create port and binding to host - host_id = self._get_ovs_system_id() - resp = self._create_port(self.fmt, net_id) - resp = self.deserialize(self.fmt, resp) - port_id = resp['port']['id'] - mac = resp['port']['mac_address'] - tap = 'tap' + port_id[:net_helpers.OVSPortFixture.NIC_NAME_LEN - 3] - data = {'port': {'binding:host_id': host_id}} - req = self.new_update_request('ports', data, port_id) - resp = self.deserialize(self.fmt, req.get_response(self.api)) - vif_type = resp['port']['binding:vif_type'] - self.assertEqual('ovs', vif_type) - self.assertFalse(self._check_device_existence(tap)) - - # Step3: plug vif - self._create_ovs_vif_port('br-int', tap, port_id, mac, - uuidutils.generate_uuid()) - - # TODO(manjeets) Add a test case to verify mac - # in flows - # Step4: verify device - self.assertTrue(self._check_device_existence(tap)) diff --git a/networking_odl/tests/functional/__init__.py b/networking_odl/tests/functional/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/functional/base.py b/networking_odl/tests/functional/base.py deleted file mode 100644 index be529f2f8..000000000 --- a/networking_odl/tests/functional/base.py +++ /dev/null @@ -1,114 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
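
_check_device_existence above polls OVS through tempest's test_utils.call_until_true(func, duration, sleep_for). A predicate-polling helper of that shape looks roughly like the sketch below; this is an approximation for illustration, not tempest's implementation:

    import time


    def call_until_true(predicate, duration, sleep_for):
        """Return True as soon as predicate() is truthy, False if
        duration seconds elapse first, checking every sleep_for seconds."""
        deadline = time.time() + duration
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(sleep_for)
        return False

    # e.g. wait up to 30s, checking every 2s, for a condition to become true:
    # call_until_true(lambda: tap_name in ovs_port_listing, 30, 2)
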
-# - -import functools - -import mock -from neutron.common import utils -from neutron.plugins.ml2 import config -from neutron.tests.unit.plugins.ml2 import test_plugin - -from networking_odl.common import client -from networking_odl.common import constants as odl_const -from networking_odl.common import utils as odl_utils -from networking_odl.db import db -from networking_odl.journal import journal -from networking_odl.tests.unit import test_base_db - - -class OdlTestsBase(object): - def setUp(self): - config.cfg.CONF.set_override( - 'url', 'http://127.0.0.1:8181/controller/nb/v2/neutron', 'ml2_odl') - config.cfg.CONF.set_override('username', 'admin', 'ml2_odl') - config.cfg.CONF.set_override('password', 'admin', 'ml2_odl') - config.cfg.CONF.set_override('mechanism_drivers', - self._mechanism_drivers, - group='ml2') - config.cfg.CONF.set_override('extension_drivers', - ['qos', 'port_security'], - group='ml2') - self.client = client.OpenDaylightRestClient.create_client() - super(OdlTestsBase, self).setUp() - - def setup_parent(self): - """Perform parent setup with the common plugin configuration class.""" - # Ensure that the parent setup can be called without arguments - # by the common configuration setUp. - service_plugins = {'l3_plugin_name': self.l3_plugin} - service_plugins.update(self.get_additional_service_plugins()) - parent_setup = functools.partial( - super(test_plugin.Ml2PluginV2TestCase, self).setUp, - plugin=self.get_plugins(), - ext_mgr=self.get_ext_managers(), - service_plugins=service_plugins - ) - self.useFixture(test_plugin.Ml2ConfFixture(parent_setup)) - - def get_plugins(self): - return test_plugin.PLUGIN_NAME - - def get_ext_managers(self): - return None - - def get_odl_resource(self, resource_type, resource): - return self.client.get_resource( - resource_type, resource[resource_type]['id']) - - def assert_resource_created(self, resource_type, resource): - odl_resource = self.get_odl_resource(resource_type, resource) - self.assertIsNotNone(odl_resource) - - def resource_update_test(self, resource_type, resource): - update_field = 'name' - update_value = 'bubu' - resource = self.get_odl_resource(resource_type, resource) - self.assertNotEqual(update_value, - resource[resource_type][update_field]) - - self._update(odl_utils.make_url_object(resource_type), - resource[resource_type]['id'], - {resource_type: {update_field: update_value}}) - resource = self.get_odl_resource(resource_type, resource) - self.assertEqual(update_value, resource[resource_type][update_field]) - - def resource_delete_test(self, resource_type, resource): - self._delete(odl_utils.make_url_object(resource_type), - resource[resource_type]['id']) - self.assertIsNone(self.get_odl_resource(resource_type, resource)) - - -class V2DriverAdjustment(test_base_db.ODLBaseDbTestCase): - def setUp(self): - # NOTE(yamahata): for functional test, timer isn't needed. - # and if tearDown/setUp() is called during journal thread is running - # by timer, journal thread may be upset by sudden db transaction. 
- mock.patch.object(journal.OpenDaylightJournalThread, - '_start_sync_timer') - super(V2DriverAdjustment, self).setUp() - - def get_odl_resource(self, resource_type, resource): - def no_journal_rows(): - pending_rows = db.get_all_db_rows_by_state( - self.db_session, odl_const.PENDING) - processing_rows = db.get_all_db_rows_by_state( - self.db_session, odl_const.PROCESSING) - return len(pending_rows) == 0 and len(processing_rows) == 0 - - utils.wait_until_true(no_journal_rows, 5, 0.5) - - return super(V2DriverAdjustment, self).get_odl_resource( - resource_type, resource) diff --git a/networking_odl/tests/functional/db/__init__.py b/networking_odl/tests/functional/db/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/functional/db/test_migrations.py b/networking_odl/tests/functional/db/test_migrations.py deleted file mode 100644 index a779a6a57..000000000 --- a/networking_odl/tests/functional/db/test_migrations.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2016 Intel Corporation. -# Copyright 2016 Isaku Yamahata -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from sqlalchemy import sql -from sqlalchemy.sql import schema - -from neutron.db.migration.alembic_migrations import external -from neutron.db.migration import cli as migration -from neutron.tests.functional.db import test_migrations -from neutron.tests.unit import testlib_api - -from networking_odl.db import head - -FWAAS_TABLES = [ - 'cisco_firewall_associations', - 'firewall_group_port_associations_v2', - 'firewall_groups_v2', - 'firewall_policies_v2', - 'firewall_policy_rule_associations_v2', - 'firewall_router_associations', - 'firewall_rules_v2', -] - -L2GW_TABLES = [ - 'l2gatewayconnections', - 'l2gatewaydevices', - 'l2gatewayinterfaces', - 'l2gateways', - 'l2gw_alembic_version', - 'logical_switches', - 'pending_ucast_macs_remotes', - 'physical_locators', - 'physical_ports', - 'physical_switches', - 'ucast_macs_locals', - 'ucast_macs_remotes', - 'vlan_bindings', -] - -BGPVPN_TABLES = [ - 'bgpvpns', - 'bgpvpn_network_associations', - 'bgpvpn_router_associations', -] - -# EXTERNAL_TABLES should contain all names of tables that are not related to -# current repo. 
-EXTERNAL_TABLES = set(external.TABLES + FWAAS_TABLES + - L2GW_TABLES + BGPVPN_TABLES) - -VERSION_TABLE = 'odl_alembic_version' - - -class _TestModelsMigrationsODL(test_migrations._TestModelsMigrations): - def db_sync(self, engine): - cfg.CONF.set_override('connection', engine.url, group='database') - for conf in migration.get_alembic_configs(): - self.alembic_config = conf - self.alembic_config.neutron_config = cfg.CONF - migration.do_alembic_command(conf, 'upgrade', 'heads') - - def get_metadata(self): - return head.get_metadata() - - def include_object(self, object_, name, type_, reflected, compare_to): - if type_ == 'table' and (name.startswith('alembic') or - name == VERSION_TABLE or - name in EXTERNAL_TABLES): - return False - if type_ == 'index' and reflected and name.startswith("idx_autoinc_"): - return False - return True - - def _filter_mysql_server_func_now(self, diff_elem): - # TODO(yamahata): remove this bug work around once it's fixed - # example: - # when the column has server_default=sa.func.now(), the diff - # includes the followings diff - # [ ('modify_default', - # None, - # 'opendaylightjournal', - # 'created_at', - # {'existing_nullable': True, - # 'existing_type': DATETIME()}, - # DefaultClause(, for_update=False), - # DefaultClause(, - # for_update=False))] - # another example - # [ ('modify_default', - # None, - # 'opendaylightjournal', - # 'created_at', - # {'existing_nullable': True, - # 'existing_type': DATETIME()}, - # None, - # DefaultClause(, - # for_update=False))] - - meta_def = diff_elem[0][5] - rendered_meta_def = diff_elem[0][6] - if (diff_elem[0][0] == 'modify_default' and - diff_elem[0][2] in ('opendaylightjournal', - 'opendaylight_periodic_task') and - isinstance(meta_def, schema.DefaultClause) and - isinstance(meta_def.arg, sql.elements.TextClause) and - meta_def.reflected and - meta_def.arg.text == u'CURRENT_TIMESTAMP' and - isinstance(rendered_meta_def, schema.DefaultClause) and - isinstance(rendered_meta_def.arg, sql.functions.now) and - not rendered_meta_def.reflected and - meta_def.for_update == rendered_meta_def.for_update): - return False - if (diff_elem[0][0] == 'modify_default' and - diff_elem[0][2] == 'opendaylightjournal' and - meta_def is None and - isinstance(rendered_meta_def, schema.DefaultClause) and - isinstance(rendered_meta_def.arg, sql.functions.now) and - not rendered_meta_def.reflected): - return False - return True - - def filter_metadata_diff(self, diff): - return filter(self._filter_mysql_server_func_now, diff) - - -class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, - _TestModelsMigrationsODL, - testlib_api.SqlTestCaseLight): - pass - - -class TestModelsMigrationsPostgresql(testlib_api.PostgreSQLTestCaseMixin, - _TestModelsMigrationsODL, - testlib_api.SqlTestCaseLight): - pass diff --git a/networking_odl/tests/functional/requirements.txt b/networking_odl/tests/functional/requirements.txt deleted file mode 100644 index 2f9131d0b..000000000 --- a/networking_odl/tests/functional/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Additional requirements for functional tests - -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -psutil>=1.1.1,<2.0.0 -psycopg2 -PyMySQL>=0.6.2 # MIT License diff --git a/networking_odl/tests/functional/test_bgpvpn.py b/networking_odl/tests/functional/test_bgpvpn.py deleted file mode 100644 index 447c510b6..000000000 --- a/networking_odl/tests/functional/test_bgpvpn.py +++ /dev/null @@ -1,173 +0,0 @@ -# -# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import webob.exc - -from neutron.tests.unit.plugins.ml2 import test_plugin -from neutron.tests.unit import testlib_api - -# BGPVPN Table metadata should be imported before -# sqlalchemy metadata.create_all call else tables -# will not be created. -from networking_bgpvpn.neutron.db import bgpvpn_db # noqa -from networking_bgpvpn.tests.unit.services import test_plugin as bgpvpn_plugin - -from networking_odl.common import constants as odl_const -from networking_odl.tests.functional import base - - -class _TestBGPVPNBase(base.OdlTestsBase): - rds = ['100:1'] - - def setUp(self, plugin=None, service_plugins=None, - ext_mgr=None): - provider = { - 'service_type': 'BGPVPN', - 'name': 'OpenDaylight', - 'driver': 'networking_odl.bgpvpn.odl_v2.OpenDaylightBgpvpnDriver', - 'default': True - } - self.service_providers.return_value = [provider] - self.plugin_arg = plugin - self.service_plugin_arg = service_plugins - self.ext_mgr_arg = ext_mgr - super(_TestBGPVPNBase, self).setUp() - - def get_ext_managers(self): - return self.ext_mgr_arg - - def get_plugins(self): - return self.plugin_arg - - def get_additional_service_plugins(self): - return self.service_plugin_arg - - def _assert_networks_associated(self, net_ids, bgpvpn): - response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn) - self.assertItemsEqual(net_ids, - response[odl_const.ODL_BGPVPN]['networks']) - - def _assert_routers_associated(self, router_ids, bgpvpn): - response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn) - self.assertItemsEqual(router_ids, - response[odl_const.ODL_BGPVPN]['routers']) - - def test_bgpvpn_create(self): - with self.bgpvpn() as bgpvpn: - self.assert_resource_created(odl_const.ODL_BGPVPN, bgpvpn) - - def test_bgpvpn_create_with_rds(self): - with self.bgpvpn(route_distinguishers=self.rds) as bgpvpn: - response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn) - self.assertItemsEqual(self.rds, - response[odl_const.ODL_BGPVPN] - ['route_distinguishers']) - - def test_bgpvpn_delete(self): - with self.bgpvpn(do_delete=False) as bgpvpn: - self._delete('bgpvpn/bgpvpns', bgpvpn['bgpvpn']['id']) - self.assertIsNone( - self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)) - - def test_associate_dissociate_net(self): - with (self.network()) as net1, ( - self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: - net_id = net1['network']['id'] - id = bgpvpn['bgpvpn']['id'] - with self.assoc_net(id, net_id): - self._assert_networks_associated([net_id], bgpvpn) - self._assert_networks_associated([], bgpvpn) - - def test_associate_multiple_networks(self): - with (self.network()) as net1, 
(self.network()) as net2, ( - self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: - net_id1 = net1['network']['id'] - net_id2 = net2['network']['id'] - id = bgpvpn['bgpvpn']['id'] - with self.assoc_net(id, net_id1), self.assoc_net(id, net_id2): - self._assert_networks_associated([net_id1, net_id2], bgpvpn) - - def test_assoc_multiple_networks_dissoc_one(self): - with (self.network()) as net1, (self.network()) as net2, ( - self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: - net_id1 = net1['network']['id'] - net_id2 = net2['network']['id'] - id = bgpvpn['bgpvpn']['id'] - with self.assoc_net(id, net_id1): - with self.assoc_net(id, net_id2): - self._assert_networks_associated([net_id1, net_id2], - bgpvpn) - self._assert_networks_associated([net_id1], bgpvpn) - - def test_associate_dissociate_router(self): - with (self.router(tenant_id=self._tenant_id)) as router, ( - self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: - router_id = router['router']['id'] - id = bgpvpn['bgpvpn']['id'] - with self.assoc_router(id, router_id): - self._assert_routers_associated([router_id], bgpvpn) - self._assert_routers_associated([], bgpvpn) - - def test_associate_multiple_routers(self): - with (self.router(tenant_id=self._tenant_id, name='r1')) as r1, ( - self.router(tenant_id=self._tenant_id, name='r2')) as r2, ( - self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: - router_id1 = r1['router']['id'] - router_id2 = r2['router']['id'] - id = bgpvpn['bgpvpn']['id'] - with self.assoc_router(id, router_id1): - self._assert_routers_associated([router_id1], bgpvpn) - with testlib_api.ExpectedException( - webob.exc.HTTPClientError) as ctx_manager: - with self.assoc_router(id, router_id2): - pass - self.assertEqual(webob.exc.HTTPBadRequest.code, - ctx_manager.exception.code) - self._assert_routers_associated([router_id1], bgpvpn) - - def test_assoc_router_multiple_bgpvpns(self): - with (self.router(tenant_id=self._tenant_id, name='r1')) as router, ( - self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn1, ( - self.bgpvpn()) as bgpvpn2: - router_id = router['router']['id'] - bgpvpn_id_1 = bgpvpn1['bgpvpn']['id'] - bgpvpn_id_2 = bgpvpn2['bgpvpn']['id'] - with (self.assoc_router(bgpvpn_id_1, router_id)), ( - self.assoc_router(bgpvpn_id_2, router_id)): - self._assert_routers_associated([router_id], bgpvpn1) - self._assert_routers_associated([router_id], bgpvpn2) - - def test_associate_router_network(self): - with (self.router(tenant_id=self._tenant_id)) as router, ( - self.network()) as net1, ( - self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: - router_id = router['router']['id'] - net_id = net1['network']['id'] - id = bgpvpn['bgpvpn']['id'] - with self.assoc_router(id, router_id), self.assoc_net(id, net_id): - response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn) - self.assertItemsEqual([router_id], - response[odl_const.ODL_BGPVPN] - ['routers']) - self.assertItemsEqual([net_id], - response[odl_const.ODL_BGPVPN] - ['networks']) - - -class TestBGPVPNV2Driver(base.V2DriverAdjustment, - bgpvpn_plugin.BgpvpnTestCaseMixin, - _TestBGPVPNBase, test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight_v2'] diff --git a/networking_odl/tests/functional/test_l2gateway.py b/networking_odl/tests/functional/test_l2gateway.py deleted file mode 100644 index 615384f4a..000000000 --- a/networking_odl/tests/functional/test_l2gateway.py +++ /dev/null @@ -1,179 +0,0 @@ -# -# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import contextlib -import copy - -import mock -import webob.exc - -from neutron.api import extensions as api_extensions -from neutron.db import servicetype_db as sdb -from neutron.tests.unit.plugins.ml2 import test_plugin -from oslo_utils import uuidutils - -from networking_l2gw import extensions as l2gw_extensions -from networking_l2gw.services.l2gateway.common import constants as l2gw_const -from networking_l2gw.services.l2gateway.plugin import L2GatewayPlugin -from networking_odl.common import constants as odl_const -from networking_odl.tests.functional import base - -_uuid = uuidutils.generate_uuid - - -class L2GatewayTestCaseMixin(object): - - devices = [{'device_name': 's1', - 'interfaces': [{'name': 'int1'}] - }, - {'device_name': 's2', - 'interfaces': [{'name': 'int2', 'segmentation_id': [10, 20]}] - }] - l2gw_data = {l2gw_const.GATEWAY_RESOURCE_NAME: {'tenant_id': _uuid(), - 'name': 'l2gw', - 'devices': devices}} - - def setUp(self): - """Perform parent setup with the common plugin configuration class.""" - # Ensure that the parent setup can be called without arguments - # by the common configuration setUp. - bits = self.service_provider.split(':') - provider = { - 'service_type': bits[0], - 'name': bits[1], - 'driver': bits[2], - 'default': True - } - - # override the default service provider - self.service_providers = ( - mock.patch.object(sdb.ServiceTypeManager, - 'get_service_providers').start()) - self.service_providers.return_value = [provider] - super(L2GatewayTestCaseMixin, self).setUp() - - @contextlib.contextmanager - def l2gateway(self, do_delete=True, **kwargs): - req_data = copy.deepcopy(self.l2gw_data) - - fmt = 'json' - if kwargs.get('data'): - req_data = kwargs.get('data') - else: - req_data[l2gw_const.GATEWAY_RESOURCE_NAME].update(kwargs) - l2gw_req = self.new_create_request(l2gw_const.L2_GATEWAYS, - req_data, fmt=fmt) - res = l2gw_req.get_response(self.ext_api) - if res.status_int >= 400: - raise webob.exc.HTTPClientError(code=res.status_int) - l2gw = self.deserialize('json', res) - yield l2gw - if do_delete: - self._delete(l2gw_const.L2_GATEWAYS, - l2gw[l2gw_const.GATEWAY_RESOURCE_NAME]['id']) - - @contextlib.contextmanager - def l2gateway_connection(self, nw_id, l2gw_id, - do_delete=True, **kwargs): - req_data = { - l2gw_const.CONNECTION_RESOURCE_NAME: - {'tenant_id': _uuid(), - 'network_id': nw_id, - 'l2_gateway_id': l2gw_id} - } - - fmt = 'json' - if kwargs.get('data'): - req_data = kwargs.get('data') - else: - req_data[l2gw_const.CONNECTION_RESOURCE_NAME].update(kwargs) - l2gw_connection_req = self.new_create_request( - l2gw_const.L2_GATEWAYS_CONNECTION, req_data, fmt=fmt) - res = l2gw_connection_req.get_response(self.ext_api) - if res.status_int >= 400: - raise webob.exc.HTTPClientError(code=res.status_int) - l2gw_connection = self.deserialize('json', res) - yield l2gw_connection - if do_delete: - self._delete(l2gw_const.L2_GATEWAYS_CONNECTION, - l2gw_connection - [l2gw_const.CONNECTION_RESOURCE_NAME]['id']) 
- - @staticmethod - def convert_to_odl_l2gw_connection(l2gw_connection_in): - odl_l2_gw_conn_data = copy.deepcopy( - l2gw_connection_in[l2gw_const.CONNECTION_RESOURCE_NAME]) - odl_l2_gw_conn_data['gateway_id'] = ( - odl_l2_gw_conn_data['l2_gateway_id']) - odl_l2_gw_conn_data.pop('l2_gateway_id') - return {odl_const.ODL_L2GATEWAY_CONNECTION: odl_l2_gw_conn_data} - - -class _TestL2GatewayBase(base.OdlTestsBase, L2GatewayTestCaseMixin): - - def get_ext_managers(self): - extensions_path = ':'.join(l2gw_extensions.__path__) - return api_extensions.PluginAwareExtensionManager( - extensions_path, - {'l2gw_plugin': L2GatewayPlugin()}) - - def get_additional_service_plugins(self): - l2gw_plugin_str = ('networking_l2gw.services.l2gateway.plugin.' - 'L2GatewayPlugin') - service_plugin = {'l2gw_plugin': l2gw_plugin_str} - return service_plugin - - def test_l2gateway_create(self): - with self.l2gateway(name='mygateway') as l2gateway: - self.assert_resource_created(odl_const.ODL_L2GATEWAY, l2gateway) - - def test_l2gateway_update(self): - with self.l2gateway(name='gateway1') as l2gateway: - self.resource_update_test(odl_const.ODL_L2GATEWAY, l2gateway) - - def test_l2gateway_delete(self): - with self.l2gateway(do_delete=False) as l2gateway: - self.resource_delete_test(odl_const.ODL_L2GATEWAY, l2gateway) - - def test_l2gateway_connection_create_delete(self): - odl_l2gw_connection = {} - with self.network() as network: - with self.l2gateway() as l2gateway: - net_id = network['network']['id'] - l2gw_id = l2gateway[odl_const.ODL_L2GATEWAY]['id'] - with (self.l2gateway_connection(net_id, l2gw_id) - ) as l2gw_connection: - odl_l2gw_connection = ( - self.convert_to_odl_l2gw_connection(l2gw_connection)) - self.assert_resource_created( - odl_const.ODL_L2GATEWAY_CONNECTION, - odl_l2gw_connection) - self.assertIsNone(self.get_odl_resource( - odl_const.ODL_L2GATEWAY_CONNECTION, odl_l2gw_connection)) - - -class TestL2gatewayV1Driver(_TestL2GatewayBase, - test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight'] - service_provider = ('L2GW:OpenDaylight:networking_odl.l2gateway.driver.' - 'OpenDaylightL2gwDriver:default') - - -class TestL2gatewayV2Driver(base.V2DriverAdjustment, _TestL2GatewayBase, - test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight_v2'] - service_provider = ('L2GW:OpenDaylight:networking_odl.l2gateway.driver_v2.' - 'OpenDaylightL2gwDriver:default') diff --git a/networking_odl/tests/functional/test_l3.py b/networking_odl/tests/functional/test_l3.py deleted file mode 100644 index f404bcd83..000000000 --- a/networking_odl/tests/functional/test_l3.py +++ /dev/null @@ -1,91 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
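
The l2gateway() and l2gateway_connection() helpers above wrap the create-request/yield/delete lifecycle in a context manager so every test gets a resource that is cleaned up when its with-block exits. Stripped of the Neutron plumbing, the shape of such a helper is roughly the following, where 'api' and its create/delete methods are placeholders:

    import contextlib


    @contextlib.contextmanager
    def gateway(api, do_delete=True, **kwargs):
        gw = api.create_l2_gateway(kwargs)   # assumed stand-in client call
        yield gw
        # Cleanup runs when the with-block finishes normally.
        if do_delete:
            api.delete_l2_gateway(gw['id'])

    # Typical use in a test:
    #     with gateway(api, name='mygateway') as gw:
    #         ...assertions against gw...
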
-# - -import functools - -from neutron.tests.unit.extensions import test_l3 -from neutron.tests.unit.plugins.ml2 import test_plugin -from neutron_lib import constants as q_const - -from networking_odl.common import constants as odl_const -from networking_odl.tests.functional import base - - -class _TestL3Base(test_l3.L3NatTestCaseMixin, base.OdlTestsBase): - - # Override default behavior so that extension manager is used, otherwise - # we can't test security groups. - def setup_parent(self): - """Perform parent setup with the common plugin configuration class.""" - ext_mgr = test_l3.L3TestExtensionManager() - - # Ensure that the parent setup can be called without arguments - # by the common configuration setUp. - parent_setup = functools.partial( - super(test_plugin.Ml2PluginV2TestCase, self).setUp, - plugin=test_plugin.PLUGIN_NAME, - ext_mgr=ext_mgr, - service_plugins={'l3_plugin_name': self.l3_plugin}, - - ) - self.useFixture(test_plugin.Ml2ConfFixture(parent_setup)) - - def test_router_create(self): - with self.router() as router: - self.assert_resource_created(odl_const.ODL_ROUTER, router) - - def test_router_update(self): - with self.router() as router: - self.resource_update_test(odl_const.ODL_ROUTER, router) - - def test_router_delete(self): - with self.router() as router: - self.resource_delete_test(odl_const.ODL_ROUTER, router) - - def test_floatingip_create(self): - with self.floatingip_with_assoc() as fip: - self.assert_resource_created(odl_const.ODL_FLOATINGIP, fip) - - # Test FIP was deleted since the code creating the FIP deletes it - # once the context block exists. - odl_fip = self.get_odl_resource(odl_const.ODL_FLOATINGIP, fip) - self.assertIsNone(odl_fip) - - def test_floatingip_status_with_port(self): - with self.floatingip_with_assoc() as fip: - self.assertEqual( - q_const.FLOATINGIP_STATUS_ACTIVE, - fip['floatingip']['status']) - - def test_floatingip_status_without_port(self): - with self.subnet() as subnet: - with self.floatingip_no_assoc(subnet) as fip: - # status should be down when floating ip - # is not associated to any port - self.assertEqual( - q_const.FLOATINGIP_STATUS_DOWN, - fip['floatingip']['status']) - - -class TestL3PluginV1(_TestL3Base, test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight'] - l3_plugin = 'odl-router' - - -class TestL3PluginV2(base.V2DriverAdjustment, _TestL3Base, - test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight_v2'] - l3_plugin = 'odl-router_v2' diff --git a/networking_odl/tests/functional/test_ml2_drivers.py b/networking_odl/tests/functional/test_ml2_drivers.py deleted file mode 100644 index 2068202d6..000000000 --- a/networking_odl/tests/functional/test_ml2_drivers.py +++ /dev/null @@ -1,134 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
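
setup_parent above defers the parent setUp call with functools.partial so that Ml2ConfFixture can invoke it later with the plugin, extension manager and service plugins already bound. The functools.partial behaviour in isolation, with the 'ml2' plugin value used purely as a placeholder:

    import functools


    def parent_setup(plugin=None, ext_mgr=None, service_plugins=None):
        return plugin, ext_mgr, service_plugins

    deferred = functools.partial(
        parent_setup,
        plugin='ml2',
        service_plugins={'l3_plugin_name': 'odl-router'})
    # Whoever receives 'deferred' can run the fully configured setup later:
    assert deferred() == ('ml2', None, {'l3_plugin_name': 'odl-router'})
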
-# - -import functools - -from neutron.tests.unit.extensions import test_securitygroup -from neutron.tests.unit.plugins.ml2 import test_plugin - -from networking_odl.common import constants as odl_const -from networking_odl.tests.functional import base - - -class _DriverTest(base.OdlTestsBase): - - def test_network_create(self): - with self.network() as network: - self.assert_resource_created(odl_const.ODL_NETWORK, network) - - def test_network_update(self): - with self.network() as network: - self.resource_update_test(odl_const.ODL_NETWORK, network) - - def test_network_delete(self): - with self.network() as network: - self.resource_delete_test(odl_const.ODL_NETWORK, network) - - def test_subnet_create(self): - with self.network() as network: - with self.subnet(network=network) as subnet: - self.assert_resource_created(odl_const.ODL_SUBNET, subnet) - - def test_subnet_update(self): - with self.network() as network: - with self.subnet(network=network) as subnet: - self.resource_update_test(odl_const.ODL_SUBNET, subnet) - - def test_subnet_delete(self): - with self.network() as network: - with self.subnet(network=network) as subnet: - self.resource_delete_test(odl_const.ODL_SUBNET, subnet) - - def test_port_create(self): - with self.network() as network: - with self.subnet(network=network) as subnet: - with self.port(subnet=subnet) as port: - self.assert_resource_created(odl_const.ODL_PORT, port) - - def test_port_update(self): - with self.network() as network: - with self.subnet(network=network) as subnet: - with self.port(subnet=subnet) as port: - self.resource_update_test(odl_const.ODL_PORT, port) - - def test_port_delete(self): - with self.network() as network: - with self.subnet(network=network) as subnet: - with self.port(subnet=subnet) as port: - self.resource_delete_test(odl_const.ODL_PORT, port) - - -class _DriverSecGroupsTests(base.OdlTestsBase): - - # Override default behavior so that extension manager is used, otherwise - # we can't test security groups. - def setup_parent(self): - """Perform parent setup with the common plugin configuration class.""" - ext_mgr = ( - test_securitygroup.SecurityGroupTestExtensionManager()) - # Ensure that the parent setup can be called without arguments - # by the common configuration setUp. 
- parent_setup = functools.partial( - super(test_plugin.Ml2PluginV2TestCase, self).setUp, - plugin=test_plugin.PLUGIN_NAME, - ext_mgr=ext_mgr, - ) - self.useFixture(test_plugin.Ml2ConfFixture(parent_setup)) - - def test_security_group_create(self): - with self.security_group() as sg: - self.assert_resource_created(odl_const.ODL_SG, sg) - - def test_security_group_update(self): - with self.security_group() as sg: - self.resource_update_test(odl_const.ODL_SG, sg) - - def test_security_group_delete(self): - with self.security_group() as sg: - self.resource_delete_test(odl_const.ODL_SG, sg) - - def test_security_group_rule_create(self): - with self.security_group() as sg: - sg_id = sg[odl_const.ODL_SG]['id'] - with self.security_group_rule(security_group_id=sg_id) as sg_rule: - self.assert_resource_created(odl_const.ODL_SG_RULE, sg_rule) - - def test_security_group_rule_delete(self): - with self.security_group() as sg: - sg_id = sg[odl_const.ODL_SG]['id'] - with self.security_group_rule(security_group_id=sg_id) as sg_rule: - self.resource_delete_test(odl_const.ODL_SG_RULE, sg_rule) - - -class TestV1Driver(_DriverTest, test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight'] - - -class TestV1DriverSecGroups(_DriverSecGroupsTests, - test_securitygroup.SecurityGroupsTestCase, - test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight'] - - -class TestV2Driver(base.V2DriverAdjustment, _DriverTest, - test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight_v2'] - - -class TestV2DriverSecGroups(base.V2DriverAdjustment, _DriverSecGroupsTests, - test_securitygroup.SecurityGroupsTestCase, - test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight_v2'] diff --git a/networking_odl/tests/functional/test_qos.py b/networking_odl/tests/functional/test_qos.py deleted file mode 100644 index d23f10551..000000000 --- a/networking_odl/tests/functional/test_qos.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2017 Intel Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import contextlib - -from oslo_utils import uuidutils - -from neutron.api.v2 import attributes -from neutron.extensions import qos as qos_ext -from neutron.services.qos import qos_plugin -from neutron.tests.unit.api import test_extensions -from neutron.tests.unit.plugins.ml2 import test_plugin -from neutron_lib.plugins import directory - -from networking_odl.common import constants as odl_const -from networking_odl.tests.functional import base - - -class QoSTestExtensionManager(object): - - def get_resources(self): - # Add the resources to the global attribute map - # This is done here as the setup process won't - # initialize the main API QoS which extends - # the global attribute map - attributes.RESOURCE_ATTRIBUTE_MAP.update( - qos_ext.RESOURCE_ATTRIBUTE_MAP) - return qos_ext.Qos.get_resources() - - def get_actions(self): - return [] - - def get_request_extensions(self): - return [] - - -class _QoSDriverTestCase(base.OdlTestsBase): - - def test_policy_create(self): - with self.qos_policy() as policy: - self.assert_resource_created( - odl_const.ODL_QOS_POLICY, policy) - - def test_policy_update(self): - with self.qos_policy() as policy: - self.resource_update_test( - odl_const.ODL_QOS_POLICY, policy) - - def test_policy_delete(self): - with self.qos_policy() as policy: - self.resource_delete_test( - odl_const.ODL_QOS_POLICY, policy) - - -class QoSDriverTests(base.V2DriverAdjustment, - _QoSDriverTestCase, - test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight_v2'] - - def setUp(self): - super(QoSDriverTests, self).setUp() - self.qos_plug = qos_plugin.QoSPlugin() - directory.add_plugin('QOS', self.qos_plug) - ext_mgr = QoSTestExtensionManager() - self.resource_prefix_map = {'policies': '/qos'} - self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) - tenant_id = uuidutils.generate_uuid() - self.policy_data = { - 'policy': {'name': 'test-policy', 'tenant_id': tenant_id}} - - @contextlib.contextmanager - def qos_policy(self, fmt='json'): - po_res = self.new_create_request('policies', self.policy_data, fmt) - po_rep = po_res.get_response(self.ext_api) - policy = self.deserialize(fmt, po_rep) - yield policy diff --git a/networking_odl/tests/functional/test_trunk_drivers.py b/networking_odl/tests/functional/test_trunk_drivers.py deleted file mode 100644 index a3347852e..000000000 --- a/networking_odl/tests/functional/test_trunk_drivers.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import contextlib - -from networking_odl.common import constants as odl_const -from networking_odl.tests.functional import base -from neutron.plugins.common import utils -from neutron.services.trunk import constants -from neutron.services.trunk import plugin as trunk_plugin -from neutron.tests.unit.plugins.ml2 import test_plugin - -from oslo_utils import uuidutils - - -class _TrunkDriverTest(base.OdlTestsBase): - - def test_trunk_create(self): - with self.trunk() as trunk: - self.assert_resource_created(odl_const.ODL_TRUNK, trunk) - - def test_trunk_update(self): - with self.trunk() as trunk: - trunk['trunk'].update(admin_state_up=False) - self.trunk_plugin.update_trunk(self.context, - trunk['trunk']['id'], trunk) - response = self.get_odl_resource(odl_const.ODL_TRUNK, trunk) - self.assertFalse(response['trunk']['admin_state_up']) - - def test_subport_create(self): - with self.trunk() as trunk: - with self.subport() as subport: - trunk_obj = self.trunk_plugin.add_subports( - self.context, trunk['trunk']['id'], - {'sub_ports': [subport]}) - response = self.get_odl_resource(odl_const.ODL_TRUNK, - {'trunk': trunk_obj}) - self.assertEqual(response['trunk']['sub_ports'][0]['port_id'], - subport['port_id']) - - def test_subport_delete(self): - with self.subport() as subport: - with self.trunk([subport]) as trunk: - response = self.get_odl_resource(odl_const.ODL_TRUNK, trunk) - self.assertEqual(response['trunk']['sub_ports'][0]['port_id'], - subport['port_id']) - trunk_obj = self.trunk_plugin.remove_subports( - self.context, trunk['trunk']['id'], - {'sub_ports': [subport]}) - response = self.get_odl_resource(odl_const.ODL_TRUNK, - {'trunk': trunk_obj}) - self.assertEqual(response['trunk']['sub_ports'], []) - - def test_trunk_delete(self): - with self.trunk() as trunk: - self.trunk_plugin.delete_trunk(self.context, trunk['trunk']['id']) - self.assertIsNone(self.get_odl_resource(odl_const.ODL_TRUNK, - trunk)) - - @contextlib.contextmanager - def trunk(self, subports=None): - subports = subports if subports else [] - with self.network() as network: - with self.subnet(network=network) as subnet: - with self.port(subnet=subnet) as trunk_parent: - tenant_id = uuidutils.generate_uuid() - trunk = {'port_id': trunk_parent['port']['id'], - 'tenant_id': tenant_id, 'project_id': tenant_id, - 'admin_state_up': True, - 'name': 'test_trunk', 'sub_ports': subports} - trunk_obj = self.trunk_plugin.create_trunk( - self.context, {'trunk': trunk}) - yield {'trunk': trunk_obj} - - @contextlib.contextmanager - def subport(self): - with self.port() as child_port: - subport = {'segmentation_type': 'vlan', - 'segmentation_id': 123, - 'port_id': child_port['port']['id']} - yield subport - - -class TestTrunkV2Driver(base.V2DriverAdjustment, _TrunkDriverTest, - test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight_v2'] - - def setUp(self): - super(TestTrunkV2Driver, self).setUp() - self.trunk_plugin = trunk_plugin.TrunkPlugin() - self.trunk_plugin.add_segmentation_type(constants.VLAN, - utils.is_valid_vlan_tag) - - -class TestTrunkV1Driver(_TrunkDriverTest, test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight'] - - def setUp(self): - super(TestTrunkV1Driver, self).setUp() - self.trunk_plugin = trunk_plugin.TrunkPlugin() - self.trunk_plugin.add_segmentation_type(constants.VLAN, - utils.is_valid_vlan_tag) diff --git a/networking_odl/tests/match.py b/networking_odl/tests/match.py deleted file mode 100644 index 96edd22c3..000000000 --- a/networking_odl/tests/match.py +++ /dev/null @@ 
-1,52 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fnmatch -import re - -from oslo_serialization import jsonutils - - -def json(obj): - return MatchJson(obj) - - -class MatchJson(object): - - def __init__(self, obj): - self._obj = obj - - def __eq__(self, json_text): - return self._obj == jsonutils.loads(json_text) - - def __repr__(self): - return "MatchJson({})".format(repr(self._obj)) - - -def wildcard(text): - return MatchWildcard(text) - - -class MatchWildcard(object): - - def __init__(self, obj): - self._text = text = str(obj) - self._reg = re.compile(fnmatch.translate(text)) - - def __eq__(self, obj): - return self._reg.match(str(obj)) - - def __repr__(self): - return "MatchWildcard({})".format(self._text) diff --git a/networking_odl/tests/unit/__init__.py b/networking_odl/tests/unit/__init__.py deleted file mode 100644 index faed26a53..000000000 --- a/networking_odl/tests/unit/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - - -cfg.CONF.use_stderr = False diff --git a/networking_odl/tests/unit/base_v2.py b/networking_odl/tests/unit/base_v2.py deleted file mode 100644 index 40301ee38..000000000 --- a/networking_odl/tests/unit/base_v2.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
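
MatchJson and MatchWildcard above exist so they can be handed to mock assertion helpers: because they implement __eq__, assert_called_once_with() compares the recorded argument against the decoded JSON or the wildcard pattern instead of requiring an exact string. A self-contained rendering of the same idea using the stdlib json module:

    import json
    import mock


    class MatchJson(object):
        """Compare equal to any string that decodes to the given object."""
        def __init__(self, obj):
            self._obj = obj

        def __eq__(self, json_text):
            return self._obj == json.loads(json_text)

    sendjson = mock.Mock()
    sendjson(json.dumps({"port": {"id": "1234"}}))
    # Passes even though key order or whitespace of the JSON text may differ.
    sendjson.assert_called_once_with(MatchJson({"port": {"id": "1234"}}))
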
- -import mock - -from neutron.tests.unit.plugins.ml2 import test_plugin -from oslo_config import cfg - -from networking_odl.common import client -from networking_odl.journal import journal -from networking_odl.ml2 import mech_driver_v2 -from networking_odl.tests import base -from networking_odl.tests.unit import test_base_db - - -class OpenDaylightConfigBase(test_plugin.Ml2PluginV2TestCase, - test_base_db.ODLBaseDbTestCase): - def setUp(self): - self.useFixture(base.OpenDaylightJournalThreadFixture()) - self.useFixture(base.OpenDaylightRestClientFixture()) - super(OpenDaylightConfigBase, self).setUp() - cfg.CONF.set_override('mechanism_drivers', - ['logger', 'opendaylight_v2'], 'ml2') - cfg.CONF.set_override('extension_drivers', - ['port_security', 'qos'], 'ml2') - self.useFixture(base.OpenDaylightJournalThreadFixture()) - self.thread = journal.OpenDaylightJournalThread() - self.useFixture(base.OpenDaylightJournalThreadFixture()) - - def run_journal_processing(self): - """Cause the journal to process the first pending entry""" - self.thread.sync_pending_entries() - - -class OpenDaylightTestCase(OpenDaylightConfigBase): - def setUp(self): - self.mock_sendjson = mock.patch.object(client.OpenDaylightRestClient, - 'sendjson').start() - super(OpenDaylightTestCase, self).setUp() - self.port_create_status = 'DOWN' - self.mech = mech_driver_v2.OpenDaylightMechanismDriver() - self.mock_sendjson.side_effect = self.check_sendjson - - def check_sendjson(self, method, urlpath, obj): - self.assertFalse(urlpath.startswith("http://")) diff --git a/networking_odl/tests/unit/bgpvpn/__init__.py b/networking_odl/tests/unit/bgpvpn/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/bgpvpn/test_odl_v2.py b/networking_odl/tests/unit/bgpvpn/test_odl_v2.py deleted file mode 100644 index 252803444..000000000 --- a/networking_odl/tests/unit/bgpvpn/test_odl_v2.py +++ /dev/null @@ -1,166 +0,0 @@ -# -# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
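
OpenDaylightTestCase above wires check_sendjson in as the side_effect of the mocked sendjson call, which turns every outgoing call into an assertion. In general, a side_effect function receives the same arguments as the mocked call and can validate or reject them; a minimal sketch:

    import mock


    def check_sendjson(method, urlpath, obj):
        # Reject URL paths that were accidentally built as absolute URLs.
        assert not urlpath.startswith('http://'), urlpath

    sendjson = mock.Mock(side_effect=check_sendjson)
    sendjson('post', 'v2/neutron/ports', {'port': {}})    # passes
    # sendjson('post', 'http://host/ports', {})           # would raise AssertionError
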
-# - -import mock - -from neutron.db import api as neutron_db_api - -from networking_odl.bgpvpn import odl_v2 as driverv2 -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.tests.unit import base_v2 - - -class OpenDaylightBgpvpnDriverTestCase(base_v2.OpenDaylightConfigBase): - - def setUp(self): - super(OpenDaylightBgpvpnDriverTestCase, self).setUp() - self.db_session = neutron_db_api.get_reader_session() - self.driver = driverv2.OpenDaylightBgpvpnDriver(service_plugin=None) - self.context = self._get_mock_context() - - def _get_mock_context(self): - context = mock.Mock() - context.session = self.db_session - return context - - def _get_fake_bgpvpn(self, net=False, router=False): - net_id = [] - router_id = [] - if router: - router_id = ['ROUTER_ID'] - if net: - net_id = ['NET_ID'] - fake_bgpvpn = {'route_targets': '100:1', - 'route_distinguishers': ['100:1'], - 'id': 'BGPVPN_ID', - 'networks': net_id, - 'routers': router_id} - return fake_bgpvpn - - def _get_fake_router_assoc(self): - fake_router_assoc = {'id': 'ROUTER_ASSOC_ID', - 'bgpvpn_id': 'BGPVPN_ID', - 'router_id': 'ROUTER_ID'} - return fake_router_assoc - - def _get_fake_net_assoc(self): - fake_net_assoc = {'id': 'NET_ASSOC_ID', - 'bgpvpn_id': 'BGPVPN_ID', - 'network_id': 'NET_ID'} - return fake_net_assoc - - def _assert_op(self, operation, object_type, data, precommit=True): - rows = sorted(db.get_all_db_rows_by_state(self.db_session, - odl_const.PENDING), - key=lambda x: x.seqnum) - if precommit: - self.db_session.flush() - self.assertEqual(operation, rows[0]['operation']) - self.assertEqual(object_type, rows[0]['object_type']) - self.assertEqual(data['id'], rows[0]['object_uuid']) - else: - self.assertEqual([], rows) - - def test_create_bgpvpn(self): - fake_data = self._get_fake_bgpvpn() - self.driver.create_bgpvpn_precommit(self.context, fake_data) - self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN, - fake_data) - self.run_journal_processing() - self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN, - fake_data, False) - - def test_update_bgpvpn(self): - fake_data = self._get_fake_bgpvpn() - self.driver.update_bgpvpn_precommit(self.context, fake_data) - self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, - fake_data) - self.run_journal_processing() - self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, - fake_data, False) - - def test_delete_bgpvpn(self): - fake_data = self._get_fake_bgpvpn() - self.driver.delete_bgpvpn_precommit(self.context, fake_data) - self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_BGPVPN, - fake_data) - self.run_journal_processing() - self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_BGPVPN, - fake_data, False) - - def test_create_router_assoc(self): - fake_rtr_assoc_data = self._get_fake_router_assoc() - fake_rtr_upd_bgpvpn_data = self._get_fake_bgpvpn(router=True) - with mock.patch.object(self.driver, 'get_router_assocs', - return_value=[]), \ - mock.patch.object(self.driver, 'get_bgpvpn', - return_value=fake_rtr_upd_bgpvpn_data): - self.driver.create_router_assoc_precommit(self.context, - fake_rtr_assoc_data) - self._assert_op(odl_const.ODL_UPDATE, - odl_const.ODL_BGPVPN, - fake_rtr_upd_bgpvpn_data) - self.run_journal_processing() - self._assert_op(odl_const.ODL_UPDATE, - odl_const.ODL_BGPVPN, - fake_rtr_upd_bgpvpn_data, False) - - def test_delete_router_assoc(self): - fake_rtr_assoc_data = self._get_fake_router_assoc() - fake_bgpvpn_data = self._get_fake_bgpvpn(router=False) - with 
mock.patch.object(self.driver, 'get_bgpvpn', - return_value=fake_bgpvpn_data): - self.driver.delete_router_assoc_precommit(self.context, - fake_rtr_assoc_data) - self._assert_op(odl_const.ODL_UPDATE, - odl_const.ODL_BGPVPN, - fake_bgpvpn_data) - self.run_journal_processing() - self._assert_op(odl_const.ODL_UPDATE, - odl_const.ODL_BGPVPN, - fake_bgpvpn_data, False) - - def test_create_net_assoc(self): - fake_net_assoc_data = self._get_fake_net_assoc() - fake_net_upd_bgpvpn_data = self._get_fake_bgpvpn(net=True) - # todo(vivekanandan) add check for case when assoc already exists - with mock.patch.object(self.driver, 'get_bgpvpns', - return_value=[fake_net_upd_bgpvpn_data]): - self.driver.create_net_assoc_precommit(self.context, - fake_net_assoc_data) - self._assert_op(odl_const.ODL_UPDATE, - odl_const.ODL_BGPVPN, - fake_net_upd_bgpvpn_data) - self.run_journal_processing() - self._assert_op(odl_const.ODL_UPDATE, - odl_const.ODL_BGPVPN, - fake_net_upd_bgpvpn_data, False) - - def test_delete_net_assoc(self): - fake_net_assoc_data = self._get_fake_net_assoc() - fake_bgpvpn_data = self._get_fake_bgpvpn(net=False) - with mock.patch.object(self.driver, 'get_bgpvpn', - return_value=fake_bgpvpn_data): - self.driver.delete_net_assoc_precommit(self.context, - fake_net_assoc_data) - self._assert_op(odl_const.ODL_UPDATE, - odl_const.ODL_BGPVPN, - fake_bgpvpn_data) - self.run_journal_processing() - self._assert_op(odl_const.ODL_UPDATE, - odl_const.ODL_BGPVPN, - fake_bgpvpn_data, False) diff --git a/networking_odl/tests/unit/ceilometer/__init__.py b/networking_odl/tests/unit/ceilometer/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/ceilometer/network/__init__.py b/networking_odl/tests/unit/ceilometer/network/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/ceilometer/network/statistics/__init__.py b/networking_odl/tests/unit/ceilometer/network/statistics/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/__init__.py b/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_client.py b/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_client.py deleted file mode 100644 index 5904d8af8..000000000 --- a/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_client.py +++ /dev/null @@ -1,133 +0,0 @@ -# -# Copyright 2017 Ericsson India Global Services Pvt Ltd.. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
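
The BGP VPN tests above repeatedly stub driver lookups with mock.patch.object(..., return_value=...) used as a context manager, so the stub exists only inside the with-block. The pattern in isolation, with the Driver class below as a stand-in:

    import mock


    class Driver(object):
        def get_bgpvpn(self, context, bgpvpn_id):
            raise RuntimeError('would hit the database')

    driver = Driver()
    fake = {'id': 'BGPVPN_ID', 'routers': []}
    with mock.patch.object(driver, 'get_bgpvpn', return_value=fake):
        # Inside the block the stub answers without touching the database.
        assert driver.get_bgpvpn(None, 'BGPVPN_ID') == fake
    # Outside the block the original method is restored.
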
-import mock -from oslo_config import fixture as config_fixture -from oslotest import base -from requests import auth as req_auth -import six -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _ -from ceilometer import service as ceilometer_service -from networking_odl.ceilometer.network.statistics.opendaylight_v2 import client - - -class TestClientHTTPBasicAuth(base.BaseTestCase): - - auth_way = 'basic' - scheme = 'http' - - def setUp(self): - super(TestClientHTTPBasicAuth, self).setUp() - conf = ceilometer_service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(config_fixture.Config(conf)).conf - self.parsed_url = urlparse.urlparse( - 'http://127.0.0.1:8080/controller/statistics?' - 'auth=%s&user=admin&password=admin_pass&' - 'scheme=%s' % (self.auth_way, self.scheme)) - self.params = urlparse.parse_qs(self.parsed_url.query) - self.endpoint = urlparse.urlunparse( - urlparse.ParseResult(self.scheme, - self.parsed_url.netloc, - self.parsed_url.path, - None, None, None)) - odl_params = {'auth': self.params.get('auth')[0], - 'user': self.params.get('user')[0], - 'password': self.params.get('password')[0]} - self.client = client.Client(self.CONF, self.endpoint, odl_params) - - self.resp = mock.MagicMock() - self.get = mock.patch('requests.Session.get', - return_value=self.resp).start() - - self.resp.raw.version = 1.1 - self.resp.status_code = 200 - self.resp.reason = 'OK' - self.resp.headers = {} - self.resp.content = 'dummy' - - def _test_request(self, method, url): - data = method() - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - # check url - real_url = url % {'scheme': self.scheme} - self.assertEqual(real_url, call_args[0]) - - # check auth parameters - auth = call_kwargs.get('auth') - if self.auth_way == 'digest': - self.assertIsInstance(auth, req_auth.HTTPDigestAuth) - else: - self.assertIsInstance(auth, req_auth.HTTPBasicAuth) - self.assertEqual('admin', auth.username) - self.assertEqual('admin_pass', auth.password) - - # check header - self.assertEqual( - {'Accept': 'application/json'}, - call_kwargs['headers']) - - # check return value - self.assertEqual(self.get().json(), data) - - def test_switch_statistics(self): - self._test_request( - self.client.switch_statistics.get_statistics, - '%(scheme)s://127.0.0.1:8080/controller' - '/statistics/flow-capable-switches') - - def test_http_error(self): - self.resp.status_code = 404 - self.resp.reason = 'Not Found' - - try: - self.client.switch_statistics.get_statistics() - self.fail('') - except client.OpenDaylightRESTAPIFailed as e: - self.assertEqual( - _('OpenDaylight API returned %(status)s %(reason)s') % - {'status': self.resp.status_code, - 'reason': self.resp.reason}, - six.text_type(e)) - - def test_other_error(self): - - class _Exception(Exception): - pass - - self.get = mock.patch('requests.Session.get', - side_effect=_Exception).start() - - self.assertRaises(_Exception, - lambda: - self.client.switch_statistics.get_statistics()) - - -class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth): - - auth_way = 'digest' - - -class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth): - - scheme = 'https' - - -class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth): - - scheme = 'https' diff --git a/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.py b/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.py deleted file mode 100644 index 4801f4f83..000000000 --- 
a/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.py +++ /dev/null @@ -1,719 +0,0 @@ -# -# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import abc - -import mock -from oslotest import base -import six -from six.moves.urllib import parse as url_parse - -from ceilometer import service -from networking_odl.ceilometer.network.statistics.opendaylight_v2 import driver -from oslo_utils import uuidutils - -ADMIN_ID = uuidutils.generate_uuid() -PORT_1_TENANT_ID = uuidutils.generate_uuid() -PORT_2_TENANT_ID = uuidutils.generate_uuid() -PORT_1_ID = uuidutils.generate_uuid() -PORT_2_ID = uuidutils.generate_uuid() - - -@six.add_metaclass(abc.ABCMeta) -class _Base(base.BaseTestCase): - - @abc.abstractproperty - def switch_data(self): - pass - - fake_odl_url = url_parse.ParseResult('opendaylight.v2', - 'localhost:8080', - 'controller/statistics', - None, - None, - None) - - fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&' - 'auth=basic') - - def setUp(self): - super(_Base, self).setUp() - self.addCleanup(mock.patch.stopall) - conf = service.prepare_service([], []) - self.driver = driver.OpenDaylightDriver(conf) - - self.get_statistics = mock.patch( - 'networking_odl.ceilometer.network.statistics.opendaylight_v2.' - 'client.SwitchStatisticsAPIClient.get_statistics', - return_value=self.switch_data).start() - - def _test_for_meter(self, meter_name, expected_data): - sample_data = self.driver.get_sample_data(meter_name, - self.fake_odl_url, - self.fake_params, - {}) - - self.assertEqual(expected_data, list(sample_data)) - - -class TestOpenDayLightDriverInvalid(_Base): - - switch_data = {"flow_capable_switches": []} - - def test_not_implemented_meter(self): - sample_data = self.driver.get_sample_data('egg', - self.fake_odl_url, - self.fake_params, - {}) - self.assertIsNone(sample_data) - - sample_data = self.driver.get_sample_data('switch.table.egg', - self.fake_odl_url, - self.fake_params, - {}) - self.assertIsNone(sample_data) - - def test_cache(self): - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.assertEqual(1, self.get_statistics.call_count) - - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.assertEqual(2, self.get_statistics.call_count) - - def test_http_error(self): - - mock.patch( - 'networking_odl.ceilometer.network.statistics.opendaylight_v2.' - 'client.SwitchStatisticsAPIClient.get_statistics', - side_effect=Exception()).start() - - sample_data = self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - {}) - - self.assertEqual(0, len(sample_data)) - - mock.patch( - 'networking_odl.ceilometer.network.statistics.opendaylight_v2.' 
- 'client.SwitchStatisticsAPIClient.get_statistics', - side_effect=[Exception(), self.switch_data]).start() - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - - self.assertIn('network.statistics.opendaylight_v2', cache) - - -class TestOpenDayLightDriverSimple(_Base): - - switch_data = { - "flow_capable_switches": [{ - "packet_in_messages_received": 501, - "packet_out_messages_sent": 300, - "ports": 1, - "flow_datapath_id": 55120148545607, - "tenant_id": ADMIN_ID, - "switch_port_counters": [{ - "bytes_received": 0, - "bytes_sent": 0, - "duration": 600, - "packets_internal_received": 444, - "packets_internal_sent": 0, - "packets_received": 0, - "packets_received_drop": 0, - "packets_received_error": 0, - "packets_sent": 0, - "port_id": 4, - "tenant_id": PORT_1_TENANT_ID, - "uuid": PORT_1_ID - }], - "table_counters": [{ - "flow_count": 90, - "table_id": 0 - }] - }] - } - - def test_meter_switch(self): - expected_data = [ - (1, "55120148545607", - {'controller': 'OpenDaylight_V2'}, - ADMIN_ID), - ] - self._test_for_meter('switch', expected_data) - - def test_meter_switch_ports(self): - expected_data = [ - (1, "55120148545607", - {'controller': 'OpenDaylight_V2'}, - ADMIN_ID) - ] - self._test_for_meter('switch.ports', expected_data) - - def test_meter_switch_port(self): - expected_data = [ - (1, '55120148545607:4', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 4, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port', expected_data) - - def test_meter_switch_port_uptime(self): - expected_data = [ - (600, '55120148545607:4', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 4, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.uptime', expected_data) - - def test_meter_switch_port_receive_packets(self): - expected_data = [ - (0, '55120148545607:4', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 4, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.receive.packets', expected_data) - - def test_meter_switch_port_transmit_packets(self): - expected_data = [ - (0, '55120148545607:4', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 4, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.transmit.packets', expected_data) - - def test_meter_switch_port_receive_bytes(self): - expected_data = [ - (0, '55120148545607:4', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 4, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.receive.bytes', expected_data) - - def test_meter_switch_port_transmit_bytes(self): - expected_data = [ - (0, '55120148545607:4', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 4, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.transmit.bytes', expected_data) - - def test_meter_switch_port_receive_drops(self): - expected_data = [ - (0, '55120148545607:4', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 4, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.receive.drops', expected_data) - - def test_meter_switch_port_receive_errors(self): - expected_data = [ - (0, 
'55120148545607:4', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 4, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.receive.errors', expected_data) - - def test_meter_port(self): - expected_data = [ - (1, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port', expected_data) - - def test_meter_port_uptime(self): - expected_data = [ - (600, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.uptime', expected_data) - - def test_meter_port_receive_packets(self): - expected_data = [ - (0, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.receive.packets', expected_data) - - def test_meter_port_transmit_packets(self): - expected_data = [ - (0, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.transmit.packets', expected_data) - - def test_meter_port_receive_bytes(self): - expected_data = [ - (0, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.receive.bytes', expected_data) - - def test_meter_port_transmit_bytes(self): - expected_data = [ - (0, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.transmit.bytes', expected_data) - - def test_meter_port_receive_drops(self): - expected_data = [ - (0, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.receive.drops', expected_data) - - def test_meter_port_receive_errors(self): - expected_data = [ - (0, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.receive.errors', expected_data) - - def test_meter_switch_table_active_entries(self): - expected_data = [ - (90, "55120148545607:table:0", { - 'switch': '55120148545607', - 'controller': 'OpenDaylight_V2' - }, ADMIN_ID), - ] - self._test_for_meter('switch.table.active.entries', expected_data) - - -class TestOpenDayLightDriverComplex(_Base): - - switch_data = { - "flow_capable_switches": [{ - "packet_in_messages_received": 501, - "packet_out_messages_sent": 300, - "ports": 3, - "flow_datapath_id": 55120148545607, - "switch_port_counters": [{ - "bytes_received": 0, - "bytes_sent": 512, - "duration": 200, - "packets_internal_received": 444, - "packets_internal_sent": 0, - "packets_received": 10, - "packets_received_drop": 0, - "packets_received_error": 0, - "packets_sent": 0, - "port_id": 3, - "tenant_id": ADMIN_ID - }, { - "bytes_received": 9800, - "bytes_sent": 6540, - "duration": 150, - "packets_internal_received": 0, - "packets_internal_sent": 7650, - "packets_received": 20, - "packets_received_drop": 0, - "packets_received_error": 0, - "packets_sent": 0, - "port_id": 2, - "tenant_id": PORT_2_TENANT_ID, - "uuid": PORT_2_ID - }, { - "bytes_received": 100, - "bytes_sent": 840, - "duration": 100, - "packets_internal_received": 984, - "packets_internal_sent": 7950, - "packets_received": 9900, - "packets_received_drop": 1500, - "packets_received_error": 1000, - "packets_sent": 7890, - "port_id": 1, - "tenant_id": PORT_1_TENANT_ID, - "uuid": PORT_1_ID - }], - "table_counters": [{ - "flow_count": 90, - "table_id": 10 - }, { - "flow_count": 80, - "table_id": 20 - }], - "tenant_id": ADMIN_ID - }, { - "packet_in_messages_received": 0, - "packet_out_messages_sent": 0, - "ports": 0, - 
"flow_datapath_id": 55120148545555, - "table_counters": [{ - "flow_count": 5, - "table_id": 10 - }, { - "flow_count": 3, - "table_id": 20 - }], - "tenant_id": ADMIN_ID - }] - } - - def test_meter_switch(self): - expected_data = [ - (1, "55120148545607", { - 'controller': 'OpenDaylight_V2' - }, ADMIN_ID), - (1, "55120148545555", { - 'controller': 'OpenDaylight_V2' - }, ADMIN_ID), - ] - - self._test_for_meter('switch', expected_data) - - def test_meter_switch_ports(self): - expected_data = [ - (3, "55120148545607", { - 'controller': 'OpenDaylight_V2' - }, ADMIN_ID), - (0, "55120148545555", { - 'controller': 'OpenDaylight_V2' - }, ADMIN_ID), - ] - - self._test_for_meter('switch.ports', expected_data) - - def test_meter_switch_port(self): - expected_data = [ - (1, "55120148545607:3", { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 3, - 'switch': '55120148545607' - }, ADMIN_ID), - (1, '55120148545607:2', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 2, - 'neutron_port_id': PORT_2_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - (1, '55120148545607:1', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 1, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port', expected_data) - - def test_meter_switch_port_uptime(self): - expected_data = [ - (200, "55120148545607:3", { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 3, - 'switch': '55120148545607' - }, ADMIN_ID), - (150, '55120148545607:2', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 2, - 'neutron_port_id': PORT_2_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - (100, '55120148545607:1', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 1, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.uptime', expected_data) - - def test_meter_switch_port_receive_packets(self): - expected_data = [ - (10, "55120148545607:3", { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 3, - 'switch': '55120148545607' - }, ADMIN_ID), - (20, '55120148545607:2', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 2, - 'neutron_port_id': PORT_2_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - (9900, '55120148545607:1', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 1, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.receive.packets', expected_data) - - def test_meter_switch_port_transmit_packets(self): - expected_data = [ - (0, "55120148545607:3", { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 3, - 'switch': '55120148545607' - }, ADMIN_ID), - (0, '55120148545607:2', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 2, - 'neutron_port_id': PORT_2_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - (7890, '55120148545607:1', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 1, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.transmit.packets', expected_data) - - def test_meter_switch_port_receive_bytes(self): - expected_data = [ - (0, "55120148545607:3", { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 3, - 'switch': '55120148545607' - }, ADMIN_ID), - (9800, '55120148545607:2', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 2, - 'neutron_port_id': PORT_2_ID, - 'switch': '55120148545607' - }, 
ADMIN_ID), - (100, '55120148545607:1', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 1, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.receive.bytes', expected_data) - - def test_meter_switch_port_transmit_bytes(self): - expected_data = [ - (512, "55120148545607:3", { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 3, - 'switch': '55120148545607' - }, ADMIN_ID), - (6540, '55120148545607:2', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 2, - 'neutron_port_id': PORT_2_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - (840, '55120148545607:1', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 1, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.transmit.bytes', expected_data) - - def test_meter_switch_port_receive_drops(self): - expected_data = [ - (0, "55120148545607:3", { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 3, - 'switch': '55120148545607' - }, ADMIN_ID), - (0, '55120148545607:2', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 2, - 'neutron_port_id': PORT_2_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - (1500, '55120148545607:1', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 1, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.receive.drops', expected_data) - - def test_meter_switch_port_receive_errors(self): - expected_data = [ - (0, "55120148545607:3", { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 3, - 'switch': '55120148545607' - }, ADMIN_ID), - (0, '55120148545607:2', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 2, - 'neutron_port_id': PORT_2_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - (1000, '55120148545607:1', { - 'controller': 'OpenDaylight_V2', - 'port_number_on_switch': 1, - 'neutron_port_id': PORT_1_ID, - 'switch': '55120148545607' - }, ADMIN_ID), - ] - self._test_for_meter('switch.port.receive.errors', expected_data) - - def test_meter_port(self): - expected_data = [ - (1, str(PORT_2_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_2_TENANT_ID), - (1, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port', expected_data) - - def test_meter_port_uptime(self): - expected_data = [ - (150, str(PORT_2_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_2_TENANT_ID), - (100, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.uptime', expected_data) - - def test_meter_port_receive_packets(self): - expected_data = [ - (20, str(PORT_2_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_2_TENANT_ID), - (9900, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.receive.packets', expected_data) - - def test_meter_port_transmit_packets(self): - expected_data = [ - (0, str(PORT_2_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_2_TENANT_ID), - (7890, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.transmit.packets', expected_data) - - def test_meter_port_receive_bytes(self): - expected_data = [ - (9800, str(PORT_2_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_2_TENANT_ID), - (100, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.receive.bytes', 
expected_data) - - def test_meter_port_transmit_bytes(self): - expected_data = [ - (6540, str(PORT_2_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_2_TENANT_ID), - (840, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.transmit.bytes', expected_data) - - def test_meter_port_receive_drops(self): - expected_data = [ - (0, str(PORT_2_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_2_TENANT_ID), - (1500, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.receive.drops', expected_data) - - def test_meter_port_receive_errors(self): - expected_data = [ - (0, str(PORT_2_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_2_TENANT_ID), - (1000, str(PORT_1_ID), - {'controller': 'OpenDaylight_V2'}, - PORT_1_TENANT_ID), - ] - self._test_for_meter('port.receive.errors', expected_data) - - def test_meter_switch_table_active_entries(self): - expected_data = [ - (90, "55120148545607:table:10", { - 'switch': '55120148545607', - 'controller': 'OpenDaylight_V2' - }, ADMIN_ID), - (80, "55120148545607:table:20", { - 'switch': '55120148545607', - 'controller': 'OpenDaylight_V2' - }, ADMIN_ID), - (5, "55120148545555:table:10", { - 'switch': '55120148545555', - 'controller': 'OpenDaylight_V2' - }, ADMIN_ID), - (3, "55120148545555:table:20", { - 'switch': '55120148545555', - 'controller': 'OpenDaylight_V2' - }, ADMIN_ID), - ] - self._test_for_meter('switch.table.active.entries', expected_data) diff --git a/networking_odl/tests/unit/cmd/__init__.py b/networking_odl/tests/unit/cmd/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/cmd/test_set_ovs_hostconfigs.py b/networking_odl/tests/unit/cmd/test_set_ovs_hostconfigs.py deleted file mode 100644 index 9da5dcc12..000000000 --- a/networking_odl/tests/unit/cmd/test_set_ovs_hostconfigs.py +++ /dev/null @@ -1,264 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# pylint: disable=unused-argument, protected-access - -import os -import tempfile - -import mock -from oslo_serialization import jsonutils -import six - -from networking_odl.cmd import set_ovs_hostconfigs -from networking_odl.tests import base -from networking_odl.tests import match - - -class TestSetOvsHostconfigs(base.DietTestCase): - - maxDiff = None - - def test_given_ovs_hostconfigs(self): - # given - self.patch_os_geteuid() - ovs_hostconfigs = { - "ODL L2": {"allowed_network_types": ["a", "b", "c"]}} - args = ['--ovs_hostconfigs=' + jsonutils.dumps(ovs_hostconfigs), - '--bridge_mappings=a:1,b:2'] - execute = self.patch_utils_execute() - conf = set_ovs_hostconfigs.setup_conf(args) - - # when - result = set_ovs_hostconfigs.main(args) - - # then - self.assertEqual(0, result) - execute.assert_has_calls([ - mock.call( - ('ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid')), - mock.call( - ('ovs-vsctl', 'set', 'Open_vSwitch', '', - 'external_ids:odl_os_hostconfig_hostid=' + conf.host)), - mock.call( - ('ovs-vsctl', 'set', 'Open_vSwitch', '', - match.wildcard( - 'external_ids:odl_os_hostconfig_config_odl_l2=*'))), - ]) - - expected = ovs_hostconfigs['ODL L2'] - _, actual_json = execute.call_args_list[2][0][0][4].split("=", 1) - self.assertEqual(match.json(expected), actual_json) - - @mock.patch.object( - set_ovs_hostconfigs, 'DEFAULT_COMMAND_LINE_OPTIONS', - ('--host=my_host',)) - def test_given_no_args(self): - self._test_given_args() - - def test_given_default_values(self): - self._test_given_args([]) - - def test_given_datapath_type_system(self): - self._test_given_args(['--datapath_type=netdev']) - - def test_given_datapath_type_netdev(self): - self._test_given_args(['--datapath_type=netdev']) - - def test_given_datapath_type_vhostuser(self): - self._test_given_args(['--datapath_type=dpdkvhostuser']) - - def test_given_ovs_dpdk(self): - self._test_given_args(['--ovs_dpdk']) - - def test_given_noovs_dpdk(self): - self._test_given_args(['--noovs_dpdk']) - - def test_given_vhostuser_ovs_plug(self): - self._test_given_args(['--vhostuser_ovs_plug']) - - def test_given_novhostuser_ovs_plug(self): - self._test_given_args(['--novhostuser_ovs_plug']) - - def test_given_allowed_network_types(self): - self._test_given_args(['--allowed_network_types=a,b,c']) - - def test_given_local_ip(self): - self._test_given_args(['--local_ip=192.168.1.10', '--host=']) - - def test_given_vhostuser_mode_server(self): - self._test_given_args( - ['--vhostuser_mode=server', '--datapath_type=netdev']) - - def test_given_vhostuser_mode_client(self): - self._test_given_args( - ['--vhostuser_mode=client', '--datapath_type=netdev']) - - def test_given_vhostuser_port_prefix_vhu(self): - self._test_given_args( - ['--vhostuser_port_prefix=vhu', '--datapath_type=netdev']) - - def test_given_vhostuser_port_prefix_socket(self): - self._test_given_args( - ['--vhostuser_port_prefix=socket', '--datapath_type=netdev']) - - def test_given_config_file(self): - file_descriptor, file_path = tempfile.mkstemp() - - try: - os.write(file_descriptor, six.b("# dummy neutron config file\n")) - os.close(file_descriptor) - self._test_given_args(['--config-file={}'.format(file_path)]) - - finally: - os.remove(file_path) - - def _test_given_args(self, *args): - # given - self.patch_os_geteuid() - execute = self.patch_utils_execute() - conf = set_ovs_hostconfigs.setup_conf(*args) - - datapath_type = conf.datapath_type - if datapath_type is None: - if conf.ovs_dpdk is False: - datapath_type = "system" - else: - datapath_type = "netdev" - - # when 
- result = set_ovs_hostconfigs.main(*args) - - # then - self.assertEqual(0, result) - execute.assert_has_calls([ - mock.call( - ('ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid')), - mock.call( - ('ovs-vsctl', 'get', 'Open_vSwitch', '.', 'datapath_types')), - mock.call( - ('ovs-vsctl', 'set', 'Open_vSwitch', '', - 'external_ids:odl_os_hostconfig_hostid=' + conf.host)), - mock.call( - ('ovs-vsctl', 'set', 'Open_vSwitch', '', - match.wildcard( - 'external_ids:odl_os_hostconfig_config_odl_l2=*'))), - ]) - - host_addresses = [conf.host or conf.local_ip] - if datapath_type == "system": - vif_type = "ovs" - vif_details = { - "uuid": '', - "host_addresses": host_addresses, - "has_datapath_type_netdev": False, - "support_vhost_user": False - } - else: # datapath_type in ["system", "netdev"] - vif_type = "vhostuser" - vif_details = { - "uuid": '', - "host_addresses": host_addresses, - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": conf.vhostuser_port_prefix, - "vhostuser_mode": conf.vhostuser_mode, - "vhostuser_ovs_plug": conf.vhostuser_ovs_plug, - "vhostuser_socket_dir": conf.vhostuser_socket_dir, - "vhostuser_socket": os.path.join( - conf.vhostuser_socket_dir, - conf.vhostuser_port_prefix + "$PORT_ID"), - } - - _, actual_json = execute.call_args_list[3][0][0][4].split("=", 1) - expected = { - "allowed_network_types": conf.allowed_network_types, - "bridge_mappings": conf.bridge_mappings, - "datapath_type": datapath_type, - "supported_vnic_types": [ - { - "vif_type": vif_type, - "vnic_type": "normal", - "vif_details": vif_details - } - ] - } - self.assertEqual(match.json(expected), actual_json) - - def test_given_ovs_dpdk_undetected(self): - # given - LOG = self.patch(set_ovs_hostconfigs, 'LOG') - args = ('--ovs_dpdk', '--bridge_mappings=a:1,b:2', '--debug') - conf = set_ovs_hostconfigs.setup_conf(args) - self.patch_os_geteuid() - execute = self.patch_utils_execute(datapath_types="whatever") - - # when - result = set_ovs_hostconfigs.main(args) - - # then - self.assertEqual(1, result) - execute.assert_has_calls([ - mock.call( - ('ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid')), - mock.call( - ('ovs-vsctl', 'get', 'Open_vSwitch', '.', 'datapath_types')), - ]) - LOG.error.assert_called_once_with( - "Fatal error: %s", - match.wildcard( - "--ovs_dpdk option was specified but the 'netdev' " - "datapath_type was not enabled. 
To override use option " - "--datapath_type=netdev"), exc_info=conf.debug) - - def test_bridge_mappings(self): - # when - conf = set_ovs_hostconfigs.setup_conf(('--bridge_mappings=a:1,b:2',)) - self.assertEqual({'a': '1', 'b': '2'}, conf.bridge_mappings) - - def test_allowed_network_types(self): - # when - conf = set_ovs_hostconfigs.setup_conf(('--allowed_network_types=a,b',)) - self.assertEqual(['a', 'b'], conf.allowed_network_types) - - def patch_utils_execute( - self, uuid='', - datapath_types='netdev,dpdkvhostuser,system'): - - def execute(args): - command, method, table, record, value = args - self.assertEqual('ovs-vsctl', command) - self.assertEqual('Open_vSwitch', table) - self.assertIn(method, ['get', 'set']) - if method == 'set': - self.assertEqual(uuid, record) - return "" - elif method == 'get': - self.assertEqual('.', record) - self.assertIn(value, ['_uuid', 'datapath_types']) - if value == '_uuid': - return uuid - elif value == 'datapath_types': - return datapath_types - - self.fail('Unexpected command: ' + repr(args)) - - return self.patch( - set_ovs_hostconfigs.subprocess, "check_output", - side_effect=execute) - - def patch_os_geteuid(self, return_value=0): - return self.patch( - set_ovs_hostconfigs.os, "geteuid", return_value=return_value) diff --git a/networking_odl/tests/unit/common/__init__.py b/networking_odl/tests/unit/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/common/test_callback.py b/networking_odl/tests/unit/common/test_callback.py deleted file mode 100644 index 7cfb3fce3..000000000 --- a/networking_odl/tests/unit/common/test_callback.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) 2013-2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from networking_odl.common import callback -from networking_odl.common import constants as odl_const -from networking_odl.ml2.mech_driver import OpenDaylightDriver -from networking_odl.tests import base - -import mock -from neutron_lib.callbacks import events -from neutron_lib.callbacks import resources -import testtools - - -FAKE_ID = 'fakeid' - - -class ODLCallbackTestCase(testtools.TestCase): - def setUp(self): - self.useFixture(base.OpenDaylightRestClientFixture()) - super(ODLCallbackTestCase, self).setUp() - self._precommit = mock.Mock() - self._postcommit = mock.Mock() - self.sgh = callback.OdlSecurityGroupsHandler(self._precommit, - self._postcommit) - - def _test_callback_precommit_for_sg(self, event, op, sg, sg_id): - plugin_context_mock = mock.Mock() - expected_dict = ({resources.SECURITY_GROUP: sg} - if sg is not None else None) - self.sgh.sg_callback_precommit(resources.SECURITY_GROUP, - event, - None, - context=plugin_context_mock, - security_group=sg, - security_group_id=sg_id) - self._precommit.assert_called_with( - plugin_context_mock, op, - callback._RESOURCE_MAPPING[resources.SECURITY_GROUP], sg_id, - expected_dict, security_group=sg, security_group_id=sg_id) - - def _test_callback_postcommit_for_sg(self, event, op, sg, sg_id): - plugin_context_mock = mock.Mock() - expected_dict = ({resources.SECURITY_GROUP: sg} - if sg is not None else None) - self.sgh.sg_callback_postcommit(resources.SECURITY_GROUP, - event, - None, - context=plugin_context_mock, - security_group=sg, - security_group_id=sg_id) - - self._postcommit.assert_called_with( - plugin_context_mock, op, - callback._RESOURCE_MAPPING[resources.SECURITY_GROUP], sg_id, - expected_dict, security_group=sg, security_group_id=sg_id) - - def test_callback_precommit_sg_create(self): - sg = mock.Mock() - sg_id = sg.get('id') - self._test_callback_precommit_for_sg( - events.PRECOMMIT_CREATE, odl_const.ODL_CREATE, sg, sg_id) - - def test_callback_postcommit_sg_create(self): - sg = mock.Mock() - sg_id = sg.get('id') - self._test_callback_postcommit_for_sg( - events.AFTER_CREATE, odl_const.ODL_CREATE, sg, sg_id) - - def test_callback_precommit_sg_update(self): - self._test_callback_precommit_for_sg( - events.PRECOMMIT_UPDATE, odl_const.ODL_UPDATE, mock.Mock(), - FAKE_ID) - - def test_callback_postcommit_sg_update(self): - self._test_callback_postcommit_for_sg( - events.AFTER_UPDATE, odl_const.ODL_UPDATE, mock.Mock(), FAKE_ID) - - def test_callback_precommit_sg_delete(self): - self._test_callback_precommit_for_sg( - events.PRECOMMIT_DELETE, odl_const.ODL_DELETE, None, FAKE_ID) - - def test_callback_postcommit_sg_delete(self): - self._test_callback_postcommit_for_sg( - events.AFTER_DELETE, odl_const.ODL_DELETE, None, FAKE_ID) - - @mock.patch.object(OpenDaylightDriver, 'sync_from_callback') - def _test_callback_precommit_for_sg_rules( - self, event, op, sg_rule, sg_rule_id, sfc): - plugin_context_mock = mock.Mock() - expected_dict = ({resources.SECURITY_GROUP_RULE: sg_rule} - if sg_rule is not None else None) - self.sgh.sg_callback_precommit(resources.SECURITY_GROUP_RULE, - event, - None, - context=plugin_context_mock, - security_group_rule=sg_rule, - security_group_rule_id=sg_rule_id) - self._precommit.assert_called_with( - plugin_context_mock, op, - callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE], - sg_rule_id, expected_dict, security_group_rule=sg_rule, - security_group_rule_id=sg_rule_id) - - @mock.patch.object(OpenDaylightDriver, 'sync_from_callback') - def _test_callback_postcommit_for_sg_rules( - self, event, 
op, sg_rule, sg_rule_id, sfc): - plugin_context_mock = mock.Mock() - expected_dict = ({resources.SECURITY_GROUP_RULE: sg_rule} - if sg_rule is not None else None) - self.sgh.sg_callback_postcommit(resources.SECURITY_GROUP_RULE, - event, - None, - context=plugin_context_mock, - security_group_rule=sg_rule, - security_group_rule_id=sg_rule_id) - - self._postcommit.assert_called_with( - plugin_context_mock, op, - callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE], - sg_rule_id, expected_dict, - security_group_rule=sg_rule, security_group_rule_id=sg_rule_id, - ) - - def test_callback_precommit_sg_rules_create(self): - rule = mock.Mock() - rule_id = rule.get('id') - self._test_callback_precommit_for_sg_rules( - events.PRECOMMIT_CREATE, odl_const.ODL_CREATE, rule, rule_id) - - def test_callback_postcommit_sg_rules_create(self): - rule = mock.Mock() - rule_id = rule.get('id') - self._test_callback_postcommit_for_sg_rules( - events.AFTER_CREATE, odl_const.ODL_CREATE, rule, rule_id) - - def test_callback_precommit_sg_rules_delete(self): - self._test_callback_precommit_for_sg_rules( - events.PRECOMMIT_DELETE, odl_const.ODL_DELETE, None, FAKE_ID) - - def test_callback_postcommit_sg_rules_delete(self): - self._test_callback_postcommit_for_sg_rules( - events.AFTER_DELETE, odl_const.ODL_DELETE, None, FAKE_ID) diff --git a/networking_odl/tests/unit/common/test_client.py b/networking_odl/tests/unit/common/test_client.py deleted file mode 100644 index 1fc44efe9..000000000 --- a/networking_odl/tests/unit/common/test_client.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) 2015 Intel Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg - -from networking_odl.common import client - -from neutron.tests import base - - -class ClientTestCase(base.DietTestCase): - def setUp(self): - cfg.CONF.set_override('mechanism_drivers', - ['logger', 'opendaylight_v2'], - 'ml2') - super(ClientTestCase, self).setUp() - - def _set_config(self, url='http://127.0.0.1:9999', username='someuser', - password='somepass'): - cfg.CONF.set_override('url', url, 'ml2_odl') - cfg.CONF.set_override('username', username, 'ml2_odl') - cfg.CONF.set_override('password', password, 'ml2_odl') - - def _test_missing_config(self, **kwargs): - self._set_config(**kwargs) - self.assertRaisesRegex(cfg.RequiredOptError, - 'value required for option \w+ in group ' - '\[ml2_odl\]', - client.OpenDaylightRestClient._check_opt, - cfg.CONF.ml2_odl.url) - - def test_valid_config(self): - self._set_config() - client.OpenDaylightRestClient._check_opt(cfg.CONF.ml2_odl.url) - - def test_missing_url_raises_exception(self): - self._test_missing_config(url=None) - - def test_missing_username_raises_exception(self): - self._test_missing_config(username=None) - - def test_missing_password_raises_exception(self): - self._test_missing_config(password=None) diff --git a/networking_odl/tests/unit/common/test_filters.py b/networking_odl/tests/unit/common/test_filters.py deleted file mode 100644 index 8fe5d1a32..000000000 --- a/networking_odl/tests/unit/common/test_filters.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2016 Intel Corp. Isaku Yamahata -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.tests import base -from neutron_lib import constants as n_const - -from networking_odl.common import filters - - -class TestFilters(base.DietTestCase): - def _check_id(self, resource, project_id): - filters._populate_project_id_and_tenant_id(resource) - self.assertIn(resource['project_id'], project_id) - self.assertIn(resource['tenant_id'], project_id) - - def _test_populate_project_id_and_tenant_id(self, project_id): - self._check_id({'project_id': project_id}, project_id) - self._check_id({'tenant_id': project_id}, project_id) - self._check_id({'project_id': project_id, - 'tenant_id': project_id}, project_id) - - def test_populate_project_id_and_tenant_id_with_id(self): - self._test_populate_project_id_and_tenant_id( - '01234567-890a-bcde-f012-3456789abcde') - self._test_populate_project_id_and_tenant_id("") - - def test_populate_project_id_and_tenant_id_without_id(self): - resource = {} - filters._populate_project_id_and_tenant_id(resource) - self.assertNotIn('project_id', resource) - self.assertNotIn('tenant_id', resource) - - def test_populate_project_id_and_tenant_id_with_router(self): - # test case for OpenDaylightL3RouterPlugin.delete_router() - # it passes data as dependency_list as list, not dict - resource0 = ['gw_port_id'] - resource1 = resource0[:] - filters._populate_project_id_and_tenant_id(resource1) - self.assertEqual(resource0, resource1) - - def test_populate_project_id_and_tenant_id_with_floatingip(self): - # test case for OpenDaylightL3RouterPlugin.delete_floatingip() - # it passes data as dependency_list as list, not dict. - resource0 = ['router_uuid', 'floatingip_uuid'] - resource1 = resource0[:] - filters._populate_project_id_and_tenant_id(resource1) - self.assertEqual(resource0, resource1) - - def test_sgrule_scrub_unknown_protocol_name(self): - KNOWN_PROTO_NAMES = (n_const.PROTO_NAME_TCP, - n_const.PROTO_NAME_UDP, - n_const.PROTO_NAME_ICMP, - n_const.PROTO_NAME_IPV6_ICMP_LEGACY) - for protocol_name in KNOWN_PROTO_NAMES: - self.assertEqual( - protocol_name, - filters._sgrule_scrub_unknown_protocol_name(protocol_name)) - - self.assertEqual( - n_const.PROTO_NUM_AH, - filters._sgrule_scrub_unknown_protocol_name(n_const.PROTO_NAME_AH)) - self.assertEqual("1", filters._sgrule_scrub_unknown_protocol_name("1")) - - def test_sgrule_scrub_icmpv6_name(self): - for protocol_name in (n_const.PROTO_NAME_ICMP, - n_const.PROTO_NAME_IPV6_ICMP, - n_const.PROTO_NAME_IPV6_ICMP_LEGACY): - sgrule = {'ethertype': n_const.IPv6, - 'protocol': protocol_name} - filters._sgrule_scrub_icmpv6_name(sgrule) - self.assertEqual(n_const.PROTO_NAME_IPV6_ICMP_LEGACY, - sgrule['protocol']) diff --git a/networking_odl/tests/unit/common/test_lightweight_testing.py b/networking_odl/tests/unit/common/test_lightweight_testing.py deleted file mode 100644 index 1175a8fb7..000000000 --- a/networking_odl/tests/unit/common/test_lightweight_testing.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (c) 2015 Intel Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from networking_odl.common import lightweight_testing as lwt -from networking_odl.tests import base as odl_base - -from neutron.tests import base - - -class LightweightTestingTestCase(base.DietTestCase): - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - super(LightweightTestingTestCase, self).setUp() - - def test_create_client_with_lwt_enabled(self): - """Have to do the importation here, otherwise there will be a loop""" - from networking_odl.common import client as odl_client - odl_client.cfg.CONF.set_override('enable_lightweight_testing', - True, 'ml2_odl') - # DietTestCase does not automatically cleans configuration overrides - self.addCleanup(odl_client.cfg.CONF.reset) - - client = odl_client.OpenDaylightRestClient.create_client() - self.assertIsInstance(client, lwt.OpenDaylightLwtClient) - - def test_create_client_with_lwt_disabled(self): - """Have to do the importation here, otherwise there will be a loop""" - from networking_odl.common import client as odl_client - odl_client.cfg.CONF.set_override('enable_lightweight_testing', - False, 'ml2_odl') - # DietTestCase does not automatically cleans configuration overrides - self.addCleanup(odl_client.cfg.CONF.reset) - - client = odl_client.OpenDaylightRestClient.create_client() - self.assertIsInstance(client, odl_client.OpenDaylightRestClient) - - @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, - {'networks': {}}, clear=True) - def test_post_single_resource(self): - client = lwt.OpenDaylightLwtClient.create_client() - fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'} - obj = {'networks': fake_network1} - response = client.sendjson('post', 'networks', obj) - self.assertEqual(lwt.NO_CONTENT, response.status_code) - lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict - self.assertEqual(lwt_dict['networks']['fakeid1'], - fake_network1) - - @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, - {'networks': {}}, clear=True) - def test_post_multiple_resources(self): - client = lwt.OpenDaylightLwtClient.create_client() - fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'} - fake_network2 = {'id': 'fakeid2', 'name': 'fake_network2'} - obj = {'networks': [fake_network1, fake_network2]} - response = client.sendjson('post', 'networks', obj) - self.assertEqual(lwt.NO_CONTENT, response.status_code) - lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict - self.assertEqual(lwt_dict['networks']['fakeid1'], - fake_network1) - self.assertEqual(lwt_dict['networks']['fakeid2'], - fake_network2) - - @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, - {'ports': {'fakeid1': {'id': 'fakeid1', - 'name': 'fake_port1'}}}, - clear=True) - def test_get_single_resource(self): - client = lwt.OpenDaylightLwtClient.create_client() - url_path = 'ports/fakeid1' - response = client.sendjson('get', url_path, None) - self.assertEqual(lwt.OK, response.status_code) - res = response.json() - # For single resource, the return value is a dict - self.assertEqual(res['port']['name'], 'fake_port1') - - @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, - {'ports': {'fakeid1': {'id': 'fakeid1', - 'name': 'fake_port1'}, - 'fakeid2': {'id': 'fakeid2', - 'name': 'fake_port2'}}}, - clear=True) - def test_get_multiple_resources(self): - client = lwt.OpenDaylightLwtClient.create_client() - url_path = 'ports/' - response = client.sendjson('get', url_path, None) - self.assertEqual(lwt.OK, response.status_code) - res = response.json() - for port in res: - self.assertIn(port['port']['name'], - ['fake_port1', 'fake_port2']) - - 
@mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, - {'subnets': {'fakeid1': {'id': 'fakeid1', - 'name': 'fake_subnet1'}}}, - clear=True) - def test_put_single_resource(self): - client = lwt.OpenDaylightLwtClient.create_client() - changed = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'} - obj = {'subnets': changed} - - url_path = 'subnets/fakeid1' - response = client.sendjson('put', url_path, obj) - self.assertEqual(lwt.NO_CONTENT, response.status_code) - lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict - self.assertEqual('fake_subnet1_changed', - lwt_dict['subnets']['fakeid1']['name']) - - """Check the client does not change the parameter""" - self.assertEqual('fakeid1', changed['id']) - self.assertEqual('fake_subnet1_changed', changed['name']) - - @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, - {'subnets': {'fakeid1': {'id': 'fakeid1', - 'name': 'fake_subnet1'}, - 'fakeid2': {'id': 'fakeid2', - 'name': 'fake_subnet2'}}}, - clear=True) - def test_put_multiple_resources(self): - client = lwt.OpenDaylightLwtClient.create_client() - changed1 = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'} - changed2 = {'id': 'fakeid2', 'name': 'fake_subnet2_changed'} - obj = {'subnets': [changed1, changed2]} - - url_path = 'subnets/' - response = client.sendjson('put', url_path, obj) - self.assertEqual(lwt.NO_CONTENT, response.status_code) - lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict - self.assertEqual('fake_subnet1_changed', - lwt_dict['subnets']['fakeid1']['name']) - self.assertEqual('fake_subnet2_changed', - lwt_dict['subnets']['fakeid2']['name']) - - @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, - {'networks': {'fakeid1': {'id': 'fakeid1', - 'name': 'fake_network1'}}}, - clear=True) - def test_delete_single_resource(self): - client = lwt.OpenDaylightLwtClient.create_client() - url_path = 'networks/fakeid1' - response = client.sendjson('delete', url_path, None) - self.assertEqual(lwt.NO_CONTENT, response.status_code) - lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict - network = lwt_dict['networks'].get('fakeid1') - self.assertIsNone(network) - - @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, - {'networks': {'fakeid1': {'id': 'fakeid1', - 'name': 'fake_network1'}, - 'fakeid2': {'id': 'fakeid2', - 'name': 'fake_network2'}}}, - clear=True) - def test_delete_multiple_resources(self): - client = lwt.OpenDaylightLwtClient.create_client() - network1 = {'id': 'fakeid1'} - network2 = {'id': 'fakeid2'} - obj = {'networks': [network1, network2]} - response = client.sendjson('delete', 'networks/', obj) - self.assertEqual(lwt.NO_CONTENT, response.status_code) - lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict - network = lwt_dict['networks'].get('fakeid1') - self.assertIsNone(network) - network = lwt_dict['networks'].get('fakeid2') - self.assertIsNone(network) diff --git a/networking_odl/tests/unit/common/test_odl_features.py b/networking_odl/tests/unit/common/test_odl_features.py deleted file mode 100644 index ac47987b5..000000000 --- a/networking_odl/tests/unit/common/test_odl_features.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslo_config import cfg -from oslo_serialization import jsonutils -from requests import exceptions - -from networking_odl.common.client import OpenDaylightRestClient -from networking_odl.common import odl_features -from networking_odl.tests import base - - -class TestOdlFeatures(base.DietTestCase): - """Basic tests for odl_features""" - - feature_json = """{"features": {"feature": - [{"service-provider-feature": - "neutron-extensions:operational-port-status"}]}}""" - - def setUp(self): - self.features_fixture = base.OpenDaylightFeaturesFixture() - self.useFixture(self.features_fixture) - super(TestOdlFeatures, self).setUp() - self.features_fixture.mock_odl_features_init.stop() - - @mock.patch.object(OpenDaylightRestClient, 'request') - def test_fetch_exception(self, mocked_client): - mocked_client.side_effect = exceptions.ConnectionError() - self.assertTrue(None == odl_features._fetch_features()) - - @mock.patch.object(OpenDaylightRestClient, 'request') - def test_fetch_404(self, mocked_client): - mocked_client.return_value = mock.MagicMock(status_code=404) - self.assertTrue(set() == odl_features._fetch_features()) - - @mock.patch.object(OpenDaylightRestClient, 'request') - def test_fetch_400(self, mocked_client): - mocked_client.return_value = mock.MagicMock(status_code=400) - self.assertTrue(set() == odl_features._fetch_features()) - - @mock.patch.object(OpenDaylightRestClient, 'request') - def test_fetch_500(self, mocked_client): - mocked_client.return_value = mock.MagicMock(status_code=500) - self.assertTrue(None == odl_features._fetch_features()) - - @mock.patch.object(OpenDaylightRestClient, 'request') - def test_init(self, mocked_client): - response = mock.MagicMock() - response.status_code = 200 - response.json = mock.MagicMock( - return_value=jsonutils.loads(self.feature_json)) - mocked_client.return_value = response - - odl_features.init() - self.assertTrue(odl_features.has(odl_features.OPERATIONAL_PORT_STATUS)) - - def test_init_from_config(self): - cfg.CONF.set_override('odl_features', 'thing1,thing2', 'ml2_odl') - odl_features.init() - self.assertTrue(odl_features.has('thing1')) - self.assertTrue(odl_features.has('thing2')) diff --git a/networking_odl/tests/unit/common/test_postcommit.py b/networking_odl/tests/unit/common/test_postcommit.py deleted file mode 100644 index bd739ce3d..000000000 --- a/networking_odl/tests/unit/common/test_postcommit.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from networking_odl.common import postcommit -from neutron.tests import base - - -class BaseTest(object): - def create_resource1_postcommit(self): - pass - - update_resource1_postcommit = create_resource1_postcommit - delete_resource1_postcommit = create_resource1_postcommit - update_resource2_postcommit = create_resource1_postcommit - delete_resource2_postcommit = create_resource1_postcommit - create_resource2_postcommit = create_resource1_postcommit - - -class TestPostCommit(base.DietTestCase): - def _get_class(self, *args): - @postcommit.add_postcommit(*args) - class TestClass(BaseTest): - pass - - return TestClass - - def _get_methods_name(self, resources): - ops = ['create', 'update', 'delete'] - m_names = [op + '_' + resource + '_postcommit' for op in ops - for resource in resources] - - return m_names - - def test_with_one_resource(self): - cls = self._get_class('resource1') - m_names = self._get_methods_name(['resource1']) - for m_name in m_names: - self.assertEqual(m_name, getattr(cls, m_name).__name__) - - def test_with_two_resource(self): - cls = self._get_class('resource1', 'resource2') - m_names = self._get_methods_name(['resource1', 'resource2']) - for m_name in m_names: - self.assertEqual(m_name, getattr(cls, m_name).__name__) - - def test_with_two_resource_create_defined_for_one(self): - m_names = self._get_methods_name(['resource1', 'resource2']) - - @postcommit.add_postcommit('resource1', 'resource2') - class TestClass(BaseTest): - def create_resource1_postcommit(self): - pass - - create_resource1_postcommit.__name__ = 'test_method' - - for m_name in m_names[1:]: - self.assertEqual(m_name, getattr(TestClass, m_name).__name__) - - self.assertEqual('test_method', - getattr(TestClass, m_names[0]).__name__) diff --git a/networking_odl/tests/unit/common/test_utils.py b/networking_odl/tests/unit/common/test_utils.py deleted file mode 100644 index e15c99ff3..000000000 --- a/networking_odl/tests/unit/common/test_utils.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.tests import base - -from networking_odl.common import utils - - -class TestUtils(base.DietTestCase): - - # TODO(manjeets) remove this test once neutronify is - # consolidated with make_plural - def test_neutronify(self): - self.assertEqual('a-b-c', utils.neutronify('a_b_c')) - - def test_neutronify_empty(self): - self.assertEqual('', utils.neutronify('')) - - def test_make_url_object_in_resource_map(self): - url_object = utils.make_url_object('policy') - self.assertEqual('qos/policies', url_object) - - def test_make_url_object_conversion(self): - self.assertEqual('networks', utils.make_url_object('network')) - self.assertEqual('l2-gateways', utils.make_url_object('l2_gateway')) diff --git a/networking_odl/tests/unit/common/test_websocket_client.py b/networking_odl/tests/unit/common/test_websocket_client.py deleted file mode 100644 index cf53bde6f..000000000 --- a/networking_odl/tests/unit/common/test_websocket_client.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg -from oslo_serialization import jsonutils -import requests -import websocket - -from networking_odl.common.client import OpenDaylightRestClient as odl_client -from networking_odl.common import websocket_client as wsc -from networking_odl.tests import base - - -class TestWebsocketClient(base.DietTestCase): - """Test class for Websocket Client.""" - - FAKE_WEBSOCKET_STREAM_NAME_DATA = { - 'output': { - 'stream-name': 'data-change-event-subscription/neutron:neutron/' - 'neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE' - }} - - INVALID_WEBSOCKET_STREAM_NAME_DATA = { - 'outputs': { - 'stream-name': 'data-change-event-subscription/neutron:neutron/' - 'neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE' - }} - - FAKE_WEBSOCKET_SUBS_DATA = { - 'location': 'ws://localhost:8185/data-change-event-subscription/' - 'neutron:neutron/neutron:hostconfigs/datastore=OPERATIONAL' - '/scope=SUBTREE'} - - mock_callback_handler = mock.MagicMock() - - def setUp(self): - """Setup test.""" - self.useFixture(base.OpenDaylightRestClientFixture()) - mock.patch.object(wsc.OpenDaylightWebsocketClient, - 'start_odl_websocket_thread').start() - super(TestWebsocketClient, self).setUp() - - self.mgr = wsc.OpenDaylightWebsocketClient.odl_create_websocket( - "http://localhost:8080/", - "restconf/operational/neutron:neutron/hostconfigs", - wsc.ODL_OPERATIONAL_DATASTORE, wsc.ODL_NOTIFICATION_SCOPE_SUBTREE, - TestWebsocketClient.mock_callback_handler - ) - - def _get_raised_response(self, status_code): - response = requests.Response() - response.status_code = status_code - return response - - @classmethod - def _get_mock_request_response(cls, status_code): - response = mock.Mock(status_code=status_code) - response.raise_for_status = mock.Mock() if status_code < 400 else ( - mock.Mock(side_effect=requests.exceptions.HTTPError())) - return response - - @mock.patch.object(odl_client, 'sendjson') - def 
test_subscribe_websocket_sendjson(self, mocked_sendjson): - request_response = self._get_raised_response(401) - mocked_sendjson.return_value = request_response - stream_url = self.mgr._subscribe_websocket() - self.assertIsNone(stream_url) - - request_response = self._get_raised_response(400) - mocked_sendjson.return_value = request_response - self.assertRaises(ValueError, self.mgr._subscribe_websocket) - - request_response = self._get_mock_request_response(200) - request_response.json = mock.Mock( - return_value=(TestWebsocketClient. - INVALID_WEBSOCKET_STREAM_NAME_DATA)) - mocked_sendjson.return_value = request_response - self.assertRaises(ValueError, self.mgr._subscribe_websocket) - - request_response = self._get_mock_request_response(200) - request_response.json = mock.Mock(return_value={""}) - mocked_sendjson.return_value = request_response - self.assertIsNone(self.mgr._subscribe_websocket()) - - @mock.patch.object(odl_client, 'get') - def test_subscribe_websocket_get(self, mocked_get): - request_response = self._get_raised_response(404) - mocked_get.return_value = request_response - self.assertRaises(ValueError, self.mgr._subscribe_websocket) - - request_response = self._get_raised_response(400) - mocked_get.return_value = request_response - stream_url = self.mgr._subscribe_websocket() - self.assertIsNone(stream_url) - - request_response = self._get_raised_response(401) - mocked_get.return_value = request_response - stream_url = self.mgr._subscribe_websocket() - self.assertIsNone(stream_url) - - @mock.patch.object(odl_client, 'sendjson') - @mock.patch.object(odl_client, 'get') - def test_subscribe_websocket(self, mocked_get, mocked_sendjson): - request_response = self._get_mock_request_response(200) - request_response.json = mock.Mock( - return_value=TestWebsocketClient.FAKE_WEBSOCKET_STREAM_NAME_DATA) - mocked_sendjson.return_value = request_response - - request_response = self._get_mock_request_response(200) - request_response.headers = TestWebsocketClient.FAKE_WEBSOCKET_SUBS_DATA - mocked_get.return_value = request_response - stream_url = self.mgr._subscribe_websocket() - - EXPECTED_OUTPUT = ( - "ws://localhost:8185/" + - "data-change-event-subscription/neutron:neutron/" + - "neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE") - self.assertEqual(EXPECTED_OUTPUT, stream_url) - - @mock.patch.object(websocket, 'create_connection') - def test_create_connection(self, mock_create_connection): - mock_create_connection.return_value = None - return_value = self.mgr._socket_create_connection("localhost") - self.assertIsNone(return_value) - - def test_run_websocket_thread(self): - self.mgr._connect_ws = mock.MagicMock(return_value=None) - cfg.CONF.ml2_odl.restconf_poll_interval = 0 - self.mgr.run_websocket_thread(True) - assert self.mgr._connect_ws.call_count == 1 - - self.mgr.set_exit_flag(False) - self.mgr._connect_ws = mock.MagicMock(return_value=1) - with mock.patch.object(wsc, 'LOG') as mock_log: - self.mgr.run_websocket_thread(True) - self.assertTrue(mock_log.error.called) - - self.mgr.set_exit_flag(False) - ws = mock.MagicMock() - ws.recv.return_value = None - self.mgr._connect_ws = mock.MagicMock(return_value=ws) - self.mgr._close_ws = mock.MagicMock(return_value=None) - with mock.patch.object(wsc, 'LOG') as mock_log: - self.mgr.run_websocket_thread(True) - self.assertTrue(mock_log.warning.called) - - self.mgr.set_exit_flag(False) - ws = mock.MagicMock() - ws.recv.return_value = "Test Data" - self.mgr._connect_ws = mock.MagicMock(return_value=ws) - self.mgr._close_ws = 
mock.MagicMock(return_value=None) - self.mgr.run_websocket_thread(True) - TestWebsocketClient.mock_callback_handler.assert_called_once() - - -class TestEventDataParser(base.DietTestCase): - """Test class for Websocket Client.""" - - # test data port status payload - sample_port_status_payload = """{"notification": - {"xmlns":"urn:ietf:params:xml:ns:netconf:notification:1.0", - "data-changed-notification": { "xmlns": - "urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote", - "data-change-event": - [{"path": - "/neutron:neutron/neutron:ports/neutron:port\ - [neutron:uuid='a51e439f-4d02-4e76-9b0d-08f6c08855dd']\ - /neutron:uuid", - "data":{"uuid":{"xmlns":"urn:opendaylight:neutron", - "content":"a51e439f-4d02-4e76-9b0d-08f6c08855dd"}}, - "operation":"created"}, - {"path": - "/neutron:neutron/neutron:ports/neutron:port\ - [neutron:uuid='a51e439f-4d02-4e76-9b0d-08f6c08855dd']\ - /neutron:status", - "data":{"status":{"xmlns":"urn:opendaylight:neutron", - "content":"ACTIVE"}}, - "operation":"created"} - ]}, - "eventTime":"2017-03-23T09:28:55.379-07:00"}}""" - - sample_port_status_payload_one_item = """{"notification": - {"xmlns": "urn:ietf:params:xml:ns:netconf:notification:1.0", - "data-changed-notification": { - "data-change-event": { - "data": { "status": { - "content": "ACTIVE", - "xmlns": "urn:opendaylight:neutron" - }}, - "operation": "updated", - "path": "/neutron:neutron/neutron:ports/neutron:port\ - [neutron:uuid='d6e6335d-9568-4949-aef1-4107e34c5f28']\ - /neutron:status" - }, - "xmlns": "urn:opendaylight:params:xml:ns:yang:controller:md:\ - sal:remote" - }, - "eventTime": "2017-02-22T02:27:32+02:00" }}""" - - def setUp(self): - """Setup test.""" - super(TestEventDataParser, self).setUp() - - def test_get_item_port_status_payload(self): - sample = jsonutils.loads(self.sample_port_status_payload) - expected_events = (sample - [wsc.EventDataParser.NOTIFICATION_TAG] - [wsc.EventDataParser.DC_NOTIFICATION_TAG] - [wsc.EventDataParser.DC_EVENT_TAG]) - event_0 = expected_events[0] - event = wsc.EventDataParser.get_item(self.sample_port_status_payload) - operation, path, data = next(event).get_fields() - - self.assertEqual(event_0.get('operation'), operation) - self.assertEqual(event_0.get('path'), path) - self.assertEqual(event_0.get('data'), data) - - uuid = wsc.EventDataParser.extract_field(path, "neutron:uuid") - self.assertEqual("'a51e439f-4d02-4e76-9b0d-08f6c08855dd'", uuid) - - uuid = wsc.EventDataParser.extract_field(path, "invalidkey") - self.assertIsNone(uuid) - - def test_get_item_port_status_payload_one_item(self): - sample = jsonutils.loads(self.sample_port_status_payload_one_item) - expected_events = (sample - [wsc.EventDataParser.NOTIFICATION_TAG] - [wsc.EventDataParser.DC_NOTIFICATION_TAG] - [wsc.EventDataParser.DC_EVENT_TAG]) - event = (wsc.EventDataParser. 
- get_item(self.sample_port_status_payload_one_item)) - operation, path, data = next(event).get_fields() - - self.assertEqual(expected_events.get('operation'), operation) - self.assertEqual(expected_events.get('path'), path) - self.assertEqual(expected_events.get('data'), data) - - uuid = wsc.EventDataParser.extract_field(path, "neutron:uuid") - self.assertEqual("'d6e6335d-9568-4949-aef1-4107e34c5f28'", uuid) diff --git a/networking_odl/tests/unit/db/__init__.py b/networking_odl/tests/unit/db/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/db/test_db.py b/networking_odl/tests/unit/db/test_db.py deleted file mode 100644 index 7795dc8b0..000000000 --- a/networking_odl/tests/unit/db/test_db.py +++ /dev/null @@ -1,321 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from datetime import timedelta - -import mock - -from sqlalchemy.orm import exc - -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.db import models -from networking_odl.tests.unit import test_base_db - - -class DbTestCase(test_base_db.ODLBaseDbTestCase): - - UPDATE_ROW = [odl_const.ODL_NETWORK, 'id', odl_const.ODL_UPDATE, - {'test': 'data'}] - - def setUp(self): - super(DbTestCase, self).setUp() - - def _update_row(self, row): - self.db_session.merge(row) - self.db_session.flush() - - def _test_validate_updates(self, first_entry, second_entry, expected_deps, - state=None): - db.create_pending_row(self.db_session, *first_entry) - if state: - row = db.get_all_db_rows(self.db_session)[0] - row.state = state - self._update_row(row) - - deps = db.get_pending_or_processing_ops( - self.db_session, second_entry[1], second_entry[2]) - self.assertEqual(expected_deps, len(deps) != 0) - - def _test_retry_count(self, retry_num, max_retry, - expected_retry_count, expected_state): - # add new pending row - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - - # update the row with the requested retry_num - row = db.get_all_db_rows(self.db_session)[0] - row.retry_count = retry_num - 1 - db.update_pending_db_row_retry(self.db_session, row, max_retry) - - # validate the state and the retry_count of the row - row = db.get_all_db_rows(self.db_session)[0] - self.assertEqual(expected_state, row.state) - self.assertEqual(expected_retry_count, row.retry_count) - - def _test_update_row_state(self, from_state, to_state): - # add new pending row - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - - row = db.get_all_db_rows(self.db_session)[0] - for state in [from_state, to_state]: - # update the row state - db.update_db_row_state(self.db_session, row, state) - - # validate the new state - row = db.get_all_db_rows(self.db_session)[0] - self.assertEqual(state, row.state) - - def test_updates_same_object_uuid(self): - self._test_validate_updates(self.UPDATE_ROW, self.UPDATE_ROW, True) - - def test_validate_updates_different_object_uuid(self): - other_row = list(self.UPDATE_ROW) - 
other_row[1] += 'a' - self._test_validate_updates(self.UPDATE_ROW, other_row, False) - - def test_validate_updates_different_object_type(self): - other_row = list(self.UPDATE_ROW) - other_row[0] = odl_const.ODL_PORT - other_row[1] += 'a' - self._test_validate_updates(self.UPDATE_ROW, other_row, False) - - def test_check_for_older_ops_processing(self): - self._test_validate_updates(self.UPDATE_ROW, self.UPDATE_ROW, True, - state=odl_const.PROCESSING) - - def test_get_oldest_pending_row_none_when_no_rows(self): - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - self.assertIsNone(row) - - def _test_get_oldest_pending_row_none(self, state): - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - row = db.get_all_db_rows(self.db_session)[0] - row.state = state - self._update_row(row) - - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - self.assertIsNone(row) - - def test_get_oldest_pending_row_none_when_row_processing(self): - self._test_get_oldest_pending_row_none(odl_const.PROCESSING) - - def test_get_oldest_pending_row_none_when_row_failed(self): - self._test_get_oldest_pending_row_none(odl_const.FAILED) - - def test_get_oldest_pending_row_none_when_row_completed(self): - self._test_get_oldest_pending_row_none(odl_const.COMPLETED) - - def test_get_oldest_pending_row(self): - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - self.assertIsNotNone(row) - self.assertEqual(odl_const.PROCESSING, row.state) - - def test_get_oldest_pending_row_order(self): - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - older_row = db.get_all_db_rows(self.db_session)[0] - older_row.last_retried -= timedelta(minutes=1) - self._update_row(older_row) - - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - self.assertEqual(older_row, row) - - def test_get_oldest_pending_row_when_conflict(self): - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - update_mock = mock.MagicMock( - side_effect=(exc.StaleDataError, mock.DEFAULT)) - - # Mocking is mandatory to achieve a deadlock regardless of the DB - # backend being used when running the tests - with mock.patch.object(db, 'update_db_row_state', new=update_mock): - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - self.assertIsNotNone(row) - - self.assertEqual(2, update_mock.call_count) - - def _test_get_oldest_pending_row_with_dep(self, dep_state): - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - parent_row = db.get_all_db_rows(self.db_session)[0] - db.update_db_row_state(self.db_session, parent_row, dep_state) - db.create_pending_row(self.db_session, *self.UPDATE_ROW, - depending_on=[parent_row]) - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - if row is not None: - self.assertNotEqual(parent_row.seqnum, row.seqnum) - - return row - - def test_get_oldest_pending_row_when_dep_completed(self): - row = self._test_get_oldest_pending_row_with_dep(odl_const.COMPLETED) - self.assertEqual(odl_const.PROCESSING, row.state) - - def test_get_oldest_pending_row_when_dep_failed(self): - row = self._test_get_oldest_pending_row_with_dep(odl_const.FAILED) - self.assertEqual(odl_const.PROCESSING, row.state) - - def test_get_oldest_pending_row_returns_parent_when_dep_pending(self): - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - parent_row = db.get_all_db_rows(self.db_session)[0] - db.create_pending_row(self.db_session, *self.UPDATE_ROW, - 
depending_on=[parent_row]) - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - self.assertEqual(parent_row, row) - - def test_get_oldest_pending_row_none_when_dep_processing(self): - row = self._test_get_oldest_pending_row_with_dep(odl_const.PROCESSING) - self.assertIsNone(row) - - def _test_delete_rows_by_state_and_time(self, last_retried, row_retention, - state, expected_rows): - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - - # update state and last retried - row = db.get_all_db_rows(self.db_session)[0] - row.state = state - row.last_retried = row.last_retried - timedelta(seconds=last_retried) - self._update_row(row) - - db.delete_rows_by_state_and_time(self.db_session, - odl_const.COMPLETED, - timedelta(seconds=row_retention)) - - # validate the number of rows in the journal - rows = db.get_all_db_rows(self.db_session) - self.assertEqual(expected_rows, len(rows)) - - def test_delete_completed_rows_no_new_rows(self): - self._test_delete_rows_by_state_and_time(0, 10, odl_const.COMPLETED, 1) - - def test_delete_completed_rows_one_new_row(self): - self._test_delete_rows_by_state_and_time(6, 5, odl_const.COMPLETED, 0) - - def test_delete_completed_rows_wrong_state(self): - self._test_delete_rows_by_state_and_time(10, 8, odl_const.PENDING, 1) - - def test_valid_retry_count(self): - self._test_retry_count(1, 1, 1, odl_const.PENDING) - - def test_invalid_retry_count(self): - self._test_retry_count(2, 1, 1, odl_const.FAILED) - - def test_update_row_state_to_pending(self): - self._test_update_row_state(odl_const.PROCESSING, odl_const.PENDING) - - def test_update_row_state_to_processing(self): - self._test_update_row_state(odl_const.PENDING, odl_const.PROCESSING) - - def test_update_row_state_to_failed(self): - self._test_update_row_state(odl_const.PROCESSING, odl_const.FAILED) - - def test_update_row_state_to_completed(self): - self._test_update_row_state(odl_const.PROCESSING, odl_const.COMPLETED) - - def _test_periodic_task_lock_unlock(self, db_func, existing_state, - expected_state, expected_result, - task='test_task'): - row = models.OpenDaylightPeriodicTask(state=existing_state, - task=task) - self.db_session.add(row) - self.db_session.flush() - - self.assertEqual(expected_result, db_func(self.db_session, - task)) - row = self.db_session.query(models.OpenDaylightPeriodicTask).filter_by( - task=task).one() - - self.assertEqual(expected_state, row['state']) - - def test_lock_periodic_task(self): - self._test_periodic_task_lock_unlock(db.lock_periodic_task, - odl_const.PENDING, - odl_const.PROCESSING, - True) - - def test_lock_periodic_task_fails_when_processing(self): - self._test_periodic_task_lock_unlock(db.lock_periodic_task, - odl_const.PROCESSING, - odl_const.PROCESSING, - False) - - def test_unlock_periodic_task(self): - self._test_periodic_task_lock_unlock(db.unlock_periodic_task, - odl_const.PROCESSING, - odl_const.PENDING, - True) - - def test_unlock_periodic_task_fails_when_pending(self): - self._test_periodic_task_lock_unlock(db.unlock_periodic_task, - odl_const.PENDING, - odl_const.PENDING, - False) - - def test_multiple_row_tasks(self): - self._test_periodic_task_lock_unlock(db.unlock_periodic_task, - odl_const.PENDING, - odl_const.PENDING, - False) - - def _add_tasks(self, tasks): - row = [] - for count, task in enumerate(tasks): - row.append(models.OpenDaylightPeriodicTask(state=odl_const.PENDING, - task=task)) - self.db_session.add(row[count]) - - self.db_session.flush() - - rows = self.db_session.query(models.OpenDaylightPeriodicTask).all() - 
self.assertEqual(len(tasks), len(rows)) - - def _perform_ops_on_all_rows(self, tasks, to_lock): - if to_lock: - curr_state = odl_const.PENDING - exp_state = odl_const.PROCESSING - func = db.lock_periodic_task - else: - exp_state = odl_const.PENDING - curr_state = odl_const.PROCESSING - func = db.unlock_periodic_task - - processed = [] - for task in tasks: - row = self.db_session.query( - models.OpenDaylightPeriodicTask).filter_by(task=task).one() - - self.assertEqual(row['state'], curr_state) - self.assertTrue(func(self.db_session, task)) - rows = self.db_session.query( - models.OpenDaylightPeriodicTask).filter_by().all() - - processed.append(task) - - for row in rows: - if row['task'] in processed: - self.assertEqual(exp_state, row['state']) - else: - self.assertEqual(curr_state, row['state']) - - self.assertFalse(func(self.db_session, tasks[-1])) - - def test_multiple_row_tasks_lock_unlock(self): - task1 = 'test_random_task' - task2 = 'random_task_random' - task3 = 'task_test_random' - tasks = [task1, task2, task3] - self._add_tasks(tasks) - self._perform_ops_on_all_rows(tasks, to_lock=True) - self._perform_ops_on_all_rows(tasks, to_lock=False) diff --git a/networking_odl/tests/unit/db/test_sqlite.py b/networking_odl/tests/unit/db/test_sqlite.py deleted file mode 100644 index 3ecf77986..000000000 --- a/networking_odl/tests/unit/db/test_sqlite.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2016 Intel Corporation. -# Copyright 2016 Isaku Yamahata -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.db import models -from networking_odl.tests.unit import test_base_db - - -class DbTestCase(test_base_db.ODLBaseDbTestCase): - - UPDATE_ROW = [odl_const.ODL_NETWORK, 'id', odl_const.ODL_UPDATE, - {'test': 'data'}] - model = models.OpenDaylightJournal - - def setUp(self): - super(DbTestCase, self).setUp() - - def _create_row(self): - db.create_pending_row(self.db_session, *self.UPDATE_ROW) - rows = db.get_all_db_rows(self.db_session) - self.assertEqual(1, len(rows)) - return rows[0] - - def test_equal_created_at(self): - row = self._create_row() - got = self.db_session.query(self.model).filter_by( - created_at=row.created_at).all() - self.assertEqual(1, len(got)) - - def test_get_by_primary_key_filter(self): - row = self._create_row() - # NOTE(manjeets) as seqnum is primary key so there would be - # exactly one row created. 
-        query = self.db_session.query(models.OpenDaylightJournal)
-        got = query.filter_by(seqnum=row.seqnum).one()
-        self.assertEqual(row, got)
-
-    def test_compare_created_at(self):
-        row = self._create_row()
-        created_at = row.created_at + datetime.timedelta(minutes=1)
-        got = self.db_session.query(self.model).filter(
-            self.model.created_at < created_at).all()
-        self.assertEqual(1, len(got))
diff --git a/networking_odl/tests/unit/fwaas/__init__.py b/networking_odl/tests/unit/fwaas/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/networking_odl/tests/unit/fwaas/test_fwaas_odl.py b/networking_odl/tests/unit/fwaas/test_fwaas_odl.py
deleted file mode 100644
index 42c3deb27..000000000
--- a/networking_odl/tests/unit/fwaas/test_fwaas_odl.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-test_fwaas_odl
-----------------------------------
-
-Tests for the L3 FWaaS plugin for networking-odl.
-"""
-
-from networking_odl.fwaas import driver as fwaas_odl
-from networking_odl.tests import base as odl_base
-
-from neutron.tests import base
-
-
-class TestODL_FWaaS(base.BaseTestCase):
-    def setUp(self):
-        self.useFixture(odl_base.OpenDaylightRestClientFixture())
-        super(TestODL_FWaaS, self).setUp()
-
-    def test_init(self):
-        # just create an instance of OpenDaylightFwaasDriver
-        fwaas_odl.OpenDaylightFwaasDriver()
diff --git a/networking_odl/tests/unit/journal/__init__.py b/networking_odl/tests/unit/journal/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/networking_odl/tests/unit/journal/test_dependency_validations.py b/networking_odl/tests/unit/journal/test_dependency_validations.py
deleted file mode 100644
index 03d14bd35..000000000
--- a/networking_odl/tests/unit/journal/test_dependency_validations.py
+++ /dev/null
@@ -1,405 +0,0 @@
-#
-# Copyright (C) 2016 Intel Corp. Isaku Yamahata
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-# - -import testscenarios - -from networking_odl.common import constants as const -from networking_odl.db import db -from networking_odl.journal import dependency_validations -from networking_odl.tests.unit import test_base_db - - -load_tests = testscenarios.load_tests_apply_scenarios - - -_NET_ID = 'NET_ID' -_NET_DATA = {'id': _NET_ID} -_SUBNET_ID = 'SUBNET_ID' -_SUBNET_DATA = {'network_id': _NET_ID} -_PORT_ID = 'PORT_ID' -_PORT_DATA = {'network_id': _NET_ID, - 'fixed_ips': [{'subnet_id': _SUBNET_ID}]} -_ROUTER_ID = 'ROUTER_ID' -_ROUTER_DATA = {'id': 'ROUTER_ID', - 'gw_port_id': 'GW_PORT_ID'} -_L2GW_ID = 'l2gw_id' -_L2GW_DATA = {'id': _L2GW_ID} -_L2GWCONN_ID = 'l2gwconn_id' -_L2GWCONN_DATA = {'id': _L2GWCONN_ID, - 'network_id': _NET_ID, - 'gateway_id': _L2GW_ID} -_TRUNK_ID = 'TRUNK_ID' -_SUBPORT_ID = 'CPORT_ID' -_TRUNK_DATA = {'trunk_id': _TRUNK_ID, - 'port_id': _PORT_ID, - 'sub_ports': [{'port_id': _SUBPORT_ID}]} -_BGPVPN_ID = 'BGPVPN_ID' - - -def get_data(res_type, operation): - if res_type == const.ODL_NETWORK: - return _NET_DATA - elif res_type == const.ODL_SUBNET: - if operation == const.ODL_DELETE: - return [_NET_ID] - return _SUBNET_DATA - elif res_type == const.ODL_PORT: - # TODO(yamahata): test case of (ODL_port, ODL_DELETE) is missing - if operation == const.ODL_DELETE: - return [_NET_ID, _SUBNET_ID] - return _PORT_DATA - elif res_type == const.ODL_ROUTER: - return _ROUTER_DATA - elif res_type == const.ODL_L2GATEWAY: - return _L2GW_DATA - elif res_type == const.ODL_L2GATEWAY_CONNECTION: - return _L2GWCONN_DATA - elif res_type == const.ODL_TRUNK: - if operation == const.ODL_DELETE: - return [_PORT_ID, _SUBPORT_ID] - return _TRUNK_DATA - elif res_type == const.ODL_BGPVPN: - if operation == const.ODL_DELETE: - return [_NET_ID, _ROUTER_ID] - else: - routers = [] - networks = [] - if operation == const.ODL_UPDATE: - routers = [_ROUTER_ID] - networks = [_NET_ID] - return {'id': _BGPVPN_ID, 'networks': networks, - 'routers': routers, - 'route_distinguishers': ['100:1']} - return [] - - -def subnet_fail_network_dep(net_op, subnet_op): - return {'expected': True, - 'first_type': const.ODL_NETWORK, - 'first_operation': net_op, - 'first_id': _NET_ID, - 'second_type': const.ODL_SUBNET, - 'second_operation': subnet_op, - 'second_id': _SUBNET_ID} - - -def subnet_succeed_network_dep(net_op, subnet_op): - return {'expected': False, - 'first_type': const.ODL_SUBNET, - 'first_operation': subnet_op, - 'first_id': _SUBNET_ID, - 'second_type': const.ODL_NETWORK, - 'second_operation': net_op, - 'second_id': _NET_ID} - - -# TODO(vthapar) add tests for l2gw dependency validations -class BaseDependencyValidationsTestCase(object): - def test_dependency(self): - db.create_pending_row( - self.db_session, self.first_type, self.first_id, - self.first_operation, - get_data(self.first_type, self.first_operation)) - deps = dependency_validations.calculate( - self.db_session, self.second_operation, self.second_type, - self.second_id, get_data(self.second_type, self.second_operation)) - self.assertEqual(self.expected, len(deps) != 0) - - -class SubnetDependencyValidationsTestCase( - test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): - scenarios = ( - ("subnet_create_depends_on_older_network_create", - subnet_fail_network_dep(const.ODL_CREATE, const.ODL_CREATE)), - ("subnet_create_depends_on_older_network_update", - subnet_fail_network_dep(const.ODL_UPDATE, const.ODL_CREATE)), - ("subnet_create_depends_on_older_network_delete", - subnet_fail_network_dep(const.ODL_DELETE, const.ODL_CREATE)), - 
("subnet_create_doesnt_depend_on_newer_network_create", - subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_CREATE)), - ("subnet_create_doesnt_depend_on_newer_network_update", - subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_CREATE)), - ("subnet_create_doesnt_depend_on_newer_network_delete", - subnet_succeed_network_dep(const.ODL_DELETE, const.ODL_CREATE)), - ("subnet_update_depends_on_older_network_create", - subnet_fail_network_dep(const.ODL_CREATE, const.ODL_UPDATE)), - ("subnet_update_depends_on_older_network_update", - subnet_fail_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)), - ("subnet_update_depends_on_older_network_delete", - subnet_fail_network_dep(const.ODL_DELETE, const.ODL_UPDATE)), - ("subnet_update_doesnt_depend_on_newer_network_create", - subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_UPDATE)), - ("subnet_update_doesnt_depend_on_newer_network_update", - subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)), - ("subnet_update_doesnt_depend_on_newer_network_delete", - subnet_succeed_network_dep(const.ODL_DELETE, const.ODL_UPDATE)), - ("subnet_delete_doesnt_depend_on_older_network_create", - subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_DELETE)), - ("subnet_delete_doesnt_depend_on_older_network_update", - subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_DELETE)), - ("subnet_delete_doesnt_depend_on_newer_network_create", - subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_DELETE)), - ("subnet_delete_doesnt_depend_on_newer_network_update", - subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_DELETE)), - ) - - -def port_fail_network_dep(net_op, port_op): - return {'expected': True, - 'first_type': const.ODL_NETWORK, - 'first_operation': net_op, - 'first_id': _NET_ID, - 'second_type': const.ODL_PORT, - 'second_operation': port_op, - 'second_id': _PORT_ID} - - -def port_succeed_network_dep(net_op, port_op): - return {'expected': False, - 'first_type': const.ODL_PORT, - 'first_operation': port_op, - 'first_id': _PORT_ID, - 'second_type': const.ODL_NETWORK, - 'second_operation': net_op, - 'second_id': _NET_ID} - - -def port_fail_subnet_dep(subnet_op, port_op): - return {'expected': True, - 'first_type': const.ODL_SUBNET, - 'first_operation': subnet_op, - 'first_id': _SUBNET_ID, - 'second_type': const.ODL_PORT, - 'second_operation': port_op, - 'second_id': _PORT_ID} - - -def port_succeed_subnet_dep(subnet_op, port_op): - return {'expected': False, - 'first_type': const.ODL_PORT, - 'first_operation': port_op, - 'first_id': _PORT_ID, - 'second_type': const.ODL_SUBNET, - 'second_operation': subnet_op, - 'second_id': _SUBNET_ID} - - -class PortDependencyValidationsTestCase( - test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): - scenarios = ( - ("port_create_depends_on_older_network_create", - port_fail_network_dep(const.ODL_CREATE, const.ODL_CREATE)), - ("port_create_depends_on_older_network_update", - port_fail_network_dep(const.ODL_UPDATE, const.ODL_CREATE)), - ("port_create_depends_on_older_network_delete", - port_fail_network_dep(const.ODL_DELETE, const.ODL_CREATE)), - ("port_create_doesnt_depend_on_newer_network_create", - port_succeed_network_dep(const.ODL_CREATE, const.ODL_CREATE)), - ("port_create_doesnt_depend_on_newer_network_update", - port_succeed_network_dep(const.ODL_UPDATE, const.ODL_CREATE)), - ("port_create_doesnt_depend_on_newer_network_delete", - port_succeed_network_dep(const.ODL_DELETE, const.ODL_CREATE)), - ("port_update_depends_on_older_network_create", - 
port_fail_network_dep(const.ODL_CREATE, const.ODL_UPDATE)), - ("port_update_depends_on_older_network_update", - port_fail_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)), - ("port_update_depends_on_older_network_delete", - port_fail_network_dep(const.ODL_DELETE, const.ODL_UPDATE)), - ("port_update_doesnt_depend_on_newer_network_create", - port_succeed_network_dep(const.ODL_CREATE, const.ODL_UPDATE)), - ("port_update_doesnt_depend_on_newer_network_update", - port_succeed_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)), - ("port_update_doesnt_depend_on_newer_network_delete", - port_succeed_network_dep(const.ODL_DELETE, const.ODL_UPDATE)), - ("port_create_depends_on_older_subnet_create", - port_fail_subnet_dep(const.ODL_CREATE, const.ODL_CREATE)), - ("port_create_depends_on_older_subnet_update", - port_fail_subnet_dep(const.ODL_UPDATE, const.ODL_CREATE)), - ("port_create_depends_on_older_subnet_delete", - port_fail_subnet_dep(const.ODL_DELETE, const.ODL_CREATE)), - ("port_create_doesnt_depend_on_newer_subnet_create", - port_succeed_subnet_dep(const.ODL_CREATE, const.ODL_CREATE)), - ("port_create_doesnt_depend_on_newer_subnet_update", - port_succeed_subnet_dep(const.ODL_UPDATE, const.ODL_CREATE)), - ("port_create_doesnt_depend_on_newer_subnet_delete", - port_succeed_subnet_dep(const.ODL_DELETE, const.ODL_CREATE)), - ("port_update_depends_on_older_subnet_create", - port_fail_subnet_dep(const.ODL_CREATE, const.ODL_UPDATE)), - ("port_update_depends_on_older_subnet_update", - port_fail_subnet_dep(const.ODL_UPDATE, const.ODL_UPDATE)), - ("port_update_depends_on_older_subnet_delete", - port_fail_subnet_dep(const.ODL_DELETE, const.ODL_UPDATE)), - ("port_update_doesnt_depend_on_newer_subnet_create", - port_succeed_subnet_dep(const.ODL_CREATE, const.ODL_UPDATE)), - ("port_update_doesnt_depend_on_newer_subnet_update", - port_succeed_subnet_dep(const.ODL_UPDATE, const.ODL_UPDATE)), - ("port_update_doesnt_depend_on_newer_subnet_delete", - port_succeed_subnet_dep(const.ODL_DELETE, const.ODL_UPDATE)), - ) - - -def trunk_dep(first_type, second_type, first_op, second_op, result, - sub_port=False): - expected = {'fail': True, 'pass': False} - port_id = _SUBPORT_ID if sub_port else _PORT_ID - type_id = {const.ODL_PORT: port_id, - const.ODL_TRUNK: _TRUNK_ID} - return {'expected': expected[result], - 'first_type': first_type, - 'first_operation': first_op, - 'first_id': type_id[first_type], - 'second_type': second_type, - 'second_operation': second_op, - 'second_id': type_id[second_type]} - - -class TrunkDependencyValidationsTestCase( - test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): - scenarios = ( - ("trunk_create_depends_on_older_port_create", - trunk_dep(const.ODL_PORT, const.ODL_TRUNK, - const.ODL_CREATE, const.ODL_CREATE, 'fail')), - ("trunk_create_doesnt_depend_on_newer_port_create", - trunk_dep(const.ODL_TRUNK, const.ODL_PORT, - const.ODL_CREATE, const.ODL_CREATE, 'pass')), - ("trunk_create_doesnt_depend_on_port_update", - trunk_dep(const.ODL_TRUNK, const.ODL_PORT, - const.ODL_CREATE, const.ODL_UPDATE, 'pass')), - ("trunk_create_doesnt_depend_on_newer_port_delete", - trunk_dep(const.ODL_TRUNK, const.ODL_PORT, - const.ODL_CREATE, const.ODL_DELETE, 'pass')), - # TODO(vthapar): add more/better validations for subport - # trunk update means subport add/delete - ("trunk_update_depends_on_older_trunk_create", - trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK, - const.ODL_CREATE, const.ODL_UPDATE, 'fail', True)), - ("trunk_update_depends_on_older_port_create", - trunk_dep(const.ODL_PORT, 
const.ODL_TRUNK, - const.ODL_CREATE, const.ODL_UPDATE, 'fail', True)), - ("trunk_update_doesnt_depend_on_newer_port_create", - trunk_dep(const.ODL_TRUNK, const.ODL_PORT, - const.ODL_UPDATE, const.ODL_CREATE, 'pass', True)), - ("trunk_update_doesnt_depend_on_port_update", - trunk_dep(const.ODL_TRUNK, const.ODL_PORT, - const.ODL_UPDATE, const.ODL_UPDATE, 'pass', True)), - ("trunk_update_doesnt_depend_on_newer_port_delete", - trunk_dep(const.ODL_TRUNK, const.ODL_PORT, - const.ODL_UPDATE, const.ODL_DELETE, 'pass', True)), - # trunk delete cases - ("trunk_delete_depends_on_older_trunk_create", - trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK, - const.ODL_CREATE, const.ODL_DELETE, 'fail', True)), - ("trunk_delete_depends_on_older_trunk_update", - trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK, - const.ODL_UPDATE, const.ODL_DELETE, 'fail', True)), - ("trunk_delete_doesnt_depend_on_older_port_create", - trunk_dep(const.ODL_PORT, const.ODL_TRUNK, - const.ODL_CREATE, const.ODL_DELETE, 'pass')), - ) - - -def l2gw_dep(first_type, second_type, first_op, second_op, result): - expected = {'fail': True, 'pass': False} - type_id = {const.ODL_NETWORK: _NET_ID, - const.ODL_L2GATEWAY: _L2GW_ID, - const.ODL_L2GATEWAY_CONNECTION: _L2GWCONN_ID} - return {'expected': expected[result], - 'first_type': first_type, - 'first_operation': first_op, - 'first_id': type_id[first_type], - 'second_type': second_type, - 'second_operation': second_op, - 'second_id': type_id[second_type]} - - -class L2GWDependencyValidationsTestCase( - test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): - scenarios = ( - ("L2GWConn_create_depends_on_older_network_create", - l2gw_dep(const.ODL_NETWORK, const.ODL_L2GATEWAY_CONNECTION, - const.ODL_CREATE, const.ODL_CREATE, 'fail')), - ("L2GWConn_create_depends_on_older_L2GW_create", - l2gw_dep(const.ODL_L2GATEWAY, const.ODL_L2GATEWAY_CONNECTION, - const.ODL_CREATE, const.ODL_CREATE, 'fail')), - ("L2GWConn_create_doesnt_depend_on_newer_network_create", - l2gw_dep(const.ODL_L2GATEWAY_CONNECTION, const.ODL_NETWORK, - const.ODL_CREATE, const.ODL_CREATE, 'pass')), - ("L2GWConn_create_doesnt_depend_on_newer_L2GW_create", - l2gw_dep(const.ODL_L2GATEWAY_CONNECTION, const.ODL_L2GATEWAY, - const.ODL_CREATE, const.ODL_CREATE, 'pass')), - ) - - -# TODO(vthapar): Refactor *_dep into a common method -def bgpvpn_dep(first_type, second_type, first_op, second_op, result): - expected = {'fail': True, 'pass': False} - type_id = {const.ODL_NETWORK: _NET_ID, - const.ODL_ROUTER: _ROUTER_ID, - const.ODL_BGPVPN: _BGPVPN_ID} - return {'expected': expected[result], - 'first_type': first_type, - 'first_operation': first_op, - 'first_id': type_id[first_type], - 'second_type': second_type, - 'second_operation': second_op, - 'second_id': type_id[second_type]} - - -class BGPVPNDependencyValidationsTestCase( - test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): - scenarios = ( - ("bgpvpn_create_doesnt_depend_on_older_network_create", - bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN, - const.ODL_CREATE, const.ODL_CREATE, 'pass')), - ("bgpvpn_create_doesnt_depend_on_newer_network_create", - bgpvpn_dep(const.ODL_BGPVPN, const.ODL_NETWORK, - const.ODL_CREATE, const.ODL_CREATE, 'pass')), - ("bgpvpn_create_doesnt_depend_on_older_router_create", - bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN, - const.ODL_CREATE, const.ODL_CREATE, 'pass')), - ("bgpvpn_create_doesnt_depend_on_newer_router_create", - bgpvpn_dep(const.ODL_BGPVPN, const.ODL_ROUTER, - const.ODL_CREATE, const.ODL_CREATE, 'pass')), - 
("bgpvpn_update_depends_on_older_bgpvpn_create", - bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN, - const.ODL_CREATE, const.ODL_UPDATE, 'fail')), - ("bgpvpn_update_depends_on_older_network_create", - bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN, - const.ODL_CREATE, const.ODL_UPDATE, 'fail')), - ("bgpvpn_update_doesnt_depend_on_newer_network_create", - bgpvpn_dep(const.ODL_BGPVPN, const.ODL_NETWORK, - const.ODL_UPDATE, const.ODL_CREATE, 'pass')), - ("bgpvpn_update_depends_on_older_router_create", - bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN, - const.ODL_CREATE, const.ODL_UPDATE, 'fail')), - ("bgpvpn_update_doesnt_depend_on_newer_router_create", - bgpvpn_dep(const.ODL_BGPVPN, const.ODL_ROUTER, - const.ODL_UPDATE, const.ODL_CREATE, 'pass')), - # bgpvpn delete cases - ("bgpvpn_delete_depends_on_older_bgpvpn_create", - bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN, - const.ODL_CREATE, const.ODL_DELETE, 'fail')), - ("bgpvpn_delete_depends_on_older_bgpvpn_update", - bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN, - const.ODL_UPDATE, const.ODL_DELETE, 'fail')), - ("bgpvpn_delete_doesnt_depend_on_older_network_create", - bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN, - const.ODL_CREATE, const.ODL_DELETE, 'pass')), - ("bgpvpn_delete_doesnt_depend_on_older_router_create", - bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN, - const.ODL_CREATE, const.ODL_DELETE, 'pass')), - ) diff --git a/networking_odl/tests/unit/journal/test_full_sync.py b/networking_odl/tests/unit/journal/test_full_sync.py deleted file mode 100644 index 940a853a8..000000000 --- a/networking_odl/tests/unit/journal/test_full_sync.py +++ /dev/null @@ -1,241 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import mock -import requests - -from networking_l2gw.services.l2gateway.common import constants as l2gw_const -from networking_sfc.extensions import flowclassifier as fc_const -from networking_sfc.extensions import sfc as sfc_const -from neutron.services.trunk import constants as t_consts -from neutron_lib.api.definitions import bgpvpn as bgpvpn_const -from neutron_lib.plugins import constants -from neutron_lib.plugins import directory - -from networking_odl.bgpvpn import odl_v2 as bgpvpn_driver -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.journal import full_sync -from networking_odl.journal import journal -from networking_odl.l2gateway import driver_v2 as l2gw_driver -from networking_odl.l3 import l3_odl_v2 -from networking_odl.lbaas import lbaasv2_driver_v2 as lbaas_driver -from networking_odl.ml2 import mech_driver_v2 -from networking_odl.qos import qos_driver_v2 as qos_driver -from networking_odl.sfc.flowclassifier import sfc_flowclassifier_v2 -from networking_odl.sfc import sfc_driver_v2 as sfc_driver -from networking_odl.tests import base -from networking_odl.tests.unit import test_base_db -from networking_odl.trunk import trunk_driver_v2 as trunk_driver - - -class FullSyncTestCase(test_base_db.ODLBaseDbTestCase): - def setUp(self): - self.useFixture( - base.OpenDaylightRestClientGlobalFixture(full_sync._CLIENT)) - super(FullSyncTestCase, self).setUp() - - self._CLIENT = full_sync._CLIENT.get_client() - - for plugin_name in self._get_all_resources(): - mocked = mock.MagicMock() - if plugin_name == constants.CORE: - self.plugin = mocked - elif plugin_name == constants.L3: - self.l3_plugin = mocked - - directory.add_plugin(plugin_name, mocked) - - self.addCleanup(self.clean_registered_resources) - - @staticmethod - def _get_all_resources(): - return { - constants.L3: l3_odl_v2.L3_RESOURCES, - constants.CORE: mech_driver_v2.L2_RESOURCES, - constants.LOADBALANCERV2: lbaas_driver.LBAAS_RESOURCES, - t_consts.TRUNK: trunk_driver.TRUNK_RESOURCES, - constants.QOS: qos_driver.QOS_RESOURCES, - sfc_const.SFC_EXT: sfc_driver.SFC_RESOURCES, - bgpvpn_const.LABEL: bgpvpn_driver.BGPVPN_RESOURCES, - fc_const.FLOW_CLASSIFIER_EXT: - sfc_flowclassifier_v2.SFC_FC_RESOURCES, - l2gw_const.L2GW: l2gw_driver.L2GW_RESOURCES, - } - - @staticmethod - def clean_registered_resources(): - full_sync.ALL_RESOURCES = {} - - def test_no_full_sync_when_canary_exists(self): - full_sync.full_sync(self.db_session) - self.assertEqual([], db.get_all_db_rows(self.db_session)) - - def _mock_l2_resources(self): - expected_journal = {odl_const.ODL_NETWORK: '1', - odl_const.ODL_SUBNET: '2', - odl_const.ODL_PORT: '3'} - network_id = expected_journal[odl_const.ODL_NETWORK] - self.plugin.get_networks.return_value = [{'id': network_id}] - self.plugin.get_subnets.return_value = [ - {'id': expected_journal[odl_const.ODL_SUBNET], - 'network_id': network_id}] - port = {'id': expected_journal[odl_const.ODL_PORT], - odl_const.ODL_SGS: None, - 'tenant_id': '123', - 'fixed_ips': [], - 'network_id': network_id} - self.plugin.get_ports.side_effect = ([port], []) - return expected_journal - - def _filter_out_canary(self, rows): - return [row for row in rows if row['object_uuid'] != - full_sync._CANARY_NETWORK_ID] - - def _test_no_full_sync_when_canary_in_journal(self, state): - self._mock_canary_missing() - self._mock_l2_resources() - db.create_pending_row(self.db_session, odl_const.ODL_NETWORK, - full_sync._CANARY_NETWORK_ID, - odl_const.ODL_CREATE, {}) - row = 
db.get_all_db_rows(self.db_session)[0] - db.update_db_row_state(self.db_session, row, state) - - full_sync.full_sync(self.db_session) - - rows = db.get_all_db_rows(self.db_session) - self.assertEqual([], self._filter_out_canary(rows)) - - def test_no_full_sync_when_canary_pending_creation(self): - self._test_no_full_sync_when_canary_in_journal(odl_const.PENDING) - - def test_no_full_sync_when_canary_is_processing(self): - self._test_no_full_sync_when_canary_in_journal(odl_const.PROCESSING) - - @mock.patch.object(db, 'delete_pending_rows') - @mock.patch.object(full_sync, '_full_sync_needed') - @mock.patch.object(full_sync, '_sync_resources') - @mock.patch.object(journal, 'record') - def test_sync_resource_order( - self, record_mock, _sync_resources_mock, _full_sync_needed_mock, - delete_pending_rows_mock): - - full_sync.ALL_RESOURCES = self._get_all_resources() - _full_sync_needed_mock._full_sync_needed.return_value = True - session = mock.MagicMock() - full_sync.full_sync(session) - - _sync_resources_mock.assert_has_calls( - [mock.call(session, mock.ANY, mock.ANY, - object_type, collection_name) - for (object_type, collection_name) in [ - (odl_const.ODL_SG, odl_const.ODL_SGS), - (odl_const.ODL_SG_RULE, odl_const.ODL_SG_RULES), - (odl_const.ODL_NETWORK, odl_const.ODL_NETWORKS), - (odl_const.ODL_SUBNET, odl_const.ODL_SUBNETS), - (odl_const.ODL_ROUTER, odl_const.ODL_ROUTERS), - (odl_const.ODL_PORT, odl_const.ODL_PORTS), - (odl_const.ODL_FLOATINGIP, odl_const.ODL_FLOATINGIPS), - (odl_const.ODL_LOADBALANCER, odl_const.ODL_LOADBALANCERS), - (odl_const.ODL_LISTENER, odl_const.ODL_LISTENERS), - (odl_const.ODL_POOL, odl_const.ODL_POOLS), - (odl_const.ODL_MEMBER, odl_const.ODL_MEMBERS), - (odl_const.ODL_HEALTHMONITOR, - odl_const.ODL_HEALTHMONITORS), - (odl_const.ODL_QOS_POLICY, odl_const.ODL_QOS_POLICIES), - (odl_const.ODL_TRUNK, odl_const.ODL_TRUNKS), - (odl_const.ODL_BGPVPN, odl_const.ODL_BGPVPNS), - (odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION, - odl_const.ODL_BGPVPN_NETWORK_ASSOCIATIONS), - (odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION, - odl_const.ODL_BGPVPN_ROUTER_ASSOCIATIONS), - (odl_const.ODL_SFC_FLOW_CLASSIFIER, - odl_const.ODL_SFC_FLOW_CLASSIFIERS), - (odl_const.ODL_SFC_PORT_PAIR, - odl_const.ODL_SFC_PORT_PAIRS), - (odl_const.ODL_SFC_PORT_PAIR_GROUP, - odl_const.ODL_SFC_PORT_PAIR_GROUPS), - (odl_const.ODL_SFC_PORT_CHAIN, - odl_const.ODL_SFC_PORT_CHAINS), - (odl_const.ODL_L2GATEWAY, odl_const.ODL_L2GATEWAYS), - (odl_const.ODL_L2GATEWAY_CONNECTION, - odl_const.ODL_L2GATEWAY_CONNECTIONS)]]) - - def test_client_error_propagates(self): - class TestException(Exception): - def __init__(self): - pass - - self._CLIENT.get.side_effect = TestException() - self.assertRaises(TestException, full_sync.full_sync, self.db_session) - - def _mock_canary_missing(self): - get_return = mock.MagicMock() - get_return.status_code = requests.codes.not_found - self._CLIENT.get.return_value = get_return - - def _assert_canary_created(self): - rows = db.get_all_db_rows(self.db_session) - self.assertTrue(any(r['object_uuid'] == full_sync._CANARY_NETWORK_ID - for r in rows)) - return rows - - def _test_full_sync_resources(self, expected_journal): - self._mock_canary_missing() - - full_sync.full_sync(self.db_session) - - rows = self._assert_canary_created() - rows = self._filter_out_canary(rows) - self.assertItemsEqual(expected_journal.keys(), - [row['object_type'] for row in rows]) - for row in rows: - self.assertEqual(expected_journal[row['object_type']], - row['object_uuid']) - - def 
test_full_sync_removes_pending_rows(self): - db.create_pending_row(self.db_session, odl_const.ODL_NETWORK, "uuid", - odl_const.ODL_CREATE, {'foo': 'bar'}) - self._test_full_sync_resources({}) - - def test_full_sync_no_resources(self): - self._test_full_sync_resources({}) - - def test_full_sync_l2_resources(self): - full_sync.ALL_RESOURCES = {constants.CORE: mech_driver_v2.L2_RESOURCES} - self._test_full_sync_resources(self._mock_l2_resources()) - - def _mock_router_port(self, port_id): - router_port = {'id': port_id, - 'device_id': '1', - 'tenant_id': '1', - 'fixed_ips': [{'subnet_id': '1'}]} - self.plugin.get_ports.side_effect = ([], [router_port]) - - def _mock_l3_resources(self): - expected_journal = {odl_const.ODL_ROUTER: '1', - odl_const.ODL_FLOATINGIP: '2'} - self.l3_plugin.get_routers.return_value = [ - {'id': expected_journal[odl_const.ODL_ROUTER], - 'gw_port_id': None}] - self.l3_plugin.get_floatingips.return_value = [ - {'id': expected_journal[odl_const.ODL_FLOATINGIP]}] - - return expected_journal - - def test_full_sync_l3_resources(self): - full_sync.ALL_RESOURCES = {constants.L3: l3_odl_v2.L3_RESOURCES} - self._test_full_sync_resources(self._mock_l3_resources()) diff --git a/networking_odl/tests/unit/journal/test_journal.py b/networking_odl/tests/unit/journal/test_journal.py deleted file mode 100644 index d4a8ca5f7..000000000 --- a/networking_odl/tests/unit/journal/test_journal.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2017 NEC Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from oslo_utils import uuidutils
-
-from networking_odl.common import constants as odl_const
-from networking_odl.db import models
-from networking_odl.journal import journal
-from networking_odl.tests.unit import base_v2
-
-
-class OpenDaylightJournalThreadTest(base_v2.OpenDaylightTestCase):
-    def setUp(self):
-        super(OpenDaylightJournalThreadTest, self).setUp()
-        self.journal = journal.OpenDaylightJournalThread()
-        self.addCleanup(self.cleanup)
-
-    @staticmethod
-    def cleanup():
-        journal.MAKE_URL.clear()
-
-    def test_json_data(self):
-        object_type = 'testobject'
-        data = 'testdata'
-        row = models.OpenDaylightJournal(object_type=object_type,
-                                         object_uuid=uuidutils.generate_uuid(),
-                                         operation=odl_const.ODL_CREATE,
-                                         data=data)
-
-        self.assertEqual("%ss" % object_type, self.journal._json_data(row)[1])
-
-    def test_json_data_customized_url(self):
-        object_type = 'randomtestobject'
-        data = 'testdata'
-        journal.register_url_builder(object_type, lambda row: row.object_type)
-        row = models.OpenDaylightJournal(object_type=object_type,
-                                         object_uuid=uuidutils.generate_uuid(),
-                                         operation=odl_const.ODL_CREATE,
-                                         data=data)
-
-        url_param = self.journal._json_data(row)
-        self.assertEqual(object_type, url_param[1])
diff --git a/networking_odl/tests/unit/journal/test_periodic_task.py b/networking_odl/tests/unit/journal/test_periodic_task.py
deleted file mode 100644
index 3bba4c3c4..000000000
--- a/networking_odl/tests/unit/journal/test_periodic_task.py
+++ /dev/null
@@ -1,139 +0,0 @@
-#
-# Copyright (C) 2016 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import threading
-import time
-
-from neutron.db import api as neutron_db_api
-
-import mock
-
-from networking_odl.common import constants as odl_const
-from networking_odl.db import db
-from networking_odl.db import models
-from networking_odl.journal import periodic_task
-from networking_odl.tests.unit import test_base_db
-
-
-class PeriodicTaskThreadTestCase(test_base_db.ODLBaseDbTestCase):
-    def setUp(self):
-        super(PeriodicTaskThreadTestCase, self).setUp()
-        row = models.OpenDaylightPeriodicTask(task='test-maintenance',
-                                              state=odl_const.PENDING)
-        self.db_session.add(row)
-        self.db_session.flush()
-
-        self.thread = periodic_task.PeriodicTask('test-maintenance')
-        self.thread.interval = 0.01
-        self.addCleanup(self.thread.cleanup)
-
-    def test__execute_op_no_exception(self):
-        with mock.patch.object(periodic_task, 'LOG') as mock_log:
-            operation = mock.MagicMock()
-            operation.__name__ = "test"
-            self.thread.register_operation(operation)
-            db_session = neutron_db_api.get_reader_session()
-            self.thread._execute_op(operation, db_session)
-            operation.assert_called()
-            mock_log.info.assert_called()
-            mock_log.exception.assert_not_called()
-
-    def test__execute_op_with_exception(self):
-        with mock.patch.object(periodic_task, 'LOG') as mock_log:
-            operation = mock.MagicMock(side_effect=Exception())
-            operation.__name__ = "test"
-            db_session = neutron_db_api.get_reader_session()
-            self.thread._execute_op(operation, db_session)
-            mock_log.exception.assert_called()
-
-    def test_thread_works(self):
-        callback_event = threading.Event()
-        count = [0]
-
-        def callback_op(**kwargs):
-            count[0] += 1
-
-            # The following should be true on the second call, so we're making
-            # sure that the thread runs more than once.
-            if count[0] > 1:
-                callback_event.set()
-
-        self.thread.register_operation(callback_op)
-        self.thread.start()
-
-        # Make sure the callback event was called and not timed out
-        self.assertTrue(callback_event.wait(timeout=5))
-
-    def test_thread_continues_after_exception(self):
-        exception_event = threading.Event()
-        callback_event = threading.Event()
-
-        def exception_op(**kwargs):
-            if not exception_event.is_set():
-                exception_event.set()
-                raise Exception()
-
-        def callback_op(**kwargs):
-            callback_event.set()
-
-        for op in [exception_op, callback_op]:
-            self.thread.register_operation(op)
-
-        self.thread.start()
-
-        # Make sure the callback event was called and not timed out
-        self.assertTrue(callback_event.wait(timeout=5))
-
-    def test_multiple_thread_work(self):
-        self.thread1 = periodic_task.PeriodicTask('test-maintenance1')
-        callback_event = threading.Event()
-        callback_event1 = threading.Event()
-        self.thread1.interval = 0.01
-        self.addCleanup(self.thread1.cleanup)
-
-        def callback_op(**kwargs):
-            callback_event.set()
-
-        def callback_op1(**kwargs):
-            callback_event1.set()
-
-        self.thread.register_operation(callback_op)
-        self.thread.register_operation(callback_op1)
-        self.thread.start()
-        self.assertTrue(callback_event.wait(timeout=5))
-
-        self.thread1.start()
-        self.assertTrue(callback_event1.wait(timeout=5))
-
-    @mock.patch.object(db, "was_periodic_task_executed_recently")
-    def test_back_to_back_job(self, mock_status_method):
-        callback_event = threading.Event()
-
-        def callback_op(**kwargs):
-            callback_event.set()
-
-        self.thread.register_operation(callback_op)
-        msg = ("Periodic %s task executed after periodic "
-               "interval Skipping execution.")
-        with mock.patch.object(periodic_task.LOG, 'info') as mock_log_info:
-            mock_status_method.return_value = True
-
self.thread.start() - time.sleep(1) - mock_log_info.assert_called_with(msg, 'test-maintenance') - self.assertFalse(callback_event.wait(timeout=1)) - mock_log_info.assert_called_with(msg, 'test-maintenance') - mock_status_method.return_value = False - self.assertTrue(callback_event.wait(timeout=2)) diff --git a/networking_odl/tests/unit/journal/test_recovery.py b/networking_odl/tests/unit/journal/test_recovery.py deleted file mode 100644 index ed25ff330..000000000 --- a/networking_odl/tests/unit/journal/test_recovery.py +++ /dev/null @@ -1,170 +0,0 @@ -# -# Copyright (C) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import mock - -from neutron.db import api as neutron_db_api -from neutron.tests.unit.testlib_api import SqlTestCaseLight -from neutron_lib import exceptions as nexc -from neutron_lib.plugins import constants as plugin_constants -from neutron_lib.plugins import directory - -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.db import models -from networking_odl.journal import full_sync -from networking_odl.journal import recovery -from networking_odl.l3 import l3_odl_v2 -from networking_odl.ml2 import mech_driver_v2 -from networking_odl.tests import base - - -class RecoveryTestCase(SqlTestCaseLight): - def setUp(self): - self.useFixture( - base.OpenDaylightRestClientGlobalFixture(recovery._CLIENT)) - super(RecoveryTestCase, self).setUp() - self.db_session = neutron_db_api.get_writer_session() - self._CLIENT = recovery._CLIENT.get_client() - self.addCleanup(self._db_cleanup) - self.addCleanup(self.clean_registered_resources) - - def _db_cleanup(self): - self.db_session.query(models.OpenDaylightJournal).delete() - - @staticmethod - def clean_registered_resources(): - full_sync.ALL_RESOURCES = {} - - def _mock_resource(self, plugin, resource_type): - mock_resource = mock.MagicMock() - get_func = getattr(plugin, 'get_{}'.format(resource_type)) - get_func.return_value = mock_resource - return mock_resource - - def _mock_row(self, resource_type): - return mock.MagicMock(object_type=resource_type) - - def _test__get_latest_resource(self, plugin, resource_type): - l2 = mech_driver_v2.L2_RESOURCES - full_sync.ALL_RESOURCES[plugin_constants.CORE] = l2 - mock_resource = self._mock_resource(plugin, resource_type) - mock_row = self._mock_row(resource_type) - - resource = recovery._get_latest_resource(mock_row) - self.assertEqual(mock_resource, resource) - - @mock.patch.object(directory, 'get_plugin') - def test__get_latest_resource_l2(self, plugin_mock): - for resource_type in mech_driver_v2.L2_RESOURCES: - plugin = plugin_mock.return_value - self._test__get_latest_resource(plugin, resource_type) - - @mock.patch.object(directory, 'get_plugin') - def test__get_latest_resource_l3(self, plugin_mock): - full_sync.ALL_RESOURCES[plugin_constants.L3] = l3_odl_v2.L3_RESOURCES - for resource_type in l3_odl_v2.L3_RESOURCES: - plugin = plugin_mock.return_value - self._test__get_latest_resource(plugin, resource_type) - - 
def test__get_latest_resource_unsupported(self): - mock_row = self._mock_row('aaa') - self.assertRaises( - recovery.UnsupportedResourceType, recovery._get_latest_resource, - mock_row) - - @mock.patch.object(directory, 'get_plugin') - def test__get_latest_resource_none(self, plugin_mock): - plugin_mock.return_value.get_network.side_effect = nexc.NotFound() - l2 = mech_driver_v2.L2_RESOURCES - full_sync.ALL_RESOURCES[plugin_constants.CORE] = l2 - - mock_row = self._mock_row(odl_const.ODL_NETWORK) - self.assertRaises( - nexc.NotFound, recovery._get_latest_resource, mock_row) - - def test_journal_recovery_no_rows(self): - recovery.journal_recovery(self.db_session) - self.assertFalse(self._CLIENT.get_resource.called) - - def _test_recovery(self, operation, odl_resource, expected_state): - db.create_pending_row( - self.db_session, odl_const.ODL_NETWORK, 'id', operation, {}) - created_row = db.get_all_db_rows(self.db_session)[0] - db.update_db_row_state(self.db_session, created_row, odl_const.FAILED) - - self._CLIENT.get_resource.return_value = odl_resource - - recovery.journal_recovery(self.db_session) - - row = db.get_all_db_rows_by_state(self.db_session, expected_state)[0] - self.assertEqual(created_row['seqnum'], row['seqnum']) - return created_row - - def test_journal_recovery_hadles_failure_quietly(self): - self._CLIENT.get_resource.side_effect = Exception('') - self._test_recovery( - odl_const.ODL_DELETE, None, odl_const.FAILED) - - def test_journal_recovery_deleted_row_not_in_odl(self): - self._test_recovery(odl_const.ODL_DELETE, None, odl_const.COMPLETED) - - def test_journal_recovery_created_row_exists_in_odl(self): - self._test_recovery(odl_const.ODL_CREATE, {}, odl_const.COMPLETED) - - def test_journal_recovery_deleted_row_exists_in_odl(self): - self._test_recovery(odl_const.ODL_DELETE, {}, odl_const.PENDING) - - @mock.patch.object(recovery, '_get_latest_resource') - def _test_recovery_creates_operation( - self, operation, resource, odl_resource, expected_operation, - recovery_mock): - if resource is not None: - recovery_mock.return_value = resource - else: - recovery_mock.side_effect = nexc.NotFound - original_row = self._test_recovery( - operation, odl_resource, odl_const.COMPLETED) - - pending_row = db.get_all_db_rows_by_state( - self.db_session, odl_const.PENDING)[0] - self.assertEqual(expected_operation, pending_row['operation']) - self.assertEqual(original_row['object_type'], - pending_row['object_type']) - self.assertEqual(original_row['object_uuid'], - pending_row['object_uuid']) - - def test_recovery_created_row_not_in_odl(self): - self._test_recovery_creates_operation( - odl_const.ODL_CREATE, {}, None, odl_const.ODL_CREATE) - - def test_recovery_updated_row_not_in_odl(self): - self._test_recovery_creates_operation( - odl_const.ODL_UPDATE, {}, None, odl_const.ODL_CREATE) - - def test_recovery_updated_resource_missing_but_exists_in_odl(self): - self._test_recovery_creates_operation( - odl_const.ODL_UPDATE, None, {}, odl_const.ODL_DELETE) - - @mock.patch.object(recovery, '_get_latest_resource') - def test_recovery_created_resource_missing_and_not_in_odl(self, rmock): - rmock.side_effect = nexc.NotFound - self._test_recovery(odl_const.ODL_CREATE, None, odl_const.COMPLETED) - - @mock.patch.object(recovery, '_get_latest_resource') - def test_recovery_updated_resource_missing_and_not_in_odl(self, rmock): - rmock.side_effect = nexc.NotFound - self._test_recovery(odl_const.ODL_UPDATE, None, odl_const.COMPLETED) diff --git a/networking_odl/tests/unit/l2gateway/__init__.py 
b/networking_odl/tests/unit/l2gateway/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/l2gateway/test_driver.py b/networking_odl/tests/unit/l2gateway/test_driver.py deleted file mode 100644 index dd9ceba99..000000000 --- a/networking_odl/tests/unit/l2gateway/test_driver.py +++ /dev/null @@ -1,128 +0,0 @@ -# -# Copyright (C) 2016 Ericsson India Global Services Pvt Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import copy - -import mock - -from networking_odl.l2gateway import driver -from neutron.tests import base - - -class TestOpenDaylightL2gwDriver(base.DietTestCase): - - def setUp(self): - self.mocked_odlclient = mock.patch( - 'networking_odl.common.client' - '.OpenDaylightRestClient.create_client').start().return_value - self.driver = driver.OpenDaylightL2gwDriver(service_plugin=None, - validator=None) - super(TestOpenDaylightL2gwDriver, self).setUp() - - def _get_fake_l2_gateway(self): - fake_l2_gateway_id = "5227c228-6bba-4bbe-bdb8-6942768ff0f1" - fake_l2_gateway = { - "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820", - "id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1", - "name": "test-gateway", - "devices": [ - { - "device_name": "switch1", - "interfaces": [ - { - "name": "port1", - "segmentation_id": [100] - }, - { - "name": "port2", - "segmentation_id": [151, 152] - } - ] - }, - { - "device_name": "switch2", - "interfaces": [ - { - "name": "port5", - "segmentation_id": [200] - }, - { - "name": "port6", - "segmentation_id": [251, 252] - } - ] - } - ] - } - return fake_l2_gateway_id, fake_l2_gateway - - def _get_fake_l2_gateway_connection(self): - fake_l2_gateway_connection_id = "5227c228-6bba-4bbe-bdb8-6942768ff02f" - fake_l2_gateway_connection = { - "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820", - "id": "5227c228-6bba-4bbe-bdb8-6942768ff02f", - "network_id": "be0a7495-05c4-4be0-b796-1412835c6830", - "default_segmentation_id": 77, - "l2_gateway_id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1" - } - return fake_l2_gateway_connection_id, fake_l2_gateway_connection - - def test_create_l2_gateway_postcommit(self): - mocked_sendjson = self.mocked_odlclient.sendjson - fake_l2gateway_id, fake_l2gateway = self._get_fake_l2_gateway() - expected = {"l2_gateway": fake_l2gateway} - self.driver.create_l2_gateway_postcommit(mock.ANY, fake_l2gateway) - mocked_sendjson.assert_called_once_with('post', driver.L2GATEWAYS, - expected) - - def test_delete_l2_gateway_postcommit(self): - mocked_trydelete = self.mocked_odlclient.try_delete - fake_l2gateway_id, fake_l2gateway = self._get_fake_l2_gateway() - self.driver.delete_l2_gateway_postcommit(mock.ANY, fake_l2gateway_id) - url = driver.L2GATEWAYS + '/' + fake_l2gateway_id - mocked_trydelete.assert_called_once_with(url) - - def test_update_l2_gateway_postcommit(self): - mocked_sendjson = self.mocked_odlclient.sendjson - fake_l2gateway_id, fake_l2gateway = self._get_fake_l2_gateway() - expected = {"l2_gateway": fake_l2gateway} - self.driver.update_l2_gateway_postcommit(mock.ANY, 
fake_l2gateway) - url = driver.L2GATEWAYS + '/' + fake_l2gateway_id - mocked_sendjson.assert_called_once_with('put', url, expected) - - def test_create_l2_gateway_connection_postcommit(self): - mocked_sendjson = self.mocked_odlclient.sendjson - (fake_l2gateway_conn_id, - fake_l2gateway_conn) = self._get_fake_l2_gateway_connection() - expected_l2gateway_conn = copy.deepcopy(fake_l2gateway_conn) - expected_l2gateway_conn['gateway_id'] = ( - fake_l2gateway_conn['l2_gateway_id']) - expected_l2gateway_conn.pop('l2_gateway_id') - expected = {"l2gateway_connection": expected_l2gateway_conn} - self.driver.create_l2_gateway_connection_postcommit( - mock.ANY, fake_l2gateway_conn) - mocked_sendjson.assert_called_once_with('post', - driver.L2GATEWAY_CONNECTIONS, - expected) - - def test_delete_l2_gateway_connection_postcommit(self): - mocked_trydelete = self.mocked_odlclient.try_delete - (fake_l2gateway_conn_id, - fake_l2gateway_conn) = self._get_fake_l2_gateway_connection() - url = driver.L2GATEWAY_CONNECTIONS + '/' + fake_l2gateway_conn_id - self.driver.delete_l2_gateway_connection_postcommit( - mock.ANY, fake_l2gateway_conn_id) - mocked_trydelete.assert_called_once_with(url) diff --git a/networking_odl/tests/unit/l2gateway/test_driver_v2.py b/networking_odl/tests/unit/l2gateway/test_driver_v2.py deleted file mode 100644 index 7afdd13d6..000000000 --- a/networking_odl/tests/unit/l2gateway/test_driver_v2.py +++ /dev/null @@ -1,147 +0,0 @@ -# -# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import mock - -from neutron.db import api as neutron_db_api - -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.l2gateway import driver_v2 as driverv2 -from networking_odl.tests.unit import base_v2 - - -class OpenDaylightL2GWDriverTestCase(base_v2.OpenDaylightConfigBase): - - def setUp(self): - super(OpenDaylightL2GWDriverTestCase, self).setUp() - self.db_session = neutron_db_api.get_writer_session() - self.driver = driverv2.OpenDaylightL2gwDriver(service_plugin=None) - self.context = self._get_mock_context() - - def _get_mock_context(self): - context = mock.Mock() - context.session = self.db_session - return context - - def _get_fake_l2_gateway(self): - fake_l2_gateway = { - "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820", - "id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1", - "name": "test-gateway", - "devices": [ - { - "device_name": "switch1", - "interfaces": [ - { - "name": "port1", - "segmentation_id": [100] - }, - { - "name": "port2", - "segmentation_id": [151, 152] - } - ] - }, - { - "device_name": "switch2", - "interfaces": [ - { - "name": "port5", - "segmentation_id": [200] - }, - { - "name": "port6", - "segmentation_id": [251, 252] - } - ] - } - ] - } - return fake_l2_gateway - - def _get_fake_l2_gateway_connection(self): - fake_l2_gateway_connection = { - "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820", - "id": "5227c228-6bba-4bbe-bdb8-6942768ff02f", - "network_id": "be0a7495-05c4-4be0-b796-1412835c6830", - "default_segmentation_id": 77, - "l2_gateway_id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1" - } - return fake_l2_gateway_connection - - def _assert_op(self, operation, object_type, data, precommit=True): - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - if precommit: - self.context.flush() - self.assertEqual(operation, row['operation']) - self.assertEqual(object_type, row['object_type']) - self.assertEqual(data['id'], row['object_uuid']) - else: - self.assertIsNone(row) - - def test_create_l2_gateway(self): - fake_data = self._get_fake_l2_gateway() - self.driver.create_l2_gateway_precommit(self.context, fake_data) - self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_L2GATEWAY, - fake_data) - self.driver.create_l2_gateway_postcommit(self.context, fake_data) - self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_L2GATEWAY, - fake_data, False) - - def test_delete_l2_gateway(self): - fake_data = self._get_fake_l2_gateway() - self.driver.delete_l2_gateway_precommit(self.context, fake_data['id']) - self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_L2GATEWAY, - fake_data) - self.driver.delete_l2_gateway_postcommit(self.context, fake_data['id']) - self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_L2GATEWAY, - fake_data, False) - - def test_update_l2_gateway(self): - fake_data = self._get_fake_l2_gateway() - self.driver.update_l2_gateway_precommit(self.context, fake_data) - self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_L2GATEWAY, - fake_data) - self.driver.update_l2_gateway_postcommit(self.context, fake_data) - self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_L2GATEWAY, - fake_data, False) - - def test_create_l2_gateway_connection(self): - fake_data = self._get_fake_l2_gateway_connection() - self.driver.create_l2_gateway_connection_precommit(self.context, - fake_data) - self._assert_op(odl_const.ODL_CREATE, - odl_const.ODL_L2GATEWAY_CONNECTION, - fake_data) - self.driver.create_l2_gateway_connection_postcommit(self.context, - fake_data) - self._assert_op(odl_const.ODL_CREATE, - 
odl_const.ODL_L2GATEWAY_CONNECTION, - fake_data, False) - - def test_delete_l2_gateway_connection(self): - fake_data = self._get_fake_l2_gateway_connection() - self.driver.delete_l2_gateway_connection_precommit(self.context, - fake_data['id']) - self._assert_op(odl_const.ODL_DELETE, - odl_const.ODL_L2GATEWAY_CONNECTION, - fake_data) - self.driver.delete_l2_gateway_connection_postcommit(self.context, - fake_data['id']) - self._assert_op(odl_const.ODL_DELETE, - odl_const.ODL_L2GATEWAY_CONNECTION, - fake_data, False) diff --git a/networking_odl/tests/unit/l3/__init__.py b/networking_odl/tests/unit/l3/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/l3/test_l3_odl.py b/networking_odl/tests/unit/l3/test_l3_odl.py deleted file mode 100644 index 27972c7e0..000000000 --- a/networking_odl/tests/unit/l3/test_l3_odl.py +++ /dev/null @@ -1,316 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_l3_odl ----------------------------------- - -Tests for the L3 service plugin for networking-odl. -""" -import copy - -import mock - -from neutron.extensions import l3 -from neutron.extensions import l3_ext_gw_mode -from neutron.tests.unit.api.v2 import test_base -from neutron.tests.unit.extensions import base as test_extensions_base -from neutron_lib.plugins import constants -from webob import exc - -from networking_odl.tests import base as odl_base - - -_get_path = test_base._get_path - - -class Testodll3(test_extensions_base.ExtensionTestCase): - - fmt = 'json' - - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - super(Testodll3, self).setUp() - # support ext-gw-mode - for key in l3.RESOURCE_ATTRIBUTE_MAP.keys(): - l3.RESOURCE_ATTRIBUTE_MAP[key].update( - l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {})) - self._setUpExtension( - 'networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin', - constants.L3, l3.RESOURCE_ATTRIBUTE_MAP, - l3.L3, '', allow_pagination=True, allow_sorting=True, - supported_extension_aliases=['router', 'ext-gw-mode'], - use_quota=True) - - @staticmethod - def _get_mock_network_operation_context(): - current = {'status': 'ACTIVE', - 'subnets': [], - 'name': 'net1', - 'provider:physical_network': None, - 'admin_state_up': True, - 'tenant_id': 'test-tenant', - 'provider:network_type': 'local', - 'router:external': False, - 'shared': False, - 'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e', - 'provider:segmentation_id': None} - context = mock.Mock(current=current) - return context - - @staticmethod - def _get_router_test(): - router_id = "234237d4-1e7f-11e5-9bd7-080027328c3a" - router = {'router': {'name': 'router1', 'admin_state_up': True, - 'tenant_id': router_id, - 'project_id': router_id, - 'external_gateway_info': None}} - return router_id, router - - @staticmethod - def _get_floating_ip_test(): - floating_ip_id = "e4997650-6a83-4230-950a-8adab8e524b2" - floating_ip = { - "floatingip": {"fixed_ip_address": None, - "floating_ip_address": None, - "floating_network_id": None, - "id": floating_ip_id, 
- "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72", - "port_id": None, - "status": None, - "tenant_id": "test-tenant" - } - } - return floating_ip_id, floating_ip - - @staticmethod - def _get_port_test(): - port_id = "3a44f4e5-1694-493a-a1fb-393881c673a4" - subnet_id = "a2f1f29d-571b-4533-907f-5803ab96ead1" - port = {'id': port_id, - 'network_id': "84b126bb-f45e-4b2e-8202-7e5ce9e21fe7", - 'fixed_ips': [{'ip_address': '19.4.4.4', - 'prefixlen': 24, - 'subnet_id': subnet_id}], - 'subnets': [{'id': subnet_id, - 'cidr': '19.4.4.0/24', - 'gateway_ip': '19.4.4.1'}]} - return port_id, port - - def test_create_router(self): - router_id, router = self._get_router_test() - - return_value = copy.deepcopy(router['router']) - return_value.update({'status': "ACTIVE", 'id': router_id}) - - instance = self.plugin.return_value - instance.create_router.return_value = return_value - instance.get_routers_count.return_value = 0 - - res = self.api.post(_get_path('routers', fmt=self.fmt), - self.serialize(router), - content_type='application/%s' % self.fmt) - - instance.create_router.assert_called_once_with(mock.ANY, router=router) - self.assertEqual(exc.HTTPCreated.code, res.status_int) - res = self.deserialize(res) - self.assertIn('router', res) - router = res['router'] - self.assertEqual(router_id, router['id']) - self.assertEqual("ACTIVE", router['status']) - self.assertEqual(True, router['admin_state_up']) - - def test_update_router(self): - router_id, router = self._get_router_test() - - router_request_info = {'external_gateway_info': { - "network_id": "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8", - "enable_snat": True} - } - return_value = copy.deepcopy(router['router']) - return_value.update(router_request_info) - return_value.update({'status': "ACTIVE", 'id': router_id}) - - instance = self.plugin.return_value - instance.update_router.return_value = return_value - - router_request = {'router': router_request_info} - res = self.api.put(_get_path('routers', id=router_id, fmt=self.fmt), - self.serialize(router_request)) - instance.update_router.assert_called_once_with(mock.ANY, router_id, - router=router_request) - - self.assertEqual(exc.HTTPOk.code, res.status_int) - res = self.deserialize(res) - self.assertIn('router', res) - router = res['router'] - self.assertEqual(router_id, router['id']) - self.assertEqual("3c5bcddd-6af9-4e6b-9c3e-c153e521cab8", - router["external_gateway_info"]['network_id']) - self.assertEqual(True, router["external_gateway_info"]['enable_snat']) - - def test_delete_router(self): - router_id, router = self._get_router_test() - - instance = self.plugin.return_value - - res = self.api.delete(_get_path('routers', id=router_id, fmt=self.fmt)) - instance.delete_router.assert_called_once_with(mock.ANY, router_id) - - self.assertEqual(exc.HTTPNoContent.code, res.status_int) - - def test_create_floating_ip(self): - floating_ip_id, floating_ip = self._get_floating_ip_test() - port_id, port = self._get_port_test() - - floating_ip_request_info = {"floating_network_id": - "376da547-b977-4cfe-9cba-275c80debf57", - "tenant_id": "test-tenant", - "project_id": "test-tenant", - "fixed_ip_address": "10.0.0.3", - "subnet_id": port['subnets'][0]['id'], - "port_id": port_id, - "floating_ip_address": "172.24.4.228" - } - - return_value = copy.deepcopy(floating_ip['floatingip']) - return_value.update(floating_ip_request_info) - return_value.update({'status': "ACTIVE"}) - - instance = self.plugin.return_value - instance.create_floatingip.return_value = return_value - 
instance.get_floatingips_count.return_value = 0 - instance.get_port = mock.Mock(return_value=port) - - floating_ip_request = {'floatingip': floating_ip_request_info} - - res = self.api.post(_get_path('floatingips', fmt=self.fmt), - self.serialize(floating_ip_request)) - - instance.create_floatingip.\ - assert_called_once_with(mock.ANY, - floatingip=floating_ip_request) - - self.assertEqual(exc.HTTPCreated.code, res.status_int) - res = self.deserialize(res) - self.assertIn('floatingip', res) - floatingip = res['floatingip'] - self.assertEqual(floating_ip_id, floatingip['id']) - self.assertEqual("ACTIVE", floatingip['status']) - - def test_update_floating_ip(self): - floating_ip_id, floating_ip = self._get_floating_ip_test() - - floating_ip_request_info = {"port_id": None} - - return_value = copy.deepcopy(floating_ip['floatingip']) - return_value.update(floating_ip_request_info) - return_value.update({"status": "ACTIVE", - "tenant_id": "test-tenant", - "floating_network_id": - "376da547-b977-4cfe-9cba-275c80debf57", - "fixed_ip_address": None, - "floating_ip_address": "172.24.4.228" - }) - - instance = self.plugin.return_value - instance.get_floatingip = mock.Mock(return_value=floating_ip) - instance.update_floatingip.return_value = return_value - port_id, port = self._get_port_test() - instance.get_port = mock.Mock(return_value=port) - - floating_ip_request = {'floatingip': floating_ip_request_info} - - res = self.api.put(_get_path('floatingips', id=floating_ip_id, - fmt=self.fmt), - self.serialize(floating_ip_request)) - - instance.update_floatingip.\ - assert_called_once_with(mock.ANY, - floating_ip_id, - floatingip=floating_ip_request) - - self.assertEqual(exc.HTTPOk.code, res.status_int) - res = self.deserialize(res) - self.assertIn('floatingip', res) - floatingip = res['floatingip'] - self.assertEqual(floating_ip_id, floatingip['id']) - self.assertIsNone(floatingip['port_id']) - self.assertIsNone(floatingip['fixed_ip_address']) - - def test_delete_floating_ip(self): - floating_ip_id, floating_ip = self._get_floating_ip_test() - - instance = self.plugin.return_value - instance.get_floatingip = mock.Mock(return_value=floating_ip) - res = self.api.delete(_get_path('floatingips', id=floating_ip_id)) - instance.delete_floatingip.assert_called_once_with(mock.ANY, - floating_ip_id) - - self.assertEqual(exc.HTTPNoContent.code, res.status_int) - - def test_add_router_interface(self): - router_id, router = self._get_router_test() - interface_info = {"subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1"} - return_value = {"tenant_id": "6ba032e4730d42e2ad928f430f5da33e", - "port_id": "3a44f4e5-1694-493a-a1fb-393881c673a4", - "id": router_id - } - return_value.update(interface_info) - - instance = self.plugin.return_value - instance.add_router_interface.return_value = return_value - - res = self.api.put(_get_path('routers', id=router_id, - action="add_router_interface", - fmt=self.fmt), - self.serialize(interface_info) - ) - - instance.add_router_interface.assert_called_once_with(mock.ANY, - router_id, - interface_info) - - self.assertEqual(exc.HTTPOk.code, res.status_int) - res = self.deserialize(res) - self.assertEqual(router_id, res['id']) - self.assertEqual("a2f1f29d-571b-4533-907f-5803ab96ead1", - res['subnet_id']) - - def test_remove_router_interface(self): - router_id, router = self._get_router_test() - interface_info = {"subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1", - "port_id": "3a44f4e5-1694-493a-a1fb-393881c673a4" - } - return_value = {"tenant_id": "6ba032e4730d42e2ad928f430f5da33e", 
- "id": router_id - } - return_value.update(interface_info) - - instance = self.plugin.return_value - instance.remove_router_interface.return_value = return_value - res = self.api.put(_get_path('routers', id=router_id, - action="remove_router_interface", - fmt=self.fmt), - self.serialize(interface_info) - ) - - instance.remove_router_interface.\ - assert_called_once_with(mock.ANY, - router_id, - interface_info) - - self.assertEqual(exc.HTTPOk.code, res.status_int) - res = self.deserialize(res) - self.assertEqual(router_id, res['id']) - self.assertEqual("a2f1f29d-571b-4533-907f-5803ab96ead1", - res['subnet_id']) diff --git a/networking_odl/tests/unit/l3/test_l3_odl_v2.py b/networking_odl/tests/unit/l3/test_l3_odl_v2.py deleted file mode 100644 index b4d5b2a78..000000000 --- a/networking_odl/tests/unit/l3/test_l3_odl_v2.py +++ /dev/null @@ -1,419 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import requests - -from neutron.db import api as neutron_db_api -from neutron.extensions import external_net as external_net -from neutron.plugins.ml2 import plugin -from neutron.tests import base -from neutron.tests.unit.db import test_db_base_plugin_v2 -from neutron.tests.unit import testlib_api -from neutron_lib import context -from neutron_lib.plugins import constants -from neutron_lib.plugins import directory -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from networking_odl.common import client -from networking_odl.common import constants as odl_const -from networking_odl.common import filters -from networking_odl.db import db -from networking_odl.journal import journal -from networking_odl.ml2 import mech_driver_v2 -from networking_odl.tests import base as odl_base -from networking_odl.tests.unit import test_base_db - -EMPTY_DEP = {'gw_port_id': None} -FLOATINGIP_ID = uuidutils.generate_uuid() -NETWORK_ID = uuidutils.generate_uuid() -ROUTER_ID = uuidutils.generate_uuid() -SUBNET_ID = uuidutils.generate_uuid() -PORT_ID = uuidutils.generate_uuid() - - -class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase): - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - self.useFixture(odl_base.OpenDaylightFeaturesFixture()) - super(OpenDayLightMechanismConfigTests, self).setUp() - cfg.CONF.set_override('mechanism_drivers', - ['logger', 'opendaylight_v2'], 'ml2') - cfg.CONF.set_override('port_binding_controller', - 'legacy-port-binding', 'ml2_odl') - - def _set_config(self, url='http://127.0.0.1:9999', username='someuser', - password='somepass'): - cfg.CONF.set_override('url', url, 'ml2_odl') - cfg.CONF.set_override('username', username, 'ml2_odl') - cfg.CONF.set_override('password', password, 'ml2_odl') - - def _test_missing_config(self, **kwargs): - self._set_config(**kwargs) - self.assertRaisesRegex(cfg.RequiredOptError, - 'value required for option \w+ in group ' - '\[ml2_odl\]', - plugin.Ml2Plugin) - - def 
test_valid_config(self): - self._set_config() - plugin.Ml2Plugin() - - def test_missing_url_raises_exception(self): - self._test_missing_config(url=None) - - def test_missing_username_raises_exception(self): - self._test_missing_config(username=None) - - def test_missing_password_raises_exception(self): - self._test_missing_config(password=None) - - -class DataMatcher(object): - - def __init__(self, operation, object_type, object_dict): - self._data = object_dict.copy() - self._object_type = object_type - filters.filter_for_odl(object_type, operation, self._data) - - def __eq__(self, s): - data = jsonutils.loads(s) - return self._data == data[self._object_type] - - def __ne__(self, s): - return not self.__eq__(s) - - -class OpenDaylightL3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase, - test_base_db.ODLBaseDbTestCase, - base.BaseTestCase): - def setUp(self): - cfg.CONF.set_override("core_plugin", - 'neutron.plugins.ml2.plugin.Ml2Plugin') - cfg.CONF.set_override('mechanism_drivers', - ['logger', 'opendaylight_v2'], 'ml2') - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - cfg.CONF.set_override("service_plugins", ['odl-router_v2']) - core_plugin = cfg.CONF.core_plugin - service_plugins = {'l3_plugin_name': 'odl-router_v2'} - self.useFixture(odl_base.OpenDaylightJournalThreadFixture()) - mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver, - '_record_in_journal').start() - mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver, - 'sync_from_callback_precommit').start() - mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver, - 'sync_from_callback_postcommit').start() - self.useFixture(odl_base.OpenDaylightPeriodicTaskFixture()) - self.useFixture(odl_base.OpenDaylightFeaturesFixture()) - super(OpenDaylightL3TestCase, self).setUp( - plugin=core_plugin, service_plugins=service_plugins) - self.db_session = neutron_db_api.get_writer_session() - self.plugin = directory.get_plugin() - self.plugin._network_is_external = mock.Mock(return_value=True) - self.driver = directory.get_plugin(constants.L3) - self.thread = journal.OpenDaylightJournalThread() - self.driver.get_floatingip = mock.Mock( - return_value={'router_id': ROUTER_ID, - 'floating_network_id': NETWORK_ID}) - - @staticmethod - def _get_mock_router_operation_info(network, subnet): - router_context = context.get_admin_context() - router = {odl_const.ODL_ROUTER: - {'name': 'router1', - 'admin_state_up': True, - 'tenant_id': network['network']['tenant_id'], - 'external_gateway_info': {'network_id': - network['network']['id']}}} - return router_context, router - - @staticmethod - def _get_mock_floatingip_operation_info(network, subnet): - floatingip_context = context.get_admin_context() - floatingip = {odl_const.ODL_FLOATINGIP: - {'floating_network_id': network['network']['id'], - 'tenant_id': network['network']['tenant_id']}} - return floatingip_context, floatingip - - @staticmethod - def _get_mock_router_interface_operation_info(network, subnet): - router_intf_context = context.get_admin_context() - router_intf_dict = {'subnet_id': subnet['subnet']['id'], - 'id': network['network']['id']} - return router_intf_context, router_intf_dict - - @classmethod - def _get_mock_operation_info(cls, object_type, *args): - getter = getattr(cls, '_get_mock_' + object_type + '_operation_info') - return getter(*args) - - @classmethod - def _get_mock_request_response(cls, status_code): - response = mock.Mock(status_code=status_code) - response.raise_for_status = mock.Mock() if status_code < 400 else ( - 
mock.Mock(side_effect=requests.exceptions.HTTPError( - cls._status_code_msgs[status_code]))) - return response - - def _test_operation(self, status_code, expected_calls, *args, **kwargs): - request_response = self._get_mock_request_response(status_code) - with mock.patch('requests.sessions.Session.request', - return_value=request_response) as mock_method: - self.thread.sync_pending_entries() - - if expected_calls: - mock_method.assert_called_with( - headers={'Content-Type': 'application/json'}, - timeout=cfg.CONF.ml2_odl.timeout, *args, **kwargs) - self.assertEqual(expected_calls, mock_method.call_count) - - def _call_operation_object(self, operation, object_type, object_id, - network, subnet): - object_context, object_dict = self._get_mock_operation_info( - object_type, network, subnet) - method = getattr(self.driver, operation + '_' + object_type) - - if operation == odl_const.ODL_CREATE: - new_object_dict = method(object_context, object_dict) - elif operation == odl_const.ODL_UPDATE: - new_object_dict = method(object_context, object_id, object_dict) - else: - new_object_dict = method(object_context, object_id) - - return object_context, new_object_dict - - def _test_operation_thread_processing(self, object_type, operation, - network, subnet, object_id, - expected_calls=1): - http_requests = {odl_const.ODL_CREATE: 'post', - odl_const.ODL_UPDATE: 'put', - odl_const.ODL_DELETE: 'delete'} - status_codes = {odl_const.ODL_CREATE: requests.codes.created, - odl_const.ODL_UPDATE: requests.codes.ok, - odl_const.ODL_DELETE: requests.codes.no_content} - - http_request = http_requests[operation] - status_code = status_codes[operation] - - # Create database entry. - object_context, new_object_dict = self._call_operation_object( - operation, object_type, object_id, network, subnet) - - # Setup expected results. - if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]: - url = (cfg.CONF.ml2_odl.url + '/' + object_type + 's/' + - object_id) - else: - url = cfg.CONF.ml2_odl.url + '/' + object_type + 's' - - if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE]: - kwargs = { - 'url': url, - 'data': DataMatcher(operation, object_type, new_object_dict)} - else: - kwargs = {'url': url, 'data': None} - - # Call threading routine to process database entry. Test results. - self._test_operation(status_code, expected_calls, http_request, - **kwargs) - - return new_object_dict - - def _test_thread_processing(self, object_type): - # Create network and subnet. - kwargs = {'arg_list': (external_net.EXTERNAL,), - external_net.EXTERNAL: True} - with self.network(**kwargs) as network: - with self.subnet(network=network, cidr='10.0.0.0/24'): - # Add and process create request. - new_object_dict = self._test_operation_thread_processing( - object_type, odl_const.ODL_CREATE, network, None, None) - object_id = new_object_dict['id'] - rows = db.get_all_db_rows_by_state(self.db_session, - odl_const.COMPLETED) - self.assertEqual(1, len(rows)) - - # Add and process 'update' request. Adds to database. - self._test_operation_thread_processing( - object_type, odl_const.ODL_UPDATE, network, None, - object_id) - rows = db.get_all_db_rows_by_state(self.db_session, - odl_const.COMPLETED) - self.assertEqual(2, len(rows)) - - # Add and process 'delete' request. Adds to database. 
- self._test_operation_thread_processing( - object_type, odl_const.ODL_DELETE, network, None, - object_id) - rows = db.get_all_db_rows_by_state(self.db_session, - odl_const.COMPLETED) - self.assertEqual(3, len(rows)) - - def _test_db_results(self, object_id, operation, object_type): - rows = db.get_all_db_rows(self.db_session) - - self.assertEqual(1, len(rows)) - self.assertEqual(operation, rows[0]['operation']) - self.assertEqual(object_type, rows[0]['object_type']) - self.assertEqual(object_id, rows[0]['object_uuid']) - - self._db_cleanup() - - def _test_object_db(self, object_type): - # Create network and subnet for testing. - kwargs = {'arg_list': (external_net.EXTERNAL,), - external_net.EXTERNAL: True} - with self.network(**kwargs) as network: - with self.subnet(network=network): - object_context, object_dict = self._get_mock_operation_info( - object_type, network, None) - - # Add and test 'create' database entry. - method = getattr(self.driver, - odl_const.ODL_CREATE + '_' + object_type) - new_object_dict = method(object_context, object_dict) - object_id = new_object_dict['id'] - self._test_db_results(object_id, odl_const.ODL_CREATE, - object_type) - - # Add and test 'update' database entry. - method = getattr(self.driver, - odl_const.ODL_UPDATE + '_' + object_type) - method(object_context, object_id, object_dict) - self._test_db_results(object_id, odl_const.ODL_UPDATE, - object_type) - - # Add and test 'delete' database entry. - method = getattr(self.driver, - odl_const.ODL_DELETE + '_' + object_type) - method(object_context, object_id) - self._test_db_results(object_id, odl_const.ODL_DELETE, - object_type) - - def _test_dependency_processing( - self, test_operation, test_object, test_id, test_data, - dep_operation, dep_object, dep_id, dep_data): - - # Mock sendjson to verify that it never gets called. - mock_sendjson = mock.patch.object(client.OpenDaylightRestClient, - 'sendjson').start() - - # Create dependency db row and mark as 'processing' so it won't - # be processed by the journal thread. - ctxt = mock.MagicMock() - ctxt.session = self.db_session - journal.record(ctxt, dep_object, dep_id, dep_operation, dep_data) - row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING) - db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING) - - # Create test row with dependent ID. - journal.record(ctxt, test_object, test_id, test_operation, test_data) - - # Call journal thread. - self.thread.sync_pending_entries() - - # Verify that dependency row is still set at 'processing'. - rows = db.get_all_db_rows_by_state(self.db_session, - odl_const.PROCESSING) - self.assertEqual(1, len(rows)) - - # Verify that the test row was processed and set back to 'pending' - # to be processed again. - rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING) - self.assertEqual(1, len(rows)) - - # Verify that _json_data was not called. 
- self.assertFalse(mock_sendjson.call_count) - - def test_router_db(self): - self._test_object_db(odl_const.ODL_ROUTER) - - def test_floatingip_db(self): - self._test_object_db(odl_const.ODL_FLOATINGIP) - - def test_router_threading(self): - self._test_thread_processing(odl_const.ODL_ROUTER) - - def test_floatingip_threading(self): - self._test_thread_processing(odl_const.ODL_FLOATINGIP) - - def test_delete_network_validate_ext_delete_router_dep(self): - router_context = [NETWORK_ID] - self._test_dependency_processing( - odl_const.ODL_DELETE, odl_const.ODL_NETWORK, NETWORK_ID, None, - odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, - router_context) - - def test_create_router_validate_ext_create_port_dep(self): - router_context = {'gw_port_id': PORT_ID} - self._test_dependency_processing( - odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, - router_context, - odl_const.ODL_CREATE, odl_const.ODL_PORT, PORT_ID, - {'fixed_ips': [], 'network_id': None, odl_const.ODL_SGS: None, - 'tenant_id': 'tenant'}) - - def test_delete_router_validate_ext_delete_floatingip_dep(self): - floatingip_context = [ROUTER_ID] - self._test_dependency_processing( - odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, None, - odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, - floatingip_context) - - def test_delete_router_validate_self_create_dep(self): - self._test_dependency_processing( - odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP, - odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP) - - def test_delete_router_validate_self_update_dep(self): - self._test_dependency_processing( - odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP, - odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP) - - def test_update_router_validate_self_create_dep(self): - self._test_dependency_processing( - odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP, - odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP) - - def test_create_floatingip_validate_ext_create_network_dep(self): - floatingip_context = {'floating_network_id': NETWORK_ID} - self._test_dependency_processing( - odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, - floatingip_context, - odl_const.ODL_CREATE, odl_const.ODL_NETWORK, NETWORK_ID, {}) - - def test_update_floatingip_validate_self_create_dep(self): - floatingip_context = {'floating_network_id': NETWORK_ID} - self._test_dependency_processing( - odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, - floatingip_context, - odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, - EMPTY_DEP) - - def test_delete_floatingip_validate_self_create_dep(self): - self._test_dependency_processing( - odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, - EMPTY_DEP, - odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, - {}) - - def test_delete_floatingip_validate_self_update_dep(self): - self._test_dependency_processing( - odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, - EMPTY_DEP, - odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, - {}) diff --git a/networking_odl/tests/unit/lbaas/__init__.py b/networking_odl/tests/unit/lbaas/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/lbaas/test_lbaasv2_odl_v1.py b/networking_odl/tests/unit/lbaas/test_lbaasv2_odl_v1.py deleted file mode 100644 index 480c18650..000000000 --- a/networking_odl/tests/unit/lbaas/test_lbaasv2_odl_v1.py +++ /dev/null @@ 
-1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_lbaas_odl ----------------------------------- - -Tests for the LBaaS plugin for networking-odl. -""" - -import mock - -from networking_odl.lbaas import driver_v2 as lbaas_odl -from networking_odl.tests import base as odl_base - -from neutron.tests import base - - -class TestODL_LBaaS(base.BaseTestCase): - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - super(TestODL_LBaaS, self).setUp() - - def test_init(self): - # just create an instance of OpenDaylightLbaasDriverV2 - self.plugin = mock.Mock() - lbaas_odl.OpenDaylightLbaasDriverV2(self.plugin) diff --git a/networking_odl/tests/unit/lbaas/test_lbaasv2_odl_v2.py b/networking_odl/tests/unit/lbaas/test_lbaasv2_odl_v2.py deleted file mode 100644 index 1d6ba3063..000000000 --- a/networking_odl/tests/unit/lbaas/test_lbaasv2_odl_v2.py +++ /dev/null @@ -1,177 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
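The LBaaS v2 driver tests below each call a driver method and then assert that the oldest pending journal row records the expected operation and object type. A self-contained sketch of that record-then-assert pattern, with a hypothetical in-memory journal standing in for the networking-odl journal table:

    import collections

    # Hypothetical stand-in for the journal table used by the real tests.
    JournalRow = collections.namedtuple(
        'JournalRow', 'operation object_type object_uuid')
    _journal = []


    def record(operation, object_type, object_uuid):
        """Append a pending row, as a driver precommit hook would."""
        _journal.append(JournalRow(operation, object_type, object_uuid))


    def get_oldest_pending_row():
        """Return the first recorded row, or None when the journal is empty."""
        return _journal[0] if _journal else None


    record('create', 'lbaas/loadbalancer', 'test_lb')
    row = get_oldest_pending_row()
    assert row.operation == 'create'
    assert row.object_type == 'lbaas/loadbalancer'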
- -import mock - -import networking_odl -from neutron.db import api as neutron_db_api -import neutron_lbaas -from neutron_lbaas.services.loadbalancer import data_models - -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.journal import journal -from networking_odl.lbaas import lbaasv2_driver_v2 as lb_driver -from networking_odl.tests.unit import base_v2 - - -class OpenDaylightLBaaSBaseTestCase(base_v2.OpenDaylightConfigBase): - session = None - - @classmethod - def _get_mock_context(cls, session=None): - current = {'tenant_id': 'tenant_id'} - context = mock.Mock(current=current) - if not session: - if not cls.session: - cls.session = neutron_db_api.get_writer_session() - session = cls.session - - context.session = session - return context - - @staticmethod - def _get_faked_model(obj): - lb = data_models.LoadBalancer(id='test_lb') - if obj == 'lbaas/loadbalancer': - return lb - - pool = data_models.Pool(id='test_pool_id', - loadbalancer=lb) - if obj == 'lbaas/pool': - return pool - - listener = data_models.Listener(id='test_listener_id', - loadbalancer=lb) - if obj == 'lbaas/listener': - return listener - - member = data_models.Member(id='test_member_id', - pool=pool) - if obj == 'lbaas/member': - return member - - hm = data_models.HealthMonitor(id='test_health_monitor_id', - pool=pool) - - return hm - - @mock.patch.object( - networking_odl.journal.journal.OpenDaylightJournalThread, - 'set_sync_event') - @mock.patch.object(neutron_lbaas.drivers.driver_mixins.BaseManagerMixin, - 'successful_completion') - def base_test_operation(self, obj_driver, obj_type, operation, op_const, - mock_set_sync_event, mock_successful_completion): - context = self._get_mock_context() - obj = self._get_faked_model(obj_type) - getattr(obj_driver, operation)(context, obj) - row = db.get_oldest_pending_db_row_with_lock(context.session) - self.assertEqual(operation, row['operation']) - if obj_type != odl_const.ODL_MEMBER: - self.assertEqual(("lbaas/%s" % obj_type), row['object_type']) - else: - self.assertEqual(journal.MAKE_URL[obj_type](row), - ("lbaas/pools/%s/member" % obj.pool.id)) - - -class OpenDaylightLBaaSDriverTestCase(OpenDaylightLBaaSBaseTestCase): - def _test_operation(self, obj_type, operation, op_const): - driver = mock.Mock() - obj_driver = lb_driver.OpenDaylightManager(driver, obj_type) - self.base_test_operation(self, obj_driver, obj_type, - operation, op_const) - - -class ODLLoadBalancerManagerTestCase(OpenDaylightLBaaSBaseTestCase): - def _test_operation(self, operation, op_const): - driver = mock.Mock() - obj_type = odl_const.ODL_LOADBALANCER - obj_driver = lb_driver.ODLLoadBalancerManager(driver) - self.base_test_operation(obj_driver, obj_type, operation, op_const) - - def test_create_load_balancer(self): - self._test_operation('create', odl_const.ODL_CREATE) - - def test_update_load_balancer(self): - self._test_operation('update', odl_const.ODL_UPDATE) - - def test_delete_load_balancer(self): - self._test_operation('delete', odl_const.ODL_DELETE) - - -class ODLListenerManagerTestCase(OpenDaylightLBaaSBaseTestCase): - def _test_operation(self, operation, op_const): - driver = mock.Mock() - obj_type = odl_const.ODL_LISTENER - obj_driver = lb_driver.ODLListenerManager(driver) - self.base_test_operation(obj_driver, obj_type, operation, op_const) - - def test_create_listener(self): - self._test_operation('create', odl_const.ODL_CREATE) - - def test_update_listener(self): - self._test_operation('update', odl_const.ODL_UPDATE) - - def 
test_delete_listener(self): - self._test_operation('delete', odl_const.ODL_DELETE) - - -class ODLPoolManagerTestCase(OpenDaylightLBaaSBaseTestCase): - def _test_operation(self, operation, op_const): - obj_type = odl_const.ODL_POOL - obj = mock.MagicMock() - obj_driver = lb_driver.ODLPoolManager(obj) - self.base_test_operation(obj_driver, obj_type, operation, op_const) - - def test_create_pool(self): - self._test_operation('create', odl_const.ODL_CREATE) - - def test_update_pool(self): - self._test_operation('update', odl_const.ODL_UPDATE) - - def test_delete_pool(self): - self._test_operation('delete', odl_const.ODL_DELETE) - - -class ODLMemberManagerTestCase(OpenDaylightLBaaSBaseTestCase): - def _test_operation(self, operation, op_const): - driver = mock.Mock() - obj_type = odl_const.ODL_MEMBER - obj_driver = lb_driver.ODLMemberManager(driver) - self.base_test_operation(obj_driver, obj_type, operation, op_const) - - def test_create_member(self): - self._test_operation('create', odl_const.ODL_CREATE) - - def test_update_member(self): - self._test_operation('update', odl_const.ODL_UPDATE) - - def test_delete_member(self): - self._test_operation('delete', odl_const.ODL_DELETE) - - -class ODLHealthMonitorManagerTestCase(OpenDaylightLBaaSBaseTestCase): - def _test_operation(self, operation, op_const): - driver = mock.Mock() - obj_type = odl_const.ODL_HEALTHMONITOR - obj_driver = lb_driver.ODLHealthMonitorManager(driver) - self.base_test_operation(obj_driver, obj_type, operation, op_const) - - def test_create_health_monitor(self): - self._test_operation('create', odl_const.ODL_CREATE) - - def test_update_health_monitor(self): - self._test_operation('update', odl_const.ODL_UPDATE) - - def test_delete_health_monitor(self): - self._test_operation('delete', odl_const.ODL_DELETE) diff --git a/networking_odl/tests/unit/ml2/__init__.py b/networking_odl/tests/unit/ml2/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh b/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh deleted file mode 100755 index 15f9b9334..000000000 --- a/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -uuid=$(sudo ovs-vsctl get Open_vSwitch . 
_uuid) - -# Test data -sudo ovs-vsctl set Open_vSwitch $uuid \ - external_ids:odl_os_hostconfig_hostid="devstack" - -# sudo ovs-vsctl set Open_vSwitch $uuid \ -# external_ids:odl_os_hostconfig_hosttype="ODL L2" - -config=$(cat <<____CONFIG -{"supported_vnic_types":[ - {"vnic_type":"normal","vif_type":"ovs","vif_details":{}}], - "allowed_network_types":["local","vlan","vxlan","gre"], - "bridge_mappings":{"physnet1":"br-ex"}} -____CONFIG -) - -echo config: $config - -sudo ovs-vsctl set Open_vSwitch $uuid \ - external_ids:odl_os_hostconfig_config_odl_l2="$config" diff --git a/networking_odl/tests/unit/ml2/odl_teststub.js b/networking_odl/tests/unit/ml2/odl_teststub.js deleted file mode 100644 index 1ee02d522..000000000 --- a/networking_odl/tests/unit/ml2/odl_teststub.js +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2016 OpenStack Foundation - * All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. You may obtain - * a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - * - * $nodejs odl_teststub.js - * - * local.conf or ml2_conf.ini should be set to the following: - * - * [ml2_odl] - * port_binding_controller = pseudo-agentdb-binding - * password = admin - * username = admin - * url = http://localhost:8080/controller/nb/v2/neutron - * restconf_uri = http://localhost:8125/ # for this stub - * - * To test with ODL *end to end* use below URL for restconf_uri and configure - * ovsdb external_ids using the test script: config-ovs-external_ids.sh - * - * http://localhost:8181/restconf/operational/neutron:neutron/hostconfigs - */ - -var http = require('http'); - -const PORT=8125; - -__test_odl_hconfig = {"hostconfigs": {"hostconfig": [ - {"host-id": "devstack", - "host-type": "ODL L2", - "config": { - "supported_vnic_types": [ - {"vnic_type": "normal", - "vif_type": "ovs", - "vif_details": {}}], - "allowed_network_types": ["local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1":"br-ex"} - } - }] - }} - - -function handleRequest(req, res){ - res.setHeader('Content-Type', 'application/json'); - res.end(JSON.stringify(__test_odl_hconfig)); -} - -var server = http.createServer(handleRequest); - -server.listen(PORT, function(){ - console.log("Server listening on: http://localhost:%s", PORT); - }); diff --git a/networking_odl/tests/unit/ml2/test_driver.py b/networking_odl/tests/unit/ml2/test_driver.py deleted file mode 100644 index 75059ae10..000000000 --- a/networking_odl/tests/unit/ml2/test_driver.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) 2013-2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from neutron.tests.unit.plugins.ml2 import test_plugin -from neutron_lib import context - -from networking_odl.common import constants as const -from networking_odl.ml2 import mech_driver as driver - - -class TestODLShim(test_plugin.Ml2PluginV2TestCase): - - def setUp(self): - super(TestODLShim, self).setUp() - self.context = context.get_admin_context() - self.plugin = mock.Mock() - self.driver = driver.OpenDaylightMechanismDriver() - self.driver.odl_drv = mock.Mock() - - def test_create_network_postcommit(self): - self.driver.create_network_postcommit(self.context) - self.driver.odl_drv.synchronize.assert_called_with(const.ODL_CREATE, - const.ODL_NETWORKS, - self.context) - - def test_update_network_postcommit(self): - self.driver.update_network_postcommit(self.context) - self.driver.odl_drv.synchronize.assert_called_with(const.ODL_UPDATE, - const.ODL_NETWORKS, - self.context) - - def test_delete_network_postcommit(self): - self.driver.delete_network_postcommit(self.context) - self.driver.odl_drv.synchronize.assert_called_with(const.ODL_DELETE, - const.ODL_NETWORKS, - self.context) - - def test_create_subnet_postcommit(self): - self.driver.create_subnet_postcommit(self.context) - self.driver.odl_drv.synchronize.assert_called_with(const.ODL_CREATE, - const.ODL_SUBNETS, - self.context) - - def test_update_subnet_postcommit(self): - self.driver.update_subnet_postcommit(self.context) - self.driver.odl_drv.synchronize.assert_called_with(const.ODL_UPDATE, - const.ODL_SUBNETS, - self.context) - - def test_delete_subnet_postcommit(self): - self.driver.delete_subnet_postcommit(self.context) - self.driver.odl_drv.synchronize.assert_called_with(const.ODL_DELETE, - const.ODL_SUBNETS, - self.context) - - def test_create_port_postcommit(self): - self.driver.create_port_postcommit(self.context) - self.driver.odl_drv.synchronize.assert_called_with(const.ODL_CREATE, - const.ODL_PORTS, - self.context) - - def test_update_port_postcommit(self): - self.driver.update_port_postcommit(self.context) - self.driver.odl_drv.synchronize.assert_called_with(const.ODL_UPDATE, - const.ODL_PORTS, - self.context) - - def test_delete_port_postcommit(self): - self.driver.delete_port_postcommit(self.context) - self.driver.odl_drv.synchronize.assert_called_with(const.ODL_DELETE, - const.ODL_PORTS, - self.context) - - def test_bind_port_delegation(self): - # given front-end with attached back-end - front_end = self.driver - front_end.odl_drv = back_end = mock.MagicMock( - spec=driver.OpenDaylightDriver) - # given PortContext to be forwarded to back-end without using - context = object() - - # when binding port - front_end.bind_port(context) - - # then port is bound by back-end - back_end.bind_port.assert_called_once_with(context) diff --git a/networking_odl/tests/unit/ml2/test_legacy_port_binding.py b/networking_odl/tests/unit/ml2/test_legacy_port_binding.py deleted file mode 100644 index d31e23e7a..000000000 --- a/networking_odl/tests/unit/ml2/test_legacy_port_binding.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from neutron.plugins.ml2 import driver_api -from neutron.plugins.ml2 import driver_context as ctx -from neutron_lib.api.definitions import portbindings -from neutron_lib import constants as n_constants -from neutron_lib.plugins.ml2 import api - -from networking_odl.ml2 import legacy_port_binding -from networking_odl.tests import base - - -class TestLegacyPortBindingManager(base.DietTestCase): - # valid and invalid segments - valid_segment = { - api.ID: 'API_ID', - api.NETWORK_TYPE: n_constants.TYPE_LOCAL, - api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', - api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} - - invalid_segment = { - api.ID: 'API_ID', - api.NETWORK_TYPE: n_constants.TYPE_NONE, - api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', - api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} - - def test_check_segment(self): - """Validate the _check_segment method.""" - - all_network_types = [n_constants.TYPE_FLAT, n_constants.TYPE_GRE, - n_constants.TYPE_LOCAL, n_constants.TYPE_VXLAN, - n_constants.TYPE_VLAN, n_constants.TYPE_NONE] - - mgr = legacy_port_binding.LegacyPortBindingManager() - - valid_types = { - network_type - for network_type in all_network_types - if mgr._check_segment({api.NETWORK_TYPE: network_type})} - - self.assertEqual({ - n_constants.TYPE_FLAT, n_constants.TYPE_LOCAL, - n_constants.TYPE_GRE, n_constants.TYPE_VXLAN, - n_constants.TYPE_VLAN}, valid_types) - - def test_bind_port(self): - - network = mock.MagicMock(spec=driver_api.NetworkContext) - - port_context = mock.MagicMock( - spec=ctx.PortContext, current={'id': 'CURRENT_CONTEXT_ID'}, - segments_to_bind=[self.valid_segment, self.invalid_segment], - network=network) - - mgr = legacy_port_binding.LegacyPortBindingManager() - vif_type = mgr._get_vif_type(port_context) - - mgr.bind_port(port_context) - - port_context.set_binding.assert_called_once_with( - self.valid_segment[api.ID], vif_type, - mgr.vif_details, status=n_constants.PORT_STATUS_ACTIVE) - - def test_bind_port_unsupported_vnic_type(self): - network = mock.MagicMock(spec=driver_api.NetworkContext) - port_context = mock.MagicMock( - spec=ctx.PortContext, - current={'id': 'CURRENT_CONTEXT_ID', - portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}, - segments_to_bind=[self.valid_segment, self.invalid_segment], - network=network) - - mgr = legacy_port_binding.LegacyPortBindingManager() - mgr.bind_port(port_context) - port_context.set_binding.assert_not_called() diff --git a/networking_odl/tests/unit/ml2/test_mechanism_odl.py b/networking_odl/tests/unit/ml2/test_mechanism_odl.py deleted file mode 100644 index 28aad27a5..000000000 --- a/networking_odl/tests/unit/ml2/test_mechanism_odl.py +++ /dev/null @@ -1,659 +0,0 @@ -# Copyright (c) 2013-2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import copy - -import mock -import testscenarios - -from oslo_config import cfg -from oslo_serialization import jsonutils -import requests -import webob.exc - -from neutron.db import segments_db -from neutron.extensions import multiprovidernet as mpnet -from neutron.plugins.ml2 import config -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2 import driver_context as driver_context -from neutron.plugins.ml2 import models -from neutron.plugins.ml2 import plugin -from neutron.tests import base -from neutron.tests.unit.plugins.ml2 import test_plugin -from neutron.tests.unit import testlib_api -from neutron_lib.api.definitions import portbindings -from neutron_lib.api.definitions import provider_net as providernet -from neutron_lib import constants as n_constants -from neutron_lib.plugins.ml2 import api as ml2_api - -from networking_odl.common import client -from networking_odl.common import constants as odl_const -from networking_odl.ml2 import legacy_port_binding -from networking_odl.ml2 import mech_driver -from networking_odl.tests import base as odl_base - - -# Required to generate tests from scenarios. Not compatible with nose. -load_tests = testscenarios.load_tests_apply_scenarios - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') - - -HOST = 'fake-host' -PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' -FAKE_NETWORK = {'status': 'ACTIVE', - 'subnets': [], - 'name': 'net1', - 'provider:physical_network': None, - 'admin_state_up': True, - 'tenant_id': 'test-tenant', - 'provider:network_type': 'local', - 'router:external': False, - 'shared': False, - 'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e', - 'provider:segmentation_id': None} - -FAKE_SUBNET = {'ipv6_ra_mode': None, - 'allocation_pools': [{'start': '10.0.0.2', - 'end': '10.0.1.254'}], - 'host_routes': [], - 'ipv6_address_mode': None, - 'cidr': '10.0.0.0/23', - 'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839', - 'name': '', - 'enable_dhcp': True, - 'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e', - 'tenant_id': 'test-tenant', - 'dns_nameservers': [], - 'gateway_ip': '10.0.0.1', - 'ip_version': 4, - 'shared': False} - -FAKE_PORT = {'status': 'DOWN', - 'binding:host_id': '', - 'allowed_address_pairs': [], - 'device_owner': 'fake_owner', - 'binding:profile': {}, - 'fixed_ips': [], - 'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839', - 'security_groups': [], - 'device_id': 'fake_device', - 'name': '', - 'admin_state_up': True, - 'network_id': 'c13bba05-eb07-45ba-ace2-765706b2d701', - 'tenant_id': 'bad_tenant_id', - 'binding:vif_details': {}, - 'binding:vnic_type': 'normal', - 'binding:vif_type': 'unbound', - 'mac_address': '12:34:56:78:21:b6'} - -FAKE_SECURITY_GROUP = {'description': 'Default security group', - 'id': '6875fc07-853f-4230-9ab9-23d1af894240', - 'name': 'default', - 'security_group_rules': [], - 'tenant_id': '04bb5f9a0fa14ad18203035c791ffae2'} - -FAKE_SECURITY_GROUP_RULE = {'direction': 'ingress', - 'ethertype': 'IPv4', - 'id': '399029df-cefe-4a7a-b6d6-223558627d23', - 'port_range_max': 0, - 'port_range_min': 0, - 'protocol': 0, - 'remote_group_id': 
'6875fc07-853f-4230-9ab9', - 'remote_ip_prefix': 0, - 'security_group_id': '6875fc07-853f-4230-9ab9', - 'tenant_id': '04bb5f9a0fa14ad18203035c791ffae2'} - - -class OpenDaylightTestCase(test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['opendaylight'] - - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - self.useFixture(odl_base.OpenDaylightFeaturesFixture()) - mock.patch.object( - client.OpenDaylightRestClient, - 'sendjson', - new=self.check_sendjson).start() - super(OpenDaylightTestCase, self).setUp() - self.port_create_status = 'DOWN' - self.mech = mech_driver.OpenDaylightMechanismDriver() - - def check_sendjson(self, method, urlpath, obj): - self.assertFalse(urlpath.startswith("http://")) - - -class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase): - def setUp(self): - self.useFixture(odl_base.OpenDaylightFeaturesFixture()) - super(OpenDayLightMechanismConfigTests, self).setUp() - config.cfg.CONF.set_override('mechanism_drivers', - ['logger', 'opendaylight'], - 'ml2') - config.cfg.CONF.set_override('port_binding_controller', - 'legacy-port-binding', 'ml2_odl') - - def _set_config(self, url='http://127.0.0.1:9999', username='someuser', - password='somepass'): - config.cfg.CONF.set_override('url', url, 'ml2_odl') - config.cfg.CONF.set_override('username', username, 'ml2_odl') - config.cfg.CONF.set_override('password', password, 'ml2_odl') - - def _test_missing_config(self, **kwargs): - self._set_config(**kwargs) - self.assertRaisesRegex(config.cfg.RequiredOptError, - 'value required for option \w+ in group ' - '\[ml2_odl\]', - plugin.Ml2Plugin) - - def test_valid_config(self): - self._set_config() - plugin.Ml2Plugin() - - def test_missing_url_raises_exception(self): - self._test_missing_config(url=None) - - def test_missing_username_raises_exception(self): - self._test_missing_config(username=None) - - def test_missing_password_raises_exception(self): - self._test_missing_config(password=None) - - -class OpenDaylightMechanismTestBasicGet(test_plugin.TestMl2BasicGet, - OpenDaylightTestCase): - pass - - -class OpenDaylightMechanismTestNetworksV2(test_plugin.TestMl2NetworksV2, - OpenDaylightTestCase): - pass - - -class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestMl2SubnetsV2, - OpenDaylightTestCase): - pass - - -class OpenDaylightMechanismTestPortsV2(test_plugin.TestMl2PortsV2, - OpenDaylightTestCase): - - def setUp(self): - mock.patch.object( - mech_driver.OpenDaylightDriver, - 'out_of_sync', - new_callable=mock.PropertyMock(return_value=False)).start() - super(OpenDaylightMechanismTestPortsV2, self).setUp() - - def test_update_port_mac(self): - self.check_update_port_mac( - host_arg={portbindings.HOST_ID: HOST}, - arg_list=(portbindings.HOST_ID,), - expected_status=webob.exc.HTTPConflict.code, - expected_error='PortBound') - - -class DataMatcher(object): - - def __init__(self, operation, object_type, context): - self._data = context.current.copy() - self._object_type = object_type - filter_cls = mech_driver.OpenDaylightDriver.FILTER_MAP[ - '%ss' % object_type] - attr_filter = getattr(filter_cls, 'filter_%s_attributes' % operation) - attr_filter(self._data, context) - - def __eq__(self, s): - data = jsonutils.loads(s) - return self._data == data[self._object_type] - - def __ne__(self, s): - return not self.__eq__(s) - - -class OpenDaylightSyncTestCase(OpenDaylightTestCase): - - def setUp(self): - super(OpenDaylightSyncTestCase, self).setUp() - self.given_back_end = mech_driver.OpenDaylightDriver() - - def 
test_simple_sync_all_with_HTTPError_not_found(self): - self.given_back_end.out_of_sync = True - ml2_plugin = plugin.Ml2Plugin() - port_mock = mock.MagicMock(port_binding=models.PortBinding()) - - response = mock.Mock(status_code=requests.codes.not_found) - fake_exception = requests.exceptions.HTTPError('Test', - response=response) - - def side_eff(*args, **kwargs): - # HTTP ERROR exception with 404 status code will be raised when use - # sendjson to get the object in ODL DB - if args[0] == 'get': - raise fake_exception - - with mock.patch.object(client.OpenDaylightRestClient, 'sendjson', - side_effect=side_eff), \ - mock.patch.object(plugin.Ml2Plugin, 'get_networks', - return_value=[FAKE_NETWORK.copy()]), \ - mock.patch.object(plugin.Ml2Plugin, 'get_network', - return_value=FAKE_NETWORK.copy()), \ - mock.patch.object(plugin.Ml2Plugin, 'get_subnets', - return_value=[FAKE_SUBNET.copy()]), \ - mock.patch.object(plugin.Ml2Plugin, '_get_port', - return_value=port_mock), \ - mock.patch.object(plugin.Ml2Plugin, 'get_ports', - return_value=[FAKE_PORT.copy()]), \ - mock.patch.object(plugin.Ml2Plugin, 'get_security_groups', - return_value=[FAKE_SECURITY_GROUP.copy()]), \ - mock.patch.object(plugin.Ml2Plugin, 'get_security_group_rules', - return_value=[FAKE_SECURITY_GROUP_RULE.copy()]): - self.given_back_end.sync_full(ml2_plugin) - - sync_id_list = [FAKE_SECURITY_GROUP['id'], - FAKE_SECURITY_GROUP_RULE['id'], - FAKE_NETWORK['id'], FAKE_SUBNET['id'], - FAKE_PORT['id']] - - act = [] - for args, kwargs in \ - client.OpenDaylightRestClient.sendjson.call_args_list: - if args[0] == 'post': - for key in args[2]: - act.append(args[2][key][0]['id']) - self.assertEqual(act, sync_id_list) - - def test_simple_sync_all_with_all_synced(self): - self.given_back_end.out_of_sync = True - ml2_plugin = plugin.Ml2Plugin() - - with mock.patch.object(client.OpenDaylightRestClient, 'sendjson', - return_value=None), \ - mock.patch.object(plugin.Ml2Plugin, 'get_networks', - return_value=[FAKE_NETWORK.copy()]), \ - mock.patch.object(plugin.Ml2Plugin, 'get_subnets', - return_value=[FAKE_SUBNET.copy()]), \ - mock.patch.object(plugin.Ml2Plugin, 'get_ports', - return_value=[FAKE_PORT.copy()]), \ - mock.patch.object(plugin.Ml2Plugin, 'get_security_groups', - return_value=[FAKE_SECURITY_GROUP.copy()]), \ - mock.patch.object(plugin.Ml2Plugin, 'get_security_group_rules', - return_value=[FAKE_SECURITY_GROUP_RULE.copy()]): - self.given_back_end.sync_full(ml2_plugin) - - # it's only called for GET, there is no call for PUT - # 5 = network, subnet, port, security_group, security_group_rule - self.assertEqual(5, - client.OpenDaylightRestClient.sendjson.call_count) - - -class OpenDaylightMechanismDriverTestCase(base.BaseTestCase): - - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - self.useFixture(odl_base.OpenDaylightFeaturesFixture()) - super(OpenDaylightMechanismDriverTestCase, self).setUp() - config.cfg.CONF.set_override('mechanism_drivers', - ['logger', 'opendaylight'], 'ml2') - self.mech = mech_driver.OpenDaylightMechanismDriver() - self.mech.initialize() - - @staticmethod - def _get_mock_network_operation_context(): - context = mock.Mock(current=FAKE_NETWORK.copy()) - return context - - @staticmethod - def _get_mock_subnet_operation_context(): - context = mock.Mock(current=FAKE_SUBNET.copy()) - return context - - @staticmethod - def _get_mock_port_operation_context(): - context = mock.Mock(current=FAKE_PORT.copy()) - context._plugin.get_security_group = mock.Mock(return_value={}) - return context - - 
@classmethod - def _get_mock_operation_context(cls, object_type): - getter = getattr(cls, '_get_mock_%s_operation_context' % object_type) - return getter() - - _status_code_msgs = { - 200: '', - 201: '', - 204: '', - 400: '400 Client Error: Bad Request', - 401: '401 Client Error: Unauthorized', - 403: '403 Client Error: Forbidden', - 404: '404 Client Error: Not Found', - 409: '409 Client Error: Conflict', - 501: '501 Server Error: Not Implemented', - 503: '503 Server Error: Service Unavailable', - } - - @classmethod - def _get_mock_request_response(cls, status_code): - response = mock.Mock(status_code=status_code) - response.raise_for_status = mock.Mock() if status_code < 400 else ( - mock.Mock(side_effect=requests.exceptions.HTTPError( - cls._status_code_msgs[status_code], response=response))) - return response - - def _test_single_operation(self, method, context, status_code, - exc_class=None, *args, **kwargs): - self.mech.odl_drv.out_of_sync = False - request_response = self._get_mock_request_response(status_code) - with mock.patch('requests.sessions.Session.request', - return_value=request_response) as mock_method: - if exc_class is not None: - self.assertRaises(exc_class, method, context) - else: - method(context) - mock_method.assert_called_once_with( - headers={'Content-Type': 'application/json'}, - timeout=config.cfg.CONF.ml2_odl.timeout, *args, **kwargs) - - def _test_create_resource_postcommit(self, object_type, status_code, - exc_class=None): - method = getattr(self.mech, 'create_%s_postcommit' % object_type) - context = self._get_mock_operation_context(object_type) - url = '%s/%ss' % (config.cfg.CONF.ml2_odl.url, object_type) - kwargs = {'url': url, - 'data': DataMatcher(odl_const.ODL_CREATE, object_type, - context)} - self._test_single_operation(method, context, status_code, exc_class, - 'post', **kwargs) - - def _test_update_resource_postcommit(self, object_type, status_code, - exc_class=None): - method = getattr(self.mech, 'update_%s_postcommit' % object_type) - context = self._get_mock_operation_context(object_type) - url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, object_type, - context.current['id']) - kwargs = {'url': url, - 'data': DataMatcher(odl_const.ODL_UPDATE, object_type, - context)} - self._test_single_operation(method, context, status_code, exc_class, - 'put', **kwargs) - - def _test_delete_resource_postcommit(self, object_type, status_code, - exc_class=None): - method = getattr(self.mech, 'delete_%s_postcommit' % object_type) - context = self._get_mock_operation_context(object_type) - url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, object_type, - context.current['id']) - kwargs = {'url': url, 'data': None} - self._test_single_operation(method, context, status_code, exc_class, - odl_const.ODL_DELETE, **kwargs) - - def test_create_network_postcommit(self): - self._test_create_resource_postcommit(odl_const.ODL_NETWORK, - requests.codes.created) - for status_code in (requests.codes.bad_request, - requests.codes.unauthorized): - self._test_create_resource_postcommit( - odl_const.ODL_NETWORK, status_code, - requests.exceptions.HTTPError) - - def test_create_subnet_postcommit(self): - self._test_create_resource_postcommit(odl_const.ODL_SUBNET, - requests.codes.created) - for status_code in (requests.codes.bad_request, - requests.codes.unauthorized, - requests.codes.forbidden, - requests.codes.not_found, - requests.codes.conflict, - requests.codes.not_implemented): - self._test_create_resource_postcommit( - odl_const.ODL_SUBNET, status_code, - 
requests.exceptions.HTTPError) - - def test_create_port_postcommit(self): - self._test_create_resource_postcommit(odl_const.ODL_PORT, - requests.codes.created) - for status_code in (requests.codes.bad_request, - requests.codes.unauthorized, - requests.codes.forbidden, - requests.codes.not_found, - requests.codes.conflict, - requests.codes.not_implemented, - requests.codes.service_unavailable): - self._test_create_resource_postcommit( - odl_const.ODL_PORT, status_code, - requests.exceptions.HTTPError) - - def test_update_network_postcommit(self): - self._test_update_resource_postcommit(odl_const.ODL_NETWORK, - requests.codes.ok) - for status_code in (requests.codes.bad_request, - requests.codes.forbidden, - requests.codes.not_found): - self._test_update_resource_postcommit( - odl_const.ODL_NETWORK, status_code, - requests.exceptions.HTTPError) - - def test_update_subnet_postcommit(self): - self._test_update_resource_postcommit(odl_const.ODL_SUBNET, - requests.codes.ok) - for status_code in (requests.codes.bad_request, - requests.codes.unauthorized, - requests.codes.forbidden, - requests.codes.not_found, - requests.codes.not_implemented): - self._test_update_resource_postcommit( - odl_const.ODL_SUBNET, status_code, - requests.exceptions.HTTPError) - - def test_update_port_postcommit(self): - self._test_update_resource_postcommit(odl_const.ODL_PORT, - requests.codes.ok) - for status_code in (requests.codes.bad_request, - requests.codes.unauthorized, - requests.codes.forbidden, - requests.codes.not_found, - requests.codes.conflict, - requests.codes.not_implemented): - self._test_update_resource_postcommit( - odl_const.ODL_PORT, status_code, - requests.exceptions.HTTPError) - - def test_delete_network_postcommit(self): - self._test_delete_resource_postcommit(odl_const.ODL_NETWORK, - requests.codes.no_content) - self._test_delete_resource_postcommit(odl_const.ODL_NETWORK, - requests.codes.not_found) - for status_code in (requests.codes.unauthorized, - requests.codes.conflict): - self._test_delete_resource_postcommit( - odl_const.ODL_NETWORK, status_code, - requests.exceptions.HTTPError) - - def test_delete_subnet_postcommit(self): - self._test_delete_resource_postcommit(odl_const.ODL_SUBNET, - requests.codes.no_content) - self._test_delete_resource_postcommit(odl_const.ODL_SUBNET, - requests.codes.not_found) - for status_code in (requests.codes.unauthorized, - requests.codes.conflict, - requests.codes.not_implemented): - self._test_delete_resource_postcommit( - odl_const.ODL_SUBNET, status_code, - requests.exceptions.HTTPError) - - def test_delete_port_postcommit(self): - self._test_delete_resource_postcommit(odl_const.ODL_PORT, - requests.codes.no_content) - self._test_delete_resource_postcommit(odl_const.ODL_PORT, - requests.codes.not_found) - for status_code in (requests.codes.unauthorized, - requests.codes.forbidden, - requests.codes.not_implemented): - self._test_delete_resource_postcommit( - odl_const.ODL_PORT, status_code, - requests.exceptions.HTTPError) - - def test_port_emtpy_tenant_id_work_around(self): - """Validate the work around code of port creation""" - plugin = mock.Mock() - plugin_context = mock.Mock() - network = self._get_mock_operation_context( - odl_const.ODL_NETWORK).current - port = self._get_mock_operation_context(odl_const.ODL_PORT).current - tenant_id = network['tenant_id'] - port['tenant_id'] = '' - binding = models.PortBinding() - - with mock.patch.object(segments_db, 'get_network_segments'): - context = driver_context.PortContext( - plugin, plugin_context, port, 
network, binding, 0, None) - self.mech.odl_drv.FILTER_MAP[ - odl_const.ODL_PORTS].filter_create_attributes(port, context) - self.assertEqual(tenant_id, port['tenant_id']) - - def test_update_port_filter(self): - """Validate the filter code on update port operation""" - items_to_filter = ['network_id', 'id', 'status', 'tenant_id'] - plugin_context = mock.Mock() - network = self._get_mock_operation_context( - odl_const.ODL_NETWORK).current - subnet = self._get_mock_operation_context(odl_const.ODL_SUBNET).current - port = self._get_mock_operation_context(odl_const.ODL_PORT).current - port['fixed_ips'] = [{'subnet_id': subnet['id'], - 'ip_address': '10.0.0.10'}] - port['mac_address'] = port['mac_address'].upper() - orig_port = copy.deepcopy(port) - binding = models.PortBinding() - - with mock.patch.object(segments_db, 'get_network_segments'): - context = driver_context.PortContext( - plugin, plugin_context, port, network, binding, 0, None) - self.mech.odl_drv.FILTER_MAP[ - odl_const.ODL_PORTS].filter_update_attributes(port, context) - for key, value in port.items(): - if key not in items_to_filter: - self.assertEqual(orig_port[key], value) - - -class TestOpenDaylightMechanismDriver(base.DietTestCase): - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - self.useFixture(odl_base.OpenDaylightFeaturesFixture()) - super(TestOpenDaylightMechanismDriver, self).setUp() - config.cfg.CONF.set_override('mechanism_drivers', - ['logger', 'opendaylight'], 'ml2') - - # given valid and invalid segments - valid_segment = { - ml2_api.ID: 'API_ID', - ml2_api.NETWORK_TYPE: n_constants.TYPE_LOCAL, - ml2_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', - ml2_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} - - invalid_segment = { - ml2_api.ID: 'API_ID', - ml2_api.NETWORK_TYPE: n_constants.TYPE_NONE, - ml2_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', - ml2_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} - - def test_bind_port_front_end(self): - given_front_end = mech_driver.OpenDaylightMechanismDriver() - given_port_context = self.given_port_context() - given_back_end = mech_driver.OpenDaylightDriver() - given_front_end.odl_drv = given_back_end - given_back_end.port_binding_controller = \ - legacy_port_binding.LegacyPortBindingManager() - - # when port is bound - given_front_end.bind_port(given_port_context) - - # then context binding is setup with returned vif_type and valid - # segment API ID - given_port_context.set_binding.assert_called_once_with( - self.valid_segment[ml2_api.ID], portbindings.VIF_TYPE_OVS, - given_back_end.port_binding_controller.vif_details, - status=n_constants.PORT_STATUS_ACTIVE) - - def given_port_context(self): - from neutron.plugins.ml2 import driver_context as ctx - - # given NetworkContext - network = mock.MagicMock(spec=api.NetworkContext) - - # given port context - return mock.MagicMock( - spec=ctx.PortContext, current={'id': 'CURRENT_CONTEXT_ID'}, - segments_to_bind=[self.valid_segment, self.invalid_segment], - network=network, - _new_bound_segment=self.valid_segment) - - -class _OpenDaylightDriverVlanTransparencyBase(OpenDaylightTestCase): - def setUp(self): - super(_OpenDaylightDriverVlanTransparencyBase, self).setUp() - self.mech.initialize() - - def _driver_context(self, network): - return mock.MagicMock(current=network) - - -class TestOpenDaylightDriverVlanTransparencyNetwork( - _OpenDaylightDriverVlanTransparencyBase): - def _test_network_type(self, expected, network_type): - context = self._driver_context({providernet.NETWORK_TYPE: - network_type}) - 
self.assertEqual(expected, - self.mech.check_vlan_transparency(context)) - - def test_vlan_transparency(self): - context = self._driver_context({}) - self.assertEqual(True, - self.mech.check_vlan_transparency(context)) - - for network_type in [n_constants.TYPE_VXLAN]: - self._test_network_type(True, network_type) - for network_type in [n_constants.TYPE_FLAT, n_constants.TYPE_GENEVE, - n_constants.TYPE_GRE, n_constants.TYPE_LOCAL, - n_constants.TYPE_VLAN]: - self._test_network_type(False, network_type) - - -class TestOpenDaylightDriverVlanTransparency( - _OpenDaylightDriverVlanTransparencyBase): - scenarios = [ - ('vxlan_vxlan', - {'expected': True, - 'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_VXLAN]}), - ('gre_vxlan', - {'expected': False, - 'network_types': [n_constants.TYPE_GRE, n_constants.TYPE_VXLAN]}), - ('vxlan_vlan', - {'expected': False, - 'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_VLAN]}), - ('vxlan_flat', - {'expected': False, - 'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_FLAT]}), - ('vlan_vlan', - {'expected': False, - 'network_types': [n_constants.TYPE_VLAN, n_constants.TYPE_VLAN]}), - ] - - def test_network_segments(self): - segments = [{providernet.NETWORK_TYPE: type_} - for type_ in self.network_types] - context = self._driver_context({mpnet.SEGMENTS: segments}) - self.assertEqual(self.expected, - self.mech.check_vlan_transparency(context)) diff --git a/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py b/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py deleted file mode 100644 index 1bb25499c..000000000 --- a/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py +++ /dev/null @@ -1,727 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import copy -import datetime -import operator - -import mock -import requests -import testscenarios - -from neutron.db import api as neutron_db_api -from neutron.db.models import securitygroup -from neutron.db import segments_db -from neutron.extensions import multiprovidernet as mpnet -from neutron.plugins.ml2 import plugin -from neutron.tests.unit.plugins.ml2 import test_plugin -from neutron.tests.unit import testlib_api -from neutron_lib.api.definitions import provider_net as providernet -from neutron_lib import constants as n_constants -from neutron_lib.plugins import directory -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from networking_odl.common import callback -from networking_odl.common import constants as odl_const -from networking_odl.common import filters -from networking_odl.common import utils -from networking_odl.db import db -from networking_odl.journal import cleanup -from networking_odl.journal import journal -from networking_odl.ml2 import mech_driver_v2 -from networking_odl.tests import base -from networking_odl.tests.unit import base_v2 - - -# Required to generate tests from scenarios. Not compatible with nose. 
-load_tests = testscenarios.load_tests_apply_scenarios - -cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') - -SECURITY_GROUP = '2f9244b4-9bee-4e81-bc4a-3f3c2045b3d7' -SG_FAKE_ID = uuidutils.generate_uuid() -SG_RULE_FAKE_ID = uuidutils.generate_uuid() - - -class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase): - def setUp(self): - super(OpenDayLightMechanismConfigTests, self).setUp() - self.useFixture(base.OpenDaylightFeaturesFixture()) - self.useFixture(base.OpenDaylightJournalThreadFixture()) - cfg.CONF.set_override('mechanism_drivers', - ['logger', 'opendaylight_v2'], 'ml2') - cfg.CONF.set_override('port_binding_controller', - 'legacy-port-binding', 'ml2_odl') - - def _set_config(self, url='http://127.0.0.1:9999', username='someuser', - password='somepass'): - cfg.CONF.set_override('url', url, 'ml2_odl') - cfg.CONF.set_override('username', username, 'ml2_odl') - cfg.CONF.set_override('password', password, 'ml2_odl') - - def _test_missing_config(self, **kwargs): - self._set_config(**kwargs) - self.assertRaisesRegex(cfg.RequiredOptError, - 'value required for option \w+ in group ' - '\[ml2_odl\]', - plugin.Ml2Plugin) - - def test_valid_config(self): - self._set_config() - plugin.Ml2Plugin() - - def test_missing_url_raises_exception(self): - self._test_missing_config(url=None) - - def test_missing_username_raises_exception(self): - self._test_missing_config(username=None) - - def test_missing_password_raises_exception(self): - self._test_missing_config(password=None) - - -class OpenDaylightMechanismTestBasicGet(test_plugin.TestMl2BasicGet, - base_v2.OpenDaylightTestCase): - pass - - -class OpenDaylightMechanismTestNetworksV2(test_plugin.TestMl2NetworksV2, - base_v2.OpenDaylightTestCase): - pass - - -class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestMl2SubnetsV2, - base_v2.OpenDaylightTestCase): - pass - - -class OpenDaylightMechanismTestPortsV2(test_plugin.TestMl2PortsV2, - base_v2.OpenDaylightTestCase): - pass - - -class DataMatcher(object): - - def __init__(self, operation, object_type, context): - if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]: - self._data = copy.deepcopy(context[object_type]) - elif object_type == odl_const.ODL_PORT: - # NOTE(yamahata): work around for journal._enrich_port() - self._data = copy.deepcopy(context.current) - if self._data.get(odl_const.ODL_SGS): - self._data[odl_const.ODL_SGS] = [ - {'id': id_} for id_ in self._data[odl_const.ODL_SGS]] - else: - self._data = copy.deepcopy(context.current) - self._object_type = object_type - filters.filter_for_odl(object_type, operation, self._data) - - def __eq__(self, s): - data = jsonutils.loads(s) - return self._data == data[self._object_type] - - def __ne__(self, s): - return not self.__eq__(s) - - def __repr__(self): - # for debugging - return 'DataMatcher(%(object_type)s, %(data)s)' % { - 'object_type': self._object_type, - 'data': self._data} - - -class AttributeDict(dict): - def __init__(self, *args, **kwargs): - super(AttributeDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -class OpenDaylightMechanismDriverTestCase(base_v2.OpenDaylightConfigBase): - def setUp(self): - self.useFixture(base.OpenDaylightFeaturesFixture()) - self.useFixture(base.OpenDaylightJournalThreadFixture()) - super(OpenDaylightMechanismDriverTestCase, self).setUp() - self.db_session = neutron_db_api.get_writer_session() - self.mech = mech_driver_v2.OpenDaylightMechanismDriver() - self.mech.initialize() - - @staticmethod - def _get_mock_network_operation_context(): - current = 
{'status': 'ACTIVE', - 'subnets': [], - 'name': 'net1', - 'provider:physical_network': None, - 'admin_state_up': True, - 'tenant_id': 'test-tenant', - 'provider:network_type': 'local', - 'router:external': False, - 'shared': False, - 'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e', - 'provider:segmentation_id': None} - context = mock.Mock(current=current) - context._plugin_context.session = neutron_db_api.get_writer_session() - return context - - @staticmethod - def _get_mock_subnet_operation_context(): - current = {'ipv6_ra_mode': None, - 'allocation_pools': [{'start': '10.0.0.2', - 'end': '10.0.1.254'}], - 'host_routes': [], - 'ipv6_address_mode': None, - 'cidr': '10.0.0.0/23', - 'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839', - 'name': '', - 'enable_dhcp': True, - 'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e', - 'tenant_id': 'test-tenant', - 'dns_nameservers': [], - 'gateway_ip': '10.0.0.1', - 'ip_version': 4, - 'shared': False} - context = mock.Mock(current=current) - context._plugin_context.session = neutron_db_api.get_writer_session() - return context - - @staticmethod - def _get_mock_port_operation_context(): - current = {'status': 'DOWN', - 'binding:host_id': '', - 'allowed_address_pairs': [], - 'device_owner': 'fake_owner', - 'binding:profile': {}, - 'fixed_ips': [{ - 'subnet_id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839'}], - 'id': '83d56c48-e9b8-4dcf-b3a7-0813bb3bd940', - 'security_groups': [SECURITY_GROUP], - 'device_id': 'fake_device', - 'name': '', - 'admin_state_up': True, - 'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e', - 'tenant_id': 'test-tenant', - 'binding:vif_details': {}, - 'binding:vnic_type': 'normal', - 'binding:vif_type': 'unbound', - 'mac_address': '12:34:56:78:21:b6'} - _network = OpenDaylightMechanismDriverTestCase.\ - _get_mock_network_operation_context().current - _plugin = directory.get_plugin() - _plugin.writer_get_security_group = mock.Mock( - return_value=SECURITY_GROUP) - _plugin.get_port = mock.Mock(return_value=current) - _plugin.get_network = mock.Mock(return_value=_network) - _plugin_context_mock = {'session': neutron_db_api.get_writer_session()} - _network_context_mock = {'_network': _network} - context = {'current': AttributeDict(current), - '_plugin': _plugin, - '_plugin_context': AttributeDict(_plugin_context_mock), - '_network_context': AttributeDict(_network_context_mock)} - return AttributeDict(context) - - @staticmethod - def _get_mock_security_group_operation_context(): - context = {odl_const.ODL_SG: {'name': 'test_sg', - 'project_id': 'test-tenant', - 'tenant_id': 'test-tenant', - 'description': 'test-description', - 'security_group_rules': [], - 'id': SG_FAKE_ID}} - return context - - @staticmethod - def _get_mock_security_group_rule_operation_context(): - context = {odl_const.ODL_SG_RULE: {'security_group_id': SG_FAKE_ID, - 'id': SG_RULE_FAKE_ID}} - _plugin = directory.get_plugin() - _plugin._get_security_group_rule = mock.Mock( - return_value=AttributeDict(context[odl_const.ODL_SG_RULE])) - return context - - @classmethod - def _get_mock_operation_context(cls, object_type): - getter = getattr(cls, '_get_mock_%s_operation_context' % object_type) - return getter() - - _status_code_msgs = { - 200: '', - 201: '', - 204: '', - 400: '400 Client Error: Bad Request', - 401: '401 Client Error: Unauthorized', - 403: '403 Client Error: Forbidden', - 404: '404 Client Error: Not Found', - 409: '409 Client Error: Conflict', - 501: '501 Server Error: Not Implemented', - 503: '503 Server Error: Service Unavailable', - } - - @classmethod - def 
_get_mock_request_response(cls, status_code): - response = mock.Mock(status_code=status_code) - response.raise_for_status = mock.Mock() if status_code < 400 else ( - mock.Mock(side_effect=requests.exceptions.HTTPError( - cls._status_code_msgs[status_code]))) - return response - - def _test_operation(self, status_code, expected_calls, - *args, **kwargs): - request_response = self._get_mock_request_response(status_code) - with mock.patch('requests.sessions.Session.request', - return_value=request_response) as mock_method: - self.run_journal_processing() - - if expected_calls: - mock_method.assert_called_with( - headers={'Content-Type': 'application/json'}, - timeout=cfg.CONF.ml2_odl.timeout, *args, **kwargs) - self.assertEqual(expected_calls, mock_method.call_count) - - def _call_operation_object(self, operation, object_type): - context = self._get_mock_operation_context(object_type) - - if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]: - plugin_context_mock = mock.Mock() - plugin_context_mock.session = neutron_db_api.get_writer_session() - res_type = [rt for rt in callback._RESOURCE_MAPPING.values() - if rt.singular == object_type][0] - res_id = context[object_type]['id'] - context_ = (copy.deepcopy(context) - if operation != odl_const.ODL_DELETE else None) - if (object_type == odl_const.ODL_SG and - operation in [odl_const.ODL_CREATE, odl_const.ODL_DELETE]): - # TODO(yamahata): remove this work around once - # https://review.openstack.org/#/c/281693/ - # is merged. - if operation == odl_const.ODL_CREATE: - sg = securitygroup.SecurityGroup( - id=res_id, name=context_[object_type]['name'], - tenant_id=context_[object_type]['tenant_id'], - description=context_[object_type]['description']) - plugin_context_mock.session.add(sg) - sg_dict = dict(sg) - sg_dict['security_group_rules'] = [] - self.mech.sync_from_callback_precommit( - plugin_context_mock, operation, res_type, res_id, - context_, security_group=sg_dict) - if operation == odl_const.ODL_DELETE: - self.mech.sync_from_callback_precommit( - plugin_context_mock, operation, res_type, res_id, - context_, - security_group={'security_group_rules': - {'id': SG_RULE_FAKE_ID}}, - security_group_rule_ids=[SG_RULE_FAKE_ID]) - else: - self.mech.sync_from_callback_precommit( - plugin_context_mock, operation, res_type, res_id, context_) - else: - method = getattr(self.mech, '%s_%s_precommit' % (operation, - object_type)) - method(context) - self.db_session.flush() - - def _test_operation_object(self, operation, object_type): - self._call_operation_object(operation, object_type) - - context = self._get_mock_operation_context(object_type) - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - self.assertEqual(operation, row['operation']) - self.assertEqual(object_type, row['object_type']) - self.assertEqual(context.current['id'], row['object_uuid']) - self._db_cleanup() - - def _test_thread_processing(self, operation, object_type, - expected_calls=1): - http_requests = {odl_const.ODL_CREATE: 'post', - odl_const.ODL_UPDATE: 'put', - odl_const.ODL_DELETE: 'delete'} - status_codes = {odl_const.ODL_CREATE: requests.codes.created, - odl_const.ODL_UPDATE: requests.codes.ok, - odl_const.ODL_DELETE: requests.codes.no_content} - - http_request = http_requests[operation] - status_code = status_codes[operation] - - self._call_operation_object(operation, object_type) - - context = self._get_mock_operation_context(object_type) - url_object_type = utils.neutronify(object_type) - if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]: - if 
object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]: - uuid = context[object_type]['id'] - else: - uuid = context.current['id'] - url = '%s/%ss/%s' % (cfg.CONF.ml2_odl.url, url_object_type, uuid) - else: - url = '%s/%ss' % (cfg.CONF.ml2_odl.url, url_object_type) - - if (object_type == odl_const.ODL_SG and - operation == odl_const.ODL_CREATE): - context = copy.deepcopy(context) - if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE]: - kwargs = { - 'url': url, - 'data': DataMatcher(operation, object_type, context)} - else: - kwargs = {'url': url, 'data': None} - - self._test_operation(status_code, expected_calls, http_request, - **kwargs) - - def _test_object_type(self, object_type, delete_expected_calls=1): - # Add and process create request. - self._test_thread_processing(odl_const.ODL_CREATE, object_type) - rows = db.get_all_db_rows_by_state(self.db_session, - odl_const.COMPLETED) - self.assertEqual(1, len(rows)) - - # Add and process update request. Adds to database. - self._test_thread_processing(odl_const.ODL_UPDATE, object_type) - rows = db.get_all_db_rows_by_state(self.db_session, - odl_const.COMPLETED) - self.assertEqual(2, len(rows)) - - # Add and process update request. Adds to database. - self._test_thread_processing(odl_const.ODL_DELETE, object_type, - delete_expected_calls) - rows = db.get_all_db_rows_by_state(self.db_session, - odl_const.COMPLETED) - self.assertEqual(2 + delete_expected_calls, len(rows)) - - def _test_object_type_pending_network(self, object_type): - # Create a network (creates db row in pending state). - self._call_operation_object(odl_const.ODL_CREATE, - odl_const.ODL_NETWORK) - - # Create object_type database row and process. This results in both - # the object_type and network rows being processed. - self._test_thread_processing(odl_const.ODL_CREATE, object_type, - expected_calls=2) - - # Verify both rows are now marked as completed. - rows = db.get_all_db_rows_by_state(self.db_session, - odl_const.COMPLETED) - self.assertEqual(2, len(rows)) - - def _test_object_type_processing_network(self, object_type): - self._test_object_operation_pending_another_object_operation( - object_type, odl_const.ODL_CREATE, odl_const.ODL_NETWORK, - odl_const.ODL_CREATE) - - def _test_object_operation_pending_object_operation( - self, object_type, operation, pending_operation): - self._test_object_operation_pending_another_object_operation( - object_type, operation, object_type, pending_operation) - - def _test_object_operation_pending_another_object_operation( - self, object_type, operation, pending_type, pending_operation): - # Create the object_type (creates db row in pending state). - self._call_operation_object(pending_operation, - pending_type) - - # Get pending row and mark as processing so that - # this row will not be processed by journal thread. - row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING) - db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING) - - # Create the object_type database row and process. - # Verify that object request is not processed because the - # dependent object operation has not been marked as 'completed'. - self._test_thread_processing(operation, - object_type, - expected_calls=0) - - # Verify that all rows are still in the database. 
- rows = db.get_all_db_rows_by_state(self.db_session, - odl_const.PROCESSING) - self.assertEqual(1, len(rows)) - rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING) - self.assertEqual(1, len(rows)) - - def _test_parent_delete_pending_child_delete(self, parent, child): - self._test_object_operation_pending_another_object_operation( - parent, odl_const.ODL_DELETE, child, odl_const.ODL_DELETE) - - def _test_cleanup_processing_rows(self, last_retried, expected_state): - # Create a dummy network (creates db row in pending state). - self._call_operation_object(odl_const.ODL_CREATE, - odl_const.ODL_NETWORK) - - # Get pending row and mark as processing and update - # the last_retried time - row = db.get_all_db_rows_by_state(self.db_session, - odl_const.PENDING)[0] - row.last_retried = last_retried - db.update_db_row_state(self.db_session, row, odl_const.PROCESSING) - - # Test if the cleanup marks this in the desired state - # based on the last_retried timestamp - cleanup.JournalCleanup().cleanup_processing_rows(self.db_session) - - # Verify that the Db row is in the desired state - rows = db.get_all_db_rows_by_state(self.db_session, expected_state) - self.assertEqual(1, len(rows)) - - def test_driver(self): - for operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE, - odl_const.ODL_DELETE]: - for object_type in [odl_const.ODL_NETWORK, odl_const.ODL_SUBNET, - odl_const.ODL_PORT]: - self._test_operation_object(operation, object_type) - - def test_port_precommit_no_tenant(self): - context = self._get_mock_operation_context(odl_const.ODL_PORT) - context.current['tenant_id'] = '' - - method = getattr(self.mech, 'create_port_precommit') - method(context) - self.db_session.flush() - - # Verify that the Db row has a tenant - rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING) - self.assertEqual(1, len(rows)) - _network = OpenDaylightMechanismDriverTestCase.\ - _get_mock_network_operation_context().current - self.assertEqual(_network['tenant_id'], rows[0]['data']['tenant_id']) - - def test_network(self): - self._test_object_type(odl_const.ODL_NETWORK) - - def test_network_update_pending_network_create(self): - self._test_object_operation_pending_object_operation( - odl_const.ODL_NETWORK, odl_const.ODL_UPDATE, odl_const.ODL_CREATE) - - def test_network_delete_pending_network_create(self): - self._test_object_operation_pending_object_operation( - odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_CREATE) - - def test_network_delete_pending_network_update(self): - self._test_object_operation_pending_object_operation( - odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_UPDATE) - - def test_network_delete_pending_subnet_delete(self): - self._test_parent_delete_pending_child_delete( - odl_const.ODL_NETWORK, odl_const.ODL_SUBNET) - - def test_network_delete_pending_port_delete(self): - self._test_parent_delete_pending_child_delete( - odl_const.ODL_NETWORK, odl_const.ODL_PORT) - - def test_subnet(self): - self._test_object_type(odl_const.ODL_SUBNET) - - def test_subnet_update_pending_subnet_create(self): - self._test_object_operation_pending_object_operation( - odl_const.ODL_SUBNET, odl_const.ODL_UPDATE, odl_const.ODL_CREATE) - - def test_subnet_delete_pending_subnet_create(self): - self._test_object_operation_pending_object_operation( - odl_const.ODL_SUBNET, odl_const.ODL_DELETE, odl_const.ODL_CREATE) - - def test_subnet_delete_pending_subnet_update(self): - self._test_object_operation_pending_object_operation( - odl_const.ODL_SUBNET, 
odl_const.ODL_DELETE, odl_const.ODL_UPDATE) - - def test_subnet_pending_network(self): - self._test_object_type_pending_network(odl_const.ODL_SUBNET) - - def test_subnet_processing_network(self): - self._test_object_type_processing_network(odl_const.ODL_SUBNET) - - def test_subnet_delete_pending_port_delete(self): - self._test_parent_delete_pending_child_delete( - odl_const.ODL_SUBNET, odl_const.ODL_PORT) - - def test_port(self): - self._test_object_type(odl_const.ODL_PORT) - - def test_port_update_pending_port_create(self): - self._test_object_operation_pending_object_operation( - odl_const.ODL_PORT, odl_const.ODL_UPDATE, odl_const.ODL_CREATE) - - def test_port_delete_pending_port_create(self): - self._test_object_operation_pending_object_operation( - odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_CREATE) - - def test_port_delete_pending_port_update(self): - self._test_object_operation_pending_object_operation( - odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_UPDATE) - - def test_port_pending_network(self): - self._test_object_type_pending_network(odl_const.ODL_PORT) - - def test_port_processing_network(self): - self._test_object_type_processing_network(odl_const.ODL_PORT) - - def test_cleanup_processing_rows_time_not_expired(self): - self._test_cleanup_processing_rows(datetime.datetime.utcnow(), - odl_const.PROCESSING) - - def test_cleanup_processing_rows_time_expired(self): - old_time = datetime.datetime.utcnow() - datetime.timedelta(hours=24) - self._test_cleanup_processing_rows(old_time, odl_const.PENDING) - - def test_thread_call(self): - """Verify that the sync thread method is called.""" - - with mock.patch.object( - journal.OpenDaylightJournalThread, - 'start_odl_sync_thread') as mock_sync_thread: - self.mech = mech_driver_v2.OpenDaylightMechanismDriver() - self.mech.initialize() - - # Create any object that would spin up the sync thread via the - # decorator call_thread_on_end() used by all the event handlers. - self._call_operation_object(odl_const.ODL_CREATE, - odl_const.ODL_NETWORK) - - # Verify that the thread call was made. 
- mock_sync_thread.assert_called() - - def test_sg(self): - self._test_object_type(odl_const.ODL_SG, 2) - - def test_sg_rule(self): - self._test_object_type(odl_const.ODL_SG_RULE) - - def test_sg_delete(self): - with mock.patch.object(journal, 'record') as record: - context = self._get_mock_operation_context(odl_const.ODL_SG) - res_id = context[odl_const.ODL_SG]['id'] - plugin_context_mock = mock.Mock() - plugin_context_mock.session = neutron_db_api.get_writer_session() - rule = mock.Mock() - rule.id = SG_RULE_FAKE_ID - rule.security_group_id = SG_FAKE_ID - sg = mock.Mock() - sg.id = SG_FAKE_ID - sg.security_group_rules = [rule] - kwargs = {'security_group': sg, - 'security_group_rule_ids': [SG_RULE_FAKE_ID]} - self.mech.sync_from_callback_precommit( - plugin_context_mock, odl_const.ODL_DELETE, - callback._RESOURCE_MAPPING[odl_const.ODL_SG], - res_id, context, **kwargs) - record.assert_has_calls( - [mock.call(mock.ANY, 'security_group_rule', - SG_RULE_FAKE_ID, 'delete', [SG_FAKE_ID]), - mock.call(mock.ANY, 'security_group', SG_FAKE_ID, - 'delete', - {'description': 'test-description', - 'project_id': 'test-tenant', - 'security_group_rules': [], - 'tenant_id': 'test-tenant', - 'id': SG_FAKE_ID, 'name': 'test_sg'})]) - - def test_sync_multiple_updates(self): - # add 2 updates - for i in range(2): - self._call_operation_object(odl_const.ODL_UPDATE, - odl_const.ODL_NETWORK) - - # get the last update row - rows = db.get_all_db_rows(self.db_session) - rows.sort(key=operator.attrgetter("seqnum")) - first_row = rows[0] - - # change the state to processing - db.update_db_row_state(self.db_session, first_row, - odl_const.PROCESSING) - - # create 1 more operation to trigger the sync thread - # verify that there are no calls to ODL controller, because the - # first row was processing (exit_after_run = true) - self._test_thread_processing(odl_const.ODL_UPDATE, - odl_const.ODL_NETWORK, expected_calls=0) - - # validate that all the pending rows stays in 'pending' state - # first row should be 'processing' because it was not processed - processing = db.get_all_db_rows_by_state(self.db_session, 'processing') - self.assertEqual(1, len(processing)) - rows = db.get_all_db_rows_by_state(self.db_session, 'pending') - self.assertEqual(2, len(rows)) - - def test_update_port_filter(self): - """Validate the filter code on update port operation""" - expected_items = ['fixed_ips', 'security_groups', 'device_id', - 'security_groups', 'admin_state_up'] - subnet = self._get_mock_operation_context(odl_const.ODL_SUBNET).current - port = self._get_mock_operation_context(odl_const.ODL_PORT).current - port['fixed_ips'] = [{'subnet_id': subnet['id'], - 'ip_address': '10.0.0.10'}] - port['mac_address'] = port['mac_address'].upper() - - orig_port = copy.deepcopy(port) - - with mock.patch.object(segments_db, 'get_network_segments'): - filters.filter_for_odl(odl_const.ODL_PORT, - odl_const.ODL_UPDATE, port) - for key, value in orig_port.items(): - if key in expected_items: - self.assertEqual(port[key], value) - - -class _OpenDaylightDriverVlanTransparencyBase(base_v2.OpenDaylightTestCase): - def _driver_context(self, network): - return mock.MagicMock(current=network) - - -class TestOpenDaylightDriverVlanTransparencyNetwork( - _OpenDaylightDriverVlanTransparencyBase): - def _test_network_type(self, expected, network_type): - context = self._driver_context({providernet.NETWORK_TYPE: - network_type}) - self.assertEqual(expected, - self.mech.check_vlan_transparency(context)) - - def test_none_network_type(self): - context = 
self._driver_context({}) - self.assertTrue(self.mech.check_vlan_transparency(context)) - - def test_vlan_transparency(self): - for network_type in [n_constants.TYPE_VXLAN]: - self._test_network_type(True, network_type) - for network_type in [n_constants.TYPE_FLAT, n_constants.TYPE_GENEVE, - n_constants.TYPE_GRE, n_constants.TYPE_LOCAL, - n_constants.TYPE_VLAN]: - self._test_network_type(False, network_type) - - -class TestOpenDaylightDriverVlanTransparency( - _OpenDaylightDriverVlanTransparencyBase): - scenarios = [ - ("vxlan_vxlan", - {'expected': True, - 'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_VXLAN]}), - ("gre_vxlan", - {'expected': False, - 'network_types': [n_constants.TYPE_GRE, n_constants.TYPE_VXLAN]}), - ("vxlan_vlan", - {'expected': False, - 'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_VLAN]}), - ("vxlan_flat", - {'expected': False, - 'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_FLAT]}), - ("vlan_vlan", - {'expected': False, - 'network_types': [n_constants.TYPE_VLAN, n_constants.TYPE_VLAN]}), - ] - - def test_network_segments(self): - segments = [{providernet.NETWORK_TYPE: type_} - for type_ in self.network_types] - context = self._driver_context({mpnet.SEGMENTS: segments}) - self.assertEqual(self.expected, - self.mech.check_vlan_transparency(context)) diff --git a/networking_odl/tests/unit/ml2/test_port_binding.py b/networking_odl/tests/unit/ml2/test_port_binding.py deleted file mode 100644 index 35ae9ece2..000000000 --- a/networking_odl/tests/unit/ml2/test_port_binding.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from networking_odl.ml2 import legacy_port_binding -from networking_odl.ml2 import port_binding -from networking_odl.tests import base - - -class TestPortBindingManager(base.DietTestCase): - - def test_create(self): - mgr = port_binding.PortBindingManager.create( - name="legacy-port-binding") - self.assertEqual("legacy-port-binding", mgr.name) - self.assertIsInstance(mgr.controller, - legacy_port_binding.LegacyPortBindingManager) - - def test_create_with_nonexist_name(self): - self.assertRaises(AssertionError, - port_binding.PortBindingManager.create, - name="nonexist-port-binding") - - @mock.patch.object(legacy_port_binding.LegacyPortBindingManager, - "bind_port") - def test_bind_port(self, mock_method): - port_context = mock.Mock() - mgr = port_binding.PortBindingManager.create( - name="legacy-port-binding") - mgr.controller.bind_port(port_context) - mock_method.assert_called_once_with(port_context) diff --git a/networking_odl/tests/unit/ml2/test_port_status_update.py b/networking_odl/tests/unit/ml2/test_port_status_update.py deleted file mode 100644 index d430d5ea1..000000000 --- a/networking_odl/tests/unit/ml2/test_port_status_update.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re -import threading - -import mock - -from networking_odl.common.client import OpenDaylightRestClient -from networking_odl.common import websocket_client as odl_ws_client -from networking_odl.common.websocket_client import OpenDaylightWebsocketClient -from networking_odl.ml2.port_status_update import OdlPortStatusUpdate -from networking_odl.tests import base -from neutron.db import provisioning_blocks -import neutron_lib.context -import neutron_lib.plugins.directory - - -class TestOdlPortStatusUpdate(base.DietTestCase): - - WEBSOCK_NOTIFICATION = re.sub('\s*', '', """ - { - "notification": { - "data-changed-notification": { - "data-change-event": { - "data": { - "status": { - "content": "ACTIVE", - "xmlns": "urn:opendaylight:neutron" - } - }, - "operation": "updated", - "path": - "/neutron:neutron/neutron:ports/neutron:port[ - neutron:uuid='d6e6335d-9568-4949-aef1-4107e34c5f28'] - /neutron:status" - }, - "xmlns": - "urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote" - }, - "eventTime": "2017-02-22T02:27:32+02:00", - "xmlns": "urn:ietf:params:xml:ns:netconf:notification:1.0" - } - }""") - - def setUp(self): - self.useFixture(base.OpenDaylightFeaturesFixture()) - self.mock_ws_client = mock.patch.object( - OpenDaylightWebsocketClient, 'odl_create_websocket') - super(TestOdlPortStatusUpdate, self).setUp() - - def test_object_create(self): - OdlPortStatusUpdate() - - @mock.patch.object(provisioning_blocks, 'provisioning_complete') - def test_websock_recv(self, mocked_provisioning_complete): - updater = OdlPortStatusUpdate() - updater._process_websocket_recv(self.WEBSOCK_NOTIFICATION, False) - mocked_provisioning_complete.assert_called_once() - self.assertTrue(mocked_provisioning_complete.call_args[0][1] - == 'd6e6335d-9568-4949-aef1-4107e34c5f28') - - @mock.patch.object(provisioning_blocks, 'provisioning_complete') - @mock.patch.object(neutron_lib.context, 'get_admin_context') - @mock.patch.object(OpenDaylightRestClient, 'get') - @mock.patch.object(neutron_lib.plugins.directory, 'get_plugin') - def test_pull_missed_statuses(self, mocked_get_plugin, mocked_get, ac, pc): - uuid = 'd6e6335d-9568-4949-aef1-4107e34c5f28' - plugin = mock.MagicMock() - plugin.get_ports = mock.MagicMock(return_value=[{'id': uuid}]) - mocked_get_plugin.return_value = plugin - - updater = OdlPortStatusUpdate() - updater._pull_missed_statuses() - - mocked_get.assert_called_with(uuid) - - @mock.patch.object(threading, 'Thread') - def test_process_websocket_reconnect(self, mocked_thread): - updater = OdlPortStatusUpdate() - updater._process_websocket_reconnect( - odl_ws_client.ODL_WEBSOCKET_CONNECTED) - mocked_thread.assert_called() - mocked_thread.return_value.start.assert_called() diff --git a/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py b/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py deleted file mode 100644 index b6cfd03f9..000000000 --- a/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py +++ /dev/null @@ -1,512 
+0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from copy import deepcopy -from os import path as os_path -from string import Template - -import mock -from oslo_serialization import jsonutils - -from neutron.db import provisioning_blocks -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2 import driver_context as ctx -from neutron_lib.api.definitions import portbindings -from neutron_lib import constants as n_const -from neutron_lib.plugins import directory -from neutron_lib.plugins.ml2 import api as ml2_api - -from networking_odl.common import odl_features -from networking_odl.ml2 import pseudo_agentdb_binding -from networking_odl.tests import base -from networking_odl.tests.unit import test_base_db -from requests.exceptions import HTTPError - -from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin - -AGENTDB_BINARY = 'neutron-odlagent-portbinding' -L2_TYPE = "ODL L2" - - -class TestPseudoAgentDBBindingController(test_base_db.ODLBaseDbTestCase): - """Test class for AgentDBPortBinding.""" - - # test data hostconfig and hostconfig-dbget - sample_odl_hconfigs = {"hostconfigs": {"hostconfig": [ - {"host-id": "devstack", - "host-type": "ODL L2", - "config": """{"supported_vnic_types": [ - {"vnic_type": "normal", "vif_type": "ovs", - "vif_details": {}}], - "allowed_network_types": [ - "local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}}"""} - ]}} - - # Test data for string interpolation of substitutable identifers - # e.g. $PORT_ID identifier in the configurations JSON string below shall - # be substituted with portcontext.current['id'] eliminating the check - # for specific vif_type making port-binding truly switch agnostic. 
- # Refer: Python string templates and interpolation (string.Template) - sample_hconf_str_tmpl_subs_vpp = { - "host": "devstack", # host-id in ODL JSON - "agent_type": "ODL L2", # host-type in ODL JSON - # config in ODL JSON - "configurations": {"supported_vnic_types": [ - {"vnic_type": "normal", "vif_type": "vhostuser", - "vif_details": { - "uuid": "TEST_UUID", - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": "socket_", - "vhostuser_socket_dir": "/tmp", - "vhostuser_ovs_plug": True, - "vhostuser_mode": "server", - "vhostuser_socket": - "/tmp/socket_$PORT_ID" - }}], - "allowed_network_types": [ - "local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}} - } - - sample_hconf_str_tmpl_subs_ovs = { - "host": "devstack", # host-id in ODL JSON - "agent_type": "ODL L2", # host-type in ODL JSON - # config in ODL JSON - "configurations": {"supported_vnic_types": [ - {"vnic_type": "normal", "vif_type": "vhostuser", - "vif_details": { - "uuid": "TEST_UUID", - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": "vhu_", - "vhostuser_socket_dir": "/var/run/openvswitch", - "vhostuser_ovs_plug": True, - "vhostuser_mode": "client", - "vhostuser_socket": - "/var/run/openvswitch/vhu_$PORT_ID" - }}], - "allowed_network_types": [ - "local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}} - } - - sample_hconf_str_tmpl_nosubs = { - "host": "devstack", # host-id in ODL JSON - "agent_type": "ODL L2", # host-type in ODL JSON - # config in ODL JSON - "configurations": {"supported_vnic_types": [ - {"vnic_type": "normal", "vif_type": "ovs", - "vif_details": { - "uuid": "TEST_UUID", - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": "socket_", - "vhostuser_socket_dir": "/tmp", - "vhostuser_ovs_plug": True, - "vhostuser_mode": "server", - "vhostuser_socket": - "/var/run/openvswitch/PORT_NOSUBS" - }}], - "allowed_network_types": [ - "local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}} - } - - # Test data for vanilla OVS - sample_hconfig_dbget_ovs = {"configurations": {"supported_vnic_types": [ - {"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_OVS, - "vif_details": { - "some_test_details": None - }}], - "allowed_network_types": ["local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}}} - - # Test data for OVS-DPDK - sample_hconfig_dbget_ovs_dpdk = {"configurations": { - "supported_vnic_types": [{ - "vnic_type": "normal", - "vif_type": portbindings.VIF_TYPE_VHOST_USER, - "vif_details": { - "uuid": "TEST_UUID", - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": "vhu_", - # Assumption: /var/run mounted as tmpfs - "vhostuser_socket_dir": "/var/run/openvswitch", - "vhostuser_ovs_plug": True, - "vhostuser_mode": "client", - "vhostuser_socket": "/var/run/openvswitch/vhu_$PORT_ID"}}], - "allowed_network_types": ["local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}}} - - # Test data for VPP - sample_hconfig_dbget_vpp = {"configurations": {"supported_vnic_types": [ - {"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_VHOST_USER, - "vif_details": { - "uuid": "TEST_UUID", - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": "socket_", - "vhostuser_socket_dir": "/tmp", - "vhostuser_ovs_plug": True, - "vhostuser_mode": "server", - "vhostuser_socket": "/tmp/socket_$PORT_ID" - }}], - "allowed_network_types": ["local", "vlan", "vxlan", "gre"], - 
"bridge_mappings": {"physnet1": "br-ex"}}} - - # Test data for length of string - sample_odl_hconfigs_length = { - "host": "devstack", # host-id in ODL JSON - "agent_type": "ODL L2", # host-type in ODL JSON - # config in ODL JSON - "configurations": {"supported_vnic_types": [ - {"vnic_type": "normal", "vif_type": "vhostuser", - "vif_details": { - "uuid": "TEST_UUID", - "has_datapath_type_netdev": True, - "support_vhost_user": True, - "port_prefix": "longprefix_", - "vhostuser_socket_dir": "/tmp", - "vhostuser_ovs_plug": True, - "vhostuser_mode": "server", - "vhostuser_socket": - "/tmp/longprefix_$PORT_ID" - }}], - "allowed_network_types": [ - "local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}} - } - - # Raw test data for unicode/string comparison - sample_odl_hconfigs_length_raw = { - "host": "devstack", - "agent_type": "ODL L2", - "configurations": """{"supported_vnic_types": [ - {"vnic_type": "normal", "vif_type": "vhostuser", - "vif_details": { - "uuid": "TEST_UUID", - "has_datapath_type_netdev": true, - "support_vhost_user": true, - "port_prefix": "prefix_", - "vhostuser_socket_dir": "/tmp", - "vhostuser_ovs_plug": true, - "vhostuser_mode": "server", - "vhostuser_socket": - "/tmp/prefix_$PORT_ID" - }}], - "allowed_network_types": [ - "local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}}""" - } - - # test data valid and invalid segments - test_valid_segment = { - ml2_api.ID: 'API_ID', - ml2_api.NETWORK_TYPE: n_const.TYPE_LOCAL, - ml2_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', - ml2_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} - - test_invalid_segment = { - ml2_api.ID: 'API_ID', - ml2_api.NETWORK_TYPE: n_const.TYPE_NONE, - ml2_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', - ml2_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} - - def setUp(self): - """Setup test.""" - self.useFixture(base.OpenDaylightRestClientFixture()) - self.useFixture(base.OpenDaylightFeaturesFixture()) - super(TestPseudoAgentDBBindingController, self).setUp() - - fake_agents_db = mock.MagicMock() - fake_agents_db.create_or_update_agent = mock.MagicMock() - - self.mgr = pseudo_agentdb_binding.PseudoAgentDBBindingController( - db_plugin=fake_agents_db) - - def test_make_hostconf_uri(self): - """test make uri.""" - test_path = '/restconf/neutron:neutron/hostconfigs' - expected = "http://localhost:8080/restconf/neutron:neutron/hostconfigs" - test_uri = self.mgr._make_hostconf_uri(path=test_path) - - self.assertEqual(expected, test_uri) - - def test_update_agents_db(self): - """test agent update.""" - self.mgr._update_agents_db( - hostconfigs=self.sample_odl_hconfigs['hostconfigs']['hostconfig']) - self.mgr.agents_db.create_or_update_agent.assert_called_once() - - def _get_raised_response(self, json_data, status_code): - - class MockHTTPError(HTTPError): - def __init__(self, json_data, status_code): - self.json_data = json_data - self.status_code = status_code - self.response = self - - class MockResponse(object): - def __init__(self, json_data, status_code): - self.raise_obj = MockHTTPError(json_data, status_code) - - def raise_for_status(self): - raise self.raise_obj - - return MockResponse(json_data, status_code) - - def test_hostconfig_response_404(self): - with mock.patch.object(self.mgr.odl_rest_client, - 'get', return_value=self. 
- _get_raised_response({}, 404)): - self.assertEqual(self.mgr._rest_get_hostconfigs(), []) - - def test_is_valid_segment(self): - """Validate the _check_segment method.""" - all_network_types = [n_const.TYPE_FLAT, n_const.TYPE_GRE, - n_const.TYPE_LOCAL, n_const.TYPE_VXLAN, - n_const.TYPE_VLAN, n_const.TYPE_NONE] - - valid_types = { - network_type - for network_type in all_network_types - if self.mgr._is_valid_segment( - {ml2_api.NETWORK_TYPE: network_type}, - {'allowed_network_types': [ - n_const.TYPE_LOCAL, n_const.TYPE_GRE, - n_const.TYPE_VXLAN, n_const.TYPE_VLAN]})} - - self.assertEqual({ - n_const.TYPE_LOCAL, n_const.TYPE_GRE, n_const.TYPE_VXLAN, - n_const.TYPE_VLAN}, valid_types) - - def test_bind_port_with_vif_type_ovs(self): - """test bind_port with vanilla ovs.""" - port_context = self._fake_port_context( - fake_segments=[self.test_invalid_segment, self.test_valid_segment]) - - vif_type = portbindings.VIF_TYPE_OVS - vif_details = {'some_test_details': None} - - self.mgr._hconfig_bind_port( - port_context, self.sample_hconfig_dbget_ovs) - - port_context.set_binding.assert_called_once_with( - self.test_valid_segment[ml2_api.ID], vif_type, - vif_details, status=n_const.PORT_STATUS_ACTIVE) - - def _set_pass_vif_details(self, port_context, vif_details): - """extract vif_details and update vif_details if needed.""" - vhostuser_socket_dir = vif_details.get( - 'vhostuser_socket_dir', '/var/run/openvswitch') - port_spec = vif_details.get( - 'port_prefix', 'vhu_') + port_context.current['id'] - socket_path = os_path.join(vhostuser_socket_dir, port_spec) - vif_details.update({portbindings.VHOST_USER_SOCKET: socket_path}) - - return vif_details - - def test_bind_port_with_vif_type_vhost_user(self): - """test bind_port with ovs-dpdk.""" - port_context = self._fake_port_context( - fake_segments=[self.test_invalid_segment, self.test_valid_segment], - host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_ovs)]) - - self.mgr.bind_port(port_context) - - pass_vif_type = portbindings.VIF_TYPE_VHOST_USER - pass_vif_details = self.sample_hconfig_dbget_ovs_dpdk[ - 'configurations']['supported_vnic_types'][0]['vif_details'] - self._set_pass_vif_details(port_context, pass_vif_details) - - port_context.set_binding.assert_called_once_with( - self.test_valid_segment[ml2_api.ID], pass_vif_type, - pass_vif_details, status=n_const.PORT_STATUS_ACTIVE) - - def test_bind_port_with_vif_type_vhost_user_vpp(self): - """test bind_port with vpp.""" - port_context = self._fake_port_context( - fake_segments=[self.test_invalid_segment, self.test_valid_segment], - host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_vpp)]) - - self.mgr.bind_port(port_context) - - pass_vif_type = portbindings.VIF_TYPE_VHOST_USER - pass_vif_details = self.sample_hconfig_dbget_vpp['configurations'][ - 'supported_vnic_types'][0]['vif_details'] - self._set_pass_vif_details(port_context, pass_vif_details) - - port_context.set_binding.assert_called_once_with( - self.test_valid_segment[ml2_api.ID], pass_vif_type, - pass_vif_details, status=n_const.PORT_STATUS_ACTIVE) - - def test_bind_port_without_valid_segment(self): - """test bind_port without a valid segment.""" - port_context = self._fake_port_context( - fake_segments=[self.test_invalid_segment]) - - self.mgr._hconfig_bind_port( - port_context, self.sample_hconfig_dbget_ovs) - - port_context.set_binding.assert_not_called() - - def test_no_str_template_substitution_in_configuration_string(self): - """Test for no identifier substituion in config JSON string.""" - port_context = 
self._fake_port_context( - fake_segments=[self.test_invalid_segment, self.test_valid_segment]) - - hconf_dict = self.mgr._substitute_hconfig_tmpl( - port_context, self.sample_hconf_str_tmpl_nosubs) - - test_string = hconf_dict['configurations'][ - 'supported_vnic_types'][0][ - 'vif_details'][portbindings.VHOST_USER_SOCKET] - - expected_str = '/var/run/openvswitch/PORT_NOSUBS' - - self.assertEqual(expected_str, test_string) - - def test_str_template_substitution_in_configuration_string(self): - """Test for identifier substitution in config JSON string.""" - port_context = self._fake_port_context( - fake_segments=[self.test_invalid_segment, self.test_valid_segment]) - - hconf_dict = self.mgr._substitute_hconfig_tmpl( - port_context, self.sample_hconf_str_tmpl_subs_vpp) - - test_string = hconf_dict['configurations'][ - 'supported_vnic_types'][0][ - 'vif_details'][portbindings.VHOST_USER_SOCKET] - - expected_str = Template('/tmp/socket_$PORT_ID') - expected_str = expected_str.safe_substitute({ - 'PORT_ID': port_context.current['id']}) - - self.assertEqual(expected_str, test_string) - - def test_str_template_substitution_length_in_configuration_string(self): - """Test for identifier substitution in config JSON string.""" - port_context = self._fake_port_context( - fake_segments=[self.test_invalid_segment, self.test_valid_segment]) - - hconf_dict = self.mgr._substitute_hconfig_tmpl( - port_context, self.sample_odl_hconfigs_length) - - test_string = hconf_dict['configurations'][ - 'supported_vnic_types'][0][ - 'vif_details'][portbindings.VHOST_USER_SOCKET] - - expected_str = Template('/tmp/longprefix_$PORT_ID') - expected_str = expected_str.safe_substitute({ - 'PORT_ID': port_context.current['id']}) - - self.assertNotEqual(expected_str, test_string) - self.assertEqual(len(test_string) - len('/tmp/'), 14) - - def test_template_substitution_in_raw_configuration(self): - """Test for identifier substitution in config string.""" - port_context = self._fake_port_context( - fake_segments=[self.test_invalid_segment, self.test_valid_segment]) - - # Substitute raw string configuration with json - raw_configurations = self.sample_odl_hconfigs_length_raw[ - 'configurations'] - raw_configurations_json = jsonutils.loads(raw_configurations) - self.sample_odl_hconfigs_length_raw['configurations'] = ( - raw_configurations_json) - - hconf_dict = self.mgr._substitute_hconfig_tmpl( - port_context, self.sample_odl_hconfigs_length_raw) - - test_string = hconf_dict['configurations'][ - 'supported_vnic_types'][0][ - 'vif_details'][portbindings.VHOST_USER_SOCKET] - - expected_str = Template('/tmp/prefix_$PORT_ID') - expected_str = expected_str.safe_substitute({ - 'PORT_ID': port_context.current['id']}) - - self.assertEqual(expected_str, test_string) - - def _fake_port_context(self, fake_segments, host_agents=None): - network = mock.MagicMock(spec=api.NetworkContext) - return mock.MagicMock( - spec=ctx.PortContext, - current={'id': 'PORTID', - portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL}, - segments_to_bind=fake_segments, network=network, - host_agents=lambda agent_type: host_agents, - _plugin_context=mock.MagicMock() - ) - - @mock.patch.object(provisioning_blocks, 'add_provisioning_component') - def test_prepare_inital_port_status_no_websocket( - self, mocked_add_provisioning_component): - odl_features.feature_set = set() - port_ctx = self._fake_port_context( - fake_segments=[self.test_valid_segment]) - initial_port_status = self.mgr._prepare_initial_port_status(port_ctx) - self.assertEqual(initial_port_status, 
n_const.PORT_STATUS_ACTIVE) - mocked_add_provisioning_component.assert_not_called() - - @mock.patch.object(provisioning_blocks, 'add_provisioning_component') - def test_prepare_inital_port_status_with_websocket( - self, mocked_add_provisioning_component): - odl_features.feature_set.add(odl_features.OPERATIONAL_PORT_STATUS) - port_ctx = self._fake_port_context( - fake_segments=[self.test_valid_segment]) - initial_port_status = self.mgr._prepare_initial_port_status(port_ctx) - self.assertEqual(initial_port_status, n_const.PORT_STATUS_DOWN) - mocked_add_provisioning_component.assert_called() - - -class TestPseudoAgentDBBindingControllerBug1608659( - test_plugin.NeutronDbPluginV2TestCase): - """Test class for Bug1608659.""" - - # test data hostconfig - sample_odl_hconfigs = {"hostconfigs": {"hostconfig": [ - {"host-id": "devstack-control", - "host-type": "ODL L2", - "config": """{"supported_vnic_types": [ - {"vnic_type": "normal", "vif_type": "vhostuser", - "vif_details": - {"port_filter": "False", - "vhostuser_socket": "/var/run/openvswitch"}}], - "allowed_network_types": [ - "local", "vlan", "vxlan", "gre"], - "bridge_mappings": {"physnet1": "br-ex"}}"""}, - {"host-id": "devstack-control", - "host-type": "ODL L3", - "config": """{ "some_details": "dummy_details" }"""} - ]}} - - def setUp(self): - self.useFixture(base.OpenDaylightRestClientFixture()) - super(TestPseudoAgentDBBindingControllerBug1608659, self).setUp( - plugin='ml2') - self.core_plugin = directory.get_plugin() - self.mgr = pseudo_agentdb_binding.PseudoAgentDBBindingController( - self.core_plugin) - - def test_execute_no_exception(self): - with mock.patch.object(pseudo_agentdb_binding, 'LOG') as mock_log: - self.mgr._update_agents_db( - self.sample_odl_hconfigs['hostconfigs']['hostconfig']) - # Assert no exception happened - self.assertFalse(mock_log.exception.called) diff --git a/networking_odl/tests/unit/qos/__init__.py b/networking_odl/tests/unit/qos/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/qos/test_qos_driver_v2.py b/networking_odl/tests/unit/qos/test_qos_driver_v2.py deleted file mode 100644 index 6eae751d4..000000000 --- a/networking_odl/tests/unit/qos/test_qos_driver_v2.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from neutron.db import api as neutron_db_api - -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.qos import qos_driver_v2 as qos_driver -from networking_odl.tests import base -from networking_odl.tests.unit import base_v2 - - -class OpenDaylightQosDriverTestCase(base_v2.OpenDaylightConfigBase): - - def setUp(self): - self.useFixture(base.OpenDaylightJournalThreadFixture()) - super(OpenDaylightQosDriverTestCase, self).setUp() - self.db_session = neutron_db_api.get_writer_session() - self.qos_driver = qos_driver.OpenDaylightQosDriver() - - def _get_mock_context(self): - current = {'tenant_id': 'tenant_id'} - context = mock.Mock(current=current) - context.session = self.db_session - return context - - def _get_mock_qos_operation_data(self): - data = {'description': u"qos_policy", - 'rules': [], - 'tenant_id': 'test-tenant', - 'shared': False, - 'id': 'qos-policy1', - 'name': u"policy1"} - qos_data = mock.Mock() - to_dict = mock.Mock(return_value=data) - qos_data.to_dict = to_dict - return qos_data - - def _call_operation_object(self, operation, object_type): - qos_data = self._get_mock_qos_operation_data() - context = self._get_mock_context() - method = getattr(self.qos_driver, '%s_%s' % (operation, - object_type)) - method(context, qos_data) - - def _test_qos_policy(self, operation): - self._call_operation_object(operation=operation, - object_type='policy_precommit') - qos_data = self._get_mock_qos_operation_data() - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - self.assertEqual(operation, row['operation']) - self.assertEqual(qos_data.to_dict()['id'], row['object_uuid']) - - def test_qos_policy_create(self): - self._test_qos_policy(odl_const.ODL_CREATE) - - def test_qos_policy_update(self): - self._test_qos_policy(odl_const.ODL_UPDATE) - - def test_qos_policy_delete(self): - self._test_qos_policy(odl_const.ODL_DELETE) diff --git a/networking_odl/tests/unit/sfc/__init__.py b/networking_odl/tests/unit/sfc/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/sfc/constants.py b/networking_odl/tests/unit/sfc/constants.py deleted file mode 100644 index 94f431101..000000000 --- a/networking_odl/tests/unit/sfc/constants.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) 2016 Brocade Communication Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -CLASSIFIERS_BASE_URI = 'sfc/flowclassifiers' -FAKE_FLOW_CLASSIFIER_ID = "4a334cd4-fe9c-4fae-af4b-321c5e2eb051" -FAKE_FLOW_CLASSIFIER = { - "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", - "name": "FC1", - "tenant_id": "1814726e2d22407b8ca76db5e567dcf1", - "description": "Flow rule for classifying TCP traffic", - "protocol": "TCP", - "source_port_range_min": 22, - "source_port_range_max": 4000, - "destination_port_range_min": 80, - "destination_port_range_max": 80, - "source_ip_prefix": "22.12.34.44", - "destination_ip_prefix": "22.12.34.45" -} -PORT_PAIRS_BASE_URI = 'sfc/portpairs' -FAKE_PORT_PAIR_ID = "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" -FAKE_PORT_PAIR = { - "name": "SF1", - "id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae", - "tenant_id": "d382007aa9904763a801f68ecf065cf5", - "description": "Firewall SF instance", - "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1", - "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345" -} -PORT_PAIR_GROUPS_BASE_URI = 'sfc/portpairgroups' -FAKE_PORT_PAIR_GROUP_ID = "4512d643-24fc-4fae-af4b-321c5e2eb3d1" -FAKE_PORT_PAIR_GROUP = { - "name": "Firewall_PortPairGroup", - "id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1", - "tenant_id": "d382007aa9904763a801f68ecf065cf5", - "description": "Grouping Firewall SF instances", - "port_pairs": [ - {"id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae"} - ] -} -PORT_CHAINS_BASE_URI = 'sfc/portchains' -FAKE_PORT_CHAIN_ID = "1278dcd4-459f-62ed-754b-87fc5e4a6751" -FAKE_PORT_CHAIN = { - "name": "PC2", - "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751", - "tenant_id": "d382007aa9904763a801f68ecf065cf5", - "description": "Steering TCP and UDP traffic first to Firewall " - "and then to Loadbalancer", - "flow_classifiers": [ - {"id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051"}, - {"id": "105a4b0a-73d6-11e5-b392-2c27d72acb4c"} - ], - "port_pair_groups": [ - {"id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1"}, - {"id": "4a634d49-76dc-4fae-af4b-321c5e23d651"} - ] -} diff --git a/networking_odl/tests/unit/sfc/flowclassifier/__init__.py b/networking_odl/tests/unit/sfc/flowclassifier/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v1.py b/networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v1.py deleted file mode 100644 index 654b66c5d..000000000 --- a/networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v1.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2016 Brocade Communication Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from mock import patch -from neutron.tests import base - -from networking_odl.common.client import OpenDaylightRestClient as client -from networking_odl.sfc.flowclassifier import sfc_flowclassifier_v1 as sfc_fc -from networking_odl.tests import base as odl_base -from networking_odl.tests.unit.sfc import constants as sfc_const - - -class TestOpenDaylightSFCFlowClassifierDriverV1(base.DietTestCase): - - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - self.mocked_fc_context = patch( - 'networking_sfc.services.flowclassifier.common.context' - '.FlowClassifierContext').start().return_value - super(TestOpenDaylightSFCFlowClassifierDriverV1, self).setUp() - - self.driver = sfc_fc.OpenDaylightSFCFlowClassifierDriverV1() - self.driver.initialize() - self.mocked_fc_context.current = sfc_const.FAKE_FLOW_CLASSIFIER - - @patch.object(client, 'sendjson') - def test_create_flow_classifier(self, mocked_sendjson): - expected = {"flowclassifier": sfc_const.FAKE_FLOW_CLASSIFIER} - self.driver.create_flow_classifier(self.mocked_fc_context) - mocked_sendjson.assert_called_once_with( - 'post', sfc_const.CLASSIFIERS_BASE_URI, expected) - - @patch.object(client, 'sendjson') - def test_update_flow_classifier(self, mocked_sendjson): - expected = {"flowclassifier": sfc_const.FAKE_FLOW_CLASSIFIER} - self.driver.update_flow_classifier(self.mocked_fc_context) - mocked_sendjson.assert_called_once_with( - 'put', sfc_const.CLASSIFIERS_BASE_URI + - '/' + sfc_const.FAKE_FLOW_CLASSIFIER_ID, expected) - - @patch.object(client, 'try_delete') - def test_delete_flow_classifier(self, mocked_try_delete): - self.driver.delete_flow_classifier(self.mocked_fc_context) - mocked_try_delete.assert_called_once_with( - sfc_const.CLASSIFIERS_BASE_URI + '/' + - sfc_const.FAKE_FLOW_CLASSIFIER_ID) diff --git a/networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v2.py b/networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v2.py deleted file mode 100644 index 94a9cbb22..000000000 --- a/networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v2.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2017 Brocade Communication Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from mock import patch -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from neutron.db import api as neutron_db_api - -from networking_odl.sfc.flowclassifier import sfc_flowclassifier_v2 as sfc_fc -from networking_odl.tests import base as odl_base -from networking_odl.tests.unit import base_v2 -from networking_odl.tests.unit.sfc import constants as sfc_const - - -class TestOpenDaylightSFCFlowClassifierDriverV2( - base_v2.OpenDaylightConfigBase): - - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - super(TestOpenDaylightSFCFlowClassifierDriverV2, self).setUp() - self.db_session = neutron_db_api.get_writer_session() - self.handler = sfc_fc.OpenDaylightSFCFlowClassifierDriverV2() - self.handler.initialize() - - def _get_mock_context(self): - mocked_fc_context = patch( - 'networking_sfc.services.flowclassifier.common.context' - '.FlowClassifierContext').start().return_value - - mocked_fc_context.current = sfc_const.FAKE_FLOW_CLASSIFIER - mocked_fc_context.session = self.db_session - mocked_fc_context._plugin_context = mocked_fc_context - return mocked_fc_context - - def _call_operation_object(self, operation, timing): - method = getattr(self.handler, - '%s_flow_classifier_%s' % (operation, timing)) - method(self._get_mock_context()) - - def _test_event(self, operation, timing): - self._call_operation_object(operation, timing) - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - - if timing == 'precommit': - self.assertEqual(operation, row['operation']) - self.assertEqual( - odl_const.ODL_SFC_FLOW_CLASSIFIER, row['object_type']) - elif timing == 'after': - self.assertIsNone(row) - - # TODO(yamahata): utilize test scenarios - def test_create_flow_classifier_precommit(self): - self._test_event("create", "precommit") - - def test_create_flow_classifier_postcommit(self): - self._test_event("create", "postcommit") - - def test_update_flow_classifier_precommit(self): - self._test_event("update", "precommit") - - def test_update_flow_classifier_postcommit(self): - self._test_event("update", "postcommit") - - def test_delete_flow_classifier_precommit(self): - self._test_event("delete", "precommit") - - def test_delete_flow_classifier_postcommit(self): - self._test_event("delete", "postcommit") diff --git a/networking_odl/tests/unit/sfc/test_sfc_driver_v1.py b/networking_odl/tests/unit/sfc/test_sfc_driver_v1.py deleted file mode 100644 index fac544143..000000000 --- a/networking_odl/tests/unit/sfc/test_sfc_driver_v1.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) 2016 Brocade Communication Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from mock import patch - -from networking_odl.common.client import OpenDaylightRestClient as client -from networking_odl.sfc import sfc_driver_v1 -from networking_odl.tests import base as odl_base -from networking_odl.tests.unit.sfc import constants as sfc_const - -from neutron.tests import base - - -class TestOpenDaylightSFCDriverV1(base.DietTestCase): - - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - self.mocked_pp_context = patch( - 'networking_sfc.services.sfc.common.context.PortPairContext' - ).start().return_value - - self.mocked_ppg_context = patch( - 'networking_sfc.services.sfc.common.context.PortPairGroupContext' - ).start().return_value - - self.mocked_pc_context = patch( - 'networking_sfc.services.sfc.common.context.PortChainContext' - ).start().return_value - super(TestOpenDaylightSFCDriverV1, self).setUp() - - self.driver = sfc_driver_v1.OpenDaylightSFCDriverV1() - self.driver.initialize() - self.mocked_pp_context.current = sfc_const.FAKE_PORT_PAIR - self.mocked_ppg_context.current = sfc_const.FAKE_PORT_PAIR_GROUP - self.mocked_pc_context.current = sfc_const.FAKE_PORT_CHAIN - - @patch.object(client, 'sendjson') - def test_create_port_pair(self, mocked_sendjson): - expected = {"portpair": sfc_const.FAKE_PORT_PAIR} - self.driver.create_port_pair(self.mocked_pp_context) - mocked_sendjson.assert_called_once_with( - 'post', sfc_const.PORT_PAIRS_BASE_URI, expected) - - @patch.object(client, 'sendjson') - def test_update_port_pair(self, mocked_sendjson): - expected = {"portpair": sfc_const.FAKE_PORT_PAIR} - self.driver.update_port_pair(self.mocked_pp_context) - mocked_sendjson.assert_called_once_with( - 'put', sfc_const.PORT_PAIRS_BASE_URI + '/' + - sfc_const.FAKE_PORT_PAIR_ID, expected) - - @patch.object(client, 'try_delete') - def test_delete_port_pair(self, mocked_try_delete): - self.driver.delete_port_pair(self.mocked_pp_context) - mocked_try_delete.assert_called_once_with( - sfc_const.PORT_PAIRS_BASE_URI + '/' + sfc_const.FAKE_PORT_PAIR_ID) - - @patch.object(client, 'sendjson') - def test_create_port_pair_group(self, mocked_sendjson): - expected = {"portpairgroup": sfc_const.FAKE_PORT_PAIR_GROUP} - self.driver.create_port_pair_group(self.mocked_ppg_context) - mocked_sendjson.assert_called_once_with( - 'post', sfc_const.PORT_PAIR_GROUPS_BASE_URI, expected) - - @patch.object(client, 'sendjson') - def test_update_port_pair_group(self, mocked_sendjson): - expected = {"portpairgroup": sfc_const.FAKE_PORT_PAIR_GROUP} - self.driver.update_port_pair_group(self.mocked_ppg_context) - mocked_sendjson.assert_called_once_with( - 'put', sfc_const.PORT_PAIR_GROUPS_BASE_URI + '/' + - sfc_const.FAKE_PORT_PAIR_GROUP_ID, expected) - - @patch.object(client, 'try_delete') - def test_delete_port_pair_group(self, mocked_try_delete): - self.driver.delete_port_pair_group(self.mocked_ppg_context) - mocked_try_delete.assert_called_once_with( - sfc_const.PORT_PAIR_GROUPS_BASE_URI + '/' + - sfc_const.FAKE_PORT_PAIR_GROUP_ID) - - @patch.object(client, 'sendjson') - def test_create_port_chain(self, mocked_sendjson): - expected = {"portchain": sfc_const.FAKE_PORT_CHAIN} - self.driver.create_port_chain(self.mocked_pc_context) - mocked_sendjson.assert_called_once_with( - 'post', sfc_const.PORT_CHAINS_BASE_URI, expected) - - @patch.object(client, 'sendjson') - def test_update_port_chain(self, mocked_sendjson): - expected = {"portchain": sfc_const.FAKE_PORT_CHAIN} - self.driver.update_port_chain(self.mocked_pc_context) - mocked_sendjson.assert_called_once_with( - 'put', 
sfc_const.PORT_CHAINS_BASE_URI + '/' + - sfc_const.FAKE_PORT_CHAIN_ID, expected) - - @patch.object(client, 'try_delete') - def test_delete_port_chain(self, mocked_try_delete): - self.driver.delete_port_chain(self.mocked_pc_context) - mocked_try_delete.assert_called_once_with( - sfc_const.PORT_CHAINS_BASE_URI + '/' + - sfc_const.FAKE_PORT_CHAIN_ID) diff --git a/networking_odl/tests/unit/sfc/test_sfc_driver_v2.py b/networking_odl/tests/unit/sfc/test_sfc_driver_v2.py deleted file mode 100644 index 6661abf23..000000000 --- a/networking_odl/tests/unit/sfc/test_sfc_driver_v2.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) 2017 Brocade Communication Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from mock import patch - -from neutron.db import api as neutron_db_api - -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.sfc import sfc_driver_v2 as sfc -from networking_odl.tests import base as odl_base -from networking_odl.tests.unit import base_v2 -from networking_odl.tests.unit.sfc import constants as sfc_const - - -class TestOpenDaylightSFCDriverV2(base_v2.OpenDaylightConfigBase): - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - super(TestOpenDaylightSFCDriverV2, self).setUp() - self.db_session = neutron_db_api.get_writer_session() - self.handler = sfc.OpenDaylightSFCDriverV2() - self.handler.initialize() - - def _get_mock_portpair_operation_context(self): - mocked_fc_context = patch( - 'networking_sfc.services.sfc.common.context.PortPairContext' - ).start().return_value - - mocked_fc_context.current = sfc_const.FAKE_PORT_PAIR - mocked_fc_context.session = self.db_session - mocked_fc_context._plugin_context = mocked_fc_context - return mocked_fc_context - - def _get_mock_portpairgroup_operation_context(self): - mocked_fc_context = patch( - 'networking_sfc.services.sfc.common.context.PortPairGroupContext' - ).start().return_value - - mocked_fc_context.current = sfc_const.FAKE_PORT_PAIR_GROUP - mocked_fc_context.session = self.db_session - mocked_fc_context._plugin_context = mocked_fc_context - return mocked_fc_context - - def _get_mock_portchain_operation_context(self): - mocked_fc_context = patch( - 'networking_sfc.services.sfc.common.context.PortChainContext' - ).start().return_value - - mocked_fc_context.current = sfc_const.FAKE_PORT_CHAIN - mocked_fc_context.session = self.db_session - mocked_fc_context._plugin_context = mocked_fc_context - return mocked_fc_context - - def _get_mock_operation_context(self, object_type): - getter = getattr(self, '_get_mock_%s_operation_context' % object_type) - return getter() - - def _call_operation_object(self, operation, timing, resource_str, context): - method = getattr(self.handler, - '%s_%s_%s' % (operation, resource_str, timing)) - method(context) - - def _test_event(self, operation, timing, resource_str, - object_type): - context = self._get_mock_operation_context(object_type) - self._call_operation_object(operation, timing, 
resource_str, context) - if timing == 'precommit': - self.db_session.flush() - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - - if timing == 'precommit': - self.assertEqual(operation, row['operation']) - self.assertEqual(object_type, row['object_type']) - elif timing == 'after': - self.assertIsNone(row) - - # TODO(yamahata): utilize test scenarios - def test_create_port_pair_precommit(self): - self._test_event("create", "precommit", "port_pair", - odl_const.ODL_SFC_PORT_PAIR) - - def test_create_port_pair_postcommit(self): - self._test_event("create", "postcommit", "port_pair", - odl_const.ODL_SFC_PORT_PAIR) - - def test_update_port_pair_precommit(self): - self._test_event("update", "precommit", "port_pair", - odl_const.ODL_SFC_PORT_PAIR) - - def test_update_port_pair_postcommit(self): - self._test_event("update", "postcommit", "port_pair", - odl_const.ODL_SFC_PORT_PAIR) - - def test_delete_port_pair_precommit(self): - self._test_event("delete", "precommit", "port_pair", - odl_const.ODL_SFC_PORT_PAIR) - - def test_delete_port_pair_postcommit(self): - self._test_event("delete", "postcommit", "port_pair", - odl_const.ODL_SFC_PORT_PAIR) - - def test_create_port_pair_group_precommit(self): - self._test_event("create", "precommit", "port_pair_group", - odl_const.ODL_SFC_PORT_PAIR_GROUP) - - def test_create_port_pair_group_postcommit(self): - self._test_event("create", "postcommit", "port_pair_group", - odl_const.ODL_SFC_PORT_PAIR_GROUP) - - def test_update_port_pair_group_precommit(self): - self._test_event("update", "precommit", "port_pair_group", - odl_const.ODL_SFC_PORT_PAIR_GROUP) - - def test_update_port_pair_group_postcommit(self): - self._test_event("update", "postcommit", "port_pair_group", - odl_const.ODL_SFC_PORT_PAIR_GROUP) - - def test_delete_port_pair_group_precommit(self): - self._test_event("delete", "precommit", "port_pair_group", - odl_const.ODL_SFC_PORT_PAIR_GROUP) - - def test_delete_port_pair_group_postcommit(self): - self._test_event("delete", "postcommit", "port_pair_group", - odl_const.ODL_SFC_PORT_PAIR_GROUP) - - def test_create_port_chain_precommit(self): - self._test_event("create", "precommit", "port_chain", - odl_const.ODL_SFC_PORT_CHAIN) - - def test_create_port_chain_postcommit(self): - self._test_event("create", "postcommit", "port_chain", - odl_const.ODL_SFC_PORT_CHAIN) - - def test_update_port_chain_precommit(self): - self._test_event("update", "precommit", "port_chain", - odl_const.ODL_SFC_PORT_CHAIN) - - def test_update_port_chain_postcommit(self): - self._test_event("update", "postcommit", "port_chain", - odl_const.ODL_SFC_PORT_CHAIN) - - def test_delete_port_chain_precommit(self): - self._test_event("delete", "precommit", "port_chain", - odl_const.ODL_SFC_PORT_CHAIN) - - def test_delete_port_chain_postcommit(self): - self._test_event("delete", "postcommit", "port_chain", - odl_const.ODL_SFC_PORT_CHAIN) diff --git a/networking_odl/tests/unit/test_base_db.py b/networking_odl/tests/unit/test_base_db.py deleted file mode 100644 index c5e801dce..000000000 --- a/networking_odl/tests/unit/test_base_db.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2016 Intel Corporation. -# Copyright 2016 Isaku Yamahata -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from networking_odl.db import models - -from neutron.db import api as neutron_db_api -from neutron.tests.unit.testlib_api import SqlTestCaseLight - - -class ODLBaseDbTestCase(SqlTestCaseLight): - def setUp(self): - super(ODLBaseDbTestCase, self).setUp() - self.db_session = neutron_db_api.get_writer_session() - self.addCleanup(self._db_cleanup) - - def _db_cleanup(self): - self.db_session.query(models.OpenDaylightJournal).delete() - self.db_session.query(models.OpenDaylightPeriodicTask).delete() diff --git a/networking_odl/tests/unit/trunk/__init__.py b/networking_odl/tests/unit/trunk/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/tests/unit/trunk/test_trunk_driver_v1.py b/networking_odl/tests/unit/trunk/test_trunk_driver_v1.py deleted file mode 100644 index 0b48d94bf..000000000 --- a/networking_odl/tests/unit/trunk/test_trunk_driver_v1.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from neutron.tests import base as base_test - -from networking_odl.common.client import OpenDaylightRestClient as client -from networking_odl.common import constants as odl_const -from networking_odl.tests import base as odl_base -from networking_odl.trunk import trunk_driver_v1 as trunk_driver - -from neutron.services.trunk import callbacks -from neutron.services.trunk import constants as trunk_consts - -from neutron_lib.callbacks import events -from oslo_config import cfg - - -FAKE_TRUNK = { - 'status': 'ACTIVE', - 'sub_ports': [{'segmentation_type': 'vlan', - 'port_id': 'fake_port_id', - 'segmentation_id': 101}, - {'segmentation_type': 'vlan', - 'port_id': 'fake_port_id', - 'segmentation_id': 102}], - 'name': 'trunk0', - 'admin_state_up': 'true', - 'tenant_id': 'fake_tenant_id', - 'created_at': '2016-11-16T10:17:32Z', - 'updated_at': '2016-11-16T10:17:44Z', - 'revision_number': 2, - 'project_id': 'fake_project_id', - 'port_id': 'fake_port_id', - 'id': 'fake_id', - 'description': 'fake trunk port'} - - -class TestTrunkHandler(base_test.BaseTestCase): - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - super(TestTrunkHandler, self).setUp() - self.handler = (trunk_driver. 
- OpenDaylightTrunkHandlerV1()) - - def _fake_trunk_event_payload(self): - payload = callbacks.TrunkPayload( - mock.Mock(), 'fake_id', - mock.Mock(return_value=FAKE_TRUNK), - mock.Mock(return_value=FAKE_TRUNK), - mock.Mock(return_value=FAKE_TRUNK['sub_ports'])) - payload.current_trunk.status = trunk_consts.DOWN_STATUS - payload.current_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK) - payload.original_trunk.status = trunk_consts.DOWN_STATUS - payload.original_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK) - return payload - - @mock.patch.object(client, 'sendjson') - def test_create_trunk(self, mocked_sendjson): - fake_payload = self._fake_trunk_event_payload() - expected = {odl_const.ODL_TRUNK: fake_payload.current_trunk.to_dict()} - self.handler.trunk_event(mock.ANY, events.AFTER_CREATE, - mock.ANY, fake_payload) - mocked_sendjson.assert_called_once_with('post', odl_const.ODL_TRUNKS, - expected) - - @mock.patch.object(client, 'sendjson') - def test_update_trunk(self, mocked_sendjson): - fake_payload = self._fake_trunk_event_payload() - expected = {odl_const.ODL_TRUNK: fake_payload.current_trunk.to_dict()} - self.handler.trunk_event(mock.ANY, events.AFTER_UPDATE, - mock.ANY, fake_payload) - url = odl_const.ODL_TRUNKS + '/' + fake_payload.trunk_id - mocked_sendjson.assert_called_once_with('put', url, expected) - - @mock.patch.object(client, 'sendjson') - def test_subport(self, mocked_sendjson): - fake_payload = self._fake_trunk_event_payload() - expected = {odl_const.ODL_TRUNK: fake_payload.current_trunk.to_dict()} - self.handler.subport_event(mock.ANY, mock.ANY, mock.ANY, fake_payload) - url = odl_const.ODL_TRUNKS + '/' + fake_payload.trunk_id - mocked_sendjson.assert_called_once_with('put', url, expected) - - @mock.patch.object(client, 'try_delete') - def test_delete_trunk(self, mocked_try_delete): - fake_payload = self._fake_trunk_event_payload() - self.handler.trunk_event(mock.ANY, events.AFTER_DELETE, - mock.ANY, fake_payload) - url = odl_const.ODL_TRUNKS + '/' + fake_payload.trunk_id - mocked_try_delete.assert_called_once_with(url) - - -class TestTrunkDriver(base_test.BaseTestCase): - def setUp(self): - self.useFixture(odl_base.OpenDaylightRestClientFixture()) - super(TestTrunkDriver, self).setUp() - - def test_is_loaded(self): - driver = trunk_driver.OpenDaylightTrunkDriverV1.create() - cfg.CONF.set_override('mechanism_drivers', - ["logger", odl_const.ODL_ML2_MECH_DRIVER_V1], - group='ml2') - self.assertTrue(driver.is_loaded) - - cfg.CONF.set_override('mechanism_drivers', - ['logger'], - group='ml2') - self.assertFalse(driver.is_loaded) - - cfg.CONF.set_override('core_plugin', 'some_plugin') - self.assertFalse(driver.is_loaded) diff --git a/networking_odl/tests/unit/trunk/test_trunk_driver_v2.py b/networking_odl/tests/unit/trunk/test_trunk_driver_v2.py deleted file mode 100644 index b142d7f83..000000000 --- a/networking_odl/tests/unit/trunk/test_trunk_driver_v2.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - - -from neutron.db import api as neutron_db_api - -from networking_odl.common import constants as odl_const -from networking_odl.db import db -from networking_odl.tests.unit import base_v2 -from networking_odl.trunk import trunk_driver_v2 as trunk_driver - -from neutron.services.trunk import callbacks -from neutron.services.trunk import constants as trunk_consts - -from oslo_config import cfg - -FAKE_TRUNK = { - 'status': 'ACTIVE', - 'sub_ports': [{'segmentation_type': 'vlan', - 'port_id': 'fake_port_id', - 'segmentation_id': 101}, - {'segmentation_type': 'vlan', - 'port_id': 'fake_port_id', - 'segmentation_id': 102}], - 'name': 'trunk0', - 'admin_state_up': 'true', - 'tenant_id': 'fake_tenant_id', - 'created_at': '2016-11-16T10:17:32Z', - 'updated_at': '2016-11-16T10:17:44Z', - 'revision_number': 2, - 'project_id': 'fake_project_id', - 'port_id': 'fake_port_id', - 'id': 'fake_id', - 'description': 'fake trunk port'} - - -class TestTrunkHandler(base_v2.OpenDaylightConfigBase): - def setUp(self): - super(TestTrunkHandler, self).setUp() - self.db_session = neutron_db_api.get_writer_session() - self.handler = (trunk_driver. - OpenDaylightTrunkHandlerV2()) - - def _get_mock_context(self): - context = mock.Mock() - context.session = self.db_session - return context - - def _fake_trunk_payload(self): - payload = callbacks.TrunkPayload( - self._get_mock_context(), 'fake_id', - mock.Mock(return_value=FAKE_TRUNK), - mock.Mock(return_value=FAKE_TRUNK), - mock.Mock(return_value=FAKE_TRUNK['sub_ports'])) - payload.current_trunk.status = trunk_consts.DOWN_STATUS - payload.current_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK) - payload.original_trunk.status = trunk_consts.DOWN_STATUS - payload.original_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK) - return payload - - def _call_operation_object(self, operation, timing, fake_payload): - method = getattr(self.handler, 'trunk_%s_%s' % (operation, timing)) - method(mock.ANY, mock.ANY, mock.ANY, fake_payload) - - def _test_event(self, operation, timing): - fake_payload = self._fake_trunk_payload() - self._call_operation_object(operation, timing, fake_payload) - if timing == 'precommit': - self.db_session.flush() - - row = db.get_oldest_pending_db_row_with_lock(self.db_session) - - if timing == 'precommit': - self.assertEqual(operation, row['operation']) - self.assertEqual(odl_const.ODL_TRUNK, row['object_type']) - self.assertEqual(fake_payload.trunk_id, row['object_uuid']) - elif timing == 'after': - self.assertIsNone(row) - - def test_trunk_create_precommit(self): - self._test_event("create", "precommit") - - def test_trunk_create_postcommit(self): - self._test_event("create", "postcommit") - - def test_trunk_update_precommit(self): - self._test_event("update", "precommit") - - def test_trunk_update_postcommit(self): - self._test_event("update", "postcommit") - - def test_trunk_delete_precommit(self): - self._test_event("delete", "precommit") - - def test_trunk_delete_postcommit(self): - self._test_event("delete", "postcommit") - - -class TestTrunkDriver(base_v2.OpenDaylightConfigBase): - def setUp(self): - super(TestTrunkDriver, self).setUp() - - def test_is_loaded(self): - driver = trunk_driver.OpenDaylightTrunkDriverV2.create() - cfg.CONF.set_override('mechanism_drivers', - ["logger", odl_const.ODL_ML2_MECH_DRIVER_V2], - group='ml2') - self.assertTrue(driver.is_loaded) - - cfg.CONF.set_override('mechanism_drivers', - 
['logger'], - group='ml2') - self.assertFalse(driver.is_loaded) - - cfg.CONF.set_override('core_plugin', 'some_plugin') - self.assertFalse(driver.is_loaded) diff --git a/networking_odl/trunk/__init__.py b/networking_odl/trunk/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/networking_odl/trunk/constants.py b/networking_odl/trunk/constants.py deleted file mode 100644 index d75d4b612..000000000 --- a/networking_odl/trunk/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from neutron.services.trunk import constants as t_consts -from neutron_lib.api.definitions import portbindings - - -SUPPORTED_INTERFACES = ( - portbindings.VIF_TYPE_OVS, - portbindings.VIF_TYPE_VHOST_USER, -) - -SUPPORTED_SEGMENTATION_TYPES = ( - t_consts.VLAN, -) diff --git a/networking_odl/trunk/trunk_driver_v1.py b/networking_odl/trunk/trunk_driver_v1.py deleted file mode 100644 index 0c92692e8..000000000 --- a/networking_odl/trunk/trunk_driver_v1.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from neutron_lib.callbacks import events -from neutron_lib.callbacks import registry -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils - -from neutron.services.trunk import constants as t_consts -from neutron.services.trunk.drivers import base as trunk_base - -from networking_odl.common import client as odl_client -from networking_odl.common import constants as odl_const -from networking_odl.trunk import constants as odltrunk_const - - -LOG = logging.getLogger(__name__) - -# NOTE: Status handling -# V1 driver assumes if status=ACTIVE by default and sets it before making -# Create/Update rest calls to ODL. -# In case of failure in rest, it resets it to DEGRADED. 
- - -@registry.has_registry_receivers -class OpenDaylightTrunkHandlerV1(object): - def __init__(self): - self.client = odl_client.OpenDaylightRestClient.create_client() - LOG.info('initialized trunk driver for OpendayLight') - - def trunk_create_postcommit(self, trunk): - trunk.update(status=t_consts.ACTIVE_STATUS) - trunk_dict = trunk.to_dict() - try: - self.client.sendjson('post', odl_const.ODL_TRUNKS, - {odl_const.ODL_TRUNK: trunk_dict}) - except Exception: - with excutils.save_and_reraise_exception(): - trunk.update(status=t_consts.DEGRADED_STATUS) - - def trunk_delete_postcommit(self, trunk): - trunk_dict = trunk.to_dict() - url = odl_const.ODL_TRUNKS + '/' + trunk_dict['id'] - self.client.try_delete(url) - - def trunk_update_postcommit(self, updated): - updated.update(status=t_consts.ACTIVE_STATUS) - trunk_dict = updated.to_dict() - try: - url = odl_const.ODL_TRUNKS + '/' + trunk_dict['id'] - self.client.sendjson('put', url, - {odl_const.ODL_TRUNK: trunk_dict}) - except Exception: - with excutils.save_and_reraise_exception(): - updated.update(status=t_consts.DEGRADED_STATUS) - - @registry.receives(t_consts.TRUNK, (events.AFTER_CREATE, - events.AFTER_DELETE, - events.AFTER_UPDATE)) - def trunk_event(self, resource, event, trunk_plugin, payload): - if event == events.AFTER_CREATE: - self.trunk_create_postcommit(payload.current_trunk) - if event == events.AFTER_UPDATE: - self.trunk_update_postcommit(payload.current_trunk) - elif event == events.AFTER_DELETE: - self.trunk_delete_postcommit(payload.original_trunk) - - @registry.receives(t_consts.SUBPORTS, (events.AFTER_CREATE, - events.AFTER_DELETE)) - def subport_event(self, resource, event, trunk_plugin, payload): - self.trunk_update_postcommit(payload.current_trunk) - - -class OpenDaylightTrunkDriverV1(trunk_base.DriverBase): - @property - def is_loaded(self): - try: - return (odl_const.ODL_ML2_MECH_DRIVER_V1 in - cfg.CONF.ml2.mechanism_drivers) - except cfg.NoSuchOptError: - return False - - @registry.receives(t_consts.TRUNK_PLUGIN, [events.AFTER_INIT]) - def register(self, resource, event, trigger, **kwargs): - super(OpenDaylightTrunkDriverV1, self).register( - resource, event, trigger, **kwargs) - self._handler = OpenDaylightTrunkHandlerV1() - - @classmethod - def create(cls): - return cls(odl_const.ODL_ML2_MECH_DRIVER_V1, - odltrunk_const.SUPPORTED_INTERFACES, - odltrunk_const.SUPPORTED_SEGMENTATION_TYPES, - None, - can_trunk_bound_port=True) diff --git a/networking_odl/trunk/trunk_driver_v2.py b/networking_odl/trunk/trunk_driver_v2.py deleted file mode 100644 index 85124be84..000000000 --- a/networking_odl/trunk/trunk_driver_v2.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from neutron_lib.callbacks import events -from neutron_lib.callbacks import registry -from oslo_config import cfg -from oslo_log import helpers as log_helpers -from oslo_log import log as logging - -from neutron.services.trunk import constants as t_consts -from neutron.services.trunk.drivers import base as trunk_base - -from networking_odl.common import config as odl_conf -from networking_odl.common import constants as odl_const -from networking_odl.journal import full_sync -from networking_odl.journal import journal -from networking_odl.trunk import constants as odltrunk_const - -LOG = logging.getLogger(__name__) - -TRUNK_RESOURCES = { - odl_const.ODL_TRUNK: odl_const.ODL_TRUNKS -} - - -@registry.has_registry_receivers -class OpenDaylightTrunkHandlerV2(object): - def __init__(self): - cfg.CONF.register_opts(odl_conf.odl_opts, "ml2_odl") - self.journal = journal.OpenDaylightJournalThread() - full_sync.register(t_consts.TRUNK, TRUNK_RESOURCES) - LOG.info('initialized trunk driver for OpendayLight') - - @staticmethod - def _record_in_journal(context, trunk_id, operation, data): - journal.record(context, odl_const.ODL_TRUNK, trunk_id, operation, data) - - # TODO(vthapar) Revisit status updates once websockets are fully - # implemented - https://review.openstack.org/#/c/421127/ - @log_helpers.log_method_call - def trunk_create_precommit(self, resource, event, trunk_plugin, payload): - data = payload.current_trunk.to_dict() - data['status'] = t_consts.ACTIVE_STATUS - self._record_in_journal(payload.context, payload.trunk_id, - odl_const.ODL_CREATE, data) - - @log_helpers.log_method_call - def trunk_update_precommit(self, resource, event, trunk_plugin, payload): - payload.current_trunk.update(status=t_consts.ACTIVE_STATUS) - data = payload.current_trunk.to_dict() - self._record_in_journal(payload.context, payload.trunk_id, - odl_const.ODL_UPDATE, data) - - @log_helpers.log_method_call - def trunk_delete_precommit(self, resource, event, trunk_plugin, payload): - # fill in data with parent ids, will be used in parent validations - trunk_dict = payload.original_trunk.to_dict() - data = [subport['port_id'] for subport in trunk_dict['sub_ports']] - data.append(trunk_dict['port_id']) - self._record_in_journal(payload.context, payload.trunk_id, - odl_const.ODL_DELETE, data) - - @log_helpers.log_method_call - def trunk_create_postcommit(self, resource, event, trunk_plugin, payload): - payload.current_trunk.update(status=t_consts.ACTIVE_STATUS) - self.journal.set_sync_event() - - @log_helpers.log_method_call - def trunk_update_postcommit(self, resource, event, trunk_plugin, payload): - payload.current_trunk.update(status=t_consts.ACTIVE_STATUS) - self.journal.set_sync_event() - - @log_helpers.log_method_call - def trunk_delete_postcommit(self, resource, event, trunk_plugin, payload): - self.journal.set_sync_event() - - -@registry.has_registry_receivers -class OpenDaylightTrunkDriverV2(trunk_base.DriverBase): - @property - def is_loaded(self): - try: - return (odl_const.ODL_ML2_MECH_DRIVER_V2 in - cfg.CONF.ml2.mechanism_drivers) - except cfg.NoSuchOptError: - return False - - @registry.receives(t_consts.TRUNK_PLUGIN, [events.AFTER_INIT]) - def register(self, resource, event, trigger, **kwargs): - super(OpenDaylightTrunkDriverV2, self).register( - resource, event, trigger, **kwargs) - self._handler = OpenDaylightTrunkHandlerV2() - registry.subscribe(self._handler.trunk_create_precommit, - t_consts.TRUNK, events.PRECOMMIT_CREATE) - registry.subscribe(self._handler.trunk_create_postcommit, - 
t_consts.TRUNK, events.AFTER_CREATE) - registry.subscribe(self._handler.trunk_update_precommit, - t_consts.TRUNK, events.PRECOMMIT_UPDATE) - registry.subscribe(self._handler.trunk_update_postcommit, - t_consts.TRUNK, events.AFTER_UPDATE) - registry.subscribe(self._handler.trunk_delete_precommit, - t_consts.TRUNK, events.PRECOMMIT_DELETE) - registry.subscribe(self._handler.trunk_delete_postcommit, - t_consts.TRUNK, events.AFTER_DELETE) - for event_ in (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE): - registry.subscribe(self._handler.trunk_update_precommit, - t_consts.SUBPORTS, event_) - for event_ in (events.AFTER_CREATE, events.AFTER_DELETE): - registry.subscribe(self._handler.trunk_update_postcommit, - t_consts.SUBPORTS, event_) - - @classmethod - def create(cls): - return cls(odl_const.ODL_ML2_MECH_DRIVER_V2, - odltrunk_const.SUPPORTED_INTERFACES, - odltrunk_const.SUPPORTED_SEGMENTATION_TYPES, - None, - can_trunk_bound_port=True) diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst deleted file mode 100644 index 4b345ed8c..000000000 --- a/rally-jobs/README.rst +++ /dev/null @@ -1,31 +0,0 @@ -Rally job related files -======================= - -This directory contains rally tasks and plugins that are run by OpenStack CI. - -Structure ---------- - -* plugins - directory where you can add rally plugins. Almost everything in - Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic - cleanup resources, .... - -* extra - all files from this directory will be copy pasted to gates, so you - are able to use absolute paths in rally tasks. - Files will be located in ~/.rally/extra/* - -* odl.yaml is a task that is run in gates against OpenStack with - Neutron service configured with ODL plugin - -Useful links ------------- - -* More about Rally: https://rally.readthedocs.org/en/latest/ - -* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html - -* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html - -* About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst deleted file mode 100644 index aab343c51..000000000 --- a/rally-jobs/extra/README.rst +++ /dev/null @@ -1,6 +0,0 @@ -Extra files -=========== - -All files from this directory will be copy pasted to gates, so you are able to -use absolute path in rally tasks. 
Files will be in ~/.rally/extra/* - diff --git a/rally-jobs/odl.yaml b/rally-jobs/odl.yaml deleted file mode 100644 index 86852e1c8..000000000 --- a/rally-jobs/odl.yaml +++ /dev/null @@ -1,296 +0,0 @@ ---- - NeutronNetworks.create_and_list_networks: - - - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_subnets: - - - args: - subnets_per_network: 2 - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - subnet: -1 - network: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_routers: - - - args: - network_create_args: - subnet_create_args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: - port_create_args: - ports_per_network: 2 - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - port: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: False - name: "_updated" - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnets_per_network: 2 - subnet_update_args: - enable_dhcp: False - name: "_subnet_updated" - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 5 - users_per_tenant: 5 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - router_update_args: - admin_state_up: False - name: "_router_updated" - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 5 - port_update_args: - admin_state_up: False - device_id: "dummy_id" - device_owner: "dummy_owner" - name: "_port_updated" - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - 
subnets_per_network: 2 - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 5 - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 - - Quotas.neutron_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 40 - concurrency: 20 - context: - users: - tenants: 20 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst deleted file mode 100644 index 68ad5483c..000000000 --- a/rally-jobs/plugins/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -Rally plugins -============= - -All \*.py modules from this directory will be auto-loaded by Rally and all -plugins will be discoverable. There is no need of any extra configuration -and there is no difference between writing them here and in rally code base. - -Note that it is better to push all interesting and useful benchmarks to Rally -code base, this simplifies administration for Operators. diff --git a/rally-jobs/plugins/__init__.py b/rally-jobs/plugins/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/notes/add-beryllium-sr4-7eced33ec292bcc8.yaml b/releasenotes/notes/add-beryllium-sr4-7eced33ec292bcc8.yaml deleted file mode 100644 index 026a7f25e..000000000 --- a/releasenotes/notes/add-beryllium-sr4-7eced33ec292bcc8.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -prelude: > - Add ODL Beryllium SR4 release definition. -features: - - Add OpenDaylight Beryllium SR4 release and Beryllium 0.4.5 snapshot - definition and remove Beryllium 0.4.4 snapshot as OpenDaylight - Beryllium 0.4.4 SR4 has been released. diff --git a/releasenotes/notes/add-host-config-8fb45d7f9732a795.yaml b/releasenotes/notes/add-host-config-8fb45d7f9732a795.yaml deleted file mode 100644 index 8b54bfa34..000000000 --- a/releasenotes/notes/add-host-config-8fb45d7f9732a795.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -prelude: > - Host Configuration data population from agentless - OpenDayLight. -features: - - This configuration is used to get the information - about physical host type and other config data like - supported vnic types stored in ovsdb. Networking-odl - can fetch this info from OpenDaylight via REST API - request and feed agents_db table in neutron, which - will be used by neutron scheduler. diff --git a/releasenotes/notes/bgpvpn-driver-v2-36c0772d510587f4.yaml b/releasenotes/notes/bgpvpn-driver-v2-36c0772d510587f4.yaml deleted file mode 100644 index f9fb4d6b6..000000000 --- a/releasenotes/notes/bgpvpn-driver-v2-36c0772d510587f4.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -prelude: > - BGPVPN Version 2 Driver for OpenDaylight. 
-features: - - | - A new version of BGPVPN driver that integrate OpenStack Neutron - BGPVPN API with OpenDaylight backend. It supports CRUD operations - for BGPVPN and enables networks and routers to be associated to - such BGPVPNs. This driver uses journaling mechanism, unlike v1 driver, - which will first log the operation in journal table before execution. diff --git a/releasenotes/notes/deprecate-qos-driver-v1-96bce9842413700b.yaml b/releasenotes/notes/deprecate-qos-driver-v1-96bce9842413700b.yaml deleted file mode 100644 index 5d77f7de8..000000000 --- a/releasenotes/notes/deprecate-qos-driver-v1-96bce9842413700b.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - The QoS V1 driver is deprecated in the Pike cycle and will be removed - in the Queens release. \ No newline at end of file diff --git a/releasenotes/notes/devstack-default-driver-v2-6ae6ce789b4a6cc9.yaml b/releasenotes/notes/devstack-default-driver-v2-6ae6ce789b4a6cc9.yaml deleted file mode 100644 index c0f331d19..000000000 --- a/releasenotes/notes/devstack-default-driver-v2-6ae6ce789b4a6cc9.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -prelude: > - Changed devstack default to V2 driver. -other: - - Starting with Ocata, Devstack will use V2 drivers (where available) by - default. To force the use of V1 architecture drivers you can specify - 'ODL_V2DRIVER=False' in the local.conf file. diff --git a/releasenotes/notes/flat-network-support-7c032aabc21902b1.yaml b/releasenotes/notes/flat-network-support-7c032aabc21902b1.yaml deleted file mode 100644 index d5273ad5b..000000000 --- a/releasenotes/notes/flat-network-support-7c032aabc21902b1.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -prelude: > - Added FLAT type networks support. -features: - - In addition to existing supported types, networks - of type FLAT can be also used with ODL. diff --git a/releasenotes/notes/full-sync-f6b7ec1bd9ea0e52.yaml b/releasenotes/notes/full-sync-f6b7ec1bd9ea0e52.yaml deleted file mode 100644 index e0839d7dd..000000000 --- a/releasenotes/notes/full-sync-f6b7ec1bd9ea0e52.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -prelude: > - Full sync supports and ODL controller with no Neutron - resources on it. - This support is for the V2 driver, as V1 driver already - supports this. -features: - - The full sync process looks for a "canary" network on - the ODL controller side. - If such a network is found, it doesn't do anything. - If the network is missing then all the neutron - resources are re-created on ODL. - This supports cases when ODL controller comes online - with no Neutron resources on it (also referred to as - "cold reboot", but can happen on various cases). diff --git a/releasenotes/notes/functional-test-b0855d6f1d85da30.yaml b/releasenotes/notes/functional-test-b0855d6f1d85da30.yaml deleted file mode 100644 index a94628e52..000000000 --- a/releasenotes/notes/functional-test-b0855d6f1d85da30.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -prelude: > - The new class of test cases, functional test, has been - added. So was help scripts to setup necessary environment. -other: - - The functional tests were added. It's new class of test cases, - which requires pre-configured environment. 
Environment to - run such tests can be configured by tool in - networking-odl/tools.configure_for_func_testing.sh diff --git a/releasenotes/notes/journal-recovery-88e583ad2db22bcc.yaml b/releasenotes/notes/journal-recovery-88e583ad2db22bcc.yaml deleted file mode 100644 index 1d489a575..000000000 --- a/releasenotes/notes/journal-recovery-88e583ad2db22bcc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -prelude: > - Journal recovery for the V2 driver handles failed - journal entries. -features: - - The journal recovery mechanism handles failed journal - entries by inspecting ODL and deciding on the correct - course of action. - This support should be sufficient for the majority of - entry failures. diff --git a/releasenotes/notes/l2gw-driver-v2-b32aacf882ed446c.yaml b/releasenotes/notes/l2gw-driver-v2-b32aacf882ed446c.yaml deleted file mode 100644 index d402c787c..000000000 --- a/releasenotes/notes/l2gw-driver-v2-b32aacf882ed446c.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -prelude: > - L2Gateway Driver v2 or networking-odl. -features: - - | - A new version of L2Gateway driver that integrate OpenStack neutron - L2Gateway API with OpenDaylight backend. It supports CRUD operations - for l2gateway and l2gateway_connection. This driver uses journalling - mechanism, unlike v1 driver, which will first log the operation in - journal table before execution. diff --git a/releasenotes/notes/lbaas-driver-v2-46bf34992f4785d1.yaml b/releasenotes/notes/lbaas-driver-v2-46bf34992f4785d1.yaml deleted file mode 100644 index 5d4d4c9ce..000000000 --- a/releasenotes/notes/lbaas-driver-v2-46bf34992f4785d1.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -prelude: > - Complement the implementation of odl lbaas driver_v2. -features: - - Complement the implementation of odl lbaas driver_v2. - It supports CRUD operations for loadbalancer, listener, - pool, member and healthmonitor. -fixes: - - Includes the following bug fixes - Bug 1640076 - Using odl lbaas driver_v2 to create listener failed. - Bug 1633030 - Using odl lbaas driver_v2 to create loadbalancer failed. - Bug 1613583 - Odl lbaas driver_v2 Line 61 url_path error. - Bug 1613583 - Using ODL lbaas driver_v2 to create member failed. \ No newline at end of file diff --git a/releasenotes/notes/maintenance-thread-e54c3b4bd7c03546.yaml b/releasenotes/notes/maintenance-thread-e54c3b4bd7c03546.yaml deleted file mode 100644 index 7d96d3b96..000000000 --- a/releasenotes/notes/maintenance-thread-e54c3b4bd7c03546.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -prelude: > - Maintenance thread for the V2 driver. -features: - - The maintenance thread was introduced in the V2 driver - in order to perform various journal maintenance tasks, - such as - - * Stale lock release - * Completed entry cleanup - * Full sync - * Journal recovery - - The thread runs in a configurable interval and is HA - safe so at most one will be executing regardless of how - many threads are running concurrently. -upgrade: - - Maintenace lock table was added to synchronize multiple - threads. - diff --git a/releasenotes/notes/network-statistics-from-opendaylight-057a6b3c30626527.yaml b/releasenotes/notes/network-statistics-from-opendaylight-057a6b3c30626527.yaml deleted file mode 100644 index 634d49033..000000000 --- a/releasenotes/notes/network-statistics-from-opendaylight-057a6b3c30626527.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -prelude: > - Network Statistics From OpenDaylight. 
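# Standard-library sketch of the maintenance-thread idea described in the
# note above: a timer fires at a configurable interval and runs the journal
# maintenance tasks (stale lock release, completed entry cleanup, full sync,
# journal recovery), but only after taking a lock so that at most one worker
# executes them at a time.  The note says networking-odl uses a maintenance
# lock table for this; the in-process threading.Lock here is an assumption
# made only to keep the example self-contained.
import threading

_maintenance_lock = threading.Lock()


def start_maintenance(tasks, interval):
    def _run():
        if _maintenance_lock.acquire(blocking=False):
            try:
                for task in tasks:
                    task()
            finally:
                _maintenance_lock.release()
        # re-arm the timer whether or not this worker won the lock
        threading.Timer(interval, _run).start()
    threading.Timer(interval, _run).start()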
-features: - - Add a ceilometer driver to collect network - statistics information using REST APIs exposed by - network-statistics module in OpenDaylight. diff --git a/releasenotes/notes/new-netvirt-default-0eccc77d3cb54484.yaml b/releasenotes/notes/new-netvirt-default-0eccc77d3cb54484.yaml deleted file mode 100644 index 54f0a043b..000000000 --- a/releasenotes/notes/new-netvirt-default-0eccc77d3cb54484.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -prelude: > - The default setting for OpenDayligut openstack - service provider was changed from ovsdb netvirt - (odl-ovsdb-openstack) to new - netvirt(odl-netvirt-openstack) for OpenDaylight - Boron/Carbon or later. -other: - - With devstack by default with OpenDaylight after - Boron version, new netvirt openstack service - provider(odl-netvirt-openstack) is used instead - of legacy netvirt(odl-ovsdb-openstack). - diff --git a/releasenotes/notes/nuke-lbaasv1-driver-fce366522350fe21.yaml b/releasenotes/notes/nuke-lbaasv1-driver-fce366522350fe21.yaml deleted file mode 100644 index 48e5bcfc0..000000000 --- a/releasenotes/notes/nuke-lbaasv1-driver-fce366522350fe21.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -prelude: > - Remove LbaaS v1 driver, as LbaaS removed v1 API. -upgrade: - - Upgrade to use LBaaS v2 driver and migrate to use - LBaaS v2 driver. -deprecations: - - LBaaS v1 API driver for ODL is removed. - * LBaaS v2 API driver diff --git a/releasenotes/notes/odl-feature-negotiation-ece3201a6e9f8f74.yaml b/releasenotes/notes/odl-feature-negotiation-ece3201a6e9f8f74.yaml deleted file mode 100644 index a18037f0f..000000000 --- a/releasenotes/notes/odl-feature-negotiation-ece3201a6e9f8f74.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -prelude: > - OpenDaylight feature negotiation allows for networking_odl to adapt its - behavior to the features supported by the specific ODL version. -features: - - Networking-odl first attempts to read the ODL features from the - odl_features config value. If this config value is not present, - networking-odl requests the features from ODL via REST call. Note that this - occurs during the plugin initialize and if ODL is unreachable - networking-odl will keep trying until successful, essentially blocking - networking-odl initialization (and functionality) until successful. - As such, it is recommended that in production environments you manually - configure the odl_features config value. If you are not sure which features - your ODL supports, please consult the ODL documentation or you can retrieve - the list like this, - $ curl -u : http://:8080/restconf/operational/neutron:neutron/features | python -mjson.tool - Note that the features returned in the json have a namespace which should - be omitted from the config value. So, if you got to features, say - neutron-extensions:feature1 and neutron-extensions:feature2, the config - file should have, - odl_features=feature1,feature2 diff --git a/releasenotes/notes/port-binding-default-b5f24ad350b47eb0.yaml b/releasenotes/notes/port-binding-default-b5f24ad350b47eb0.yaml deleted file mode 100644 index bb5711137..000000000 --- a/releasenotes/notes/port-binding-default-b5f24ad350b47eb0.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -prelude: > - Change the default value of port_binding_controller - from network-topology to pseudo-agentdb-binding - as networking-topology will be deprecated. -upgrade: - - pseudo-agentdb-binding is supported by the version - of OpenDaylight Boron(0.5.x) or later. 
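# Illustrative helper (not part of networking-odl) for the feature-negotiation
# note above: the feature names returned by the ODL REST call carry a
# namespace, e.g. "neutron-extensions:feature1", while the odl_features
# config option expects the bare names.
def to_odl_features_value(feature_names):
    return ','.join(name.split(':', 1)[-1] for name in feature_names)

# to_odl_features_value(['neutron-extensions:feature1',
#                        'neutron-extensions:feature2'])
# returns 'feature1,feature2'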
- So for the version of OpenDaylight Beryllium or earlier, - the option, port_binding_controller, needs to be - explicitly configured to be legacy-port-binding or - network-topology(deprecated). -deprecations: - - port binding controller, network-topology, is - deprecated with OpenStack Ocata and will be removed - in future openstack version. diff --git a/releasenotes/notes/pseudo-agent-port-binding-0a3d1d193b99293e.yaml b/releasenotes/notes/pseudo-agent-port-binding-0a3d1d193b99293e.yaml deleted file mode 100644 index ccecf850e..000000000 --- a/releasenotes/notes/pseudo-agent-port-binding-0a3d1d193b99293e.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -prelude: > - Agentless Port binding controller using agentdb - for persistency with ODL provided host configuration. -features: - - Reads host configuration from ODL using a REST/get - and stores the information in Neutron agentdb for - persistency. This host configuration is read back - from agentdb and applied during port binding. - Without this feature several out-of-sync race - conditions were caused due to incorrect host - information. -fixes: - - Includes the following bug fixes - Bug 1608659 - pseudo_agentdb_binding AttributeError. - diff --git a/releasenotes/notes/qos-driver-v1-711698186ca693c4.yaml b/releasenotes/notes/qos-driver-v1-711698186ca693c4.yaml deleted file mode 100644 index 9330e634f..000000000 --- a/releasenotes/notes/qos-driver-v1-711698186ca693c4.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -prelude: > - QoS Driver V1 for networking-odl. -features: - - A new driver to integrate OpenStack neutron QoS API with - OpenDayLight backend. It supports CRUD operations for - QoS policy and its associated rules. The QoS driver in - tree is of version v1, which does not log the operation - request in journal table. diff --git a/releasenotes/notes/qos-driver-v2-4c869a6f0b8e3a4d.yaml b/releasenotes/notes/qos-driver-v2-4c869a6f0b8e3a4d.yaml deleted file mode 100644 index 8ac197d61..000000000 --- a/releasenotes/notes/qos-driver-v2-4c869a6f0b8e3a4d.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -prelude: > - QoS Driver V2 for networking-odl -features: - - A new version of QoS driver that integrate OpenStack - neutron QoS API with OpenDaylight backend. This driver - uses journaling mechanism unlike v1 driver, which will - first log the operation in journal table before execution. diff --git a/releasenotes/notes/remove-network-topology-67daff08f3d6ff14.yaml b/releasenotes/notes/remove-network-topology-67daff08f3d6ff14.yaml deleted file mode 100644 index 9d413d86d..000000000 --- a/releasenotes/notes/remove-network-topology-67daff08f3d6ff14.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -prelude: > - Eliminate network topology based port binding -upgrade: - - If network topology based port binding, - network-topology, is used, migrate to pseodu agent - based port binding, pseudo-agentdb-binding. -deprecations: - - network topology based port binding was removed. - So is network-topology value for port_binding_controllers. - Migrate pseudo-agentdb-binding port binding. diff --git a/releasenotes/notes/remove_qos_driver_v1-2bfbf1f979082b07.yaml b/releasenotes/notes/remove_qos_driver_v1-2bfbf1f979082b07.yaml deleted file mode 100644 index 10e4a91f5..000000000 --- a/releasenotes/notes/remove_qos_driver_v1-2bfbf1f979082b07.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -prelude: > - As the QoS v2 driver adapted new framework from OpenStack neutron's - qos driver framework, QoS v1 driver using notification_drivers is no - longer needed. 
-upgrade: - - Removing QoS V1 driver which is using deprecated notification - driver framework from OpenStack Neutron's QoS driver base. diff --git a/releasenotes/notes/sfc-driver-v1-d11fd5fd17114f2c.yaml b/releasenotes/notes/sfc-driver-v1-d11fd5fd17114f2c.yaml deleted file mode 100644 index 91723864b..000000000 --- a/releasenotes/notes/sfc-driver-v1-d11fd5fd17114f2c.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -prelude: > - Networking SFC V1 driver for networking-odl. -features: - - First version of the driver to support networking-sfc - API through OpenDaylight controller. This driver - support CRUD operation for flow classifier, port-pair, - port-pair-group and port-pair-chain. This is version 1 - driver and does not support the journal based - implementation. diff --git a/releasenotes/notes/sfc-driver-v2-9378b0db810b6fcb.yaml b/releasenotes/notes/sfc-driver-v2-9378b0db810b6fcb.yaml deleted file mode 100644 index 3f3e58b48..000000000 --- a/releasenotes/notes/sfc-driver-v2-9378b0db810b6fcb.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -prelude: > - Networking SFC V2 driver for networking-odl. -features: - - Second version of the driver to support networking-sfc - API through OpenDaylight controller. This driver - support CRUD operation for flow classifier, port-pair, - port-pair-group and port-pair-chain. This is version 2 - driver and it does support the journal based - implementation, where operations are committed in the - data store first and then journal thread sycn it with - OpenDaylight. This implementation guarantee the ordering - of the CRUD events. - networking-sfc ocata or later is required. - https://review.openstack.org/#/c/363893/ is the corresponding patch - of networking-sfc in Ocata cycle. diff --git a/releasenotes/notes/trunk-drivers-3592691bdd08929e.yaml b/releasenotes/notes/trunk-drivers-3592691bdd08929e.yaml deleted file mode 100644 index 773fd8cd7..000000000 --- a/releasenotes/notes/trunk-drivers-3592691bdd08929e.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -prelude: > - Trunk Drivers v1 and v2 for networking-odl. -features: - - | - A new driver to integrate OpenStack TrunkPort API with OpenDayLight - backend. It supports CRUD operations for TrunkPorts. The version v2 - driver will first log the call in journal table before execution. - Version v1 driver doesn't log any calls. diff --git a/releasenotes/notes/version-bump-16230eadac71cbb0.yaml b/releasenotes/notes/version-bump-16230eadac71cbb0.yaml deleted file mode 100644 index 3ed17fced..000000000 --- a/releasenotes/notes/version-bump-16230eadac71cbb0.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -prelude: > - networking-odl adopts version number aligned - with neutron from Pike release. - The version number is bumped 11.x.x. -other: - - version is bumped to 11:pike from 4:ocata. diff --git a/releasenotes/notes/vlan-transparency-63c153d310eacc5d.yaml b/releasenotes/notes/vlan-transparency-63c153d310eacc5d.yaml deleted file mode 100644 index d9ba8ec29..000000000 --- a/releasenotes/notes/vlan-transparency-63c153d310eacc5d.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -prelude: > - Support for vlan-transparency. -features: - - The extension `vlan-transparent` is supported for Newton - release, unconditionally only vxlan is considered to - support its extension independent of ODL openstack - provider. It's future work to allow ODL openstack - provider to report list of supported network types - at start up statically. 
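# Sketch of the statically hard-coded behaviour described above (an
# assumption about the intent, not the exact networking-odl code): only
# VXLAN networks are reported as supporting the vlan-transparent extension,
# independent of what the OpenDaylight openstack provider could handle.
def check_vlan_transparency(network):
    return network.get('provider:network_type') == 'vxlan'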
-issues: - - Currently only network type of VXLAN is statically - considered to support vlan-transparent independently - of OpenDaylight openstack provider. - It should use capability report by OpenDaylight - openstack provider statically instead of static hard - code. -other: - - For details please read - 'VLAN trunking networks for NFV - '_. diff --git a/releasenotes/notes/websocket-client-7c8117671aeea181.yaml b/releasenotes/notes/websocket-client-7c8117671aeea181.yaml deleted file mode 100644 index ff7764c9e..000000000 --- a/releasenotes/notes/websocket-client-7c8117671aeea181.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -prelude: > - Websocket-client provides framework to create - webscket clients for ODL. -features: - - Features include callback on new notifications - and callback on reconnection which includes - status information. \ No newline at end of file diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 6902fe721..000000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,271 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Networking OpenDaylight Release Notes documentation build configuration file, created by -# sphinx-quickstart on Fri Jul 22 14:54:21 2016. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# openstackdocstheme options -repository_name = 'openstack/networking-odl' -bug_project = 'networking-odl' -bug_tag = 'doc' -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Networking OpenDaylight Release Notes' -copyright = u'2016, networking-odl developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -import pbr.version -version_info = pbr.version.VersionInfo('networking-odl') -# The full version, including alpha/beta/rc tags. -release = version_info.canonical_version_string() -# The short X.Y version. 
-version = version_info.version_string_with_vcs() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. 
-#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'NetworkingOpenDaylightReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'NetworkingOpenDaylightReleaseNotes.tex', u'Networking OpenDaylight Release Notes Documentation', - u'networking-odl developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'networkingopendaylightreleasenotes', u'Networking OpenDaylight Release Notes Documentation', - [u'networking-odl developers'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'NetworkingOpenDaylightReleaseNotes', u'Networking OpenDaylight Release Notes Documentation', - u'networking-odl developers', 'NetworkingOpenDaylightReleaseNotes', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. 
-#texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 1c31f7f9e..000000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. Networking OpenDaylight Release Notes documentation master file, created by - sphinx-quickstart on Fri Jul 22 14:54:21 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Networking OpenDaylight Release Notes's documentation! -================================================================= - -Contents: - -.. toctree:: - :maxdepth: 2 - - unreleased - ocata - newton diff --git a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po deleted file mode 100644 index 79b7b7fb8..000000000 --- a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,26 +0,0 @@ -# Gérald LONLAS , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: Networking OpenDaylight Release Notes 2.0.1.dev217\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2016-10-23 11:36+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-10-22 05:33+0000\n" -"Last-Translator: Gérald LONLAS \n" -"Language-Team: French\n" -"Language: fr\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n > 1)\n" - -msgid "Contents:" -msgstr "Contenu :" - -msgid "Current Series Release Notes" -msgstr "Note de la release actuelle" - -msgid "Welcome to Networking OpenDaylight Release Notes's documentation!" -msgstr "" -"Bienvenue dans la documentation de la note de Release de Networking " -"OpenDaylight" diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index 97036ed25..000000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Newton Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f42e..000000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 875030f9d..000000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================ -Current Series Release Notes -============================ - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index f42763d96..000000000 --- a/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -pbr!=2.1.0,>=2.0.0 # Apache-2.0 -Babel!=2.4.0,>=2.3.4 # BSD -stevedore>=1.20.0 # Apache-2.0 -debtcollector>=1.2.0 # Apache-2.0 -neutron-lib>=1.9.0 # Apache-2.0 -websocket-client>=0.32.0 # LGPLv2+ diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 1f4a04200..000000000 --- a/setup.cfg +++ /dev/null @@ -1,107 +0,0 @@ -[metadata] -name = networking-odl -summary = OpenStack Networking -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = https://docs.openstack.org/networking-odl/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -[files] -packages = - networking_odl -data_files = - etc/neutron = - etc/neutron/plugins/ml2/ml2_conf_odl.ini - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[entry_points] -# NOTE(asomya): The V2 ML2 driver and service_plugin is experimental and only -# for testing and evaluation purposes. Once the driver/service_plugin has been -# proven to be reliable, the current driver/service_plugin will be replaced by -# the V2 versions. Please take care to only specify a single version of the ML2 -# driver and service_plugin in the configuration files. Mix-matching between -# different versions of the ML2 drivers/service_plugins will cause a lot of -# issues in your environment. -# -# Allowed configuration settings: -# -# [ml2] -# mechanism_drivers = opendaylight -# [DEFAULT] -# service_plugins = odl-router -# -# OR -# -# [ml2] -# mechanism_drivers = opendaylight_v2 -# [DEFAULT] -# service_plugins = odl-router_v2 -console_scripts = - neutron-odl-ovs-hostconfig = networking_odl.cmd.set_ovs_hostconfigs:main -neutron.ml2.mechanism_drivers = - opendaylight = networking_odl.ml2.mech_driver:OpenDaylightMechanismDriver - opendaylight_v2 = networking_odl.ml2.mech_driver_v2:OpenDaylightMechanismDriver -neutron.service_plugins = - odl-router = networking_odl.l3.l3_odl:OpenDaylightL3RouterPlugin - odl-router_v2 = networking_odl.l3.l3_odl_v2:OpenDaylightL3RouterPlugin -neutron.db.alembic_migrations = - networking-odl = networking_odl.db.migration:alembic_migrations -networking_odl.ml2.port_binding_controllers = - legacy-port-binding = networking_odl.ml2.legacy_port_binding:LegacyPortBindingManager - pseudo-agentdb-binding = networking_odl.ml2.pseudo_agentdb_binding:PseudoAgentDBBindingController -oslo.config.opts = - ml2_odl = networking_odl.common.config:list_opts -networking_sfc.sfc.drivers = - odl = networking_odl.sfc.sfc_driver_v1:OpenDaylightSFCDriverV1 - odl_v2 = networking_odl.sfc.sfc_driver_v2:OpenDaylightSFCDriverV2 -networking_sfc.flowclassifier.drivers = - odl = networking_odl.sfc.flowclassifier.sfc_flowclassifier_v1:OpenDaylightSFCFlowClassifierDriverV1 - odl_v2 = networking_odl.sfc.flowclassifier.sfc_flowclassifier_v2:OpenDaylightSFCFlowClassifierDriverV2 -network.statistics.drivers = - opendaylight.v2 = networking_odl.ceilometer.network.statistics.opendaylight_v2.driver:OpenDaylightDriver - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source -warning-is-error = 1 - -[upload_sphinx] -upload-dir = doc/build/html - -[build_releasenotes] -build-dir = releasenotes/build -source-dir = releasenotes/source 
-all_files = 1 - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = networking_odl/locale/networking-odl.pot - -[compile_catalog] -directory = networking_odl/locale -domain = networking-odl - -[update_catalog] -domain = networking-odl -output_dir = networking_odl/locale -input_file = networking_odl/locale/networking-odl.pot - -[wheel] -universal = 1 diff --git a/setup.py b/setup.py deleted file mode 100644 index 566d84432..000000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index a8781dd2c..000000000 --- a/test-requirements.txt +++ /dev/null @@ -1,24 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 - -coverage!=4.4,>=4.0 # Apache-2.0 -doc8 # Apache-2.0 -flake8-import-order==0.12 # LGPLv3 -python-subunit>=0.0.18 # Apache-2.0/BSD -sphinx>=1.6.2 # BSD -openstackdocstheme>=1.11.0 # Apache-2.0 -oslotest>=1.10.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 -pylint==1.4.5 # GPLv2 -testrepository>=0.0.18 # Apache-2.0/BSD -testresources>=0.2.4 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -WebTest>=2.0 # MIT -testtools>=1.4.0 # MIT -bashate>=0.2 # Apache-2.0 - -# releasenotes -reno!=2.3.1,>=1.8.0 # Apache-2.0 diff --git a/tools/check_bash.sh b/tools/check_bash.sh deleted file mode 100644 index e9d178eeb..000000000 --- a/tools/check_bash.sh +++ /dev/null @@ -1,31 +0,0 @@ -#! /bin/sh - -# Copyright (C) 2014 VA Linux Systems Japan K.K. -# Copyright (C) 2014 YAMAMOTO Takashi -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# The purpose of this script is to avoid casual introduction of more -# bash dependency. 
Please consider alternatives before commiting code -# which uses bash specific features. - -# Ignore comments, but include shebangs -OBSERVED=$(grep -E '^([^#]|#!).*bash' tox.ini tools/* | wc -l) -EXPECTED=5 -if [ ${EXPECTED} -ne ${OBSERVED} ]; then - echo Unexpected number of bash usages are detected. - echo Please read the comment in $0 - exit 1 -fi -exit 0 diff --git a/tools/check_i18n.py b/tools/check_i18n.py deleted file mode 100644 index 697ad180d..000000000 --- a/tools/check_i18n.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from __future__ import print_function - -import compiler -import imp -import os.path -import sys - - -def is_localized(node): - """Check message wrapped by _()""" - if isinstance(node.parent, compiler.ast.CallFunc): - if isinstance(node.parent.node, compiler.ast.Name): - if node.parent.node.name == '_': - return True - return False - - -class ASTWalker(compiler.visitor.ASTVisitor): - - def default(self, node, *args): - for child in node.getChildNodes(): - child.parent = node - compiler.visitor.ASTVisitor.default(self, node, *args) - - -class Visitor(object): - - def __init__(self, filename, i18n_msg_predicates, - msg_format_checkers, debug): - self.filename = filename - self.debug = debug - self.error = 0 - self.i18n_msg_predicates = i18n_msg_predicates - self.msg_format_checkers = msg_format_checkers - with open(filename) as f: - self.lines = f.readlines() - - def visitConst(self, node): - if not isinstance(node.value, str): - return - - if is_localized(node): - for (checker, msg) in self.msg_format_checkers: - if checker(node): - print('%s:%d %s: %s Error: %s' % - (self.filename, node.lineno, - self.lines[node.lineno - 1][:-1], - checker.__name__, msg), - file=sys.stderr) - self.error = 1 - return - if debug: - print('%s:%d %s: %s' % - (self.filename, node.lineno, - self.lines[node.lineno - 1][:-1], - "Pass")) - else: - for (predicate, action, msg) in self.i18n_msg_predicates: - if predicate(node): - if action == 'skip': - if debug: - print('%s:%d %s: %s' % - (self.filename, node.lineno, - self.lines[node.lineno - 1][:-1], - "Pass")) - return - elif action == 'error': - print('%s:%d %s: %s Error: %s' % - (self.filename, node.lineno, - self.lines[node.lineno - 1][:-1], - predicate.__name__, msg), - file=sys.stderr) - self.error = 1 - return - elif action == 'warn': - print('%s:%d %s: %s' % - (self.filename, node.lineno, - self.lines[node.lineno - 1][:-1], - "Warn: %s" % msg)) - return - print('Predicate with wrong action!', file=sys.stderr) - - -def is_file_in_black_list(black_list, f): - for f in black_list: - if os.path.abspath(input_file).startswith( - os.path.abspath(f)): - return True - return False - - -def check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug): - input_mod = compiler.parseFile(input_file) - v = compiler.visitor.walk(input_mod, - Visitor(input_file, - i18n_msg_predicates, - msg_format_checkers, - debug), - ASTWalker()) - return v.error - - -if 
__name__ == '__main__': - input_path = sys.argv[1] - cfg_path = sys.argv[2] - try: - cfg_mod = imp.load_source('', cfg_path) - except Exception: - print("Load cfg module failed", file=sys.stderr) - sys.exit(1) - - i18n_msg_predicates = cfg_mod.i18n_msg_predicates - msg_format_checkers = cfg_mod.msg_format_checkers - black_list = cfg_mod.file_black_list - - debug = False - if len(sys.argv) > 3: - if sys.argv[3] == '-d': - debug = True - - if os.path.isfile(input_path): - sys.exit(check_i18n(input_path, - i18n_msg_predicates, - msg_format_checkers, - debug)) - - error = 0 - for dirpath, dirs, files in os.walk(input_path): - for f in files: - if not f.endswith('.py'): - continue - input_file = os.path.join(dirpath, f) - if is_file_in_black_list(black_list, input_file): - continue - if check_i18n(input_file, - i18n_msg_predicates, - msg_format_checkers, - debug): - error = 1 - sys.exit(error) diff --git a/tools/check_i18n_test_case.txt b/tools/check_i18n_test_case.txt deleted file mode 100644 index 3d1391d94..000000000 --- a/tools/check_i18n_test_case.txt +++ /dev/null @@ -1,67 +0,0 @@ -# test-case for check_i18n.py -# python check_i18n.py check_i18n.txt -d - -# message format checking -# capital checking -msg = _("hello world, error") -msg = _("hello world_var, error") -msg = _('file_list xyz, pass') -msg = _("Hello world, pass") - -# format specifier checking -msg = _("Hello %s world %d, error") -msg = _("Hello %s world, pass") -msg = _("Hello %(var1)s world %(var2)s, pass") - -# message has been localized -# is_localized -msg = _("Hello world, pass") -msg = _("Hello world, pass") % var -LOG.debug(_('Hello world, pass')) -LOG.info(_('Hello world, pass')) -raise x.y.Exception(_('Hello world, pass')) -raise Exception(_('Hello world, pass')) - -# message need be localized -# is_log_callfunc -LOG.debug('hello world, error') -LOG.debug('hello world, error' % xyz) -sys.append('hello world, warn') - -# is_log_i18n_msg_with_mod -LOG.debug(_('Hello world, error') % xyz) - -# default warn -msg = 'hello world, warn' -msg = 'hello world, warn' % var - -# message needn't be localized -# skip only one word -msg = '' -msg = "hello,pass" - -# skip dict -msg = {'hello world, pass': 1} - -# skip list -msg = ["hello world, pass"] - -# skip subscript -msg['hello world, pass'] - -# skip xml marker -msg = ", pass" - -# skip sql statement -msg = "SELECT * FROM xyz WHERE hello=1, pass" -msg = "select * from xyz, pass" - -# skip add statement -msg = 'hello world' + e + 'world hello, pass' - -# skip doc string -""" -Hello world, pass -""" -class Msg: - pass diff --git a/tools/clean.sh b/tools/clean.sh deleted file mode 100755 index 27bc219f9..000000000 --- a/tools/clean.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes -rm -rf */*.deb -rm -rf ./plugins/**/build/ ./plugins/**/dist -rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-* diff --git a/tools/coding-checks.sh b/tools/coding-checks.sh deleted file mode 100755 index b92de05e0..000000000 --- a/tools/coding-checks.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/sh -# this was stoken from Neutron with the change neutron -> networking_odl - -set -eu - -usage () { - echo "Usage: $0 [OPTION]..." - echo "Run Neutron's coding check(s)" - echo "" - echo " -Y, --pylint [] Run pylint check on the entire neutron module or just files changed in basecommit (e.g. 
HEAD~1)" - echo " -h, --help Print this usage message" - echo - exit 0 -} - -process_options () { - i=1 - while [ $i -le $# ]; do - eval opt=\$$i - case $opt in - -h|--help) usage;; - -Y|--pylint) pylint=1;; - *) scriptargs="$scriptargs $opt" - esac - i=$((i+1)) - done -} - -run_pylint () { - local target="${scriptargs:-all}" - - if [ "$target" = "all" ]; then - files="networking_odl" - else - case "$target" in - *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; - *) echo "$target is an unrecognized basecommit"; exit 1;; - esac - fi - - echo "Running pylint..." - echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." - if [ -n "${files}" ]; then - pylint --rcfile=.pylintrc --output-format=colorized ${files} - else - echo "No python changes in this commit, pylint check not required." - exit 0 - fi -} - -scriptargs= -pylint=1 - -process_options $@ - -if [ $pylint -eq 1 ]; then - run_pylint - exit 0 -fi diff --git a/tools/configure_for_func_testing.sh b/tools/configure_for_func_testing.sh deleted file mode 100755 index c22466718..000000000 --- a/tools/configure_for_func_testing.sh +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env bash - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -set -e - - -# Control variable used to determine whether to execute this script -# directly or allow the gate_hook to import. -IS_GATE=${IS_GATE:-False} -USE_CONSTRAINT_ENV=${USE_CONSTRAINT_ENV:-True} - - -if [[ "$IS_GATE" != "True" ]] && [[ "$#" -lt 1 ]]; then - >&2 echo "Usage: $0 /path/to/devstack [-i] -Configure a host to run Networking ODL's functional test suite. - --i Install Networking ODL's package dependencies. By default, it is assumed - that devstack has already been used to deploy Networking ODL to the - target host and that package dependencies need not be installed. - -Warning: This script relies on devstack to perform extensive -modification to the underlying host. It is recommended that it be -invoked only on a throw-away VM." - exit 1 -fi - - -# Skip the first argument -OPTIND=2 -while getopts ":i" opt; do - case $opt in - i) - INSTALL_BASE_DEPENDENCIES=True - ;; - esac - -done - -# Default to environment variables to permit the gate_hook to override -# when sourcing. -VENV=${VENV:-dsvm-functional} -DEVSTACK_PATH=${DEVSTACK_PATH:-$(cd "$1" && pwd)} -PROJECT_NAME=${PROJECT_NAME:-networking-odl} -REPO_BASE=${GATE_DEST:-$(cd $(dirname "$0")/../.. && pwd)} -INSTALL_MYSQL_ONLY=${INSTALL_MYSQL_ONLY:-False} -# The gate should automatically install dependencies. -INSTALL_BASE_DEPENDENCIES=${INSTALL_BASE_DEPENDENCIES:-$IS_GATE} - - -if [ ! -f "$DEVSTACK_PATH/stack.sh" ]; then - >&2 echo "Unable to find devstack at '$DEVSTACK_PATH'. Please verify that the specified path points to a valid devstack repo." - exit 1 -fi - - -set -x - - -function _init { - # Subsequently-called devstack functions depend on the following variables. 
- HOST_IP=127.0.0.1 - FILES=$DEVSTACK_PATH/files - TOP_DIR=$DEVSTACK_PATH - - source $DEVSTACK_PATH/inc/meta-config - extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto - source $DEVSTACK_PATH/stackrc - - # Allow the gate to override values set by stackrc. - DEST=${GATE_DEST:-$DEST} - STACK_USER=${GATE_STACK_USER:-$STACK_USER} - REQUIREMENTS_DIR=$DEST/requirements - if [[ -n "$SCREEN_LOGDIR" ]]; then - mkdir -p $SCREEN_LOGDIR - fi -} - - -function _install_base_deps { - echo_summary "Installing base dependencies" - - INSTALL_TESTONLY_PACKAGES=True - PACKAGES=$(get_packages general) - # Do not install 'python-' prefixed packages other than - # python-dev*. Networking ODL's functional testing relies on deployment - # to a tox env so there is no point in installing python - # dependencies system-wide. - PACKAGES=$(echo $PACKAGES | perl -pe 's|python-(?!dev)[^ ]*||g') - install_package $PACKAGES -} - - -# _install_databases [install_pg] -function _install_databases { - local install_pg=${1:-True} - - echo_summary "Installing databases" - - # Avoid attempting to configure the db if it appears to already - # have run. The setup as currently defined is not idempotent. - if mysql openstack_citest > /dev/null 2>&1 < /dev/null; then - echo_summary "DB config appears to be complete, skipping." - return 0 - fi - - MYSQL_PASSWORD=${MYSQL_PASSWORD:-secretmysql} - DATABASE_PASSWORD=${DATABASE_PASSWORD:-secretdatabase} - - source $DEVSTACK_PATH/lib/database - - enable_service mysql - initialize_database_backends - install_database - configure_database_mysql - - if [[ "$install_pg" == "True" ]]; then - # acl package includes setfacl. - install_package acl - enable_service postgresql - initialize_database_backends - install_database - configure_database_postgresql - fi - - # Set up the 'openstack_citest' user and database in each backend - tmp_dir=$(mktemp -d) - trap "rm -rf $tmp_dir" EXIT - - cat << EOF > $tmp_dir/mysql.sql -CREATE DATABASE openstack_citest; -CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest'; -CREATE USER 'openstack_citest' IDENTIFIED BY 'openstack_citest'; -GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'localhost'; -GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'; -FLUSH PRIVILEGES; -EOF - /usr/bin/mysql -u root < $tmp_dir/mysql.sql - - if [[ "$install_pg" == "True" ]]; then - cat << EOF > $tmp_dir/postgresql.sql -CREATE USER openstack_citest WITH CREATEDB LOGIN PASSWORD 'openstack_citest'; -CREATE DATABASE openstack_citest WITH OWNER openstack_citest; -EOF - - # User/group postgres needs to be given access to tmp_dir - setfacl -m g:postgres:rwx $tmp_dir - sudo -u postgres /usr/bin/psql --file=$tmp_dir/postgresql.sql - fi -} - - -function _install_infra { - echo_summary "Installing infra" - - pip_install -U virtualenv - source $DEVSTACK_PATH/lib/infra - install_infra -} - - -function _install_opendaylight { - echo_summary "Install OpenDaylight" - - # fake up necessary environment for odl to install/configure - source $DEVSTACK_PATH/lib/neutron-legacy - neutron_plugin_configure_common - _create_neutron_conf_dir - mkdir -p $NEUTRON_CONF_DIR - touch $NEUTRON_CONF - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - touch /$Q_PLUGIN_CONF_FILE - - NETWORKING_ODL_DIR=${NETWORKING_ODL_DIR:-$REPO_BASE/networking-odl} - ODL_V2DRIVER=${ODL_V2DRIVER:-True} - # openstack service provider isn't needed, only ODL neutron northbound - # is necessary for functional test - 
ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-neutron-logger - if [[ "$VENV" =~ "dsvm-fullstack" ]]; then - export ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-openstack,odl-neutron-logger - fi - ODL_BOOT_WAIT_URL=controller/nb/v2/neutron/networks - source $NETWORKING_ODL_DIR/devstack/settings.odl - - set +e - curl -o /dev/null --fail --silent --head -u \ - ${ODL_USERNAME}:${ODL_PASSWORD} \ - http://${ODL_MGR_HOST}:${ODL_PORT}/${ODL_BOOT_WAIT_URL} - local result=$? - set -e - if [ $result -eq 0 ]; then - echo_summary "OpenDaylight config appears to be complete, skipping" - return 0 - fi - - # start_odl tries to run under screen session. ensure screen is running - USE_SCREEN=$(trueorfalse True USE_SCREEN) - if [[ "$USE_SCREEN" == "True" ]]; then - # Create a new named screen to run processes in - if ! (etype -p screen > /dev/null && screen -ls | egrep -q "[0-9]\.$SCREEN_NAME"); then - screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash - sleep 1 - fi - fi - - enable_service odl-server - if [[ "$VENV" =~ "fullstack" ]]; then - enable_service odl-compute - # They are needed by configure_opendaylight_l3 - FLOATING_RANGE=172.24.5.0/24 - PUBLIC_NETWORK_GATEWAY=172.24.5.1 - fi - source $NETWORKING_ODL_DIR/devstack/plugin.sh stack install - source $NETWORKING_ODL_DIR/devstack/plugin.sh stack post-config -} - - -function _install_post_devstack { - echo_summary "Performing post-devstack installation" - - _install_databases - # networkign-odl devstack plugin requires infra - _install_infra - _install_opendaylight - - if is_ubuntu; then - install_package isc-dhcp-client - install_package netcat-openbsd - elif is_fedora; then - install_package dhclient - else - exit_distro_not_supported "installing dhclient package" - fi -} - - -function _configure_iptables_rules { - # For linuxbridge agent fullstack tests we need to add special rules to - # iptables for connection of agents to rabbitmq: - CHAIN_NAME="openstack-INPUT" - sudo iptables -n --list $CHAIN_NAME 1> /dev/null 2>&1 || CHAIN_NAME="INPUT" - sudo iptables -I $CHAIN_NAME -s 240.0.0.0/8 -p tcp -m tcp -d 240.0.0.0/8 --dport 5672 -j ACCEPT -} - - -function configure_host_for_func_testing { - echo_summary "Configuring host for functional testing" - - if [[ "$INSTALL_BASE_DEPENDENCIES" == "True" ]]; then - # Installing of the following can be achieved via devstack by - # installing Networking ODL, so their installation is conditional to - # minimize the work to do on a devstack-configured host. - _install_base_deps - fi - _install_post_devstack -} - - -_init - - -if [[ "$IS_GATE" != "True" ]]; then - if [[ "$INSTALL_MYSQL_ONLY" == "True" ]]; then - _install_databases nopg - else - configure_host_for_func_testing - fi -fi - -if [[ "$VENV" =~ "dsvm-fullstack" ]]; then - _configure_iptables_rules -fi diff --git a/tools/i18n_cfg.py b/tools/i18n_cfg.py deleted file mode 100644 index 0ec3cf62c..000000000 --- a/tools/i18n_cfg.py +++ /dev/null @@ -1,109 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import compiler -import re - - -def is_log_callfunc(n): - """LOG.xxx('hello %s' % xyz) and LOG('hello')""" - if isinstance(n.parent, compiler.ast.Mod): - n = n.parent - if isinstance(n.parent, compiler.ast.CallFunc): - if isinstance(n.parent.node, compiler.ast.Getattr): - if isinstance(n.parent.node.getChildNodes()[0], - compiler.ast.Name): - if n.parent.node.getChildNodes()[0].name == 'LOG': - return True - return False - - -def is_log_i18n_msg_with_mod(n): - """LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)""" - if not isinstance(n.parent.parent, compiler.ast.Mod): - return False - n = n.parent.parent - if isinstance(n.parent, compiler.ast.CallFunc): - if isinstance(n.parent.node, compiler.ast.Getattr): - if isinstance(n.parent.node.getChildNodes()[0], - compiler.ast.Name): - if n.parent.node.getChildNodes()[0].name == 'LOG': - return True - return False - - -def is_wrong_i18n_format(n): - """Check _('hello %s' % xyz)""" - if isinstance(n.parent, compiler.ast.Mod): - n = n.parent - if isinstance(n.parent, compiler.ast.CallFunc): - if isinstance(n.parent.node, compiler.ast.Name): - if n.parent.node.name == '_': - return True - return False - - -""" -Used for check message need be localized or not. -(predicate_func, action, message) -""" -i18n_msg_predicates = [ - # Skip ['hello world', 1] - (lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''), - # Skip {'hellow world', 1} - (lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''), - # Skip msg['hello world'] - (lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''), - # Skip doc string - (lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''), - # Skip msg = "hello", in normal, message should more than one word - (lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''), - # Skip msg = 'hello world' + vars + 'world hello' - (lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''), - # Skip xml markers msg = "" - (lambda n: len(re.compile("").findall(n.value)) > 0, 'skip', ''), - # Skip sql statement - (lambda n: len( - re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0, - 'skip', ''), - # LOG.xxx() - (is_log_callfunc, 'error', 'Message must be localized'), - # _('hello %s' % xyz) should be _('hello %s') % xyz - (is_wrong_i18n_format, 'error', - ("Message format was wrong, _('hello %s' % xyz) " - "should be _('hello %s') % xyz")), - # default - (lambda n: True, 'warn', 'Message might need localized') -] - - -""" -Used for checking message format. 
(checker_func, message) -""" -msg_format_checkers = [ - # If message contain more than on format specifier, it should use - # mapping key - (lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1, - "The message shouldn't contain more than one format specifier"), - # Check capital - (lambda n: n.value.split(' ')[0].count('_') == 0 and - n.value[0].isalpha() and - n.value[0].islower(), - "First letter must be capital"), - (is_log_i18n_msg_with_mod, - 'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)') -] - - -file_black_list = ["./neutron/tests/unit", - "./neutron/openstack", - "./neutron/plugins/bigswitch/tests"] diff --git a/tools/install_venv.py b/tools/install_venv.py deleted file mode 100644 index 8507ea2ca..000000000 --- a/tools/install_venv.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Copyright 2010 OpenStack Foundation. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Installation script for Neutron's development virtualenv -""" -from __future__ import print_function - -import os -import sys - -import install_venv_common as install_venv - - -def print_help(): - help = """ - Neutron development environment setup is complete. - - Neutron development uses virtualenv to track and manage Python dependencies - while in development and testing. - - To activate the Neutron virtualenv for the extent of your current shell - session you can run: - - $ source .venv/bin/activate - - Or, if you prefer, you can run commands in the virtualenv on a case by case - basis by running: - - $ tools/with_venv.sh - - Also, make test will automatically use the virtualenv. 
- """ - print(help) - - -def main(argv): - root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - venv = os.path.join(root, '.venv') - pip_requires = os.path.join(root, 'requirements.txt') - test_requires = os.path.join(root, 'test-requirements.txt') - py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) - project = 'Neutron' - install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, - py_version, project) - options = install.parse_args(argv) - install.check_python_version() - install.check_dependencies() - install.create_virtualenv(no_site_packages=options.no_site_packages) - install.install_dependencies() - print_help() - - -if __name__ == '__main__': - sys.exit(main(sys.argv)) diff --git a/tools/ostestr_compat_shim.sh b/tools/ostestr_compat_shim.sh deleted file mode 100755 index 195cbd248..000000000 --- a/tools/ostestr_compat_shim.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -# preserve old behavior of using an arg as a regex when '--' is not present -case $@ in - (*--*) ostestr $@;; - ('') ostestr;; - (*) ostestr --regex "$@" -esac diff --git a/tools/tox_install.sh b/tools/tox_install.sh deleted file mode 100755 index fbf6758b4..000000000 --- a/tools/tox_install.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -set -e -set -x - -DIR=$(dirname $0) -${DIR}/tox_install_project.sh ceilometer ceilometer $* -${DIR}/tox_install_project.sh neutron neutron $* -${DIR}/tox_install_project.sh neutron-fwaas neutron_fwaas $* -${DIR}/tox_install_project.sh neutron-lbaas neutron_lbaas $* -${DIR}/tox_install_project.sh networking-l2gw networking_l2gw $* -${DIR}/tox_install_project.sh networking-sfc networking_sfc $* -${DIR}/tox_install_project.sh networking-bgpvpn networking_bgpvpn $* -CONSTRAINTS_FILE=$1 -shift - -install_cmd="pip install" -if [ $CONSTRAINTS_FILE != "unconstrained" ]; then - install_cmd="$install_cmd -c$CONSTRAINTS_FILE" -fi - -$install_cmd -U $* -exit $? diff --git a/tools/tox_install_project.sh b/tools/tox_install_project.sh deleted file mode 100755 index 8d1af0edb..000000000 --- a/tools/tox_install_project.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env bash - -# Many of neutron's repos suffer from the problem of depending on neutron, -# but it not existing on pypi. - -# This wrapper for tox's package installer will use -# the local tree in home directory if exists, -# else the existing package if it exists, -# else use zuul-cloner if that program exists, -# else grab it from project master via https://git.openstack.org/openstack, -# That last case should only happen with devs running unit tests locally. - -# From the tox.ini config page: -# install_command=ARGV -# default: -# pip install {opts} {packages} - -PROJ=$1 -MOD=$2 -shift 2 - -ZUUL_CLONER=/usr/zuul-env/bin/zuul-cloner -neutron_installed=$(echo "import ${MOD}" | python 2>/dev/null ; echo $?) -BRANCH_NAME=master -NEUTRON_DIR=$HOME/${PROJ} - -set -e -set -x - -CONSTRAINTS_FILE=$1 -shift - -install_cmd="pip install" -if [ $CONSTRAINTS_FILE != "unconstrained" ]; then - install_cmd="$install_cmd -c$CONSTRAINTS_FILE" -fi - -# The devstack based functional tests have neutron checked out in -# $NEUTRON_DIR on the test systems - with the change to test in it. -# Use this directory if it exists, so that this script installs the -# neutron version to test here. -# Note that the functional tests use sudo to run tox and thus -# variables used for zuul-cloner to check out the correct version are -# lost. 
-if [ -d "$NEUTRON_DIR" ]; then - echo "FOUND ${PROJ} code at $NEUTRON_DIR - using" - $install_cmd -U -e $NEUTRON_DIR -elif [ $neutron_installed -eq 0 ]; then - echo "ALREADY INSTALLED" > /tmp/tox_install-${PROJ}.txt - location=$(python -c "import ${MOD}; print(${MOD}.__file__)") - echo "ALREADY INSTALLED at $location" - - echo "${PROJ} already installed; using existing package" -elif [ -x "$ZUUL_CLONER" ]; then - echo "ZUUL CLONER" > /tmp/tox_install-${PROJ}.txt - # Make this relative to current working directory so that - # git clean can remove it. We cannot remove the directory directly - # since it is referenced after $install_cmd -e. - mkdir -p .tmp - PROJECT_DIR=$(/bin/mktemp -d -p $(pwd)/.tmp) - pushd $PROJECT_DIR - $ZUUL_CLONER --cache-dir \ - /opt/git \ - --branch ${BRANCH_NAME} \ - git://git.openstack.org \ - openstack/${PROJ} - cd openstack/${PROJ} - $install_cmd -e . - popd -else - echo "PIP HARDCODE" > /tmp/tox_install-${PROJ}.txt - GIT_REPO="https://git.openstack.org/openstack/${PROJ}" - SRC_DIR="$VIRTUAL_ENV/src/${PROJ}" - git clone --depth 1 --branch $BRANCH_NAME $GIT_REPO $SRC_DIR - $install_cmd -U -e $SRC_DIR -fi diff --git a/tools/with_venv.sh b/tools/with_venv.sh deleted file mode 100755 index dea5c5fc2..000000000 --- a/tools/with_venv.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -TOOLS=`dirname $0` -VENV=$TOOLS/../.venv -source $VENV/bin/activate && "$@" diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 736e1603f..000000000 --- a/tox.ini +++ /dev/null @@ -1,155 +0,0 @@ -[tox] -envlist = docs,py35,py27,pep8 -minversion = 1.6 -skipsdist = True - -[testenv] -setenv = VIRTUAL_ENV={envdir} - PYTHONWARNINGS=default::DeprecationWarning -passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY - OS_FAIL_ON_MISSING_DEPS OS_POST_MORTEM_DEBUGGER TRACE_FAILONLY - OS_TEST_DBAPI_ADMIN_CONNECTION -usedevelop = True -install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -whitelist_externals = bash -commands = {toxinidir}/tools/ostestr_compat_shim.sh {posargs} -# there is also secret magic in ostestr which lets you run in a fail only -# mode. To do this define the TRACE_FAILONLY environmental variable. 
- -[testenv:dsvm] -# Fake job to define environment variables shared between dsvm jobs -setenv = OS_SUDO_TESTING=1 - OS_FAIL_ON_MISSING_DEPS=1 - OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} -commands = false - -[testenv:functional] -setenv = {[testenv]setenv} - OS_TEST_PATH=./networking_odl/tests/functional -deps = {[testenv]deps} - -r{toxinidir}/networking_odl/tests/functional/requirements.txt - -[testenv:dsvm-functional] -basepython = python2.7 -setenv = {[testenv:functional]setenv} - {[testenv:dsvm]setenv} -deps = - {[testenv:functional]deps} - -[testenv:fullstack] -setenv = {[testenv]setenv} - OS_TEST_TIMEOUT=180 - OS_TEST_PATH=./networking_odl/tests/fullstack -commands = ostestr '{posargs}' -deps = {[testenv]deps} - -r{toxinidir}/networking_odl/tests/fullstack/requirements.txt - -[testenv:dsvm-fullstack] -setenv = {[testenv:fullstack]setenv} - {[testenv:dsvm]setenv} -commands = ostestr '{posargs}' -deps = - {[testenv:fullstack]deps} - -[testenv:pep8] -commands = - flake8 - {toxinidir}/tools/coding-checks.sh --pylint '{posargs}' - doc8 doc/source devstack releasenotes/source rally-jobs - neutron-db-manage --subproject networking-odl check_migration - {[testenv:genconfig]commands} - {[testenv:bashate]commands} - {[testenv:capitald]commands} -whitelist_externals = - bash - mkdir - -[testenv:i18n] -commands = python ./tools/check_i18n.py ./networking_odl ./tools/i18n_cfg.py - -[testenv:venv] -# NOTE(yamahata): translation job can't use zuul-cloner or upper-constraints -install_command = pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} -commands = {posargs} - -[testenv:cover] -commands = - python setup.py test --coverage --coverage-package-name=networking_odl --testr-args='{posargs}' - coverage report --fail-under=80 --skip-covered - -[testenv:docs] -commands = - doc8 doc/source devstack releasenotes/source rally-jobs - python setup.py build_sphinx - -[testenv:debug] -commands = oslo_debug_helper -t networking_odl/tests {posargs} - -[hacking] -import_exceptions = networking_odl._i18n -local-check-factory = networking_odl.hacking.checks.factory - -[doc8] -# File extensions to check -extensions = .rst -# TODO(yamahata): doc8 work around. remove this when doc8 is fixed. -# doc8(actually docutils) handles relative path inclusion differently from sphinx. -# doc8 wrongly alerts invalid inclusion path with recursive relative inclusion -# https://sourceforge.net/p/docutils/bugs/211/ -ignore-path-errors=doc/source/devref/index.rst;D000 - -[flake8] -# TODO(dougwig) -- uncomment this to test for remaining linkages -# N530 direct neutron imports not allowed -show-source = True -ignore = N530 -# H106: Dont put vim configuration in source files -# H203: Use assertIs(Not)None to check for None -# H904: Delay string interpolations at logging calls -enable-extensions=H106,H203,H904 -exclude=./.*,dist,doc,releasenotes,*lib/python*,*egg,build,tools -import-order-style = pep8 - -[testenv:bashate] -commands = bash -c "find {toxinidir} \ - -not \( -type d -name .\* -prune \) \ - -type f \ - -name \*.sh \ -# E005 file does not begin with #! 
or have a .sh prefix -# E006 check for lines longer than 79 columns -# E042 local declaration hides errors -# E043 Arithmetic compound has inconsistent return semantics - -print0 | xargs -0 bashate -v -iE006 -eE005,E042,E043" -whitelist_externals = bash - -[testenv:capitald] -usedevelop = False -skip_install = True -deps = -# Check if "Opendaylight" word is in any file -# Only "OpenDaylight" (with uppercase 'D') should be used -commands = bash -c "! grep \ - --exclude-dir='.*' \ - --exclude-dir='cover' \ - --exclude-dir='__pycache__' \ - --exclude='tox.ini' \ - --exclude='ChangeLog' \ - --exclude='*.py' \ - --exclude='*.pyc' \ - --recursive \ - --line-number \ - Opendaylight \ - {toxinidir}" -whitelist_externals = bash - -[testenv:genconfig] -deps = -r{toxinidir}/requirements.txt -commands = - mkdir -p etc/neutron/plugins/ml2 - oslo-config-generator --namespace ml2_odl --output-file etc/neutron/plugins/ml2/ml2_conf_odl.ini.sample -whitelist_externals = mkdir - -[testenv:releasenotes] -commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html diff --git a/vagrant/README.rst b/vagrant/README.rst deleted file mode 100644 index ffc0f1b42..000000000 --- a/vagrant/README.rst +++ /dev/null @@ -1,29 +0,0 @@ -======= -Vagrant -======= - - Devstack with Vagrant is to used to deploy OpenStack with ODL. - -Setup Proxy(Optional) ---------------------- - - If your network is behind a firwall, you can update SOCKS5_IP/SOCKS5_PORT and - run ./setup_proxy.sh. - -Vagrant Setup -------------- - -# sudo apt-get install -y virtualbox -# wget --no-check-certificate https://releases.hashicorp.com/vagrant/1.8.6/vagrant_1.8.6_x86_64.deb -# sudo dpkg -i vagrant_1.8.6_x86_64.deb - -Vagrant Cleanup ---------------- - -vagrant destroy -f - - -Integration ------------ - -.. include:: ../../../vagrant/integration/multinode/README.rst diff --git a/vagrant/functional/Vagrantfile b/vagrant/functional/Vagrantfile deleted file mode 100644 index e785843ae..000000000 --- a/vagrant/functional/Vagrantfile +++ /dev/null @@ -1,23 +0,0 @@ -VAGRANTFILE_API_VERSION = "2" - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - if Vagrant.has_plugin?("vagrant-cachier") - config.cache.scope = :box - end - - # share networking-odl - config.vm.synced_folder File.expand_path("../.."), "/home/vagrant/networking-odl" - - config.vm.box = "ubuntu/xenial64" - # config.vm.box = "bento/ubuntu-16.04" - config.vm.network 'private_network', ip: '192.168.0.10' - config.vm.provision "shell", path: "setup-minimum.sh", privileged: true - config.vm.provision "shell", path: "reproduce.sh", privileged: true - - # Increase the memory for the VM. If you need to run devstack, this needs - # to be at least 8192 - config.vm.provider "virtualbox" do |v| - v.memory = 8192 - v.cpus = 4 - end -end diff --git a/vagrant/functional/config-override.sh b/vagrant/functional/config-override.sh deleted file mode 100755 index 6bbc61c0f..000000000 --- a/vagrant/functional/config-override.sh +++ /dev/null @@ -1,38 +0,0 @@ -#! /bin/bash -# -# override configurations of reproduce.sh -# - -export NETWORKING_ODL_DIR=/home/vagrant/networking-odl - -# Adjust path to scripts in networking-odl -pre_test_hook () -{ - . $NETWORKING_ODL_DIR/devstack/pre_test_hook.sh -} -declare -fx pre_test_hook -gate_hook () -{ - bash -xe $NETWORKING_ODL_DIR/networking_odl/tests/contrib/gate_hook.sh dsvm-functional -} -declare -fx gate_hook -post_test_hook () -{ - # Don't run tests. 
- sudo chown -R stack:stack $BASE/new - # sudo -H -u stack tox -e dsvm-function - - # bash -xe $NETWORKING_ODL_DIR/networking_odl/tests/contrib/post_test_hook.sh dsvm-functional dsvm-functional -} -declare -fx post_test_hook - -# we don't need most of projects. networking-odl isn't needed. -export DEVSTACK_LOCAL_CONFIG="" -export DEVSTACK_GATE_SETTINGS="$NETWORKING_ODL_DIR/devstack/devstackgaterc" -export PROJECTS="" -export OVERRIDE_ENABLED_SERVICES="odl-server" -export DEVSTACK_GATE_PROJECTS_OVERRIDE -DEVSTACK_GATE_PROJECTS_OVERRIDE="openstack-infra/devstack-gate" -DEVSTACK_GATE_PROJECTS_OVERRIDE="openstack-dev/devstack $DEVSTACK_GATE_PROJECTS_OVERRIDE" -DEVSTACK_GATE_PROJECTS_OVERRIDE="openstack/requirements $DEVSTACK_GATE_PROJECTS_OVERRIDE" -export ODL_RELEASE_BASE=carbon-snapshot diff --git a/vagrant/functional/reproduce.sh b/vagrant/functional/reproduce.sh deleted file mode 100755 index 1ce018c80..000000000 --- a/vagrant/functional/reproduce.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/bash -xe -# -# Script to reproduce devstack-gate run. -# -# Prerequisites: -# - Fresh install of current Ubuntu LTS, with basic internet access. -# Note we can and do run devstack-gate on other distros double check -# where your job ran (will be recorded in console.html) to reproduce -# as accurately as possible. -# - Must have python-all-dev, build-essential, git, libssl-dev installed -# from apt, or their equivalents on other distros. -# - Must have virtualenv installed from pip -# - Must be run as root -# - -exec 0 clonemap.yaml << IEOF -clonemap: - - name: openstack-infra/devstack-gate - dest: devstack-gate -IEOF - -/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git git://git.openstack.org openstack-infra/devstack-gate - -cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh -./safe-devstack-vm-gate-wrap.sh diff --git a/vagrant/functional/setup-minimum.sh b/vagrant/functional/setup-minimum.sh deleted file mode 100755 index a7bb2a75f..000000000 --- a/vagrant/functional/setup-minimum.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -xe -# -# Script to install minimum environment to run reproduce.sh -# - -# install pre required packages -apt-get install --yes python-pip -pip install --upgrade pip -pip install --upgrade setuptools -pip install --upgrade virtualenv -pip install --upgrade tox diff --git a/vagrant/integration/multinode/README.rst b/vagrant/integration/multinode/README.rst deleted file mode 100644 index b024723c8..000000000 --- a/vagrant/integration/multinode/README.rst +++ /dev/null @@ -1,25 +0,0 @@ -======= -vagrant -======= - - It is verified to work in the env: - Host: Ubuntu 16.04 desktop 64bit with 16G memory & 256G disk - Vagrant: 1.8.6 - Virtualbox: 5.0.24 - -OpenStack Setup ---------------- - -download primary & subnode configuration from jenkins log. example: -# curl http://logs.openstack.org/22/408422/1/check/gate-tempest-dsvm-networking-odl-multinode-carbon-snapshot-nv/ef988ee/logs/localrc.txt.gz > control.conf -# curl http://logs.openstack.org/22/408422/1/check/gate-tempest-dsvm-networking-odl-multinode-carbon-snapshot-nv/ef988ee/logs/subnode-2/localrc.txt.gz > compute.conf -# vagrant up - -Note: we already include control.conf & compute.conf in this example. 
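As a quick sanity check before running Tempest (a sketch only; the node names
and the br-ex/vxlan wiring come from the Vagrantfile and setup scripts in this
directory):

# vagrant status
# vagrant ssh compute -c 'sudo ovs-vsctl show'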
- -Run Tempest ------------ - -# vagrant ssh control -# cd tempest -# tempest run --regex tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_mtu_sized_frames diff --git a/vagrant/integration/multinode/Vagrantfile b/vagrant/integration/multinode/Vagrantfile deleted file mode 100644 index c8907e5da..000000000 --- a/vagrant/integration/multinode/Vagrantfile +++ /dev/null @@ -1,24 +0,0 @@ -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! - -Vagrant.configure("2") do |config| - - config.vm.box = "bento/ubuntu-16.04" - - config.vm.provider :virtualbox do |vb| - vb.customize ["modifyvm", :id, "--memory", 8192] - vb.customize ["modifyvm", :id, "--cpus", 4] - vb.customize "post-boot",["controlvm", :id, "setlinkstate1", "on"] - end - - config.vm.define "control" do |config| - config.vm.hostname = "control" - config.vm.network "private_network", ip: "192.168.0.10" - config.vm.provision "shell", path: "setup_control.sh", privileged: false - end - - config.vm.define vm_name = "compute" do |config| - config.vm.hostname = vm_name - config.vm.network "private_network", ip: "192.168.0.20" - config.vm.provision "shell", path: "setup_compute.sh", privileged: false - end -end diff --git a/vagrant/integration/multinode/compute.conf b/vagrant/integration/multinode/compute.conf deleted file mode 100644 index aadf69d0b..000000000 --- a/vagrant/integration/multinode/compute.conf +++ /dev/null @@ -1,57 +0,0 @@ -NETWORK_GATEWAY=10.1.0.1 -USE_SCREEN=False -DEST=/opt/stack/new -# move DATA_DIR outside of DEST to keep DEST a bit cleaner -DATA_DIR=/opt/stack/data -ACTIVE_TIMEOUT=90 -BOOT_TIMEOUT=90 -ASSOCIATE_TIMEOUT=60 -TERMINATE_TIMEOUT=60 -MYSQL_PASSWORD=admin -DATABASE_PASSWORD=admin -RABBIT_PASSWORD=admin -ADMIN_PASSWORD=admin -SERVICE_PASSWORD=admin -SERVICE_TOKEN=ADMIN_TOKEN -# ERROR_ON_CLONE should never be set to FALSE in gate jobs. -# Setting up git trees must be done by zuul -# because it needs specific git references directly from gerrit -# to correctly do testing. Otherwise you are not testing -# the code you have posted for review. 
-SYSLOG=False -SCREEN_LOGDIR=/opt/stack/new/screen-logs -LOGFILE=/opt/stack/new/devstacklog.txt -VERBOSE=True -FIXED_RANGE=10.1.0.0/20 -IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20 -FLOATING_RANGE=172.24.5.0/24 -PUBLIC_NETWORK_GATEWAY=172.24.5.1 -FIXED_NETWORK_SIZE=4096 -LOG_COLOR=False -# Don't reset the requirements.txt files after g-r updates -CINDER_PERIODIC_INTERVAL=10 -export OS_NO_CACHE=True -# set this until all testing platforms have libvirt >= 1.2.11 -# see bug #1501558 -EBTABLES_RACE_FIX=True -PUBLIC_BRIDGE_MTU=1450 -CINDER_SECURE_DELETE=False -CINDER_VOLUME_CLEAR=none -VOLUME_BACKING_FILE_SIZE=24G -FORCE_CONFIG_DRIVE=False -NOVA_ALLOW_MOVE_TO_SAME_HOST=False -export LIVE_MIGRATION_AVAILABLE=True -export USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=True -SERVICE_HOST=10.210.32.97 -DATABASE_HOST=10.210.32.97 -DATABASE_TYPE=postgresql -GLANCE_HOSTPORT=10.210.32.97:9292 -Q_HOST=10.210.32.97 - -NEUTRON_CREATE_INITIAL_NETWORKS=False -ENABLED_SERVICES=n-cpu,dstat,c-vol,c-bak,q-dhcp -DATABASE_HOST=$SERVICE_HOST -RABBIT_HOST=$SERVICE_HOST -ODL_MODE=compute -enable_plugin networking-odl git://git.openstack.org/openstack/networking-odl -HOST_IP=10.210.33.110 diff --git a/vagrant/integration/multinode/control.conf b/vagrant/integration/multinode/control.conf deleted file mode 100644 index 1d042d9a8..000000000 --- a/vagrant/integration/multinode/control.conf +++ /dev/null @@ -1,92 +0,0 @@ - -IS_GATE=True - -# Set here the ODL release to use for the Gate job -# By default, it should take latest release -# ODL_RELEASE=carbon-snapshot-0.6.0 - -# Set here which driver, v1 or v2 driver -# By default, it is V2 driver, v1 driver is gettin -# ODL_V2DRIVER=False - -# Set timeout in seconds for http client to ODL neutron northbound -# ODL_TIMEOUT=60 - -# Set here which port binding controller -# There is a patch to remove network-topology, by default it will be -# pseudo-agent -# ODL_PORT_BINDING_CONTROLLER=network-topology - -# Set here which ODL openstack service provider to use -ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-ovsdb-openstack,odl-neutron-logger - -# Switch to using the ODL's L3 implementation -ODL_L3=True - -# TODO(yamahata): only for legacy netvirt -Q_USE_PUBLIC_VETH=True -Q_PUBLIC_VETH_EX=veth-pub-ex -Q_PUBLIC_VETH_INT=veth-pub-int -ODL_PROVIDER_MAPPINGS=br-ex:${Q_PUBLIC_VETH_INT} - -# Enable debug logs for odl ovsdb -ODL_NETVIRT_DEBUG_LOGS=True - -NETWORK_GATEWAY=10.1.0.1 -USE_SCREEN=False -DEST=/opt/stack/new -# move DATA_DIR outside of DEST to keep DEST a bit cleaner -DATA_DIR=/opt/stack/data -ACTIVE_TIMEOUT=90 -BOOT_TIMEOUT=90 -ASSOCIATE_TIMEOUT=60 -TERMINATE_TIMEOUT=60 -MYSQL_PASSWORD=admin -DATABASE_PASSWORD=admin -RABBIT_PASSWORD=admin -ADMIN_PASSWORD=admin -SERVICE_PASSWORD=admin -SERVICE_TOKEN=ADMIN_TOKEN -ROOTSLEEP=0 -# ERROR_ON_CLONE should never be set to FALSE in gate jobs. -# Setting up git trees must be done by zuul -# because it needs specific git references directly from gerrit -# to correctly do testing. Otherwise you are not testing -# the code you have posted for review. -ERROR_ON_CLONE=True -ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,cinder,dstat,g-api,g-reg,key,mysql,n-api,n-cond,n-cpu,n-crt,n-obj,n-sch,q-dhcp,q-meta,quantum,rabbit,tempest -# SKIP_EXERCISES=boot_from_volume,bundle,client-env,euca -# Screen console logs will capture service logs. 
-SYSLOG=False -SCREEN_LOGDIR=/opt/stack/new/screen-logs -LOGFILE=/opt/stack/new/devstacklog.txt -VERBOSE=True -FIXED_RANGE=10.1.0.0/20 -IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20 -FLOATING_RANGE=172.24.5.0/24 -PUBLIC_NETWORK_GATEWAY=172.24.5.1 -FIXED_NETWORK_SIZE=4096 -VIRT_DRIVER=libvirt -LOG_COLOR=False -# Don't reset the requirements.txt files after g-r updates -UNDO_REQUIREMENTS=False -CINDER_PERIODIC_INTERVAL=10 -export OS_NO_CACHE=True -# set this until all testing platforms have libvirt >= 1.2.11 -# see bug #1501558 -EBTABLES_RACE_FIX=True -PUBLIC_BRIDGE_MTU=1450 -CINDER_SECURE_DELETE=False -CINDER_VOLUME_CLEAR=none -LIBVIRT_TYPE=qemu -VOLUME_BACKING_FILE_SIZE=24G -FORCE_CONFIG_DRIVE=False -NOVA_ALLOW_MOVE_TO_SAME_HOST=False -export LIVE_MIGRATION_AVAILABLE=True -export USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=True -SERVICE_HOST=10.210.32.97 -HOST_IP=10.210.32.97 -enable_plugin networking-odl git://git.openstack.org/openstack/networking-odl -LIVE_MIGRATION_AVAILABLE=False -USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=False -LIBVIRT_TYPE=qemu diff --git a/vagrant/integration/multinode/setup_compute.sh b/vagrant/integration/multinode/setup_compute.sh deleted file mode 100755 index 9ded9bdb3..000000000 --- a/vagrant/integration/multinode/setup_compute.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -set -e - -#simulate devstack-gate -sudo apt-get update -y -sudo apt-get install -y git openvswitch-switch -sudo ovs-vsctl add-br br-ex -sudo ifconfig br-ex 172.24.5.2/24 -sudo ovs-vsctl add-port br-ex vxlan -- set Interface vxlan type=vxlan options:local_ip=192.168.0.20 options:remote_ip=192.168.0.10 options:dst_port=8888 - -sudo rm -rf /opt/stack; -sudo mkdir -p /opt/stack -sudo chown vagrant /opt/stack - -git clone https://github.com/openstack-dev/devstack -cd devstack -cp /vagrant/compute.conf local.conf -shost=`grep -ri 'SERVICE_HOST=' local.conf | cut -f2 -d'='` -sed -i -e "1i[[local|localrc]]" \ - -e "s/ERROR_ON_CLONE=.*/ERROR_ON_CLONE=False/" \ - -e "s/$shost/192.168.0.10/" \ - -e "s/HOST_IP=.*/HOST_IP=192.168.0.20/" \ - local.conf -./stack.sh - -echo "vagrant ssh control -c 'cd tempest; tempest run'" diff --git a/vagrant/integration/multinode/setup_control.sh b/vagrant/integration/multinode/setup_control.sh deleted file mode 100755 index 6aadb78f9..000000000 --- a/vagrant/integration/multinode/setup_control.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -set -e -#simulate devstack-gate -sudo apt-get update -y -sudo apt-get install -y git openvswitch-switch -sudo ovs-vsctl add-br br-ex -sudo ifconfig br-ex 172.24.5.1/24 -sudo ovs-vsctl add-port br-ex vxlan -- set Interface vxlan type=vxlan options:local_ip=192.168.0.10 options:remote_ip=192.168.0.20 options:dst_port=8888 - -sudo rm -rf /opt/stack -sudo mkdir -p /opt/stack -sudo chown vagrant /opt/stack - -git clone https://github.com/openstack-dev/devstack -cd devstack -cp /vagrant/control.conf local.conf -shost=`grep -ri 'SERVICE_HOST=' local.conf | cut -f2 -d'='` -sed -i -e "1i[[local|localrc]]" \ - -e "s/ERROR_ON_CLONE=.*/ERROR_ON_CLONE=False/" \ - -e "s/$shost/192.168.0.10/" \ - local.conf -./stack.sh - -tempest init ~/tempest -cp /opt/stack/new/tempest/etc/tempest.conf ~/tempest/etc diff --git a/vagrant/setup_proxy.sh b/vagrant/setup_proxy.sh deleted file mode 100755 index b8ccb8a0e..000000000 --- a/vagrant/setup_proxy.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/sh - -SOCKS5_IP=${SOCKS5_IP:-127.0.0.1} -SOCKS5_PORT=${SOCKS5:-1080} -RED_TCPORT=${RED_TCPORT:-6666} - -sudo apt-get update -y -sudo apt-get install redsocks -y - -cat <